1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term * 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 * EMULEX and SLI are trademarks of Emulex. * 8 * www.broadcom.com * 9 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 10 * * 11 * This program is free software; you can redistribute it and/or * 12 * modify it under the terms of version 2 of the GNU General * 13 * Public License as published by the Free Software Foundation. * 14 * This program is distributed in the hope that it will be useful. * 15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 19 * TO BE LEGALLY INVALID. See the GNU General Public License for * 20 * more details, a copy of which can be found in the file COPYING * 21 * included with this package. * 22 *******************************************************************/ 23 /* See Fibre Channel protocol T11 FC-LS for details */ 24 #include <linux/blkdev.h> 25 #include <linux/pci.h> 26 #include <linux/slab.h> 27 #include <linux/interrupt.h> 28 #include <linux/delay.h> 29 30 #include <scsi/scsi.h> 31 #include <scsi/scsi_device.h> 32 #include <scsi/scsi_host.h> 33 #include <scsi/scsi_transport_fc.h> 34 #include <uapi/scsi/fc/fc_fs.h> 35 #include <uapi/scsi/fc/fc_els.h> 36 37 #include "lpfc_hw4.h" 38 #include "lpfc_hw.h" 39 #include "lpfc_sli.h" 40 #include "lpfc_sli4.h" 41 #include "lpfc_nl.h" 42 #include "lpfc_disc.h" 43 #include "lpfc_scsi.h" 44 #include "lpfc.h" 45 #include "lpfc_logmsg.h" 46 #include "lpfc_crtn.h" 47 #include "lpfc_vport.h" 48 #include "lpfc_debugfs.h" 49 50 static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *, 51 struct lpfc_iocbq *); 52 static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *, 53 struct lpfc_iocbq *); 54 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport); 55 static int lpfc_issue_els_fdisc(struct lpfc_vport *vport, 56 struct lpfc_nodelist *ndlp, uint8_t retry); 57 static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba, 58 struct lpfc_iocbq *iocb); 59 static void lpfc_cmpl_els_edc(struct lpfc_hba *phba, 60 struct lpfc_iocbq *cmdiocb, 61 struct lpfc_iocbq *rspiocb); 62 static void lpfc_cmpl_els_uvem(struct lpfc_hba *, struct lpfc_iocbq *, 63 struct lpfc_iocbq *); 64 65 static int lpfc_max_els_tries = 3; 66 67 static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport); 68 static void lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max); 69 static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid); 70 71 /** 72 * lpfc_els_chk_latt - Check host link attention event for a vport 73 * @vport: pointer to a host virtual N_Port data structure. 74 * 75 * This routine checks whether there is an outstanding host link 76 * attention event during the discovery process with the @vport. It is done 77 * by reading the HBA's Host Attention (HA) register. 
If there is any host 78 * link attention events during this @vport's discovery process, the @vport 79 * shall be marked as FC_ABORT_DISCOVERY, a host link attention clear shall 80 * be issued if the link state is not already in host link cleared state, 81 * and a return code shall indicate whether the host link attention event 82 * had happened. 83 * 84 * Note that, if either the host link is in state LPFC_LINK_DOWN or @vport 85 * state in LPFC_VPORT_READY, the request for checking host link attention 86 * event will be ignored and a return code shall indicate no host link 87 * attention event had happened. 88 * 89 * Return codes 90 * 0 - no host link attention event happened 91 * 1 - host link attention event happened 92 **/ 93 int 94 lpfc_els_chk_latt(struct lpfc_vport *vport) 95 { 96 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 97 struct lpfc_hba *phba = vport->phba; 98 uint32_t ha_copy; 99 100 if (vport->port_state >= LPFC_VPORT_READY || 101 phba->link_state == LPFC_LINK_DOWN || 102 phba->sli_rev > LPFC_SLI_REV3) 103 return 0; 104 105 /* Read the HBA Host Attention Register */ 106 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 107 return 1; 108 109 if (!(ha_copy & HA_LATT)) 110 return 0; 111 112 /* Pending Link Event during Discovery */ 113 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 114 "0237 Pending Link Event during " 115 "Discovery: State x%x\n", 116 phba->pport->port_state); 117 118 /* CLEAR_LA should re-enable link attention events and 119 * we should then immediately take a LATT event. The 120 * LATT processing should call lpfc_linkdown() which 121 * will cleanup any left over in-progress discovery 122 * events. 123 */ 124 spin_lock_irq(shost->host_lock); 125 vport->fc_flag |= FC_ABORT_DISCOVERY; 126 spin_unlock_irq(shost->host_lock); 127 128 if (phba->link_state != LPFC_CLEAR_LA) 129 lpfc_issue_clear_la(phba, vport); 130 131 return 1; 132 } 133 134 /** 135 * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure 136 * @vport: pointer to a host virtual N_Port data structure. 137 * @expectRsp: flag indicating whether response is expected. 138 * @cmdSize: size of the ELS command. 139 * @retry: number of retries to the command IOCB when it fails. 140 * @ndlp: pointer to a node-list data structure. 141 * @did: destination identifier. 142 * @elscmd: the ELS command code. 143 * 144 * This routine is used for allocating a lpfc-IOCB data structure from 145 * the driver lpfc-IOCB free-list and prepare the IOCB with the parameters 146 * passed into the routine for discovery state machine to issue an Extended 147 * Link Service (ELS) commands. It is a generic lpfc-IOCB allocation 148 * and preparation routine that is used by all the discovery state machine 149 * routines and the ELS command-specific fields will be later set up by 150 * the individual discovery machine routines after calling this routine 151 * allocating and preparing a generic IOCB data structure. It fills in the 152 * Buffer Descriptor Entries (BDEs), allocates buffers for both command 153 * payload and response payload (if expected). The reference count on the 154 * ndlp is incremented by 1 and the reference to the ndlp is put into 155 * context1 of the IOCB data structure for this IOCB to hold the ndlp 156 * reference for the command's callback function to access later. 
157 * 158 * Return code 159 * Pointer to the newly allocated/prepared els iocb data structure 160 * NULL - when els iocb data structure allocation/preparation failed 161 **/ 162 struct lpfc_iocbq * 163 lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, 164 uint16_t cmdSize, uint8_t retry, 165 struct lpfc_nodelist *ndlp, uint32_t did, 166 uint32_t elscmd) 167 { 168 struct lpfc_hba *phba = vport->phba; 169 struct lpfc_iocbq *elsiocb; 170 struct lpfc_dmabuf *pcmd, *prsp, *pbuflist; 171 struct ulp_bde64 *bpl; 172 IOCB_t *icmd; 173 174 175 if (!lpfc_is_link_up(phba)) 176 return NULL; 177 178 /* Allocate buffer for command iocb */ 179 elsiocb = lpfc_sli_get_iocbq(phba); 180 181 if (elsiocb == NULL) 182 return NULL; 183 184 /* 185 * If this command is for fabric controller and HBA running 186 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames. 187 */ 188 if ((did == Fabric_DID) && 189 (phba->hba_flag & HBA_FIP_SUPPORT) && 190 ((elscmd == ELS_CMD_FLOGI) || 191 (elscmd == ELS_CMD_FDISC) || 192 (elscmd == ELS_CMD_LOGO))) 193 switch (elscmd) { 194 case ELS_CMD_FLOGI: 195 elsiocb->iocb_flag |= 196 ((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT) 197 & LPFC_FIP_ELS_ID_MASK); 198 break; 199 case ELS_CMD_FDISC: 200 elsiocb->iocb_flag |= 201 ((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT) 202 & LPFC_FIP_ELS_ID_MASK); 203 break; 204 case ELS_CMD_LOGO: 205 elsiocb->iocb_flag |= 206 ((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT) 207 & LPFC_FIP_ELS_ID_MASK); 208 break; 209 } 210 else 211 elsiocb->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK; 212 213 icmd = &elsiocb->iocb; 214 215 /* fill in BDEs for command */ 216 /* Allocate buffer for command payload */ 217 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 218 if (pcmd) 219 pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys); 220 if (!pcmd || !pcmd->virt) 221 goto els_iocb_free_pcmb_exit; 222 223 INIT_LIST_HEAD(&pcmd->list); 224 225 /* Allocate buffer for response payload */ 226 if (expectRsp) { 227 prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 228 if (prsp) 229 prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 230 &prsp->phys); 231 if (!prsp || !prsp->virt) 232 goto els_iocb_free_prsp_exit; 233 INIT_LIST_HEAD(&prsp->list); 234 } else 235 prsp = NULL; 236 237 /* Allocate buffer for Buffer ptr list */ 238 pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 239 if (pbuflist) 240 pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 241 &pbuflist->phys); 242 if (!pbuflist || !pbuflist->virt) 243 goto els_iocb_free_pbuf_exit; 244 245 INIT_LIST_HEAD(&pbuflist->list); 246 247 if (expectRsp) { 248 icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys); 249 icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys); 250 icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; 251 icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64)); 252 253 icmd->un.elsreq64.remoteID = did; /* DID */ 254 icmd->ulpCommand = CMD_ELS_REQUEST64_CR; 255 if (elscmd == ELS_CMD_FLOGI) 256 icmd->ulpTimeout = FF_DEF_RATOV * 2; 257 else if (elscmd == ELS_CMD_LOGO) 258 icmd->ulpTimeout = phba->fc_ratov; 259 else 260 icmd->ulpTimeout = phba->fc_ratov * 2; 261 } else { 262 icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys); 263 icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys); 264 icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; 265 icmd->un.xseq64.bdl.bdeSize = sizeof(struct ulp_bde64); 266 icmd->un.xseq64.xmit_els_remoteID = did; /* DID */ 267 icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX; 268 } 269 icmd->ulpBdeCount = 1; 270 icmd->ulpLe = 1; 271 
icmd->ulpClass = CLASS3; 272 273 /* 274 * If we have NPIV enabled, we want to send ELS traffic by VPI. 275 * For SLI4, since the driver controls VPIs we also want to include 276 * all ELS pt2pt protocol traffic as well. 277 */ 278 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) || 279 ((phba->sli_rev == LPFC_SLI_REV4) && 280 (vport->fc_flag & FC_PT2PT))) { 281 282 if (expectRsp) { 283 icmd->un.elsreq64.myID = vport->fc_myDID; 284 285 /* For ELS_REQUEST64_CR, use the VPI by default */ 286 icmd->ulpContext = phba->vpi_ids[vport->vpi]; 287 } 288 289 icmd->ulpCt_h = 0; 290 /* The CT field must be 0=INVALID_RPI for the ECHO cmd */ 291 if (elscmd == ELS_CMD_ECHO) 292 icmd->ulpCt_l = 0; /* context = invalid RPI */ 293 else 294 icmd->ulpCt_l = 1; /* context = VPI */ 295 } 296 297 bpl = (struct ulp_bde64 *) pbuflist->virt; 298 bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys)); 299 bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys)); 300 bpl->tus.f.bdeSize = cmdSize; 301 bpl->tus.f.bdeFlags = 0; 302 bpl->tus.w = le32_to_cpu(bpl->tus.w); 303 304 if (expectRsp) { 305 bpl++; 306 bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys)); 307 bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys)); 308 bpl->tus.f.bdeSize = FCELSSIZE; 309 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 310 bpl->tus.w = le32_to_cpu(bpl->tus.w); 311 } 312 313 elsiocb->context2 = pcmd; 314 elsiocb->context3 = pbuflist; 315 elsiocb->retry = retry; 316 elsiocb->vport = vport; 317 elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT; 318 319 if (prsp) { 320 list_add(&prsp->list, &pcmd->list); 321 } 322 if (expectRsp) { 323 /* Xmit ELS command <elsCmd> to remote NPORT <did> */ 324 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 325 "0116 Xmit ELS command x%x to remote " 326 "NPORT x%x I/O tag: x%x, port state:x%x " 327 "rpi x%x fc_flag:x%x nlp_flag:x%x vport:x%p\n", 328 elscmd, did, elsiocb->iotag, 329 vport->port_state, ndlp->nlp_rpi, 330 vport->fc_flag, ndlp->nlp_flag, vport); 331 } else { 332 /* Xmit ELS response <elsCmd> to remote NPORT <did> */ 333 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 334 "0117 Xmit ELS response x%x to remote " 335 "NPORT x%x I/O tag: x%x, size: x%x " 336 "port_state x%x rpi x%x fc_flag x%x\n", 337 elscmd, ndlp->nlp_DID, elsiocb->iotag, 338 cmdSize, vport->port_state, 339 ndlp->nlp_rpi, vport->fc_flag); 340 } 341 return elsiocb; 342 343 els_iocb_free_pbuf_exit: 344 if (expectRsp) 345 lpfc_mbuf_free(phba, prsp->virt, prsp->phys); 346 kfree(pbuflist); 347 348 els_iocb_free_prsp_exit: 349 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys); 350 kfree(prsp); 351 352 els_iocb_free_pcmb_exit: 353 kfree(pcmd); 354 lpfc_sli_release_iocbq(phba, elsiocb); 355 return NULL; 356 } 357 358 /** 359 * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport 360 * @vport: pointer to a host virtual N_Port data structure. 361 * 362 * This routine issues a fabric registration login for a @vport. An 363 * active ndlp node with Fabric_DID must already exist for this @vport. 364 * The routine invokes two mailbox commands to carry out fabric registration 365 * login through the HBA firmware: the first mailbox command requests the 366 * HBA to perform link configuration for the @vport; and the second mailbox 367 * command requests the HBA to perform the actual fabric registration login 368 * with the @vport. 
369 * 370 * Return code 371 * 0 - successfully issued fabric registration login for @vport 372 * -ENXIO -- failed to issue fabric registration login for @vport 373 **/ 374 int 375 lpfc_issue_fabric_reglogin(struct lpfc_vport *vport) 376 { 377 struct lpfc_hba *phba = vport->phba; 378 LPFC_MBOXQ_t *mbox; 379 struct lpfc_dmabuf *mp; 380 struct lpfc_nodelist *ndlp; 381 struct serv_parm *sp; 382 int rc; 383 int err = 0; 384 385 sp = &phba->fc_fabparam; 386 ndlp = lpfc_findnode_did(vport, Fabric_DID); 387 if (!ndlp) { 388 err = 1; 389 goto fail; 390 } 391 392 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 393 if (!mbox) { 394 err = 2; 395 goto fail; 396 } 397 398 vport->port_state = LPFC_FABRIC_CFG_LINK; 399 lpfc_config_link(phba, mbox); 400 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 401 mbox->vport = vport; 402 403 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 404 if (rc == MBX_NOT_FINISHED) { 405 err = 3; 406 goto fail_free_mbox; 407 } 408 409 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 410 if (!mbox) { 411 err = 4; 412 goto fail; 413 } 414 rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, 415 ndlp->nlp_rpi); 416 if (rc) { 417 err = 5; 418 goto fail_free_mbox; 419 } 420 421 mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login; 422 mbox->vport = vport; 423 /* increment the reference count on ndlp to hold reference 424 * for the callback routine. 425 */ 426 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 427 if (!mbox->ctx_ndlp) { 428 err = 6; 429 goto fail_no_ndlp; 430 } 431 432 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 433 if (rc == MBX_NOT_FINISHED) { 434 err = 7; 435 goto fail_issue_reg_login; 436 } 437 438 return 0; 439 440 fail_issue_reg_login: 441 /* decrement the reference count on ndlp just incremented 442 * for the failed mbox command. 443 */ 444 lpfc_nlp_put(ndlp); 445 fail_no_ndlp: 446 mp = (struct lpfc_dmabuf *)mbox->ctx_buf; 447 lpfc_mbuf_free(phba, mp->virt, mp->phys); 448 kfree(mp); 449 fail_free_mbox: 450 mempool_free(mbox, phba->mbox_mem_pool); 451 452 fail: 453 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 454 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 455 "0249 Cannot issue Register Fabric login: Err %d\n", 456 err); 457 return -ENXIO; 458 } 459 460 /** 461 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login 462 * @vport: pointer to a host virtual N_Port data structure. 463 * 464 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for 465 * the @vport. This mailbox command is necessary for SLI4 port only. 466 * 467 * Return code 468 * 0 - successfully issued REG_VFI for @vport 469 * A failure code otherwise. 
470 **/ 471 int 472 lpfc_issue_reg_vfi(struct lpfc_vport *vport) 473 { 474 struct lpfc_hba *phba = vport->phba; 475 LPFC_MBOXQ_t *mboxq = NULL; 476 struct lpfc_nodelist *ndlp; 477 struct lpfc_dmabuf *dmabuf = NULL; 478 int rc = 0; 479 480 /* move forward in case of SLI4 FC port loopback test and pt2pt mode */ 481 if ((phba->sli_rev == LPFC_SLI_REV4) && 482 !(phba->link_flag & LS_LOOPBACK_MODE) && 483 !(vport->fc_flag & FC_PT2PT)) { 484 ndlp = lpfc_findnode_did(vport, Fabric_DID); 485 if (!ndlp) { 486 rc = -ENODEV; 487 goto fail; 488 } 489 } 490 491 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 492 if (!mboxq) { 493 rc = -ENOMEM; 494 goto fail; 495 } 496 497 /* Supply CSP's only if we are fabric connect or pt-to-pt connect */ 498 if ((vport->fc_flag & FC_FABRIC) || (vport->fc_flag & FC_PT2PT)) { 499 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 500 if (!dmabuf) { 501 rc = -ENOMEM; 502 goto fail; 503 } 504 dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys); 505 if (!dmabuf->virt) { 506 rc = -ENOMEM; 507 goto fail; 508 } 509 memcpy(dmabuf->virt, &phba->fc_fabparam, 510 sizeof(struct serv_parm)); 511 } 512 513 vport->port_state = LPFC_FABRIC_CFG_LINK; 514 if (dmabuf) 515 lpfc_reg_vfi(mboxq, vport, dmabuf->phys); 516 else 517 lpfc_reg_vfi(mboxq, vport, 0); 518 519 mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi; 520 mboxq->vport = vport; 521 mboxq->ctx_buf = dmabuf; 522 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 523 if (rc == MBX_NOT_FINISHED) { 524 rc = -ENXIO; 525 goto fail; 526 } 527 return 0; 528 529 fail: 530 if (mboxq) 531 mempool_free(mboxq, phba->mbox_mem_pool); 532 if (dmabuf) { 533 if (dmabuf->virt) 534 lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys); 535 kfree(dmabuf); 536 } 537 538 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 539 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 540 "0289 Issue Register VFI failed: Err %d\n", rc); 541 return rc; 542 } 543 544 /** 545 * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login 546 * @vport: pointer to a host virtual N_Port data structure. 547 * 548 * This routine issues a UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for 549 * the @vport. This mailbox command is necessary for SLI4 port only. 550 * 551 * Return code 552 * 0 - successfully issued REG_VFI for @vport 553 * A failure code otherwise. 554 **/ 555 int 556 lpfc_issue_unreg_vfi(struct lpfc_vport *vport) 557 { 558 struct lpfc_hba *phba = vport->phba; 559 struct Scsi_Host *shost; 560 LPFC_MBOXQ_t *mboxq; 561 int rc; 562 563 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 564 if (!mboxq) { 565 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 566 "2556 UNREG_VFI mbox allocation failed" 567 "HBA state x%x\n", phba->pport->port_state); 568 return -ENOMEM; 569 } 570 571 lpfc_unreg_vfi(mboxq, vport); 572 mboxq->vport = vport; 573 mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl; 574 575 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 576 if (rc == MBX_NOT_FINISHED) { 577 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 578 "2557 UNREG_VFI issue mbox failed rc x%x " 579 "HBA state x%x\n", 580 rc, phba->pport->port_state); 581 mempool_free(mboxq, phba->mbox_mem_pool); 582 return -EIO; 583 } 584 585 shost = lpfc_shost_from_vport(vport); 586 spin_lock_irq(shost->host_lock); 587 vport->fc_flag &= ~FC_VFI_REGISTERED; 588 spin_unlock_irq(shost->host_lock); 589 return 0; 590 } 591 592 /** 593 * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean. 594 * @vport: pointer to a host virtual N_Port data structure. 
595 * @sp: pointer to service parameter data structure. 596 * 597 * This routine is called from FLOGI/FDISC completion handler functions. 598 * lpfc_check_clean_addr_bit return 1 when FCID/Fabric portname/ Fabric 599 * node nodename is changed in the completion service parameter else return 600 * 0. This function also set flag in the vport data structure to delay 601 * NP_Port discovery after the FLOGI/FDISC completion if Clean address bit 602 * in FLOGI/FDISC response is cleared and FCID/Fabric portname/ Fabric 603 * node nodename is changed in the completion service parameter. 604 * 605 * Return code 606 * 0 - FCID and Fabric Nodename and Fabric portname is not changed. 607 * 1 - FCID or Fabric Nodename or Fabric portname is changed. 608 * 609 **/ 610 static uint8_t 611 lpfc_check_clean_addr_bit(struct lpfc_vport *vport, 612 struct serv_parm *sp) 613 { 614 struct lpfc_hba *phba = vport->phba; 615 uint8_t fabric_param_changed = 0; 616 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 617 618 if ((vport->fc_prevDID != vport->fc_myDID) || 619 memcmp(&vport->fabric_portname, &sp->portName, 620 sizeof(struct lpfc_name)) || 621 memcmp(&vport->fabric_nodename, &sp->nodeName, 622 sizeof(struct lpfc_name)) || 623 (vport->vport_flag & FAWWPN_PARAM_CHG)) { 624 fabric_param_changed = 1; 625 vport->vport_flag &= ~FAWWPN_PARAM_CHG; 626 } 627 /* 628 * Word 1 Bit 31 in common service parameter is overloaded. 629 * Word 1 Bit 31 in FLOGI request is multiple NPort request 630 * Word 1 Bit 31 in FLOGI response is clean address bit 631 * 632 * If fabric parameter is changed and clean address bit is 633 * cleared delay nport discovery if 634 * - vport->fc_prevDID != 0 (not initial discovery) OR 635 * - lpfc_delay_discovery module parameter is set. 636 */ 637 if (fabric_param_changed && !sp->cmn.clean_address_bit && 638 (vport->fc_prevDID || phba->cfg_delay_discovery)) { 639 spin_lock_irq(shost->host_lock); 640 vport->fc_flag |= FC_DISC_DELAYED; 641 spin_unlock_irq(shost->host_lock); 642 } 643 644 return fabric_param_changed; 645 } 646 647 648 /** 649 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port 650 * @vport: pointer to a host virtual N_Port data structure. 651 * @ndlp: pointer to a node-list data structure. 652 * @sp: pointer to service parameter data structure. 653 * @irsp: pointer to the IOCB within the lpfc response IOCB. 654 * 655 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback 656 * function to handle the completion of a Fabric Login (FLOGI) into a fabric 657 * port in a fabric topology. It properly sets up the parameters to the @ndlp 658 * from the IOCB response. It also check the newly assigned N_Port ID to the 659 * @vport against the previously assigned N_Port ID. If it is different from 660 * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine 661 * is invoked on all the remaining nodes with the @vport to unregister the 662 * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin() 663 * is invoked to register login to the fabric. 
664 * 665 * Return code 666 * 0 - Success (currently, always return 0) 667 **/ 668 static int 669 lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 670 struct serv_parm *sp, IOCB_t *irsp) 671 { 672 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 673 struct lpfc_hba *phba = vport->phba; 674 struct lpfc_nodelist *np; 675 struct lpfc_nodelist *next_np; 676 uint8_t fabric_param_changed; 677 678 spin_lock_irq(shost->host_lock); 679 vport->fc_flag |= FC_FABRIC; 680 spin_unlock_irq(shost->host_lock); 681 682 phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov); 683 if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */ 684 phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000; 685 686 phba->fc_edtovResol = sp->cmn.edtovResolution; 687 phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000; 688 689 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 690 spin_lock_irq(shost->host_lock); 691 vport->fc_flag |= FC_PUBLIC_LOOP; 692 spin_unlock_irq(shost->host_lock); 693 } 694 695 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID; 696 memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name)); 697 memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name)); 698 ndlp->nlp_class_sup = 0; 699 if (sp->cls1.classValid) 700 ndlp->nlp_class_sup |= FC_COS_CLASS1; 701 if (sp->cls2.classValid) 702 ndlp->nlp_class_sup |= FC_COS_CLASS2; 703 if (sp->cls3.classValid) 704 ndlp->nlp_class_sup |= FC_COS_CLASS3; 705 if (sp->cls4.classValid) 706 ndlp->nlp_class_sup |= FC_COS_CLASS4; 707 ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | 708 sp->cmn.bbRcvSizeLsb; 709 710 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp); 711 if (fabric_param_changed) { 712 /* Reset FDMI attribute masks based on config parameter */ 713 if (phba->cfg_enable_SmartSAN || 714 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) { 715 /* Setup appropriate attribute masks */ 716 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR; 717 if (phba->cfg_enable_SmartSAN) 718 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR; 719 else 720 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; 721 } else { 722 vport->fdmi_hba_mask = 0; 723 vport->fdmi_port_mask = 0; 724 } 725 726 } 727 memcpy(&vport->fabric_portname, &sp->portName, 728 sizeof(struct lpfc_name)); 729 memcpy(&vport->fabric_nodename, &sp->nodeName, 730 sizeof(struct lpfc_name)); 731 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); 732 733 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { 734 if (sp->cmn.response_multiple_NPort) { 735 lpfc_printf_vlog(vport, KERN_WARNING, 736 LOG_ELS | LOG_VPORT, 737 "1816 FLOGI NPIV supported, " 738 "response data 0x%x\n", 739 sp->cmn.response_multiple_NPort); 740 spin_lock_irq(&phba->hbalock); 741 phba->link_flag |= LS_NPIV_FAB_SUPPORTED; 742 spin_unlock_irq(&phba->hbalock); 743 } else { 744 /* Because we asked f/w for NPIV it still expects us 745 to call reg_vnpid at least for the physical host */ 746 lpfc_printf_vlog(vport, KERN_WARNING, 747 LOG_ELS | LOG_VPORT, 748 "1817 Fabric does not support NPIV " 749 "- configuring single port mode.\n"); 750 spin_lock_irq(&phba->hbalock); 751 phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED; 752 spin_unlock_irq(&phba->hbalock); 753 } 754 } 755 756 /* 757 * For FC we need to do some special processing because of the SLI 758 * Port's default settings of the Common Service Parameters. 
759 */ 760 if ((phba->sli_rev == LPFC_SLI_REV4) && 761 (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)) { 762 /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */ 763 if (fabric_param_changed) 764 lpfc_unregister_fcf_prep(phba); 765 766 /* This should just update the VFI CSPs*/ 767 if (vport->fc_flag & FC_VFI_REGISTERED) 768 lpfc_issue_reg_vfi(vport); 769 } 770 771 if (fabric_param_changed && 772 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 773 774 /* If our NportID changed, we need to ensure all 775 * remaining NPORTs get unreg_login'ed. 776 */ 777 list_for_each_entry_safe(np, next_np, 778 &vport->fc_nodes, nlp_listp) { 779 if ((np->nlp_state != NLP_STE_NPR_NODE) || 780 !(np->nlp_flag & NLP_NPR_ADISC)) 781 continue; 782 spin_lock_irq(&np->lock); 783 np->nlp_flag &= ~NLP_NPR_ADISC; 784 spin_unlock_irq(&np->lock); 785 lpfc_unreg_rpi(vport, np); 786 } 787 lpfc_cleanup_pending_mbox(vport); 788 789 if (phba->sli_rev == LPFC_SLI_REV4) { 790 lpfc_sli4_unreg_all_rpis(vport); 791 lpfc_mbx_unreg_vpi(vport); 792 spin_lock_irq(shost->host_lock); 793 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 794 spin_unlock_irq(shost->host_lock); 795 } 796 797 /* 798 * For SLI3 and SLI4, the VPI needs to be reregistered in 799 * response to this fabric parameter change event. 800 */ 801 spin_lock_irq(shost->host_lock); 802 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 803 spin_unlock_irq(shost->host_lock); 804 } else if ((phba->sli_rev == LPFC_SLI_REV4) && 805 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 806 /* 807 * Driver needs to re-reg VPI in order for f/w 808 * to update the MAC address. 809 */ 810 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 811 lpfc_register_new_vport(phba, vport, ndlp); 812 return 0; 813 } 814 815 if (phba->sli_rev < LPFC_SLI_REV4) { 816 lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE); 817 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED && 818 vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) 819 lpfc_register_new_vport(phba, vport, ndlp); 820 else 821 lpfc_issue_fabric_reglogin(vport); 822 } else { 823 ndlp->nlp_type |= NLP_FABRIC; 824 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 825 if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) && 826 (vport->vpi_state & LPFC_VPI_REGISTERED)) { 827 lpfc_start_fdiscs(phba); 828 lpfc_do_scr_ns_plogi(phba, vport); 829 } else if (vport->fc_flag & FC_VFI_REGISTERED) 830 lpfc_issue_init_vpi(vport); 831 else { 832 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 833 "3135 Need register VFI: (x%x/%x)\n", 834 vport->fc_prevDID, vport->fc_myDID); 835 lpfc_issue_reg_vfi(vport); 836 } 837 } 838 return 0; 839 } 840 841 /** 842 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port 843 * @vport: pointer to a host virtual N_Port data structure. 844 * @ndlp: pointer to a node-list data structure. 845 * @sp: pointer to service parameter data structure. 846 * 847 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback 848 * function to handle the completion of a Fabric Login (FLOGI) into an N_Port 849 * in a point-to-point topology. First, the @vport's N_Port Name is compared 850 * with the received N_Port Name: if the @vport's N_Port Name is greater than 851 * the received N_Port Name lexicographically, this node shall assign local 852 * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and 853 * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise, 854 * this node shall just wait for the remote node to issue PLOGI and assign 855 * N_Port IDs. 
856 * 857 * Return code 858 * 0 - Success 859 * -ENXIO - Fail 860 **/ 861 static int 862 lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 863 struct serv_parm *sp) 864 { 865 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 866 struct lpfc_hba *phba = vport->phba; 867 LPFC_MBOXQ_t *mbox; 868 int rc; 869 870 spin_lock_irq(shost->host_lock); 871 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 872 vport->fc_flag |= FC_PT2PT; 873 spin_unlock_irq(shost->host_lock); 874 875 /* If we are pt2pt with another NPort, force NPIV off! */ 876 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED; 877 878 /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */ 879 if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) { 880 lpfc_unregister_fcf_prep(phba); 881 882 spin_lock_irq(shost->host_lock); 883 vport->fc_flag &= ~FC_VFI_REGISTERED; 884 spin_unlock_irq(shost->host_lock); 885 phba->fc_topology_changed = 0; 886 } 887 888 rc = memcmp(&vport->fc_portname, &sp->portName, 889 sizeof(vport->fc_portname)); 890 891 if (rc >= 0) { 892 /* This side will initiate the PLOGI */ 893 spin_lock_irq(shost->host_lock); 894 vport->fc_flag |= FC_PT2PT_PLOGI; 895 spin_unlock_irq(shost->host_lock); 896 897 /* 898 * N_Port ID cannot be 0, set our Id to LocalID 899 * the other side will be RemoteID. 900 */ 901 902 /* not equal */ 903 if (rc) 904 vport->fc_myDID = PT2PT_LocalID; 905 906 /* Decrement ndlp reference count indicating that ndlp can be 907 * safely released when other references to it are done. 908 */ 909 lpfc_nlp_put(ndlp); 910 911 ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID); 912 if (!ndlp) { 913 /* 914 * Cannot find existing Fabric ndlp, so allocate a 915 * new one 916 */ 917 ndlp = lpfc_nlp_init(vport, PT2PT_RemoteID); 918 if (!ndlp) 919 goto fail; 920 } 921 922 memcpy(&ndlp->nlp_portname, &sp->portName, 923 sizeof(struct lpfc_name)); 924 memcpy(&ndlp->nlp_nodename, &sp->nodeName, 925 sizeof(struct lpfc_name)); 926 /* Set state will put ndlp onto node list if not already done */ 927 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 928 spin_lock_irq(&ndlp->lock); 929 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 930 spin_unlock_irq(&ndlp->lock); 931 932 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 933 if (!mbox) 934 goto fail; 935 936 lpfc_config_link(phba, mbox); 937 938 mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link; 939 mbox->vport = vport; 940 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 941 if (rc == MBX_NOT_FINISHED) { 942 mempool_free(mbox, phba->mbox_mem_pool); 943 goto fail; 944 } 945 } else { 946 /* This side will wait for the PLOGI, decrement ndlp reference 947 * count indicating that ndlp can be released when other 948 * references to it are done. 949 */ 950 lpfc_nlp_put(ndlp); 951 952 /* Start discovery - this should just do CLEAR_LA */ 953 lpfc_disc_start(vport); 954 } 955 956 return 0; 957 fail: 958 return -ENXIO; 959 } 960 961 /** 962 * lpfc_cmpl_els_flogi - Completion callback function for flogi 963 * @phba: pointer to lpfc hba data structure. 964 * @cmdiocb: pointer to lpfc command iocb data structure. 965 * @rspiocb: pointer to lpfc response iocb data structure. 966 * 967 * This routine is the top-level completion callback function for issuing 968 * a Fabric Login (FLOGI) command. If the response IOCB reported error, 969 * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. 
If 970 * retry has been made (either immediately or delayed with lpfc_els_retry() 971 * returning 1), the command IOCB will be released and function returned. 972 * If the retry attempt has been given up (possibly reach the maximum 973 * number of retries), one additional decrement of ndlp reference shall be 974 * invoked before going out after releasing the command IOCB. This will 975 * actually release the remote node (Note, lpfc_els_free_iocb() will also 976 * invoke one decrement of ndlp reference count). If no error reported in 977 * the IOCB status, the command Port ID field is used to determine whether 978 * this is a point-to-point topology or a fabric topology: if the Port ID 979 * field is assigned, it is a fabric topology; otherwise, it is a 980 * point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or 981 * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the 982 * specific topology completion conditions. 983 **/ 984 static void 985 lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 986 struct lpfc_iocbq *rspiocb) 987 { 988 struct lpfc_vport *vport = cmdiocb->vport; 989 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 990 IOCB_t *irsp = &rspiocb->iocb; 991 struct lpfc_nodelist *ndlp = cmdiocb->context1; 992 struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp; 993 struct serv_parm *sp; 994 uint16_t fcf_index; 995 int rc; 996 997 /* Check to see if link went down during discovery */ 998 if (lpfc_els_chk_latt(vport)) { 999 /* One additional decrement on node reference count to 1000 * trigger the release of the node 1001 */ 1002 lpfc_nlp_put(ndlp); 1003 goto out; 1004 } 1005 1006 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1007 "FLOGI cmpl: status:x%x/x%x state:x%x", 1008 irsp->ulpStatus, irsp->un.ulpWord[4], 1009 vport->port_state); 1010 1011 if (irsp->ulpStatus) { 1012 /* 1013 * In case of FIP mode, perform roundrobin FCF failover 1014 * due to new FCF discovery 1015 */ 1016 if ((phba->hba_flag & HBA_FIP_SUPPORT) && 1017 (phba->fcf.fcf_flag & FCF_DISCOVERY)) { 1018 if (phba->link_state < LPFC_LINK_UP) 1019 goto stop_rr_fcf_flogi; 1020 if ((phba->fcoe_cvl_eventtag_attn == 1021 phba->fcoe_cvl_eventtag) && 1022 (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 1023 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == 1024 IOERR_SLI_ABORTED)) 1025 goto stop_rr_fcf_flogi; 1026 else 1027 phba->fcoe_cvl_eventtag_attn = 1028 phba->fcoe_cvl_eventtag; 1029 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, 1030 "2611 FLOGI failed on FCF (x%x), " 1031 "status:x%x/x%x, tmo:x%x, perform " 1032 "roundrobin FCF failover\n", 1033 phba->fcf.current_rec.fcf_indx, 1034 irsp->ulpStatus, irsp->un.ulpWord[4], 1035 irsp->ulpTimeout); 1036 lpfc_sli4_set_fcf_flogi_fail(phba, 1037 phba->fcf.current_rec.fcf_indx); 1038 fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba); 1039 rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index); 1040 if (rc) 1041 goto out; 1042 } 1043 1044 stop_rr_fcf_flogi: 1045 /* FLOGI failure */ 1046 if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT && 1047 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == 1048 IOERR_LOOP_OPEN_FAILURE))) 1049 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1050 "2858 FLOGI failure Status:x%x/x%x TMO" 1051 ":x%x Data x%x x%x\n", 1052 irsp->ulpStatus, irsp->un.ulpWord[4], 1053 irsp->ulpTimeout, phba->hba_flag, 1054 phba->fcf.fcf_flag); 1055 1056 /* Check for retry */ 1057 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) 1058 goto out; 1059 1060 lpfc_printf_vlog(vport, KERN_WARNING, LOG_TRACE_EVENT, 1061 "0150 FLOGI 
failure Status:x%x/x%x " 1062 "xri x%x TMO:x%x refcnt %d\n", 1063 irsp->ulpStatus, irsp->un.ulpWord[4], 1064 cmdiocb->sli4_xritag, irsp->ulpTimeout, 1065 kref_read(&ndlp->kref)); 1066 1067 /* If this is not a loop open failure, bail out */ 1068 if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT && 1069 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == 1070 IOERR_LOOP_OPEN_FAILURE))) 1071 goto flogifail; 1072 1073 /* FLOGI failed, so there is no fabric */ 1074 spin_lock_irq(shost->host_lock); 1075 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP | 1076 FC_PT2PT_NO_NVME); 1077 spin_unlock_irq(shost->host_lock); 1078 1079 /* If private loop, then allow max outstanding els to be 1080 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no 1081 * alpa map would take too long otherwise. 1082 */ 1083 if (phba->alpa_map[0] == 0) 1084 vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS; 1085 if ((phba->sli_rev == LPFC_SLI_REV4) && 1086 (!(vport->fc_flag & FC_VFI_REGISTERED) || 1087 (vport->fc_prevDID != vport->fc_myDID) || 1088 phba->fc_topology_changed)) { 1089 if (vport->fc_flag & FC_VFI_REGISTERED) { 1090 if (phba->fc_topology_changed) { 1091 lpfc_unregister_fcf_prep(phba); 1092 spin_lock_irq(shost->host_lock); 1093 vport->fc_flag &= ~FC_VFI_REGISTERED; 1094 spin_unlock_irq(shost->host_lock); 1095 phba->fc_topology_changed = 0; 1096 } else { 1097 lpfc_sli4_unreg_all_rpis(vport); 1098 } 1099 } 1100 1101 /* Do not register VFI if the driver aborted FLOGI */ 1102 if (!lpfc_error_lost_link(irsp)) 1103 lpfc_issue_reg_vfi(vport); 1104 1105 lpfc_nlp_put(ndlp); 1106 goto out; 1107 } 1108 goto flogifail; 1109 } 1110 spin_lock_irq(shost->host_lock); 1111 vport->fc_flag &= ~FC_VPORT_CVL_RCVD; 1112 vport->fc_flag &= ~FC_VPORT_LOGO_RCVD; 1113 spin_unlock_irq(shost->host_lock); 1114 1115 /* 1116 * The FLogI succeeded. Sync the data for the CPU before 1117 * accessing it. 1118 */ 1119 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 1120 if (!prsp) 1121 goto out; 1122 sp = prsp->virt + sizeof(uint32_t); 1123 1124 /* FLOGI completes successfully */ 1125 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1126 "0101 FLOGI completes successfully, I/O tag:x%x, " 1127 "xri x%x Data: x%x x%x x%x x%x x%x x%x x%x %d\n", 1128 cmdiocb->iotag, cmdiocb->sli4_xritag, 1129 irsp->un.ulpWord[4], sp->cmn.e_d_tov, 1130 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution, 1131 vport->port_state, vport->fc_flag, 1132 sp->cmn.priority_tagging, kref_read(&ndlp->kref)); 1133 1134 if (sp->cmn.priority_tagging) 1135 vport->vmid_flag |= LPFC_VMID_ISSUE_QFPA; 1136 1137 if (vport->port_state == LPFC_FLOGI) { 1138 /* 1139 * If Common Service Parameters indicate Nport 1140 * we are point to point, if Fport we are Fabric. 
1141 */ 1142 if (sp->cmn.fPort) 1143 rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp); 1144 else if (!(phba->hba_flag & HBA_FCOE_MODE)) 1145 rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp); 1146 else { 1147 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1148 "2831 FLOGI response with cleared Fabric " 1149 "bit fcf_index 0x%x " 1150 "Switch Name %02x%02x%02x%02x%02x%02x%02x%02x " 1151 "Fabric Name " 1152 "%02x%02x%02x%02x%02x%02x%02x%02x\n", 1153 phba->fcf.current_rec.fcf_indx, 1154 phba->fcf.current_rec.switch_name[0], 1155 phba->fcf.current_rec.switch_name[1], 1156 phba->fcf.current_rec.switch_name[2], 1157 phba->fcf.current_rec.switch_name[3], 1158 phba->fcf.current_rec.switch_name[4], 1159 phba->fcf.current_rec.switch_name[5], 1160 phba->fcf.current_rec.switch_name[6], 1161 phba->fcf.current_rec.switch_name[7], 1162 phba->fcf.current_rec.fabric_name[0], 1163 phba->fcf.current_rec.fabric_name[1], 1164 phba->fcf.current_rec.fabric_name[2], 1165 phba->fcf.current_rec.fabric_name[3], 1166 phba->fcf.current_rec.fabric_name[4], 1167 phba->fcf.current_rec.fabric_name[5], 1168 phba->fcf.current_rec.fabric_name[6], 1169 phba->fcf.current_rec.fabric_name[7]); 1170 1171 lpfc_nlp_put(ndlp); 1172 spin_lock_irq(&phba->hbalock); 1173 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 1174 phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO); 1175 spin_unlock_irq(&phba->hbalock); 1176 phba->fcf.fcf_redisc_attempted = 0; /* reset */ 1177 goto out; 1178 } 1179 if (!rc) { 1180 /* Mark the FCF discovery process done */ 1181 if (phba->hba_flag & HBA_FIP_SUPPORT) 1182 lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP | 1183 LOG_ELS, 1184 "2769 FLOGI to FCF (x%x) " 1185 "completed successfully\n", 1186 phba->fcf.current_rec.fcf_indx); 1187 spin_lock_irq(&phba->hbalock); 1188 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 1189 phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO); 1190 spin_unlock_irq(&phba->hbalock); 1191 phba->fcf.fcf_redisc_attempted = 0; /* reset */ 1192 goto out; 1193 } 1194 } else if (vport->port_state > LPFC_FLOGI && 1195 vport->fc_flag & FC_PT2PT) { 1196 /* 1197 * In a p2p topology, it is possible that discovery has 1198 * already progressed, and this completion can be ignored. 1199 * Recheck the indicated topology. 1200 */ 1201 if (!sp->cmn.fPort) 1202 goto out; 1203 } 1204 1205 flogifail: 1206 spin_lock_irq(&phba->hbalock); 1207 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 1208 spin_unlock_irq(&phba->hbalock); 1209 1210 if (!lpfc_error_lost_link(irsp)) { 1211 /* FLOGI failed, so just use loop map to make discovery list */ 1212 lpfc_disc_list_loopmap(vport); 1213 1214 /* Start discovery */ 1215 lpfc_disc_start(vport); 1216 } else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) || 1217 (((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) != 1218 IOERR_SLI_ABORTED) && 1219 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) != 1220 IOERR_SLI_DOWN))) && 1221 (phba->link_state != LPFC_CLEAR_LA)) { 1222 /* If FLOGI failed enable link interrupt. */ 1223 lpfc_issue_clear_la(phba, vport); 1224 } 1225 out: 1226 phba->hba_flag &= ~HBA_FLOGI_OUTSTANDING; 1227 lpfc_els_free_iocb(phba, cmdiocb); 1228 lpfc_nlp_put(ndlp); 1229 } 1230 1231 /** 1232 * lpfc_cmpl_els_link_down - Completion callback function for ELS command 1233 * aborted during a link down 1234 * @phba: pointer to lpfc hba data structure. 1235 * @cmdiocb: pointer to lpfc command iocb data structure. 1236 * @rspiocb: pointer to lpfc response iocb data structure. 
1237 * 1238 */ 1239 static void 1240 lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1241 struct lpfc_iocbq *rspiocb) 1242 { 1243 IOCB_t *irsp; 1244 uint32_t *pcmd; 1245 uint32_t cmd; 1246 1247 pcmd = (uint32_t *)(((struct lpfc_dmabuf *)cmdiocb->context2)->virt); 1248 cmd = *pcmd; 1249 irsp = &rspiocb->iocb; 1250 1251 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 1252 "6445 ELS completes after LINK_DOWN: " 1253 " Status %x/%x cmd x%x flg x%x\n", 1254 irsp->ulpStatus, irsp->un.ulpWord[4], cmd, 1255 cmdiocb->iocb_flag); 1256 1257 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) { 1258 cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC; 1259 atomic_dec(&phba->fabric_iocb_count); 1260 } 1261 lpfc_els_free_iocb(phba, cmdiocb); 1262 } 1263 1264 /** 1265 * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport 1266 * @vport: pointer to a host virtual N_Port data structure. 1267 * @ndlp: pointer to a node-list data structure. 1268 * @retry: number of retries to the command IOCB. 1269 * 1270 * This routine issues a Fabric Login (FLOGI) Request ELS command 1271 * for a @vport. The initiator service parameters are put into the payload 1272 * of the FLOGI Request IOCB and the top-level callback function pointer 1273 * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback 1274 * function field. The lpfc_issue_fabric_iocb routine is invoked to send 1275 * out FLOGI ELS command with one outstanding fabric IOCB at a time. 1276 * 1277 * Note that the ndlp reference count will be incremented by 1 for holding the 1278 * ndlp and the reference to ndlp will be stored into the context1 field of 1279 * the IOCB for the completion callback function to the FLOGI ELS command. 1280 * 1281 * Return code 1282 * 0 - successfully issued flogi iocb for @vport 1283 * 1 - failed to issue flogi iocb for @vport 1284 **/ 1285 static int 1286 lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1287 uint8_t retry) 1288 { 1289 struct lpfc_hba *phba = vport->phba; 1290 struct serv_parm *sp; 1291 IOCB_t *icmd; 1292 struct lpfc_iocbq *elsiocb; 1293 struct lpfc_iocbq defer_flogi_acc; 1294 uint8_t *pcmd; 1295 uint16_t cmdsize; 1296 uint32_t tmo, did; 1297 int rc; 1298 1299 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 1300 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 1301 ndlp->nlp_DID, ELS_CMD_FLOGI); 1302 1303 if (!elsiocb) 1304 return 1; 1305 1306 icmd = &elsiocb->iocb; 1307 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 1308 1309 /* For FLOGI request, remainder of payload is service parameters */ 1310 *((uint32_t *) (pcmd)) = ELS_CMD_FLOGI; 1311 pcmd += sizeof(uint32_t); 1312 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); 1313 sp = (struct serv_parm *) pcmd; 1314 1315 /* Setup CSPs accordingly for Fabric */ 1316 sp->cmn.e_d_tov = 0; 1317 sp->cmn.w2.r_a_tov = 0; 1318 sp->cmn.virtual_fabric_support = 0; 1319 sp->cls1.classValid = 0; 1320 if (sp->cmn.fcphLow < FC_PH3) 1321 sp->cmn.fcphLow = FC_PH3; 1322 if (sp->cmn.fcphHigh < FC_PH3) 1323 sp->cmn.fcphHigh = FC_PH3; 1324 1325 /* Determine if switch supports priority tagging */ 1326 if (phba->cfg_vmid_priority_tagging) { 1327 sp->cmn.priority_tagging = 1; 1328 /* lpfc_vmid_host_uuid is combination of wwpn and wwnn */ 1329 if (uuid_is_null((uuid_t *)vport->lpfc_vmid_host_uuid)) { 1330 memcpy(vport->lpfc_vmid_host_uuid, phba->wwpn, 1331 sizeof(phba->wwpn)); 1332 memcpy(&vport->lpfc_vmid_host_uuid[8], phba->wwnn, 1333 sizeof(phba->wwnn)); 1334 } 1335 } 1336 1337 if 
(phba->sli_rev == LPFC_SLI_REV4) { 1338 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 1339 LPFC_SLI_INTF_IF_TYPE_0) { 1340 elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1); 1341 elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1); 1342 /* FLOGI needs to be 3 for WQE FCFI */ 1343 /* Set the fcfi to the fcfi we registered with */ 1344 elsiocb->iocb.ulpContext = phba->fcf.fcfi; 1345 } 1346 /* Can't do SLI4 class2 without support sequence coalescing */ 1347 sp->cls2.classValid = 0; 1348 sp->cls2.seqDelivery = 0; 1349 } else { 1350 /* Historical, setting sequential-delivery bit for SLI3 */ 1351 sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0; 1352 sp->cls3.seqDelivery = (sp->cls3.classValid) ? 1 : 0; 1353 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { 1354 sp->cmn.request_multiple_Nport = 1; 1355 /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */ 1356 icmd->ulpCt_h = 1; 1357 icmd->ulpCt_l = 0; 1358 } else 1359 sp->cmn.request_multiple_Nport = 0; 1360 } 1361 1362 if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) { 1363 icmd->un.elsreq64.myID = 0; 1364 icmd->un.elsreq64.fl = 1; 1365 } 1366 1367 tmo = phba->fc_ratov; 1368 phba->fc_ratov = LPFC_DISC_FLOGI_TMO; 1369 lpfc_set_disctmo(vport); 1370 phba->fc_ratov = tmo; 1371 1372 phba->fc_stat.elsXmitFLOGI++; 1373 elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi; 1374 1375 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1376 "Issue FLOGI: opt:x%x", 1377 phba->sli3_options, 0, 0); 1378 1379 elsiocb->context1 = lpfc_nlp_get(ndlp); 1380 if (!elsiocb->context1) { 1381 lpfc_els_free_iocb(phba, elsiocb); 1382 return 1; 1383 } 1384 1385 rc = lpfc_issue_fabric_iocb(phba, elsiocb); 1386 if (rc == IOCB_ERROR) { 1387 lpfc_els_free_iocb(phba, elsiocb); 1388 lpfc_nlp_put(ndlp); 1389 return 1; 1390 } 1391 1392 phba->hba_flag |= (HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING); 1393 1394 /* Check for a deferred FLOGI ACC condition */ 1395 if (phba->defer_flogi_acc_flag) { 1396 did = vport->fc_myDID; 1397 vport->fc_myDID = Fabric_DID; 1398 1399 memset(&defer_flogi_acc, 0, sizeof(struct lpfc_iocbq)); 1400 1401 defer_flogi_acc.iocb.ulpContext = phba->defer_flogi_acc_rx_id; 1402 defer_flogi_acc.iocb.unsli3.rcvsli3.ox_id = 1403 phba->defer_flogi_acc_ox_id; 1404 1405 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1406 "3354 Xmit deferred FLOGI ACC: rx_id: x%x," 1407 " ox_id: x%x, hba_flag x%x\n", 1408 phba->defer_flogi_acc_rx_id, 1409 phba->defer_flogi_acc_ox_id, phba->hba_flag); 1410 1411 /* Send deferred FLOGI ACC */ 1412 lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, &defer_flogi_acc, 1413 ndlp, NULL); 1414 1415 phba->defer_flogi_acc_flag = false; 1416 1417 vport->fc_myDID = did; 1418 } 1419 1420 return 0; 1421 } 1422 1423 /** 1424 * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs 1425 * @phba: pointer to lpfc hba data structure. 1426 * 1427 * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs 1428 * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq 1429 * list and issues an abort IOCB commond on each outstanding IOCB that 1430 * contains a active Fabric_DID ndlp. Note that this function is to issue 1431 * the abort IOCB command on all the outstanding IOCBs, thus when this 1432 * function returns, it does not guarantee all the IOCBs are actually aborted. 
1433 * 1434 * Return code 1435 * 0 - Successfully issued abort iocb on all outstanding flogis (Always 0) 1436 **/ 1437 int 1438 lpfc_els_abort_flogi(struct lpfc_hba *phba) 1439 { 1440 struct lpfc_sli_ring *pring; 1441 struct lpfc_iocbq *iocb, *next_iocb; 1442 struct lpfc_nodelist *ndlp; 1443 IOCB_t *icmd; 1444 1445 /* Abort outstanding I/O on NPort <nlp_DID> */ 1446 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 1447 "0201 Abort outstanding I/O on NPort x%x\n", 1448 Fabric_DID); 1449 1450 pring = lpfc_phba_elsring(phba); 1451 if (unlikely(!pring)) 1452 return -EIO; 1453 1454 /* 1455 * Check the txcmplq for an iocb that matches the nport the driver is 1456 * searching for. 1457 */ 1458 spin_lock_irq(&phba->hbalock); 1459 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { 1460 icmd = &iocb->iocb; 1461 if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) { 1462 ndlp = (struct lpfc_nodelist *)(iocb->context1); 1463 if (ndlp && ndlp->nlp_DID == Fabric_DID) { 1464 if ((phba->pport->fc_flag & FC_PT2PT) && 1465 !(phba->pport->fc_flag & FC_PT2PT_PLOGI)) 1466 iocb->fabric_iocb_cmpl = 1467 lpfc_ignore_els_cmpl; 1468 lpfc_sli_issue_abort_iotag(phba, pring, iocb, 1469 NULL); 1470 } 1471 } 1472 } 1473 /* Make sure HBA is alive */ 1474 lpfc_issue_hb_tmo(phba); 1475 1476 spin_unlock_irq(&phba->hbalock); 1477 1478 return 0; 1479 } 1480 1481 /** 1482 * lpfc_initial_flogi - Issue an initial fabric login for a vport 1483 * @vport: pointer to a host virtual N_Port data structure. 1484 * 1485 * This routine issues an initial Fabric Login (FLOGI) for the @vport 1486 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from 1487 * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and 1488 * put it into the @vport's ndlp list. If an inactive ndlp found on the list, 1489 * it will just be enabled and made active. The lpfc_issue_els_flogi() routine 1490 * is then invoked with the @vport and the ndlp to perform the FLOGI for the 1491 * @vport. 1492 * 1493 * Return code 1494 * 0 - failed to issue initial flogi for @vport 1495 * 1 - successfully issued initial flogi for @vport 1496 **/ 1497 int 1498 lpfc_initial_flogi(struct lpfc_vport *vport) 1499 { 1500 struct lpfc_nodelist *ndlp; 1501 1502 vport->port_state = LPFC_FLOGI; 1503 lpfc_set_disctmo(vport); 1504 1505 /* First look for the Fabric ndlp */ 1506 ndlp = lpfc_findnode_did(vport, Fabric_DID); 1507 if (!ndlp) { 1508 /* Cannot find existing Fabric ndlp, so allocate a new one */ 1509 ndlp = lpfc_nlp_init(vport, Fabric_DID); 1510 if (!ndlp) 1511 return 0; 1512 /* Set the node type */ 1513 ndlp->nlp_type |= NLP_FABRIC; 1514 1515 /* Put ndlp onto node list */ 1516 lpfc_enqueue_node(vport, ndlp); 1517 } 1518 1519 if (lpfc_issue_els_flogi(vport, ndlp, 0)) { 1520 /* This decrement of reference count to node shall kick off 1521 * the release of the node. 1522 */ 1523 lpfc_nlp_put(ndlp); 1524 return 0; 1525 } 1526 return 1; 1527 } 1528 1529 /** 1530 * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport 1531 * @vport: pointer to a host virtual N_Port data structure. 1532 * 1533 * This routine issues an initial Fabric Discover (FDISC) for the @vport 1534 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from 1535 * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and 1536 * put it into the @vport's ndlp list. If an inactive ndlp found on the list, 1537 * it will just be enabled and made active. 
The lpfc_issue_els_fdisc() routine 1538 * is then invoked with the @vport and the ndlp to perform the FDISC for the 1539 * @vport. 1540 * 1541 * Return code 1542 * 0 - failed to issue initial fdisc for @vport 1543 * 1 - successfully issued initial fdisc for @vport 1544 **/ 1545 int 1546 lpfc_initial_fdisc(struct lpfc_vport *vport) 1547 { 1548 struct lpfc_nodelist *ndlp; 1549 1550 /* First look for the Fabric ndlp */ 1551 ndlp = lpfc_findnode_did(vport, Fabric_DID); 1552 if (!ndlp) { 1553 /* Cannot find existing Fabric ndlp, so allocate a new one */ 1554 ndlp = lpfc_nlp_init(vport, Fabric_DID); 1555 if (!ndlp) 1556 return 0; 1557 1558 /* NPIV is only supported in Fabrics. */ 1559 ndlp->nlp_type |= NLP_FABRIC; 1560 1561 /* Put ndlp onto node list */ 1562 lpfc_enqueue_node(vport, ndlp); 1563 } 1564 1565 if (lpfc_issue_els_fdisc(vport, ndlp, 0)) { 1566 /* decrement node reference count to trigger the release of 1567 * the node. 1568 */ 1569 lpfc_nlp_put(ndlp); 1570 return 0; 1571 } 1572 return 1; 1573 } 1574 1575 /** 1576 * lpfc_more_plogi - Check and issue remaining plogis for a vport 1577 * @vport: pointer to a host virtual N_Port data structure. 1578 * 1579 * This routine checks whether there are more remaining Port Logins 1580 * (PLOGI) to be issued for the @vport. If so, it will invoke the routine 1581 * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes 1582 * to issue ELS PLOGIs up to the configured discover threads with the 1583 * @vport (@vport->cfg_discovery_threads). The function also decrement 1584 * the @vport's num_disc_node by 1 if it is not already 0. 1585 **/ 1586 void 1587 lpfc_more_plogi(struct lpfc_vport *vport) 1588 { 1589 if (vport->num_disc_nodes) 1590 vport->num_disc_nodes--; 1591 1592 /* Continue discovery with <num_disc_nodes> PLOGIs to go */ 1593 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 1594 "0232 Continue discovery with %d PLOGIs to go " 1595 "Data: x%x x%x x%x\n", 1596 vport->num_disc_nodes, vport->fc_plogi_cnt, 1597 vport->fc_flag, vport->port_state); 1598 /* Check to see if there are more PLOGIs to be sent */ 1599 if (vport->fc_flag & FC_NLP_MORE) 1600 /* go thru NPR nodes and issue any remaining ELS PLOGIs */ 1601 lpfc_els_disc_plogi(vport); 1602 1603 return; 1604 } 1605 1606 /** 1607 * lpfc_plogi_confirm_nport - Confirm plogi wwpn matches stored ndlp 1608 * @phba: pointer to lpfc hba data structure. 1609 * @prsp: pointer to response IOCB payload. 1610 * @ndlp: pointer to a node-list data structure. 1611 * 1612 * This routine checks and indicates whether the WWPN of an N_Port, retrieved 1613 * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_POrt. 1614 * The following cases are considered N_Port confirmed: 1615 * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on vport list and matches 1616 * the WWPN of the N_Port logged into; 3) The @ndlp is not on vport list but 1617 * it does not have WWPN assigned either. If the WWPN is confirmed, the 1618 * pointer to the @ndlp will be returned. If the WWPN is not confirmed: 1619 * 1) if there is a node on vport list other than the @ndlp with the same 1620 * WWPN of the N_Port PLOGI logged into, the lpfc_unreg_rpi() will be invoked 1621 * on that node to release the RPI associated with the node; 2) if there is 1622 * no node found on vport list with the same WWPN of the N_Port PLOGI logged 1623 * into, a new node shall be allocated (or activated). 
In either case, the 1624 * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall 1625 * be released and the new_ndlp shall be put on to the vport node list and 1626 * its pointer returned as the confirmed node. 1627 * 1628 * Note that before the @ndlp got "released", the keepDID from not-matching 1629 * or inactive "new_ndlp" on the vport node list is assigned to the nlp_DID 1630 * of the @ndlp. This is because the release of @ndlp is actually to put it 1631 * into an inactive state on the vport node list and the vport node list 1632 * management algorithm does not allow two node with a same DID. 1633 * 1634 * Return code 1635 * pointer to the PLOGI N_Port @ndlp 1636 **/ 1637 static struct lpfc_nodelist * 1638 lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, 1639 struct lpfc_nodelist *ndlp) 1640 { 1641 struct lpfc_vport *vport = ndlp->vport; 1642 struct lpfc_nodelist *new_ndlp; 1643 struct serv_parm *sp; 1644 uint8_t name[sizeof(struct lpfc_name)]; 1645 uint32_t keepDID = 0, keep_nlp_flag = 0; 1646 uint32_t keep_new_nlp_flag = 0; 1647 uint16_t keep_nlp_state; 1648 u32 keep_nlp_fc4_type = 0; 1649 struct lpfc_nvme_rport *keep_nrport = NULL; 1650 unsigned long *active_rrqs_xri_bitmap = NULL; 1651 1652 /* Fabric nodes can have the same WWPN so we don't bother searching 1653 * by WWPN. Just return the ndlp that was given to us. 1654 */ 1655 if (ndlp->nlp_type & NLP_FABRIC) 1656 return ndlp; 1657 1658 sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t)); 1659 memset(name, 0, sizeof(struct lpfc_name)); 1660 1661 /* Now we find out if the NPort we are logging into, matches the WWPN 1662 * we have for that ndlp. If not, we have some work to do. 1663 */ 1664 new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName); 1665 1666 /* return immediately if the WWPN matches ndlp */ 1667 if (!new_ndlp || (new_ndlp == ndlp)) 1668 return ndlp; 1669 1670 /* 1671 * Unregister from backend if not done yet. Could have been skipped 1672 * due to ADISC 1673 */ 1674 lpfc_nlp_unreg_node(vport, new_ndlp); 1675 1676 if (phba->sli_rev == LPFC_SLI_REV4) { 1677 active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool, 1678 GFP_KERNEL); 1679 if (active_rrqs_xri_bitmap) 1680 memset(active_rrqs_xri_bitmap, 0, 1681 phba->cfg_rrq_xri_bitmap_sz); 1682 } 1683 1684 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE, 1685 "3178 PLOGI confirm: ndlp x%x x%x x%x: " 1686 "new_ndlp x%x x%x x%x\n", 1687 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_fc4_type, 1688 (new_ndlp ? new_ndlp->nlp_DID : 0), 1689 (new_ndlp ? new_ndlp->nlp_flag : 0), 1690 (new_ndlp ? new_ndlp->nlp_fc4_type : 0)); 1691 1692 keepDID = new_ndlp->nlp_DID; 1693 1694 if (phba->sli_rev == LPFC_SLI_REV4 && active_rrqs_xri_bitmap) 1695 memcpy(active_rrqs_xri_bitmap, new_ndlp->active_rrqs_xri_bitmap, 1696 phba->cfg_rrq_xri_bitmap_sz); 1697 1698 /* At this point in this routine, we know new_ndlp will be 1699 * returned. however, any previous GID_FTs that were done 1700 * would have updated nlp_fc4_type in ndlp, so we must ensure 1701 * new_ndlp has the right value. 
1702 */ 1703 if (vport->fc_flag & FC_FABRIC) { 1704 keep_nlp_fc4_type = new_ndlp->nlp_fc4_type; 1705 new_ndlp->nlp_fc4_type = ndlp->nlp_fc4_type; 1706 } 1707 1708 lpfc_unreg_rpi(vport, new_ndlp); 1709 new_ndlp->nlp_DID = ndlp->nlp_DID; 1710 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state; 1711 if (phba->sli_rev == LPFC_SLI_REV4) 1712 memcpy(new_ndlp->active_rrqs_xri_bitmap, 1713 ndlp->active_rrqs_xri_bitmap, 1714 phba->cfg_rrq_xri_bitmap_sz); 1715 1716 /* Lock both ndlps */ 1717 spin_lock_irq(&ndlp->lock); 1718 spin_lock_irq(&new_ndlp->lock); 1719 keep_new_nlp_flag = new_ndlp->nlp_flag; 1720 keep_nlp_flag = ndlp->nlp_flag; 1721 new_ndlp->nlp_flag = ndlp->nlp_flag; 1722 1723 /* if new_ndlp had NLP_UNREG_INP set, keep it */ 1724 if (keep_new_nlp_flag & NLP_UNREG_INP) 1725 new_ndlp->nlp_flag |= NLP_UNREG_INP; 1726 else 1727 new_ndlp->nlp_flag &= ~NLP_UNREG_INP; 1728 1729 /* if new_ndlp had NLP_RPI_REGISTERED set, keep it */ 1730 if (keep_new_nlp_flag & NLP_RPI_REGISTERED) 1731 new_ndlp->nlp_flag |= NLP_RPI_REGISTERED; 1732 else 1733 new_ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; 1734 1735 /* 1736 * Retain the DROPPED flag. This will take care of the init 1737 * refcount when affecting the state change 1738 */ 1739 if (keep_new_nlp_flag & NLP_DROPPED) 1740 new_ndlp->nlp_flag |= NLP_DROPPED; 1741 else 1742 new_ndlp->nlp_flag &= ~NLP_DROPPED; 1743 1744 ndlp->nlp_flag = keep_new_nlp_flag; 1745 1746 /* if ndlp had NLP_UNREG_INP set, keep it */ 1747 if (keep_nlp_flag & NLP_UNREG_INP) 1748 ndlp->nlp_flag |= NLP_UNREG_INP; 1749 else 1750 ndlp->nlp_flag &= ~NLP_UNREG_INP; 1751 1752 /* if ndlp had NLP_RPI_REGISTERED set, keep it */ 1753 if (keep_nlp_flag & NLP_RPI_REGISTERED) 1754 ndlp->nlp_flag |= NLP_RPI_REGISTERED; 1755 else 1756 ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; 1757 1758 /* 1759 * Retain the DROPPED flag. This will take care of the init 1760 * refcount when affecting the state change 1761 */ 1762 if (keep_nlp_flag & NLP_DROPPED) 1763 ndlp->nlp_flag |= NLP_DROPPED; 1764 else 1765 ndlp->nlp_flag &= ~NLP_DROPPED; 1766 1767 spin_unlock_irq(&new_ndlp->lock); 1768 spin_unlock_irq(&ndlp->lock); 1769 1770 /* Set nlp_states accordingly */ 1771 keep_nlp_state = new_ndlp->nlp_state; 1772 lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state); 1773 1774 /* interchange the nvme remoteport structs */ 1775 keep_nrport = new_ndlp->nrport; 1776 new_ndlp->nrport = ndlp->nrport; 1777 1778 /* Move this back to NPR state */ 1779 if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) { 1780 /* The new_ndlp is replacing ndlp totally, so we need 1781 * to put ndlp on UNUSED list and try to free it. 1782 */ 1783 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1784 "3179 PLOGI confirm NEW: %x %x\n", 1785 new_ndlp->nlp_DID, keepDID); 1786 1787 /* Two ndlps cannot have the same did on the nodelist. 1788 * Note: for this case, ndlp has a NULL WWPN so setting 1789 * the nlp_fc4_type isn't required. 1790 */ 1791 ndlp->nlp_DID = keepDID; 1792 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state); 1793 if (phba->sli_rev == LPFC_SLI_REV4 && 1794 active_rrqs_xri_bitmap) 1795 memcpy(ndlp->active_rrqs_xri_bitmap, 1796 active_rrqs_xri_bitmap, 1797 phba->cfg_rrq_xri_bitmap_sz); 1798 1799 } else { 1800 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1801 "3180 PLOGI confirm SWAP: %x %x\n", 1802 new_ndlp->nlp_DID, keepDID); 1803 1804 lpfc_unreg_rpi(vport, ndlp); 1805 1806 /* Two ndlps cannot have the same did and the fc4 1807 * type must be transferred because the ndlp is in 1808 * flight. 
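 * keepDID and keep_nlp_fc4_type below restore to this ndlp the identity
 * that new_ndlp gave up, so no two nodes on the list ever share a DID.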
1809 */ 1810 ndlp->nlp_DID = keepDID; 1811 ndlp->nlp_fc4_type = keep_nlp_fc4_type; 1812 1813 if (phba->sli_rev == LPFC_SLI_REV4 && 1814 active_rrqs_xri_bitmap) 1815 memcpy(ndlp->active_rrqs_xri_bitmap, 1816 active_rrqs_xri_bitmap, 1817 phba->cfg_rrq_xri_bitmap_sz); 1818 1819 /* Since we are switching over to the new_ndlp, 1820 * reset the old ndlp state 1821 */ 1822 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || 1823 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) 1824 keep_nlp_state = NLP_STE_NPR_NODE; 1825 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state); 1826 ndlp->nrport = keep_nrport; 1827 } 1828 1829 /* 1830 * If ndlp is not associated with any rport we can drop it here else 1831 * let dev_loss_tmo_callbk trigger DEVICE_RM event 1832 */ 1833 if (!ndlp->rport && (ndlp->nlp_state == NLP_STE_NPR_NODE)) 1834 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 1835 1836 if (phba->sli_rev == LPFC_SLI_REV4 && 1837 active_rrqs_xri_bitmap) 1838 mempool_free(active_rrqs_xri_bitmap, 1839 phba->active_rrq_pool); 1840 1841 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE, 1842 "3173 PLOGI confirm exit: new_ndlp x%x x%x x%x\n", 1843 new_ndlp->nlp_DID, new_ndlp->nlp_flag, 1844 new_ndlp->nlp_fc4_type); 1845 1846 return new_ndlp; 1847 } 1848 1849 /** 1850 * lpfc_end_rscn - Check and handle more rscn for a vport 1851 * @vport: pointer to a host virtual N_Port data structure. 1852 * 1853 * This routine checks whether more Registration State Change 1854 * Notifications (RSCNs) came in while the discovery state machine was in 1855 * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be 1856 * invoked to handle the additional RSCNs for the @vport. Otherwise, the 1857 * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of 1858 * handling the RSCNs. 1859 **/ 1860 void 1861 lpfc_end_rscn(struct lpfc_vport *vport) 1862 { 1863 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1864 1865 if (vport->fc_flag & FC_RSCN_MODE) { 1866 /* 1867 * Check to see if more RSCNs came in while we were 1868 * processing this one. 1869 */ 1870 if (vport->fc_rscn_id_cnt || 1871 (vport->fc_flag & FC_RSCN_DISCOVERY) != 0) 1872 lpfc_els_handle_rscn(vport); 1873 else { 1874 spin_lock_irq(shost->host_lock); 1875 vport->fc_flag &= ~FC_RSCN_MODE; 1876 spin_unlock_irq(shost->host_lock); 1877 } 1878 } 1879 } 1880 1881 /** 1882 * lpfc_cmpl_els_rrq - Completion handled for els RRQs. 1883 * @phba: pointer to lpfc hba data structure. 1884 * @cmdiocb: pointer to lpfc command iocb data structure. 1885 * @rspiocb: pointer to lpfc response iocb data structure. 1886 * 1887 * This routine will call the clear rrq function to free the rrq and 1888 * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not 1889 * exist then the clear_rrq is still called because the rrq needs to 1890 * be freed. 
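 * In either case the routine frees the command IOCB and drops the ndlp
 * reference held for the RRQ.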
1891 **/ 1892 1893 static void 1894 lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1895 struct lpfc_iocbq *rspiocb) 1896 { 1897 struct lpfc_vport *vport = cmdiocb->vport; 1898 IOCB_t *irsp; 1899 struct lpfc_nodelist *ndlp = cmdiocb->context1; 1900 struct lpfc_node_rrq *rrq; 1901 1902 /* we pass cmdiocb to state machine which needs rspiocb as well */ 1903 rrq = cmdiocb->context_un.rrq; 1904 cmdiocb->context_un.rsp_iocb = rspiocb; 1905 1906 irsp = &rspiocb->iocb; 1907 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1908 "RRQ cmpl: status:x%x/x%x did:x%x", 1909 irsp->ulpStatus, irsp->un.ulpWord[4], 1910 irsp->un.elsreq64.remoteID); 1911 1912 /* rrq completes to NPort <nlp_DID> */ 1913 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1914 "2880 RRQ completes to DID x%x " 1915 "Data: x%x x%x x%x x%x x%x\n", 1916 irsp->un.elsreq64.remoteID, 1917 irsp->ulpStatus, irsp->un.ulpWord[4], 1918 irsp->ulpTimeout, rrq->xritag, rrq->rxid); 1919 1920 if (irsp->ulpStatus) { 1921 /* Check for retry */ 1922 /* RRQ failed. Don't print the vport to vport rjts */ 1923 if (irsp->ulpStatus != IOSTAT_LS_RJT || 1924 (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) && 1925 ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) || 1926 (phba)->pport->cfg_log_verbose & LOG_ELS) 1927 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1928 "2881 RRQ failure DID:%06X Status:" 1929 "x%x/x%x\n", 1930 ndlp->nlp_DID, irsp->ulpStatus, 1931 irsp->un.ulpWord[4]); 1932 } 1933 1934 lpfc_clr_rrq_active(phba, rrq->xritag, rrq); 1935 lpfc_els_free_iocb(phba, cmdiocb); 1936 lpfc_nlp_put(ndlp); 1937 return; 1938 } 1939 /** 1940 * lpfc_cmpl_els_plogi - Completion callback function for plogi 1941 * @phba: pointer to lpfc hba data structure. 1942 * @cmdiocb: pointer to lpfc command iocb data structure. 1943 * @rspiocb: pointer to lpfc response iocb data structure. 1944 * 1945 * This routine is the completion callback function for issuing the Port 1946 * Login (PLOGI) command. For PLOGI completion, there must be an active 1947 * ndlp on the vport node list that matches the remote node ID from the 1948 * PLOGI response IOCB. If no such ndlp exists, the PLOGI is simply 1949 * ignored and the command IOCB is released. The PLOGI response IOCB status is 1950 * checked for error conditions. If there is error status reported, PLOGI 1951 * retry shall be attempted by invoking the lpfc_els_retry() routine. 1952 * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on 1953 * the ndlp and the NLP_EVT_CMPL_PLOGI event is sent to the Discover State 1954 * Machine (DSM) for this PLOGI completion. Finally, it checks whether 1955 * there are additional N_Port nodes with the vport that need to perform 1956 * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue additional 1957 * PLOGIs.
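 * Once the last PLOGI of a discovery round completes, FC_NDISC_ACTIVE is
 * cleared and lpfc_can_disctmo() and lpfc_end_rscn() are called to wrap up
 * discovery.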
1958 **/ 1959 static void 1960 lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1961 struct lpfc_iocbq *rspiocb) 1962 { 1963 struct lpfc_vport *vport = cmdiocb->vport; 1964 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1965 IOCB_t *irsp; 1966 struct lpfc_nodelist *ndlp, *free_ndlp; 1967 struct lpfc_dmabuf *prsp; 1968 int disc; 1969 struct serv_parm *sp = NULL; 1970 1971 /* we pass cmdiocb to state machine which needs rspiocb as well */ 1972 cmdiocb->context_un.rsp_iocb = rspiocb; 1973 1974 irsp = &rspiocb->iocb; 1975 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1976 "PLOGI cmpl: status:x%x/x%x did:x%x", 1977 irsp->ulpStatus, irsp->un.ulpWord[4], 1978 irsp->un.elsreq64.remoteID); 1979 1980 ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID); 1981 if (!ndlp) { 1982 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1983 "0136 PLOGI completes to NPort x%x " 1984 "with no ndlp. Data: x%x x%x x%x\n", 1985 irsp->un.elsreq64.remoteID, 1986 irsp->ulpStatus, irsp->un.ulpWord[4], 1987 irsp->ulpIoTag); 1988 goto out_freeiocb; 1989 } 1990 1991 /* Since ndlp can be freed in the disc state machine, note if this node 1992 * is being used during discovery. 1993 */ 1994 spin_lock_irq(&ndlp->lock); 1995 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 1996 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 1997 spin_unlock_irq(&ndlp->lock); 1998 1999 /* PLOGI completes to NPort <nlp_DID> */ 2000 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2001 "0102 PLOGI completes to NPort x%06x " 2002 "Data: x%x x%x x%x x%x x%x\n", 2003 ndlp->nlp_DID, ndlp->nlp_fc4_type, 2004 irsp->ulpStatus, irsp->un.ulpWord[4], 2005 disc, vport->num_disc_nodes); 2006 2007 /* Check to see if link went down during discovery */ 2008 if (lpfc_els_chk_latt(vport)) { 2009 spin_lock_irq(&ndlp->lock); 2010 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2011 spin_unlock_irq(&ndlp->lock); 2012 goto out; 2013 } 2014 2015 if (irsp->ulpStatus) { 2016 /* Check for retry */ 2017 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2018 /* ELS command is being retried */ 2019 if (disc) { 2020 spin_lock_irq(&ndlp->lock); 2021 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2022 spin_unlock_irq(&ndlp->lock); 2023 } 2024 goto out; 2025 } 2026 /* PLOGI failed Don't print the vport to vport rjts */ 2027 if (irsp->ulpStatus != IOSTAT_LS_RJT || 2028 (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) && 2029 ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) || 2030 (phba)->pport->cfg_log_verbose & LOG_ELS) 2031 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2032 "2753 PLOGI failure DID:%06X Status:x%x/x%x\n", 2033 ndlp->nlp_DID, irsp->ulpStatus, 2034 irsp->un.ulpWord[4]); 2035 2036 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2037 if (!lpfc_error_lost_link(irsp)) 2038 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2039 NLP_EVT_CMPL_PLOGI); 2040 2041 /* If a PLOGI collision occurred, the node needs to continue 2042 * with the reglogin process. 2043 */ 2044 spin_lock_irq(&ndlp->lock); 2045 if ((ndlp->nlp_flag & (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI)) && 2046 ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) { 2047 spin_unlock_irq(&ndlp->lock); 2048 goto out; 2049 } 2050 spin_unlock_irq(&ndlp->lock); 2051 2052 /* No PLOGI collision and the node is not registered with the 2053 * scsi or nvme transport. It is no longer an active node. Just 2054 * start the device remove process. 
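 * NLP_NPR_2B_DISC is cleared first so discovery does not pick the node
 * back up while it is being removed.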
2055 */ 2056 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { 2057 spin_lock_irq(&ndlp->lock); 2058 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2059 spin_unlock_irq(&ndlp->lock); 2060 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2061 NLP_EVT_DEVICE_RM); 2062 } 2063 } else { 2064 /* Good status, call state machine */ 2065 prsp = list_entry(((struct lpfc_dmabuf *) 2066 cmdiocb->context2)->list.next, 2067 struct lpfc_dmabuf, list); 2068 ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp); 2069 2070 sp = (struct serv_parm *)((u8 *)prsp->virt + 2071 sizeof(u32)); 2072 2073 ndlp->vmid_support = 0; 2074 if ((phba->cfg_vmid_app_header && sp->cmn.app_hdr_support) || 2075 (phba->cfg_vmid_priority_tagging && 2076 sp->cmn.priority_tagging)) { 2077 lpfc_printf_log(phba, KERN_DEBUG, LOG_ELS, 2078 "4018 app_hdr_support %d tagging %d DID x%x\n", 2079 sp->cmn.app_hdr_support, 2080 sp->cmn.priority_tagging, 2081 ndlp->nlp_DID); 2082 /* if the dest port supports VMID, mark it in ndlp */ 2083 ndlp->vmid_support = 1; 2084 } 2085 2086 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2087 NLP_EVT_CMPL_PLOGI); 2088 } 2089 2090 if (disc && vport->num_disc_nodes) { 2091 /* Check to see if there are more PLOGIs to be sent */ 2092 lpfc_more_plogi(vport); 2093 2094 if (vport->num_disc_nodes == 0) { 2095 spin_lock_irq(shost->host_lock); 2096 vport->fc_flag &= ~FC_NDISC_ACTIVE; 2097 spin_unlock_irq(shost->host_lock); 2098 2099 lpfc_can_disctmo(vport); 2100 lpfc_end_rscn(vport); 2101 } 2102 } 2103 2104 out: 2105 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, 2106 "PLOGI Cmpl PUT: did:x%x refcnt %d", 2107 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2108 2109 out_freeiocb: 2110 /* Release the reference on the original I/O request. */ 2111 free_ndlp = (struct lpfc_nodelist *)cmdiocb->context1; 2112 2113 lpfc_els_free_iocb(phba, cmdiocb); 2114 lpfc_nlp_put(free_ndlp); 2115 return; 2116 } 2117 2118 /** 2119 * lpfc_issue_els_plogi - Issue an plogi iocb command for a vport 2120 * @vport: pointer to a host virtual N_Port data structure. 2121 * @did: destination port identifier. 2122 * @retry: number of retries to the command IOCB. 2123 * 2124 * This routine issues a Port Login (PLOGI) command to a remote N_Port 2125 * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port, 2126 * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list. 2127 * This routine constructs the proper fields of the PLOGI IOCB and invokes 2128 * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command. 2129 * 2130 * Note that the ndlp reference count will be incremented by 1 for holding 2131 * the ndlp and the reference to ndlp will be stored into the context1 field 2132 * of the IOCB for the completion callback function to the PLOGI ELS command. 2133 * 2134 * Return code 2135 * 0 - Successfully issued a plogi for @vport 2136 * 1 - failed to issue a plogi for @vport 2137 **/ 2138 int 2139 lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) 2140 { 2141 struct lpfc_hba *phba = vport->phba; 2142 struct serv_parm *sp; 2143 struct lpfc_nodelist *ndlp; 2144 struct lpfc_iocbq *elsiocb; 2145 uint8_t *pcmd; 2146 uint16_t cmdsize; 2147 int ret; 2148 2149 ndlp = lpfc_findnode_did(vport, did); 2150 if (!ndlp) 2151 return 1; 2152 2153 /* Defer the processing of the issue PLOGI until after the 2154 * outstanding UNREG_RPI mbox command completes, unless we 2155 * are going offline. 
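 * The deferred DID is remembered in nlp_defer_did so the PLOGI can be
 * reissued once the UNREG_RPI completes.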
This logic does not apply for Fabric DIDs 2156 */ 2157 if ((ndlp->nlp_flag & NLP_UNREG_INP) && 2158 ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) && 2159 !(vport->fc_flag & FC_OFFLINE_MODE)) { 2160 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2161 "4110 Issue PLOGI x%x deferred " 2162 "on NPort x%x rpi x%x Data: x%px\n", 2163 ndlp->nlp_defer_did, ndlp->nlp_DID, 2164 ndlp->nlp_rpi, ndlp); 2165 2166 /* We can only defer 1st PLOGI */ 2167 if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING) 2168 ndlp->nlp_defer_did = did; 2169 return 0; 2170 } 2171 2172 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 2173 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 2174 ELS_CMD_PLOGI); 2175 if (!elsiocb) 2176 return 1; 2177 2178 spin_lock_irq(&ndlp->lock); 2179 ndlp->nlp_flag &= ~NLP_FCP_PRLI_RJT; 2180 spin_unlock_irq(&ndlp->lock); 2181 2182 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2183 2184 /* For PLOGI request, remainder of payload is service parameters */ 2185 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI; 2186 pcmd += sizeof(uint32_t); 2187 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); 2188 sp = (struct serv_parm *) pcmd; 2189 2190 /* 2191 * If we are a N-port connected to a Fabric, fix-up paramm's so logins 2192 * to device on remote loops work. 2193 */ 2194 if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP)) 2195 sp->cmn.altBbCredit = 1; 2196 2197 if (sp->cmn.fcphLow < FC_PH_4_3) 2198 sp->cmn.fcphLow = FC_PH_4_3; 2199 2200 if (sp->cmn.fcphHigh < FC_PH3) 2201 sp->cmn.fcphHigh = FC_PH3; 2202 2203 sp->cmn.valid_vendor_ver_level = 0; 2204 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); 2205 sp->cmn.bbRcvSizeMsb &= 0xF; 2206 2207 /* Check if the destination port supports VMID */ 2208 ndlp->vmid_support = 0; 2209 if (vport->vmid_priority_tagging) 2210 sp->cmn.priority_tagging = 1; 2211 else if (phba->cfg_vmid_app_header && 2212 bf_get(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags)) 2213 sp->cmn.app_hdr_support = 1; 2214 2215 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2216 "Issue PLOGI: did:x%x", 2217 did, 0, 0); 2218 2219 /* If our firmware supports this feature, convey that 2220 * information to the target using the vendor specific field. 2221 */ 2222 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) { 2223 sp->cmn.valid_vendor_ver_level = 1; 2224 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID); 2225 sp->un.vv.flags = cpu_to_be32(LPFC_VV_SUPPRESS_RSP); 2226 } 2227 2228 phba->fc_stat.elsXmitPLOGI++; 2229 elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi; 2230 2231 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2232 "Issue PLOGI: did:x%x refcnt %d", 2233 did, kref_read(&ndlp->kref), 0); 2234 elsiocb->context1 = lpfc_nlp_get(ndlp); 2235 if (!elsiocb->context1) { 2236 lpfc_els_free_iocb(phba, elsiocb); 2237 return 1; 2238 } 2239 2240 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2241 if (ret) { 2242 lpfc_els_free_iocb(phba, elsiocb); 2243 lpfc_nlp_put(ndlp); 2244 return 1; 2245 } 2246 2247 return 0; 2248 } 2249 2250 /** 2251 * lpfc_cmpl_els_prli - Completion callback function for prli 2252 * @phba: pointer to lpfc hba data structure. 2253 * @cmdiocb: pointer to lpfc command iocb data structure. 2254 * @rspiocb: pointer to lpfc response iocb data structure. 2255 * 2256 * This routine is the completion callback function for a Process Login 2257 * (PRLI) ELS command. The PRLI response IOCB status is checked for error 2258 * status. 
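 * Both FCP and NVME PRLIs complete through this routine; the vport's
 * fc_prli_sent and the ndlp's fc4_prli_sent counters are decremented here.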
If there is error status reported, PRLI retry shall be attempted 2259 * by invoking the lpfc_els_retry() routine. Otherwise, the state 2260 * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this 2261 * ndlp to mark the PRLI completion. 2262 **/ 2263 static void 2264 lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2265 struct lpfc_iocbq *rspiocb) 2266 { 2267 struct lpfc_vport *vport = cmdiocb->vport; 2268 IOCB_t *irsp; 2269 struct lpfc_nodelist *ndlp; 2270 char *mode; 2271 u32 loglevel; 2272 2273 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2274 cmdiocb->context_un.rsp_iocb = rspiocb; 2275 2276 irsp = &(rspiocb->iocb); 2277 ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 2278 spin_lock_irq(&ndlp->lock); 2279 ndlp->nlp_flag &= ~NLP_PRLI_SND; 2280 2281 /* Driver supports multiple FC4 types. Counters matter. */ 2282 vport->fc_prli_sent--; 2283 ndlp->fc4_prli_sent--; 2284 spin_unlock_irq(&ndlp->lock); 2285 2286 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2287 "PRLI cmpl: status:x%x/x%x did:x%x", 2288 irsp->ulpStatus, irsp->un.ulpWord[4], 2289 ndlp->nlp_DID); 2290 2291 /* PRLI completes to NPort <nlp_DID> */ 2292 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2293 "0103 PRLI completes to NPort x%06x " 2294 "Data: x%x x%x x%x x%x\n", 2295 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 2296 vport->num_disc_nodes, ndlp->fc4_prli_sent); 2297 2298 /* Check to see if link went down during discovery */ 2299 if (lpfc_els_chk_latt(vport)) 2300 goto out; 2301 2302 if (irsp->ulpStatus) { 2303 /* Check for retry */ 2304 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2305 /* ELS command is being retried */ 2306 goto out; 2307 } 2308 2309 /* If we don't send GFT_ID to Fabric, a PRLI error 2310 * could be expected. 2311 */ 2312 if ((vport->fc_flag & FC_FABRIC) || 2313 (vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH)) { 2314 mode = KERN_ERR; 2315 loglevel = LOG_TRACE_EVENT; 2316 } else { 2317 mode = KERN_INFO; 2318 loglevel = LOG_ELS; 2319 } 2320 2321 /* PRLI failed */ 2322 lpfc_printf_vlog(vport, mode, loglevel, 2323 "2754 PRLI failure DID:%06X Status:x%x/x%x, " 2324 "data: x%x\n", 2325 ndlp->nlp_DID, irsp->ulpStatus, 2326 irsp->un.ulpWord[4], ndlp->fc4_prli_sent); 2327 2328 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2329 if (!lpfc_error_lost_link(irsp)) 2330 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2331 NLP_EVT_CMPL_PRLI); 2332 2333 /* 2334 * For P2P topology, retain the node so that PLOGI can be 2335 * attempted on it again. 2336 */ 2337 if (vport->fc_flag & FC_PT2PT) 2338 goto out; 2339 2340 /* As long as this node is not registered with the SCSI 2341 * or NVMe transport and no other PRLIs are outstanding, 2342 * it is no longer an active node. Otherwise devloss 2343 * handles the final cleanup. 2344 */ 2345 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && 2346 !ndlp->fc4_prli_sent) { 2347 spin_lock_irq(&ndlp->lock); 2348 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2349 spin_unlock_irq(&ndlp->lock); 2350 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2351 NLP_EVT_DEVICE_RM); 2352 } 2353 } else { 2354 /* Good status, call state machine. However, if another 2355 * PRLI is outstanding, don't call the state machine 2356 * because final disposition to Mapped or Unmapped is 2357 * completed there. 
2358 */ 2359 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2360 NLP_EVT_CMPL_PRLI); 2361 } 2362 2363 out: 2364 lpfc_els_free_iocb(phba, cmdiocb); 2365 lpfc_nlp_put(ndlp); 2366 return; 2367 } 2368 2369 /** 2370 * lpfc_issue_els_prli - Issue a prli iocb command for a vport 2371 * @vport: pointer to a host virtual N_Port data structure. 2372 * @ndlp: pointer to a node-list data structure. 2373 * @retry: number of retries to the command IOCB. 2374 * 2375 * This routine issues a Process Login (PRLI) ELS command for the 2376 * @vport. The PRLI service parameters are set up in the payload of the 2377 * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine 2378 * is put to the IOCB completion callback func field before invoking the 2379 * routine lpfc_sli_issue_iocb() to send out PRLI command. 2380 * 2381 * Note that the ndlp reference count will be incremented by 1 for holding the 2382 * ndlp and the reference to ndlp will be stored into the context1 field of 2383 * the IOCB for the completion callback function to the PRLI ELS command. 2384 * 2385 * Return code 2386 * 0 - successfully issued prli iocb command for @vport 2387 * 1 - failed to issue prli iocb command for @vport 2388 **/ 2389 int 2390 lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2391 uint8_t retry) 2392 { 2393 int rc = 0; 2394 struct lpfc_hba *phba = vport->phba; 2395 PRLI *npr; 2396 struct lpfc_nvme_prli *npr_nvme; 2397 struct lpfc_iocbq *elsiocb; 2398 uint8_t *pcmd; 2399 uint16_t cmdsize; 2400 u32 local_nlp_type, elscmd; 2401 2402 /* 2403 * If we are in RSCN mode, the FC4 types supported from a 2404 * previous GFT_ID command may not be accurate. So, if we 2405 * are a NVME Initiator, always look for the possibility of 2406 * the remote NPort beng a NVME Target. 2407 */ 2408 if (phba->sli_rev == LPFC_SLI_REV4 && 2409 vport->fc_flag & FC_RSCN_MODE && 2410 vport->nvmei_support) 2411 ndlp->nlp_fc4_type |= NLP_FC4_NVME; 2412 local_nlp_type = ndlp->nlp_fc4_type; 2413 2414 /* This routine will issue 1 or 2 PRLIs, so zero all the ndlp 2415 * fields here before any of them can complete. 2416 */ 2417 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); 2418 ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR); 2419 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 2420 ndlp->nlp_flag &= ~(NLP_FIRSTBURST | NLP_NPR_2B_DISC); 2421 ndlp->nvme_fb_size = 0; 2422 2423 send_next_prli: 2424 if (local_nlp_type & NLP_FC4_FCP) { 2425 /* Payload is 4 + 16 = 20 x14 bytes. */ 2426 cmdsize = (sizeof(uint32_t) + sizeof(PRLI)); 2427 elscmd = ELS_CMD_PRLI; 2428 } else if (local_nlp_type & NLP_FC4_NVME) { 2429 /* Payload is 4 + 20 = 24 x18 bytes. */ 2430 cmdsize = (sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli)); 2431 elscmd = ELS_CMD_NVMEPRLI; 2432 } else { 2433 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2434 "3083 Unknown FC_TYPE x%x ndlp x%06x\n", 2435 ndlp->nlp_fc4_type, ndlp->nlp_DID); 2436 return 1; 2437 } 2438 2439 /* SLI3 ports don't support NVME. If this rport is a strict NVME 2440 * FC4 type, implicitly LOGO. 
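 * No PRLI is built in that case; the node is handed to the discovery
 * state machine with NLP_EVT_DEVICE_RM.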
2441 */ 2442 if (phba->sli_rev == LPFC_SLI_REV3 && 2443 ndlp->nlp_fc4_type == NLP_FC4_NVME) { 2444 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2445 "3088 Rport fc4 type 0x%x not supported by SLI3 adapter\n", 2446 ndlp->nlp_type); 2447 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 2448 return 1; 2449 } 2450 2451 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2452 ndlp->nlp_DID, elscmd); 2453 if (!elsiocb) 2454 return 1; 2455 2456 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2457 2458 /* For PRLI request, remainder of payload is service parameters */ 2459 memset(pcmd, 0, cmdsize); 2460 2461 if (local_nlp_type & NLP_FC4_FCP) { 2462 /* Remainder of payload is FCP PRLI parameter page. 2463 * Note: this data structure is defined as 2464 * BE/LE in the structure definition so no 2465 * byte swap call is made. 2466 */ 2467 *((uint32_t *)(pcmd)) = ELS_CMD_PRLI; 2468 pcmd += sizeof(uint32_t); 2469 npr = (PRLI *)pcmd; 2470 2471 /* 2472 * If our firmware version is 3.20 or later, 2473 * set the following bits for FC-TAPE support. 2474 */ 2475 if (phba->vpd.rev.feaLevelHigh >= 0x02) { 2476 npr->ConfmComplAllowed = 1; 2477 npr->Retry = 1; 2478 npr->TaskRetryIdReq = 1; 2479 } 2480 npr->estabImagePair = 1; 2481 npr->readXferRdyDis = 1; 2482 if (vport->cfg_first_burst_size) 2483 npr->writeXferRdyDis = 1; 2484 2485 /* For FCP support */ 2486 npr->prliType = PRLI_FCP_TYPE; 2487 npr->initiatorFunc = 1; 2488 elsiocb->iocb_flag |= LPFC_PRLI_FCP_REQ; 2489 2490 /* Remove FCP type - processed. */ 2491 local_nlp_type &= ~NLP_FC4_FCP; 2492 } else if (local_nlp_type & NLP_FC4_NVME) { 2493 /* Remainder of payload is NVME PRLI parameter page. 2494 * This data structure is the newer definition that 2495 * uses bf macros so a byte swap is required. 2496 */ 2497 *((uint32_t *)(pcmd)) = ELS_CMD_NVMEPRLI; 2498 pcmd += sizeof(uint32_t); 2499 npr_nvme = (struct lpfc_nvme_prli *)pcmd; 2500 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); 2501 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ 2502 if (phba->nsler) { 2503 bf_set(prli_nsler, npr_nvme, 1); 2504 bf_set(prli_conf, npr_nvme, 1); 2505 } 2506 2507 /* Only initiators request first burst. */ 2508 if ((phba->cfg_nvme_enable_fb) && 2509 !phba->nvmet_support) 2510 bf_set(prli_fba, npr_nvme, 1); 2511 2512 if (phba->nvmet_support) { 2513 bf_set(prli_tgt, npr_nvme, 1); 2514 bf_set(prli_disc, npr_nvme, 1); 2515 } else { 2516 bf_set(prli_init, npr_nvme, 1); 2517 bf_set(prli_conf, npr_nvme, 1); 2518 } 2519 2520 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); 2521 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); 2522 elsiocb->iocb_flag |= LPFC_PRLI_NVME_REQ; 2523 2524 /* Remove NVME type - processed. */ 2525 local_nlp_type &= ~NLP_FC4_NVME; 2526 } 2527 2528 phba->fc_stat.elsXmitPRLI++; 2529 elsiocb->iocb_cmpl = lpfc_cmpl_els_prli; 2530 spin_lock_irq(&ndlp->lock); 2531 ndlp->nlp_flag |= NLP_PRLI_SND; 2532 2533 /* The vport counters are used for lpfc_scan_finished, but 2534 * the ndlp is used to track outstanding PRLIs for different 2535 * FC4 types. 
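 * Both counters are bumped under the node lock together with NLP_PRLI_SND
 * and are undone in lpfc_cmpl_els_prli().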
2536 */ 2537 vport->fc_prli_sent++; 2538 ndlp->fc4_prli_sent++; 2539 spin_unlock_irq(&ndlp->lock); 2540 2541 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2542 "Issue PRLI: did:x%x refcnt %d", 2543 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2544 elsiocb->context1 = lpfc_nlp_get(ndlp); 2545 if (!elsiocb->context1) { 2546 lpfc_els_free_iocb(phba, elsiocb); 2547 goto err; 2548 } 2549 2550 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2551 if (rc == IOCB_ERROR) { 2552 lpfc_els_free_iocb(phba, elsiocb); 2553 lpfc_nlp_put(ndlp); 2554 goto err; 2555 } 2556 2557 2558 /* The driver supports 2 FC4 types. Make sure 2559 * a PRLI is issued for all types before exiting. 2560 */ 2561 if (phba->sli_rev == LPFC_SLI_REV4 && 2562 local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME)) 2563 goto send_next_prli; 2564 else 2565 return 0; 2566 2567 err: 2568 spin_lock_irq(&ndlp->lock); 2569 ndlp->nlp_flag &= ~NLP_PRLI_SND; 2570 spin_unlock_irq(&ndlp->lock); 2571 return 1; 2572 } 2573 2574 /** 2575 * lpfc_rscn_disc - Perform rscn discovery for a vport 2576 * @vport: pointer to a host virtual N_Port data structure. 2577 * 2578 * This routine performs Registration State Change Notification (RSCN) 2579 * discovery for a @vport. If the @vport's node port recovery count is not 2580 * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all 2581 * the nodes that need recovery. If none of the PLOGI were needed through 2582 * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be 2583 * invoked to check and handle possible more RSCN came in during the period 2584 * of processing the current ones. 2585 **/ 2586 static void 2587 lpfc_rscn_disc(struct lpfc_vport *vport) 2588 { 2589 lpfc_can_disctmo(vport); 2590 2591 /* RSCN discovery */ 2592 /* go thru NPR nodes and issue ELS PLOGIs */ 2593 if (vport->fc_npr_cnt) 2594 if (lpfc_els_disc_plogi(vport)) 2595 return; 2596 2597 lpfc_end_rscn(vport); 2598 } 2599 2600 /** 2601 * lpfc_adisc_done - Complete the adisc phase of discovery 2602 * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs. 2603 * 2604 * This function is called when the final ADISC is completed during discovery. 2605 * This function handles clearing link attention or issuing reg_vpi depending 2606 * on whether npiv is enabled. This function also kicks off the PLOGI phase of 2607 * discovery. 2608 * This function is called with no locks held. 2609 **/ 2610 static void 2611 lpfc_adisc_done(struct lpfc_vport *vport) 2612 { 2613 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2614 struct lpfc_hba *phba = vport->phba; 2615 2616 /* 2617 * For NPIV, cmpl_reg_vpi will set port_state to READY, 2618 * and continue discovery. 2619 */ 2620 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 2621 !(vport->fc_flag & FC_RSCN_MODE) && 2622 (phba->sli_rev < LPFC_SLI_REV4)) { 2623 2624 /* 2625 * If link is down, clear_la and reg_vpi will be done after 2626 * flogi following a link up event 2627 */ 2628 if (!lpfc_is_link_up(phba)) 2629 return; 2630 2631 /* The ADISCs are complete. Doesn't matter if they 2632 * succeeded or failed because the ADISC completion 2633 * routine guarantees to call the state machine and 2634 * the RPI is either unregistered (failed ADISC response) 2635 * or the RPI is still valid and the node is marked 2636 * mapped for a target. The exchanges should be in the 2637 * correct state. This code is specific to SLI3. 
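 * Ports that do not satisfy the NPIV/SLI3 check above are handled by the
 * code that follows this block instead.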
2638 */ 2639 lpfc_issue_clear_la(phba, vport); 2640 lpfc_issue_reg_vpi(phba, vport); 2641 return; 2642 } 2643 /* 2644 * For SLI2, we need to set port_state to READY 2645 * and continue discovery. 2646 */ 2647 if (vport->port_state < LPFC_VPORT_READY) { 2648 /* If we get here, there is nothing to ADISC */ 2649 lpfc_issue_clear_la(phba, vport); 2650 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) { 2651 vport->num_disc_nodes = 0; 2652 /* go thru NPR list, issue ELS PLOGIs */ 2653 if (vport->fc_npr_cnt) 2654 lpfc_els_disc_plogi(vport); 2655 if (!vport->num_disc_nodes) { 2656 spin_lock_irq(shost->host_lock); 2657 vport->fc_flag &= ~FC_NDISC_ACTIVE; 2658 spin_unlock_irq(shost->host_lock); 2659 lpfc_can_disctmo(vport); 2660 lpfc_end_rscn(vport); 2661 } 2662 } 2663 vport->port_state = LPFC_VPORT_READY; 2664 } else 2665 lpfc_rscn_disc(vport); 2666 } 2667 2668 /** 2669 * lpfc_more_adisc - Issue more adisc as needed 2670 * @vport: pointer to a host virtual N_Port data structure. 2671 * 2672 * This routine determines whether there are more ndlps on a @vport 2673 * node list need to have Address Discover (ADISC) issued. If so, it will 2674 * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's 2675 * remaining nodes which need to have ADISC sent. 2676 **/ 2677 void 2678 lpfc_more_adisc(struct lpfc_vport *vport) 2679 { 2680 if (vport->num_disc_nodes) 2681 vport->num_disc_nodes--; 2682 /* Continue discovery with <num_disc_nodes> ADISCs to go */ 2683 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2684 "0210 Continue discovery with %d ADISCs to go " 2685 "Data: x%x x%x x%x\n", 2686 vport->num_disc_nodes, vport->fc_adisc_cnt, 2687 vport->fc_flag, vport->port_state); 2688 /* Check to see if there are more ADISCs to be sent */ 2689 if (vport->fc_flag & FC_NLP_MORE) { 2690 lpfc_set_disctmo(vport); 2691 /* go thru NPR nodes and issue any remaining ELS ADISCs */ 2692 lpfc_els_disc_adisc(vport); 2693 } 2694 if (!vport->num_disc_nodes) 2695 lpfc_adisc_done(vport); 2696 return; 2697 } 2698 2699 /** 2700 * lpfc_cmpl_els_adisc - Completion callback function for adisc 2701 * @phba: pointer to lpfc hba data structure. 2702 * @cmdiocb: pointer to lpfc command iocb data structure. 2703 * @rspiocb: pointer to lpfc response iocb data structure. 2704 * 2705 * This routine is the completion function for issuing the Address Discover 2706 * (ADISC) command. It first checks to see whether link went down during 2707 * the discovery process. If so, the node will be marked as node port 2708 * recovery for issuing discover IOCB by the link attention handler and 2709 * exit. Otherwise, the response status is checked. If error was reported 2710 * in the response status, the ADISC command shall be retried by invoking 2711 * the lpfc_els_retry() routine. Otherwise, if no error was reported in 2712 * the response status, the state machine is invoked to set transition 2713 * with respect to NLP_EVT_CMPL_ADISC event. 
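 * When the node is retired in the error path, NLP_NPR_2B_DISC is cleared
 * first; for nodes that were part of discovery, lpfc_more_adisc() is then
 * called to drive the next discovery step.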
2714 **/ 2715 static void 2716 lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2717 struct lpfc_iocbq *rspiocb) 2718 { 2719 struct lpfc_vport *vport = cmdiocb->vport; 2720 IOCB_t *irsp; 2721 struct lpfc_nodelist *ndlp; 2722 int disc; 2723 2724 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2725 cmdiocb->context_un.rsp_iocb = rspiocb; 2726 2727 irsp = &(rspiocb->iocb); 2728 ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 2729 2730 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2731 "ADISC cmpl: status:x%x/x%x did:x%x", 2732 irsp->ulpStatus, irsp->un.ulpWord[4], 2733 ndlp->nlp_DID); 2734 2735 /* Since ndlp can be freed in the disc state machine, note if this node 2736 * is being used during discovery. 2737 */ 2738 spin_lock_irq(&ndlp->lock); 2739 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 2740 ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC); 2741 spin_unlock_irq(&ndlp->lock); 2742 /* ADISC completes to NPort <nlp_DID> */ 2743 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2744 "0104 ADISC completes to NPort x%x " 2745 "Data: x%x x%x x%x x%x x%x\n", 2746 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 2747 irsp->ulpTimeout, disc, vport->num_disc_nodes); 2748 /* Check to see if link went down during discovery */ 2749 if (lpfc_els_chk_latt(vport)) { 2750 spin_lock_irq(&ndlp->lock); 2751 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2752 spin_unlock_irq(&ndlp->lock); 2753 goto out; 2754 } 2755 2756 if (irsp->ulpStatus) { 2757 /* Check for retry */ 2758 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2759 /* ELS command is being retried */ 2760 if (disc) { 2761 spin_lock_irq(&ndlp->lock); 2762 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2763 spin_unlock_irq(&ndlp->lock); 2764 lpfc_set_disctmo(vport); 2765 } 2766 goto out; 2767 } 2768 /* ADISC failed */ 2769 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2770 "2755 ADISC failure DID:%06X Status:x%x/x%x\n", 2771 ndlp->nlp_DID, irsp->ulpStatus, 2772 irsp->un.ulpWord[4]); 2773 2774 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2775 NLP_EVT_CMPL_ADISC); 2776 2777 /* As long as this node is not registered with the SCSI or NVMe 2778 * transport, it is no longer an active node. Otherwise 2779 * devloss handles the final cleanup. 2780 */ 2781 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { 2782 spin_lock_irq(&ndlp->lock); 2783 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2784 spin_unlock_irq(&ndlp->lock); 2785 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2786 NLP_EVT_DEVICE_RM); 2787 } 2788 } else 2789 /* Good status, call state machine */ 2790 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2791 NLP_EVT_CMPL_ADISC); 2792 2793 /* Check to see if there are more ADISCs to be sent */ 2794 if (disc && vport->num_disc_nodes) 2795 lpfc_more_adisc(vport); 2796 out: 2797 lpfc_els_free_iocb(phba, cmdiocb); 2798 lpfc_nlp_put(ndlp); 2799 return; 2800 } 2801 2802 /** 2803 * lpfc_issue_els_adisc - Issue an address discover iocb to an node on a vport 2804 * @vport: pointer to a virtual N_Port data structure. 2805 * @ndlp: pointer to a node-list data structure. 2806 * @retry: number of retries to the command IOCB. 2807 * 2808 * This routine issues an Address Discover (ADISC) for an @ndlp on a 2809 * @vport. It prepares the payload of the ADISC ELS command, updates the 2810 * and states of the ndlp, and invokes the lpfc_sli_issue_iocb() routine 2811 * to issue the ADISC ELS command. 
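 * The ADISC payload carries the vport's port name, node name and N_Port ID
 * so the remote port can validate the existing login.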
2812 * 2813 * Note that the ndlp reference count will be incremented by 1 for holding the 2814 * ndlp and the reference to ndlp will be stored into the context1 field of 2815 * the IOCB for the completion callback function to the ADISC ELS command. 2816 * 2817 * Return code 2818 * 0 - successfully issued adisc 2819 * 1 - failed to issue adisc 2820 **/ 2821 int 2822 lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2823 uint8_t retry) 2824 { 2825 int rc = 0; 2826 struct lpfc_hba *phba = vport->phba; 2827 ADISC *ap; 2828 struct lpfc_iocbq *elsiocb; 2829 uint8_t *pcmd; 2830 uint16_t cmdsize; 2831 2832 cmdsize = (sizeof(uint32_t) + sizeof(ADISC)); 2833 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2834 ndlp->nlp_DID, ELS_CMD_ADISC); 2835 if (!elsiocb) 2836 return 1; 2837 2838 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2839 2840 /* For ADISC request, remainder of payload is service parameters */ 2841 *((uint32_t *) (pcmd)) = ELS_CMD_ADISC; 2842 pcmd += sizeof(uint32_t); 2843 2844 /* Fill in ADISC payload */ 2845 ap = (ADISC *) pcmd; 2846 ap->hardAL_PA = phba->fc_pref_ALPA; 2847 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 2848 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 2849 ap->DID = be32_to_cpu(vport->fc_myDID); 2850 2851 phba->fc_stat.elsXmitADISC++; 2852 elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc; 2853 spin_lock_irq(&ndlp->lock); 2854 ndlp->nlp_flag |= NLP_ADISC_SND; 2855 spin_unlock_irq(&ndlp->lock); 2856 elsiocb->context1 = lpfc_nlp_get(ndlp); 2857 if (!elsiocb->context1) { 2858 lpfc_els_free_iocb(phba, elsiocb); 2859 goto err; 2860 } 2861 2862 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2863 "Issue ADISC: did:x%x refcnt %d", 2864 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2865 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2866 if (rc == IOCB_ERROR) { 2867 lpfc_els_free_iocb(phba, elsiocb); 2868 lpfc_nlp_put(ndlp); 2869 goto err; 2870 } 2871 2872 return 0; 2873 2874 err: 2875 spin_lock_irq(&ndlp->lock); 2876 ndlp->nlp_flag &= ~NLP_ADISC_SND; 2877 spin_unlock_irq(&ndlp->lock); 2878 return 1; 2879 } 2880 2881 /** 2882 * lpfc_cmpl_els_logo - Completion callback function for logo 2883 * @phba: pointer to lpfc hba data structure. 2884 * @cmdiocb: pointer to lpfc command iocb data structure. 2885 * @rspiocb: pointer to lpfc response iocb data structure. 2886 * 2887 * This routine is the completion function for issuing the ELS Logout (LOGO) 2888 * command. If no error status was reported from the LOGO response, the 2889 * state machine of the associated ndlp shall be invoked for transition with 2890 * respect to NLP_EVT_CMPL_LOGO event. 
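 * The LOGO is never retried on failure; depending on the node type, the
 * routine either restarts discovery to recover the login or removes the
 * node if it is not bound to the SCSI or NVMe transport.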
2891 **/ 2892 static void 2893 lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2894 struct lpfc_iocbq *rspiocb) 2895 { 2896 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 2897 struct lpfc_vport *vport = ndlp->vport; 2898 IOCB_t *irsp; 2899 unsigned long flags; 2900 uint32_t skip_recovery = 0; 2901 int wake_up_waiter = 0; 2902 2903 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2904 cmdiocb->context_un.rsp_iocb = rspiocb; 2905 2906 irsp = &(rspiocb->iocb); 2907 spin_lock_irq(&ndlp->lock); 2908 ndlp->nlp_flag &= ~NLP_LOGO_SND; 2909 if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) { 2910 wake_up_waiter = 1; 2911 ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO; 2912 } 2913 spin_unlock_irq(&ndlp->lock); 2914 2915 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2916 "LOGO cmpl: status:x%x/x%x did:x%x", 2917 irsp->ulpStatus, irsp->un.ulpWord[4], 2918 ndlp->nlp_DID); 2919 2920 /* LOGO completes to NPort <nlp_DID> */ 2921 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2922 "0105 LOGO completes to NPort x%x " 2923 "refcnt %d nflags x%x Data: x%x x%x x%x x%x\n", 2924 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag, 2925 irsp->ulpStatus, irsp->un.ulpWord[4], 2926 irsp->ulpTimeout, vport->num_disc_nodes); 2927 2928 if (lpfc_els_chk_latt(vport)) { 2929 skip_recovery = 1; 2930 goto out; 2931 } 2932 2933 /* The LOGO will not be retried on failure. A LOGO was 2934 * issued to the remote rport and a ACC or RJT or no Answer are 2935 * all acceptable. Note the failure and move forward with 2936 * discovery. The PLOGI will retry. 2937 */ 2938 if (irsp->ulpStatus) { 2939 /* LOGO failed */ 2940 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2941 "2756 LOGO failure, No Retry DID:%06X Status:x%x/x%x\n", 2942 ndlp->nlp_DID, irsp->ulpStatus, 2943 irsp->un.ulpWord[4]); 2944 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2945 if (lpfc_error_lost_link(irsp)) { 2946 skip_recovery = 1; 2947 goto out; 2948 } 2949 } 2950 2951 /* Call state machine. This will unregister the rpi if needed. */ 2952 lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO); 2953 2954 /* The driver sets this flag for an NPIV instance that doesn't want to 2955 * log into the remote port. 2956 */ 2957 if (ndlp->nlp_flag & NLP_TARGET_REMOVE) { 2958 spin_lock_irq(&ndlp->lock); 2959 if (phba->sli_rev == LPFC_SLI_REV4) 2960 ndlp->nlp_flag |= NLP_RELEASE_RPI; 2961 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2962 spin_unlock_irq(&ndlp->lock); 2963 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2964 NLP_EVT_DEVICE_RM); 2965 lpfc_els_free_iocb(phba, cmdiocb); 2966 lpfc_nlp_put(ndlp); 2967 2968 /* Presume the node was released. */ 2969 return; 2970 } 2971 2972 out: 2973 /* Driver is done with the IO. */ 2974 lpfc_els_free_iocb(phba, cmdiocb); 2975 lpfc_nlp_put(ndlp); 2976 2977 /* At this point, the LOGO processing is complete. NOTE: For a 2978 * pt2pt topology, we are assuming the NPortID will only change 2979 * on link up processing. For a LOGO / PLOGI initiated by the 2980 * Initiator, we are assuming the NPortID is not going to change. 2981 */ 2982 2983 if (wake_up_waiter && ndlp->logo_waitq) 2984 wake_up(ndlp->logo_waitq); 2985 /* 2986 * If the node is a target, the handling attempts to recover the port. 2987 * For any other port type, the rpi is unregistered as an implicit 2988 * LOGO. 
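 * Recovery marks the node NLP_NPR_2B_DISC and restarts discovery via
 * lpfc_disc_start().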
2989 */ 2990 if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) && 2991 skip_recovery == 0) { 2992 lpfc_cancel_retry_delay_tmo(vport, ndlp); 2993 spin_lock_irqsave(&ndlp->lock, flags); 2994 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2995 spin_unlock_irqrestore(&ndlp->lock, flags); 2996 2997 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2998 "3187 LOGO completes to NPort x%x: Start " 2999 "Recovery Data: x%x x%x x%x x%x\n", 3000 ndlp->nlp_DID, irsp->ulpStatus, 3001 irsp->un.ulpWord[4], irsp->ulpTimeout, 3002 vport->num_disc_nodes); 3003 lpfc_disc_start(vport); 3004 return; 3005 } 3006 3007 /* Cleanup path for failed REG_RPI handling. If REG_RPI fails, the 3008 * driver sends a LOGO to the rport to clean up. For fabric and 3009 * initiator ports, clean up the node as long as the node is not 3010 * registered with the transport. 3011 */ 3012 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { 3013 spin_lock_irq(&ndlp->lock); 3014 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 3015 spin_unlock_irq(&ndlp->lock); 3016 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 3017 NLP_EVT_DEVICE_RM); 3018 } 3019 } 3020 3021 /** 3022 * lpfc_issue_els_logo - Issue a logo to a node on a vport 3023 * @vport: pointer to a virtual N_Port data structure. 3024 * @ndlp: pointer to a node-list data structure. 3025 * @retry: number of retries to the command IOCB. 3026 * 3027 * This routine constructs and issues an ELS Logout (LOGO) iocb command 3028 * to a remote node, referred to by an @ndlp on a @vport. It constructs the 3029 * payload of the IOCB, properly sets up the @ndlp state, and invokes the 3030 * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command. 3031 * 3032 * Note that the ndlp reference count will be incremented by 1 for holding the 3033 * ndlp and the reference to ndlp will be stored into the context1 field of 3034 * the IOCB for the completion callback function to the LOGO ELS command.
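 * If a LOGO is already pending on the node (NLP_LOGO_SND is set), the
 * routine returns 0 without issuing another one. An illustrative call
 * sequence, per the expectation below that the RPI is unregistered first,
 * would be:
 *
 *     lpfc_unreg_rpi(vport, ndlp);
 *     lpfc_issue_els_logo(vport, ndlp, 0);
 *
 * with lpfc_cmpl_els_logo() then driving any recovery or node removal.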
3035 * 3036 * Callers of this routine are expected to unregister the RPI first 3037 * 3038 * Return code 3039 * 0 - successfully issued logo 3040 * 1 - failed to issue logo 3041 **/ 3042 int 3043 lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 3044 uint8_t retry) 3045 { 3046 struct lpfc_hba *phba = vport->phba; 3047 struct lpfc_iocbq *elsiocb; 3048 uint8_t *pcmd; 3049 uint16_t cmdsize; 3050 int rc; 3051 3052 spin_lock_irq(&ndlp->lock); 3053 if (ndlp->nlp_flag & NLP_LOGO_SND) { 3054 spin_unlock_irq(&ndlp->lock); 3055 return 0; 3056 } 3057 spin_unlock_irq(&ndlp->lock); 3058 3059 cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name); 3060 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3061 ndlp->nlp_DID, ELS_CMD_LOGO); 3062 if (!elsiocb) 3063 return 1; 3064 3065 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3066 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 3067 pcmd += sizeof(uint32_t); 3068 3069 /* Fill in LOGO payload */ 3070 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 3071 pcmd += sizeof(uint32_t); 3072 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 3073 3074 phba->fc_stat.elsXmitLOGO++; 3075 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo; 3076 spin_lock_irq(&ndlp->lock); 3077 ndlp->nlp_flag |= NLP_LOGO_SND; 3078 ndlp->nlp_flag &= ~NLP_ISSUE_LOGO; 3079 spin_unlock_irq(&ndlp->lock); 3080 elsiocb->context1 = lpfc_nlp_get(ndlp); 3081 if (!elsiocb->context1) { 3082 lpfc_els_free_iocb(phba, elsiocb); 3083 goto err; 3084 } 3085 3086 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3087 "Issue LOGO: did:x%x refcnt %d", 3088 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3089 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3090 if (rc == IOCB_ERROR) { 3091 lpfc_els_free_iocb(phba, elsiocb); 3092 lpfc_nlp_put(ndlp); 3093 goto err; 3094 } 3095 3096 spin_lock_irq(&ndlp->lock); 3097 ndlp->nlp_prev_state = ndlp->nlp_state; 3098 spin_unlock_irq(&ndlp->lock); 3099 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 3100 return 0; 3101 3102 err: 3103 spin_lock_irq(&ndlp->lock); 3104 ndlp->nlp_flag &= ~NLP_LOGO_SND; 3105 spin_unlock_irq(&ndlp->lock); 3106 return 1; 3107 } 3108 3109 /** 3110 * lpfc_cmpl_els_cmd - Completion callback function for generic els command 3111 * @phba: pointer to lpfc hba data structure. 3112 * @cmdiocb: pointer to lpfc command iocb data structure. 3113 * @rspiocb: pointer to lpfc response iocb data structure. 3114 * 3115 * This routine is a generic completion callback function for ELS commands. 3116 * Specifically, it is the callback function which does not need to perform 3117 * any command specific operations. It is currently used by the ELS command 3118 * issuing routines for RSCN, lpfc_issue_els_rscn, and the ELS Fibre Channel 3119 * Address Resolution Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). 3120 * Other than certain debug loggings, this callback function simply invokes the 3121 * lpfc_els_chk_latt() routine to check whether link went down during the 3122 * discovery process. 
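 * The command IOCB and the ndlp reference held in context1 are released
 * unconditionally on completion.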
3123 **/ 3124 static void 3125 lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3126 struct lpfc_iocbq *rspiocb) 3127 { 3128 struct lpfc_vport *vport = cmdiocb->vport; 3129 struct lpfc_nodelist *free_ndlp; 3130 IOCB_t *irsp; 3131 3132 irsp = &rspiocb->iocb; 3133 3134 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3135 "ELS cmd cmpl: status:x%x/x%x did:x%x", 3136 irsp->ulpStatus, irsp->un.ulpWord[4], 3137 irsp->un.elsreq64.remoteID); 3138 3139 /* ELS cmd tag <ulpIoTag> completes */ 3140 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3141 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n", 3142 irsp->ulpIoTag, irsp->ulpStatus, 3143 irsp->un.ulpWord[4], irsp->ulpTimeout); 3144 3145 /* Check to see if link went down during discovery */ 3146 lpfc_els_chk_latt(vport); 3147 3148 free_ndlp = (struct lpfc_nodelist *)cmdiocb->context1; 3149 3150 lpfc_els_free_iocb(phba, cmdiocb); 3151 lpfc_nlp_put(free_ndlp); 3152 } 3153 3154 /** 3155 * lpfc_reg_fab_ctrl_node - RPI register the fabric controller node. 3156 * @vport: pointer to lpfc_vport data structure. 3157 * @fc_ndlp: pointer to the fabric controller (0xfffffd) node. 3158 * 3159 * This routine registers the rpi assigned to the fabric controller 3160 * NPort_ID (0xfffffd) with the port and moves the node to UNMAPPED 3161 * state triggering a registration with the SCSI transport. 3162 * 3163 * This routine is single out because the fabric controller node 3164 * does not receive a PLOGI. This routine is consumed by the 3165 * SCR and RDF ELS commands. Callers are expected to qualify 3166 * with SLI4 first. 3167 **/ 3168 static int 3169 lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp) 3170 { 3171 int rc = 0; 3172 struct lpfc_hba *phba = vport->phba; 3173 struct lpfc_nodelist *ns_ndlp; 3174 LPFC_MBOXQ_t *mbox; 3175 struct lpfc_dmabuf *mp; 3176 3177 if (fc_ndlp->nlp_flag & NLP_RPI_REGISTERED) 3178 return rc; 3179 3180 ns_ndlp = lpfc_findnode_did(vport, NameServer_DID); 3181 if (!ns_ndlp) 3182 return -ENODEV; 3183 3184 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 3185 "0935 %s: Reg FC RPI x%x on FC DID x%x NSSte: x%x\n", 3186 __func__, fc_ndlp->nlp_rpi, fc_ndlp->nlp_DID, 3187 ns_ndlp->nlp_state); 3188 if (ns_ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 3189 return -ENODEV; 3190 3191 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3192 if (!mbox) { 3193 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3194 "0936 %s: no memory for reg_login " 3195 "Data: x%x x%x x%x x%x\n", __func__, 3196 fc_ndlp->nlp_DID, fc_ndlp->nlp_state, 3197 fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi); 3198 return -ENOMEM; 3199 } 3200 rc = lpfc_reg_rpi(phba, vport->vpi, fc_ndlp->nlp_DID, 3201 (u8 *)&vport->fc_sparam, mbox, fc_ndlp->nlp_rpi); 3202 if (rc) { 3203 rc = -EACCES; 3204 goto out; 3205 } 3206 3207 fc_ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; 3208 mbox->mbox_cmpl = lpfc_mbx_cmpl_fc_reg_login; 3209 mbox->ctx_ndlp = lpfc_nlp_get(fc_ndlp); 3210 if (!mbox->ctx_ndlp) { 3211 rc = -ENOMEM; 3212 goto out_mem; 3213 } 3214 3215 mbox->vport = vport; 3216 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 3217 if (rc == MBX_NOT_FINISHED) { 3218 rc = -ENODEV; 3219 lpfc_nlp_put(fc_ndlp); 3220 goto out_mem; 3221 } 3222 /* Success path. Exit. 
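 * The node is left in NLP_STE_REG_LOGIN_ISSUE until the REG_LOGIN
 * mailbox completes.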
*/ 3223 lpfc_nlp_set_state(vport, fc_ndlp, 3224 NLP_STE_REG_LOGIN_ISSUE); 3225 return 0; 3226 3227 out_mem: 3228 fc_ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 3229 mp = (struct lpfc_dmabuf *)mbox->ctx_buf; 3230 lpfc_mbuf_free(phba, mp->virt, mp->phys); 3231 kfree(mp); 3232 3233 out: 3234 mempool_free(mbox, phba->mbox_mem_pool); 3235 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3236 "0938 %s: failed to format reg_login " 3237 "Data: x%x x%x x%x x%x\n", __func__, 3238 fc_ndlp->nlp_DID, fc_ndlp->nlp_state, 3239 fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi); 3240 return rc; 3241 } 3242 3243 /** 3244 * lpfc_cmpl_els_disc_cmd - Completion callback function for Discovery ELS cmd 3245 * @phba: pointer to lpfc hba data structure. 3246 * @cmdiocb: pointer to lpfc command iocb data structure. 3247 * @rspiocb: pointer to lpfc response iocb data structure. 3248 * 3249 * This routine is a generic completion callback function for Discovery ELS cmd. 3250 * Currently used by the ELS command issuing routines for the ELS State Change 3251 * Request (SCR), lpfc_issue_els_scr() and the ELS RDF, lpfc_issue_els_rdf(). 3252 * These commands will be retried once only for ELS timeout errors. 3253 **/ 3254 static void 3255 lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3256 struct lpfc_iocbq *rspiocb) 3257 { 3258 struct lpfc_vport *vport = cmdiocb->vport; 3259 IOCB_t *irsp; 3260 struct lpfc_els_rdf_rsp *prdf; 3261 struct lpfc_dmabuf *pcmd, *prsp; 3262 u32 *pdata; 3263 u32 cmd; 3264 struct lpfc_nodelist *ndlp = cmdiocb->context1; 3265 3266 irsp = &rspiocb->iocb; 3267 3268 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3269 "ELS cmd cmpl: status:x%x/x%x did:x%x", 3270 irsp->ulpStatus, irsp->un.ulpWord[4], 3271 irsp->un.elsreq64.remoteID); 3272 /* ELS cmd tag <ulpIoTag> completes */ 3273 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3274 "0217 ELS cmd tag x%x completes Data: x%x x%x x%x " 3275 "x%x\n", 3276 irsp->ulpIoTag, irsp->ulpStatus, 3277 irsp->un.ulpWord[4], irsp->ulpTimeout, 3278 cmdiocb->retry); 3279 3280 pcmd = (struct lpfc_dmabuf *)cmdiocb->context2; 3281 if (!pcmd) 3282 goto out; 3283 3284 pdata = (u32 *)pcmd->virt; 3285 if (!pdata) 3286 goto out; 3287 cmd = *pdata; 3288 3289 /* Only 1 retry for ELS Timeout only */ 3290 if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT && 3291 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == 3292 IOERR_SEQUENCE_TIMEOUT)) { 3293 cmdiocb->retry++; 3294 if (cmdiocb->retry <= 1) { 3295 switch (cmd) { 3296 case ELS_CMD_SCR: 3297 lpfc_issue_els_scr(vport, cmdiocb->retry); 3298 break; 3299 case ELS_CMD_EDC: 3300 lpfc_issue_els_edc(vport, cmdiocb->retry); 3301 break; 3302 case ELS_CMD_RDF: 3303 cmdiocb->context1 = NULL; /* save ndlp refcnt */ 3304 lpfc_issue_els_rdf(vport, cmdiocb->retry); 3305 break; 3306 } 3307 goto out; 3308 } 3309 phba->fc_stat.elsRetryExceeded++; 3310 } 3311 if (cmd == ELS_CMD_EDC) { 3312 /* must be called before checking uplStatus and returning */ 3313 lpfc_cmpl_els_edc(phba, cmdiocb, rspiocb); 3314 return; 3315 } 3316 if (irsp->ulpStatus) { 3317 /* ELS discovery cmd completes with error */ 3318 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 3319 "4203 ELS cmd x%x error: x%x x%X\n", cmd, 3320 irsp->ulpStatus, irsp->un.ulpWord[4]); 3321 goto out; 3322 } 3323 3324 /* The RDF response doesn't have any impact on the running driver 3325 * but the notification descriptors are dumped here for support. 
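 * Each granted descriptor tag is logged along with the currently
 * registered congestion signal and FPIN settings.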
3326 */ 3327 if (cmd == ELS_CMD_RDF) { 3328 int i; 3329 3330 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 3331 if (!prsp) 3332 goto out; 3333 3334 prdf = (struct lpfc_els_rdf_rsp *)prsp->virt; 3335 if (!prdf) 3336 goto out; 3337 3338 for (i = 0; i < ELS_RDF_REG_TAG_CNT && 3339 i < be32_to_cpu(prdf->reg_d1.reg_desc.count); i++) 3340 lpfc_printf_vlog(vport, KERN_INFO, 3341 LOG_ELS | LOG_CGN_MGMT, 3342 "4677 Fabric RDF Notification Grant " 3343 "Data: 0x%08x Reg: %x %x\n", 3344 be32_to_cpu( 3345 prdf->reg_d1.desc_tags[i]), 3346 phba->cgn_reg_signal, 3347 phba->cgn_reg_fpin); 3348 } 3349 3350 out: 3351 /* Check to see if link went down during discovery */ 3352 lpfc_els_chk_latt(vport); 3353 lpfc_els_free_iocb(phba, cmdiocb); 3354 lpfc_nlp_put(ndlp); 3355 return; 3356 } 3357 3358 /** 3359 * lpfc_issue_els_scr - Issue a scr to an node on a vport 3360 * @vport: pointer to a host virtual N_Port data structure. 3361 * @retry: retry counter for the command IOCB. 3362 * 3363 * This routine issues a State Change Request (SCR) to a fabric node 3364 * on a @vport. The remote node is Fabric Controller (0xfffffd). It 3365 * first search the @vport node list to find the matching ndlp. If no such 3366 * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An 3367 * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb() 3368 * routine is invoked to send the SCR IOCB. 3369 * 3370 * Note that the ndlp reference count will be incremented by 1 for holding the 3371 * ndlp and the reference to ndlp will be stored into the context1 field of 3372 * the IOCB for the completion callback function to the SCR ELS command. 3373 * 3374 * Return code 3375 * 0 - Successfully issued scr command 3376 * 1 - Failed to issue scr command 3377 **/ 3378 int 3379 lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry) 3380 { 3381 int rc = 0; 3382 struct lpfc_hba *phba = vport->phba; 3383 struct lpfc_iocbq *elsiocb; 3384 uint8_t *pcmd; 3385 uint16_t cmdsize; 3386 struct lpfc_nodelist *ndlp; 3387 3388 cmdsize = (sizeof(uint32_t) + sizeof(SCR)); 3389 3390 ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID); 3391 if (!ndlp) { 3392 ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID); 3393 if (!ndlp) 3394 return 1; 3395 lpfc_enqueue_node(vport, ndlp); 3396 } 3397 3398 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3399 ndlp->nlp_DID, ELS_CMD_SCR); 3400 if (!elsiocb) 3401 return 1; 3402 3403 if (phba->sli_rev == LPFC_SLI_REV4) { 3404 rc = lpfc_reg_fab_ctrl_node(vport, ndlp); 3405 if (rc) { 3406 lpfc_els_free_iocb(phba, elsiocb); 3407 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3408 "0937 %s: Failed to reg fc node, rc %d\n", 3409 __func__, rc); 3410 return 1; 3411 } 3412 } 3413 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3414 3415 *((uint32_t *) (pcmd)) = ELS_CMD_SCR; 3416 pcmd += sizeof(uint32_t); 3417 3418 /* For SCR, remainder of payload is SCR parameter page */ 3419 memset(pcmd, 0, sizeof(SCR)); 3420 ((SCR *) pcmd)->Function = SCR_FUNC_FULL; 3421 3422 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3423 "Issue SCR: did:x%x", 3424 ndlp->nlp_DID, 0, 0); 3425 3426 phba->fc_stat.elsXmitSCR++; 3427 elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd; 3428 elsiocb->context1 = lpfc_nlp_get(ndlp); 3429 if (!elsiocb->context1) { 3430 lpfc_els_free_iocb(phba, elsiocb); 3431 return 1; 3432 } 3433 3434 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3435 "Issue SCR: did:x%x refcnt %d", 3436 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3437 3438 rc = lpfc_sli_issue_iocb(phba, 
LPFC_ELS_RING, elsiocb, 0); 3439 if (rc == IOCB_ERROR) { 3440 lpfc_els_free_iocb(phba, elsiocb); 3441 lpfc_nlp_put(ndlp); 3442 return 1; 3443 } 3444 3445 return 0; 3446 } 3447 3448 /** 3449 * lpfc_issue_els_rscn - Issue an RSCN to the Fabric Controller (Fabric) 3450 * or the other nport (pt2pt). 3451 * @vport: pointer to a host virtual N_Port data structure. 3452 * @retry: number of retries to the command IOCB. 3453 * 3454 * This routine issues a RSCN to the Fabric Controller (DID 0xFFFFFD) 3455 * when connected to a fabric, or to the remote port when connected 3456 * in point-to-point mode. When sent to the Fabric Controller, it will 3457 * replay the RSCN to registered recipients. 3458 * 3459 * Note that the ndlp reference count will be incremented by 1 for holding the 3460 * ndlp and the reference to ndlp will be stored into the context1 field of 3461 * the IOCB for the completion callback function to the RSCN ELS command. 3462 * 3463 * Return code 3464 * 0 - Successfully issued RSCN command 3465 * 1 - Failed to issue RSCN command 3466 **/ 3467 int 3468 lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry) 3469 { 3470 int rc = 0; 3471 struct lpfc_hba *phba = vport->phba; 3472 struct lpfc_iocbq *elsiocb; 3473 struct lpfc_nodelist *ndlp; 3474 struct { 3475 struct fc_els_rscn rscn; 3476 struct fc_els_rscn_page portid; 3477 } *event; 3478 uint32_t nportid; 3479 uint16_t cmdsize = sizeof(*event); 3480 3481 /* Not supported for private loop */ 3482 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP && 3483 !(vport->fc_flag & FC_PUBLIC_LOOP)) 3484 return 1; 3485 3486 if (vport->fc_flag & FC_PT2PT) { 3487 /* find any mapped nport - that would be the other nport */ 3488 ndlp = lpfc_findnode_mapped(vport); 3489 if (!ndlp) 3490 return 1; 3491 } else { 3492 nportid = FC_FID_FCTRL; 3493 /* find the fabric controller node */ 3494 ndlp = lpfc_findnode_did(vport, nportid); 3495 if (!ndlp) { 3496 /* if one didn't exist, make one */ 3497 ndlp = lpfc_nlp_init(vport, nportid); 3498 if (!ndlp) 3499 return 1; 3500 lpfc_enqueue_node(vport, ndlp); 3501 } 3502 } 3503 3504 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3505 ndlp->nlp_DID, ELS_CMD_RSCN_XMT); 3506 3507 if (!elsiocb) 3508 return 1; 3509 3510 event = ((struct lpfc_dmabuf *)elsiocb->context2)->virt; 3511 3512 event->rscn.rscn_cmd = ELS_RSCN; 3513 event->rscn.rscn_page_len = sizeof(struct fc_els_rscn_page); 3514 event->rscn.rscn_plen = cpu_to_be16(cmdsize); 3515 3516 nportid = vport->fc_myDID; 3517 /* appears that page flags must be 0 for fabric to broadcast RSCN */ 3518 event->portid.rscn_page_flags = 0; 3519 event->portid.rscn_fid[0] = (nportid & 0x00FF0000) >> 16; 3520 event->portid.rscn_fid[1] = (nportid & 0x0000FF00) >> 8; 3521 event->portid.rscn_fid[2] = nportid & 0x000000FF; 3522 3523 phba->fc_stat.elsXmitRSCN++; 3524 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; 3525 elsiocb->context1 = lpfc_nlp_get(ndlp); 3526 if (!elsiocb->context1) { 3527 lpfc_els_free_iocb(phba, elsiocb); 3528 return 1; 3529 } 3530 3531 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3532 "Issue RSCN: did:x%x", 3533 ndlp->nlp_DID, 0, 0); 3534 3535 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3536 if (rc == IOCB_ERROR) { 3537 lpfc_els_free_iocb(phba, elsiocb); 3538 lpfc_nlp_put(ndlp); 3539 return 1; 3540 } 3541 3542 return 0; 3543 } 3544 3545 /** 3546 * lpfc_issue_els_farpr - Issue a farp to an node on a vport 3547 * @vport: pointer to a host virtual N_Port data structure. 3548 * @nportid: N_Port identifier to the remote node. 
3549 * @retry: number of retries to the command IOCB. 3550 * 3551 * This routine issues a Fibre Channel Address Resolution Response 3552 * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid) 3553 * is passed into the function. It first search the @vport node list to find 3554 * the matching ndlp. If no such ndlp is found, a new ndlp shall be created 3555 * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the 3556 * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command. 3557 * 3558 * Note that the ndlp reference count will be incremented by 1 for holding the 3559 * ndlp and the reference to ndlp will be stored into the context1 field of 3560 * the IOCB for the completion callback function to the FARPR ELS command. 3561 * 3562 * Return code 3563 * 0 - Successfully issued farpr command 3564 * 1 - Failed to issue farpr command 3565 **/ 3566 static int 3567 lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) 3568 { 3569 int rc = 0; 3570 struct lpfc_hba *phba = vport->phba; 3571 struct lpfc_iocbq *elsiocb; 3572 FARP *fp; 3573 uint8_t *pcmd; 3574 uint32_t *lp; 3575 uint16_t cmdsize; 3576 struct lpfc_nodelist *ondlp; 3577 struct lpfc_nodelist *ndlp; 3578 3579 cmdsize = (sizeof(uint32_t) + sizeof(FARP)); 3580 3581 ndlp = lpfc_findnode_did(vport, nportid); 3582 if (!ndlp) { 3583 ndlp = lpfc_nlp_init(vport, nportid); 3584 if (!ndlp) 3585 return 1; 3586 lpfc_enqueue_node(vport, ndlp); 3587 } 3588 3589 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3590 ndlp->nlp_DID, ELS_CMD_RNID); 3591 if (!elsiocb) 3592 return 1; 3593 3594 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3595 3596 *((uint32_t *) (pcmd)) = ELS_CMD_FARPR; 3597 pcmd += sizeof(uint32_t); 3598 3599 /* Fill in FARPR payload */ 3600 fp = (FARP *) (pcmd); 3601 memset(fp, 0, sizeof(FARP)); 3602 lp = (uint32_t *) pcmd; 3603 *lp++ = be32_to_cpu(nportid); 3604 *lp++ = be32_to_cpu(vport->fc_myDID); 3605 fp->Rflags = 0; 3606 fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE); 3607 3608 memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name)); 3609 memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 3610 ondlp = lpfc_findnode_did(vport, nportid); 3611 if (ondlp) { 3612 memcpy(&fp->OportName, &ondlp->nlp_portname, 3613 sizeof(struct lpfc_name)); 3614 memcpy(&fp->OnodeName, &ondlp->nlp_nodename, 3615 sizeof(struct lpfc_name)); 3616 } 3617 3618 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3619 "Issue FARPR: did:x%x", 3620 ndlp->nlp_DID, 0, 0); 3621 3622 phba->fc_stat.elsXmitFARPR++; 3623 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; 3624 elsiocb->context1 = lpfc_nlp_get(ndlp); 3625 if (!elsiocb->context1) { 3626 lpfc_els_free_iocb(phba, elsiocb); 3627 return 1; 3628 } 3629 3630 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3631 if (rc == IOCB_ERROR) { 3632 /* The additional lpfc_nlp_put will cause the following 3633 * lpfc_els_free_iocb routine to trigger the release of 3634 * the node. 3635 */ 3636 lpfc_els_free_iocb(phba, elsiocb); 3637 lpfc_nlp_put(ndlp); 3638 return 1; 3639 } 3640 /* This will cause the callback-function lpfc_cmpl_els_cmd to 3641 * trigger the release of the node. 3642 */ 3643 /* Don't release reference count as RDF is likely outstanding */ 3644 return 0; 3645 } 3646 3647 /** 3648 * lpfc_issue_els_rdf - Register for diagnostic functions from the fabric. 3649 * @vport: pointer to a host virtual N_Port data structure. 
3650 * @retry: retry counter for the command IOCB. 3651 * 3652 * This routine issues an ELS RDF to the Fabric Controller to register 3653 * for diagnostic functions. 3654 * 3655 * Note that the ndlp reference count will be incremented by 1 for holding the 3656 * ndlp and the reference to ndlp will be stored into the context1 field of 3657 * the IOCB for the completion callback function to the RDF ELS command. 3658 * 3659 * Return code 3660 * 0 - Successfully issued rdf command 3661 * 1 - Failed to issue rdf command 3662 **/ 3663 int 3664 lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry) 3665 { 3666 struct lpfc_hba *phba = vport->phba; 3667 struct lpfc_iocbq *elsiocb; 3668 struct lpfc_els_rdf_req *prdf; 3669 struct lpfc_nodelist *ndlp; 3670 uint16_t cmdsize; 3671 int rc; 3672 3673 cmdsize = sizeof(*prdf); 3674 3675 ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID); 3676 if (!ndlp) { 3677 ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID); 3678 if (!ndlp) 3679 return -ENODEV; 3680 lpfc_enqueue_node(vport, ndlp); 3681 } 3682 3683 /* RDF ELS is not required on an NPIV VN_Port. */ 3684 if (vport->port_type == LPFC_NPIV_PORT) 3685 return -EACCES; 3686 3687 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3688 ndlp->nlp_DID, ELS_CMD_RDF); 3689 if (!elsiocb) 3690 return -ENOMEM; 3691 3692 /* Configure the payload for the supported FPIN events. */ 3693 prdf = (struct lpfc_els_rdf_req *) 3694 (((struct lpfc_dmabuf *)elsiocb->context2)->virt); 3695 memset(prdf, 0, cmdsize); 3696 prdf->rdf.fpin_cmd = ELS_RDF; 3697 prdf->rdf.desc_len = cpu_to_be32(sizeof(struct lpfc_els_rdf_req) - 3698 sizeof(struct fc_els_rdf)); 3699 prdf->reg_d1.reg_desc.desc_tag = cpu_to_be32(ELS_DTAG_FPIN_REGISTER); 3700 prdf->reg_d1.reg_desc.desc_len = cpu_to_be32( 3701 FC_TLV_DESC_LENGTH_FROM_SZ(prdf->reg_d1)); 3702 prdf->reg_d1.reg_desc.count = cpu_to_be32(ELS_RDF_REG_TAG_CNT); 3703 prdf->reg_d1.desc_tags[0] = cpu_to_be32(ELS_DTAG_LNK_INTEGRITY); 3704 prdf->reg_d1.desc_tags[1] = cpu_to_be32(ELS_DTAG_DELIVERY); 3705 prdf->reg_d1.desc_tags[2] = cpu_to_be32(ELS_DTAG_PEER_CONGEST); 3706 prdf->reg_d1.desc_tags[3] = cpu_to_be32(ELS_DTAG_CONGESTION); 3707 3708 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3709 "6444 Xmit RDF to remote NPORT x%x Reg: %x %x\n", 3710 ndlp->nlp_DID, phba->cgn_reg_signal, 3711 phba->cgn_reg_fpin); 3712 3713 phba->cgn_fpin_frequency = LPFC_FPIN_INIT_FREQ; 3714 elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd; 3715 elsiocb->context1 = lpfc_nlp_get(ndlp); 3716 if (!elsiocb->context1) { 3717 lpfc_els_free_iocb(phba, elsiocb); 3718 return -EIO; 3719 } 3720 3721 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3722 "Issue RDF: did:x%x refcnt %d", 3723 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3724 3725 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3726 if (rc == IOCB_ERROR) { 3727 lpfc_els_free_iocb(phba, elsiocb); 3728 lpfc_nlp_put(ndlp); 3729 return -EIO; 3730 } 3731 return 0; 3732 } 3733 3734 /** 3735 * lpfc_els_rcv_rdf - Receive RDF ELS request from the fabric. 3736 * @vport: pointer to a host virtual N_Port data structure. 3737 * @cmdiocb: pointer to lpfc command iocb data structure. 3738 * @ndlp: pointer to a node-list data structure. 3739 * 3740 * A received RDF implies a possible change to fabric supported diagnostic 3741 * functions. This routine sends LS_ACC and then has the Nx_Port issue a new 3742 * RDF request to reregister for supported diagnostic functions. 
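 * In short the exchange is: the fabric sends RDF, this routine answers with
 * LS_ACC via lpfc_els_rsp_acc(), and lpfc_issue_els_rdf() is then called
 * with retry 0 so the FPIN registration descriptors are renegotiated with
 * the fabric controller.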
3743 * 3744 * Return code 3745 * 0 - Success 3746 * -EIO - Failed to process received RDF 3747 **/ 3748 static int 3749 lpfc_els_rcv_rdf(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 3750 struct lpfc_nodelist *ndlp) 3751 { 3752 /* Send LS_ACC */ 3753 if (lpfc_els_rsp_acc(vport, ELS_CMD_RDF, cmdiocb, ndlp, NULL)) { 3754 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3755 "1623 Failed to RDF_ACC from x%x for x%x\n", 3756 ndlp->nlp_DID, vport->fc_myDID); 3757 return -EIO; 3758 } 3759 3760 /* Issue new RDF for reregistering */ 3761 if (lpfc_issue_els_rdf(vport, 0)) { 3762 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3763 "2623 Failed to re register RDF for x%x\n", 3764 vport->fc_myDID); 3765 return -EIO; 3766 } 3767 3768 return 0; 3769 } 3770 3771 /** 3772 * lpfc_least_capable_settings - helper function for EDC rsp processing 3773 * @phba: pointer to lpfc hba data structure. 3774 * @pcgd: pointer to congestion detection descriptor in EDC rsp. 3775 * 3776 * This helper routine determines the least capable setting for 3777 * congestion signals, signal freq, including scale, from the 3778 * congestion detection descriptor in the EDC rsp. The routine 3779 * sets @phba values in preparation for a set_features mailbox. 3780 **/ 3781 static void 3782 lpfc_least_capable_settings(struct lpfc_hba *phba, 3783 struct fc_diag_cg_sig_desc *pcgd) 3784 { 3785 u32 rsp_sig_cap = 0, drv_sig_cap = 0; 3786 u32 rsp_sig_freq_cyc = 0, rsp_sig_freq_scale = 0; 3787 struct lpfc_cgn_info *cp; 3788 u32 crc; 3789 u16 sig_freq; 3790 3791 /* Get rsp signal and frequency capabilities. */ 3792 rsp_sig_cap = be32_to_cpu(pcgd->xmt_signal_capability); 3793 rsp_sig_freq_cyc = be16_to_cpu(pcgd->xmt_signal_frequency.count); 3794 rsp_sig_freq_scale = be16_to_cpu(pcgd->xmt_signal_frequency.units); 3795 3796 /* If the Fport does not support signals, set FPIN only */ 3797 if (rsp_sig_cap == EDC_CG_SIG_NOTSUPPORTED) 3798 goto out_no_support; 3799 3800 /* Apply the xmt scale to the xmt cycle to get the correct frequency. 3801 * Adapter default is 100 milliseconds. Convert all xmt cycle values 3802 * to milliseconds. 3803 */ 3804 switch (rsp_sig_freq_scale) { 3805 case EDC_CG_SIGFREQ_SEC: 3806 rsp_sig_freq_cyc *= MSEC_PER_SEC; 3807 break; 3808 case EDC_CG_SIGFREQ_MSEC: 3809 rsp_sig_freq_cyc *= 1; 3810 break; 3811 default: 3812 goto out_no_support; 3813 } 3814 3815 /* Convenient shorthand. */ 3816 drv_sig_cap = phba->cgn_reg_signal; 3817 3818 /* Choose the least capable frequency. */ 3819 if (rsp_sig_freq_cyc > phba->cgn_sig_freq) 3820 phba->cgn_sig_freq = rsp_sig_freq_cyc; 3821 3822 /* There should be some common signal support. Settle on the least 3823 * capable signal and adjust FPIN values. Initialize defaults to ease 3824 * the decision.
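 * Summary of the outcome, derived from the checks that follow (rsp is the
 * F-Port capability, drv is the locally registered capability):
 *   rsp WARN_ONLY,  drv WARN_ONLY or WARN_ALARM -> WARN_ONLY signal, FPIN keeps ALARM
 *   rsp WARN_ALARM, drv WARN_ALARM              -> WARN_ALARM signal, FPIN NONE
 *   rsp WARN_ALARM, drv WARN_ONLY               -> WARN_ONLY signal, FPIN keeps ALARM
 *   any other combination                       -> no signal support, FPIN WARN and ALARM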
3825 */ 3826 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM; 3827 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 3828 if (rsp_sig_cap == EDC_CG_SIG_WARN_ONLY && 3829 (drv_sig_cap == EDC_CG_SIG_WARN_ONLY || 3830 drv_sig_cap == EDC_CG_SIG_WARN_ALARM)) { 3831 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY; 3832 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN; 3833 } 3834 if (rsp_sig_cap == EDC_CG_SIG_WARN_ALARM) { 3835 if (drv_sig_cap == EDC_CG_SIG_WARN_ALARM) { 3836 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ALARM; 3837 phba->cgn_reg_fpin = LPFC_CGN_FPIN_NONE; 3838 } 3839 if (drv_sig_cap == EDC_CG_SIG_WARN_ONLY) { 3840 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY; 3841 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN; 3842 } 3843 } 3844 3845 if (!phba->cgn_i) 3846 return; 3847 3848 /* Update signal frequency in congestion info buffer */ 3849 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; 3850 3851 /* Frequency (in ms) Signal Warning/Signal Congestion Notifications 3852 * are received by the HBA 3853 */ 3854 sig_freq = phba->cgn_sig_freq; 3855 3856 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY) 3857 cp->cgn_warn_freq = cpu_to_le16(sig_freq); 3858 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) { 3859 cp->cgn_alarm_freq = cpu_to_le16(sig_freq); 3860 cp->cgn_warn_freq = cpu_to_le16(sig_freq); 3861 } 3862 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED); 3863 cp->cgn_info_crc = cpu_to_le32(crc); 3864 return; 3865 3866 out_no_support: 3867 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 3868 phba->cgn_sig_freq = 0; 3869 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; 3870 } 3871 3872 DECLARE_ENUM2STR_LOOKUP(lpfc_get_tlv_dtag_nm, fc_ls_tlv_dtag, 3873 FC_LS_TLV_DTAG_INIT); 3874 3875 /** 3876 * lpfc_cmpl_els_edc - Completion callback function for EDC 3877 * @phba: pointer to lpfc hba data structure. 3878 * @cmdiocb: pointer to lpfc command iocb data structure. 3879 * @rspiocb: pointer to lpfc response iocb data structure. 3880 * 3881 * This routine is the completion callback function for issuing the Exchange 3882 * Diagnostic Capabilities (EDC) command. The driver issues an EDC to 3883 * notify the FPort of its Congestion and Link Fault capabilities. This 3884 * routine parses the FPort's response and decides on the least common 3885 * values applicable to both FPort and NPort for Warnings and Alarms that 3886 * are communicated via hardware signals. 
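 * If the response carries no congestion signaling capability descriptor, or
 * the EDC itself failed, the routine falls back to FPIN-only operation:
 * cgn_reg_signal is set to EDC_CG_SIG_NOTSUPPORTED, both FPIN WARN and ALARM
 * stay registered, and lpfc_config_cgn_signal() is still invoked.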
3887 **/ 3888 static void 3889 lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3890 struct lpfc_iocbq *rspiocb) 3891 { 3892 IOCB_t *irsp; 3893 struct fc_els_edc_resp *edc_rsp; 3894 struct fc_tlv_desc *tlv; 3895 struct fc_diag_cg_sig_desc *pcgd; 3896 struct fc_diag_lnkflt_desc *plnkflt; 3897 struct lpfc_dmabuf *pcmd, *prsp; 3898 const char *dtag_nm; 3899 u32 *pdata, dtag; 3900 int desc_cnt = 0, bytes_remain; 3901 bool rcv_cap_desc = false; 3902 struct lpfc_nodelist *ndlp; 3903 3904 irsp = &rspiocb->iocb; 3905 ndlp = cmdiocb->context1; 3906 3907 lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD, 3908 "EDC cmpl: status:x%x/x%x did:x%x", 3909 irsp->ulpStatus, irsp->un.ulpWord[4], 3910 irsp->un.elsreq64.remoteID); 3911 3912 /* ELS cmd tag <ulpIoTag> completes */ 3913 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3914 "4201 EDC cmd tag x%x completes Data: x%x x%x x%x\n", 3915 irsp->ulpIoTag, irsp->ulpStatus, 3916 irsp->un.ulpWord[4], irsp->ulpTimeout); 3917 3918 pcmd = (struct lpfc_dmabuf *)cmdiocb->context2; 3919 if (!pcmd) 3920 goto out; 3921 3922 pdata = (u32 *)pcmd->virt; 3923 if (!pdata) 3924 goto out; 3925 3926 /* Need to clear signal values, send features MB and RDF with FPIN. */ 3927 if (irsp->ulpStatus) 3928 goto out; 3929 3930 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 3931 if (!prsp) 3932 goto out; 3933 3934 edc_rsp = prsp->virt; 3935 if (!edc_rsp) 3936 goto out; 3937 3938 /* ELS cmd tag <ulpIoTag> completes */ 3939 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3940 "4676 Fabric EDC Rsp: " 3941 "0x%02x, 0x%08x\n", 3942 edc_rsp->acc_hdr.la_cmd, 3943 be32_to_cpu(edc_rsp->desc_list_len)); 3944 3945 /* 3946 * Payload length in bytes is the response descriptor list 3947 * length minus the 12 bytes of Link Service Request 3948 * Information descriptor in the reply. 
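 * Each pass of the loop below consumes one TLV: bytes_remain is reduced by
 * FC_TLV_DESC_SZ_FROM_LENGTH(tlv) and tlv is advanced with
 * fc_tlv_next_desc(). As an illustration only, a 64 byte descriptor list
 * leaves 64 - 12 = 52 bytes of TLV data to walk after the LSRI descriptor.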
3949 */ 3950 bytes_remain = be32_to_cpu(edc_rsp->desc_list_len) - 3951 sizeof(struct fc_els_lsri_desc); 3952 if (bytes_remain <= 0) 3953 goto out; 3954 3955 tlv = edc_rsp->desc; 3956 3957 /* 3958 * cycle through EDC diagnostic descriptors to find the 3959 * congestion signaling capability descriptor 3960 */ 3961 while (bytes_remain) { 3962 if (bytes_remain < FC_TLV_DESC_HDR_SZ) { 3963 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 3964 "6461 Truncated TLV hdr on " 3965 "Diagnostic descriptor[%d]\n", 3966 desc_cnt); 3967 goto out; 3968 } 3969 3970 dtag = be32_to_cpu(tlv->desc_tag); 3971 switch (dtag) { 3972 case ELS_DTAG_LNK_FAULT_CAP: 3973 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 3974 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 3975 sizeof(struct fc_diag_lnkflt_desc)) { 3976 lpfc_printf_log( 3977 phba, KERN_WARNING, LOG_CGN_MGMT, 3978 "6462 Truncated Link Fault Diagnostic " 3979 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 3980 desc_cnt, bytes_remain, 3981 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 3982 sizeof(struct fc_diag_cg_sig_desc)); 3983 goto out; 3984 } 3985 plnkflt = (struct fc_diag_lnkflt_desc *)tlv; 3986 lpfc_printf_log( 3987 phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3988 "4617 Link Fault Desc Data: 0x%08x 0x%08x " 3989 "0x%08x 0x%08x 0x%08x\n", 3990 be32_to_cpu(plnkflt->desc_tag), 3991 be32_to_cpu(plnkflt->desc_len), 3992 be32_to_cpu( 3993 plnkflt->degrade_activate_threshold), 3994 be32_to_cpu( 3995 plnkflt->degrade_deactivate_threshold), 3996 be32_to_cpu(plnkflt->fec_degrade_interval)); 3997 break; 3998 case ELS_DTAG_CG_SIGNAL_CAP: 3999 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 4000 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 4001 sizeof(struct fc_diag_cg_sig_desc)) { 4002 lpfc_printf_log( 4003 phba, KERN_WARNING, LOG_CGN_MGMT, 4004 "6463 Truncated Cgn Signal Diagnostic " 4005 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 4006 desc_cnt, bytes_remain, 4007 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 4008 sizeof(struct fc_diag_cg_sig_desc)); 4009 goto out; 4010 } 4011 4012 pcgd = (struct fc_diag_cg_sig_desc *)tlv; 4013 lpfc_printf_log( 4014 phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 4015 "4616 CGN Desc Data: 0x%08x 0x%08x " 4016 "0x%08x 0x%04x 0x%04x 0x%08x 0x%04x 0x%04x\n", 4017 be32_to_cpu(pcgd->desc_tag), 4018 be32_to_cpu(pcgd->desc_len), 4019 be32_to_cpu(pcgd->xmt_signal_capability), 4020 be16_to_cpu(pcgd->xmt_signal_frequency.count), 4021 be16_to_cpu(pcgd->xmt_signal_frequency.units), 4022 be32_to_cpu(pcgd->rcv_signal_capability), 4023 be16_to_cpu(pcgd->rcv_signal_frequency.count), 4024 be16_to_cpu(pcgd->rcv_signal_frequency.units)); 4025 4026 /* Compare driver and Fport capabilities and choose 4027 * least common. 
4028 */ 4029 lpfc_least_capable_settings(phba, pcgd); 4030 rcv_cap_desc = true; 4031 break; 4032 default: 4033 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 4034 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 4035 "4919 unknown Diagnostic " 4036 "Descriptor[%d]: tag x%x (%s)\n", 4037 desc_cnt, dtag, dtag_nm); 4038 } 4039 4040 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); 4041 tlv = fc_tlv_next_desc(tlv); 4042 desc_cnt++; 4043 } 4044 4045 out: 4046 if (!rcv_cap_desc) { 4047 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; 4048 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 4049 phba->cgn_sig_freq = 0; 4050 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT, 4051 "4202 EDC rsp error - sending RDF " 4052 "for FPIN only.\n"); 4053 } 4054 4055 lpfc_config_cgn_signal(phba); 4056 4057 /* Check to see if link went down during discovery */ 4058 lpfc_els_chk_latt(phba->pport); 4059 lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD, 4060 "EDC Cmpl: did:x%x refcnt %d", 4061 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 4062 lpfc_els_free_iocb(phba, cmdiocb); 4063 lpfc_nlp_put(ndlp); 4064 } 4065 4066 static void 4067 lpfc_format_edc_cgn_desc(struct lpfc_hba *phba, struct fc_diag_cg_sig_desc *cgd) 4068 { 4069 /* We are assuming cgd was zero'ed before calling this routine */ 4070 4071 /* Configure the congestion detection capability */ 4072 cgd->desc_tag = cpu_to_be32(ELS_DTAG_CG_SIGNAL_CAP); 4073 4074 /* Descriptor len doesn't include the tag or len fields. */ 4075 cgd->desc_len = cpu_to_be32( 4076 FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_diag_cg_sig_desc)); 4077 4078 /* xmt_signal_capability already set to EDC_CG_SIG_NOTSUPPORTED. 4079 * xmt_signal_frequency.count already set to 0. 4080 * xmt_signal_frequency.units already set to 0. 4081 */ 4082 4083 if (phba->cmf_active_mode == LPFC_CFG_OFF) { 4084 /* rcv_signal_capability already set to EDC_CG_SIG_NOTSUPPORTED. 4085 * rcv_signal_frequency.count already set to 0. 4086 * rcv_signal_frequency.units already set to 0. 4087 */ 4088 phba->cgn_sig_freq = 0; 4089 return; 4090 } 4091 switch (phba->cgn_reg_signal) { 4092 case EDC_CG_SIG_WARN_ONLY: 4093 cgd->rcv_signal_capability = cpu_to_be32(EDC_CG_SIG_WARN_ONLY); 4094 break; 4095 case EDC_CG_SIG_WARN_ALARM: 4096 cgd->rcv_signal_capability = cpu_to_be32(EDC_CG_SIG_WARN_ALARM); 4097 break; 4098 default: 4099 /* rcv_signal_capability left 0 thus no support */ 4100 break; 4101 } 4102 4103 /* We start negotiation with lpfc_fabric_cgn_frequency, after 4104 * the completion we settle on the higher frequency. 4105 */ 4106 cgd->rcv_signal_frequency.count = 4107 cpu_to_be16(lpfc_fabric_cgn_frequency); 4108 cgd->rcv_signal_frequency.units = 4109 cpu_to_be16(EDC_CG_SIGFREQ_MSEC); 4110 } 4111 4112 /** 4113 * lpfc_issue_els_edc - Exchange Diagnostic Capabilities with the fabric. 4114 * @vport: pointer to a host virtual N_Port data structure. 4115 * @retry: retry counter for the command iocb. 4116 * 4117 * This routine issues an ELS EDC to the F-Port Controller to communicate 4118 * this N_Port's support of hardware signals in its Congestion 4119 * Capabilities Descriptor. 4120 * 4121 * Note: This routine does not check if one or more signals are 4122 * set in the cgn_reg_signal parameter. The caller makes the 4123 * decision to enforce cgn_reg_signal as nonzero or zero depending 4124 * on the conditions. During Fabric requests, the driver 4125 * requires cgn_reg_signals to be nonzero. 
But a dynamic request 4126 * to set the congestion mode to OFF from Monitor or Manage 4127 * would correctly issue an EDC with no signals enabled to 4128 * turn off switch functionality and then update the FW. 4129 * 4130 * Return code 4131 * 0 - Successfully issued edc command 4132 * 1 - Failed to issue edc command 4133 **/ 4134 int 4135 lpfc_issue_els_edc(struct lpfc_vport *vport, uint8_t retry) 4136 { 4137 struct lpfc_hba *phba = vport->phba; 4138 struct lpfc_iocbq *elsiocb; 4139 struct lpfc_els_edc_req *edc_req; 4140 struct fc_diag_cg_sig_desc *cgn_desc; 4141 u16 cmdsize; 4142 struct lpfc_nodelist *ndlp; 4143 u8 *pcmd = NULL; 4144 u32 edc_req_size, cgn_desc_size; 4145 int rc; 4146 4147 if (vport->port_type == LPFC_NPIV_PORT) 4148 return -EACCES; 4149 4150 ndlp = lpfc_findnode_did(vport, Fabric_DID); 4151 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 4152 return -ENODEV; 4153 4154 /* If HBA doesn't support signals, drop into RDF */ 4155 if (!phba->cgn_init_reg_signal) 4156 goto try_rdf; 4157 4158 edc_req_size = sizeof(struct fc_els_edc); 4159 cgn_desc_size = sizeof(struct fc_diag_cg_sig_desc); 4160 cmdsize = edc_req_size + cgn_desc_size; 4161 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 4162 ndlp->nlp_DID, ELS_CMD_EDC); 4163 if (!elsiocb) 4164 goto try_rdf; 4165 4166 /* Configure the payload for the supported Diagnostics capabilities. */ 4167 pcmd = (u8 *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt); 4168 memset(pcmd, 0, cmdsize); 4169 edc_req = (struct lpfc_els_edc_req *)pcmd; 4170 edc_req->edc.desc_len = cpu_to_be32(cgn_desc_size); 4171 edc_req->edc.edc_cmd = ELS_EDC; 4172 4173 cgn_desc = &edc_req->cgn_desc; 4174 4175 lpfc_format_edc_cgn_desc(phba, cgn_desc); 4176 4177 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency; 4178 4179 lpfc_printf_vlog(vport, KERN_INFO, LOG_CGN_MGMT, 4180 "4623 Xmit EDC to remote " 4181 "NPORT x%x reg_sig x%x reg_fpin:x%x\n", 4182 ndlp->nlp_DID, phba->cgn_reg_signal, 4183 phba->cgn_reg_fpin); 4184 4185 elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd; 4186 elsiocb->context1 = lpfc_nlp_get(ndlp); 4187 if (!elsiocb->context1) { 4188 lpfc_els_free_iocb(phba, elsiocb); 4189 return -EIO; 4190 } 4191 4192 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 4193 "Issue EDC: did:x%x refcnt %d", 4194 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 4195 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 4196 if (rc == IOCB_ERROR) { 4197 /* The additional lpfc_nlp_put will cause the following 4198 * lpfc_els_free_iocb routine to trigger the rlease of 4199 * the node. 4200 */ 4201 lpfc_els_free_iocb(phba, elsiocb); 4202 lpfc_nlp_put(ndlp); 4203 goto try_rdf; 4204 } 4205 return 0; 4206 try_rdf: 4207 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM; 4208 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 4209 rc = lpfc_issue_els_rdf(vport, 0); 4210 return rc; 4211 } 4212 4213 /** 4214 * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry 4215 * @vport: pointer to a host virtual N_Port data structure. 4216 * @nlp: pointer to a node-list data structure. 4217 * 4218 * This routine cancels the timer with a delayed IOCB-command retry for 4219 * a @vport's @ndlp. It stops the timer for the delayed function retrial and 4220 * removes the ELS retry event if it presents. In addition, if the 4221 * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB 4222 * commands are sent for the @vport's nodes that require issuing discovery 4223 * ADISC. 
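 * The sequence in the body is: clear NLP_DELAY_TMO, stop the nlp_delayfunc
 * timer, drop the reference held by a queued els_retry_evt, and, when
 * NLP_NPR_2B_DISC was set and nodes are still being discovered, continue
 * with lpfc_more_adisc() or lpfc_more_plogi() depending on the vport state.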
4224 **/ 4225 void 4226 lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) 4227 { 4228 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4229 struct lpfc_work_evt *evtp; 4230 4231 if (!(nlp->nlp_flag & NLP_DELAY_TMO)) 4232 return; 4233 spin_lock_irq(&nlp->lock); 4234 nlp->nlp_flag &= ~NLP_DELAY_TMO; 4235 spin_unlock_irq(&nlp->lock); 4236 del_timer_sync(&nlp->nlp_delayfunc); 4237 nlp->nlp_last_elscmd = 0; 4238 if (!list_empty(&nlp->els_retry_evt.evt_listp)) { 4239 list_del_init(&nlp->els_retry_evt.evt_listp); 4240 /* Decrement nlp reference count held for the delayed retry */ 4241 evtp = &nlp->els_retry_evt; 4242 lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1); 4243 } 4244 if (nlp->nlp_flag & NLP_NPR_2B_DISC) { 4245 spin_lock_irq(&nlp->lock); 4246 nlp->nlp_flag &= ~NLP_NPR_2B_DISC; 4247 spin_unlock_irq(&nlp->lock); 4248 if (vport->num_disc_nodes) { 4249 if (vport->port_state < LPFC_VPORT_READY) { 4250 /* Check if there are more ADISCs to be sent */ 4251 lpfc_more_adisc(vport); 4252 } else { 4253 /* Check if there are more PLOGIs to be sent */ 4254 lpfc_more_plogi(vport); 4255 if (vport->num_disc_nodes == 0) { 4256 spin_lock_irq(shost->host_lock); 4257 vport->fc_flag &= ~FC_NDISC_ACTIVE; 4258 spin_unlock_irq(shost->host_lock); 4259 lpfc_can_disctmo(vport); 4260 lpfc_end_rscn(vport); 4261 } 4262 } 4263 } 4264 } 4265 return; 4266 } 4267 4268 /** 4269 * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer 4270 * @t: pointer to the timer function associated data (ndlp). 4271 * 4272 * This routine is invoked by the ndlp delayed-function timer to check 4273 * whether there is any pending ELS retry event(s) with the node. If not, it 4274 * simply returns. Otherwise, if there is at least one ELS delayed event, it 4275 * adds the delayed events to the HBA work list and invokes the 4276 * lpfc_worker_wake_up() routine to wake up worker thread to process the 4277 * event. Note that lpfc_nlp_get() is called before posting the event to 4278 * the work list to hold reference count of ndlp so that it guarantees the 4279 * reference to ndlp will still be available when the worker thread gets 4280 * to the event associated with the ndlp. 4281 **/ 4282 void 4283 lpfc_els_retry_delay(struct timer_list *t) 4284 { 4285 struct lpfc_nodelist *ndlp = from_timer(ndlp, t, nlp_delayfunc); 4286 struct lpfc_vport *vport = ndlp->vport; 4287 struct lpfc_hba *phba = vport->phba; 4288 unsigned long flags; 4289 struct lpfc_work_evt *evtp = &ndlp->els_retry_evt; 4290 4291 spin_lock_irqsave(&phba->hbalock, flags); 4292 if (!list_empty(&evtp->evt_listp)) { 4293 spin_unlock_irqrestore(&phba->hbalock, flags); 4294 return; 4295 } 4296 4297 /* We need to hold the node by incrementing the reference 4298 * count until the queued work is done 4299 */ 4300 evtp->evt_arg1 = lpfc_nlp_get(ndlp); 4301 if (evtp->evt_arg1) { 4302 evtp->evt = LPFC_EVT_ELS_RETRY; 4303 list_add_tail(&evtp->evt_listp, &phba->work_list); 4304 lpfc_worker_wake_up(phba); 4305 } 4306 spin_unlock_irqrestore(&phba->hbalock, flags); 4307 return; 4308 } 4309 4310 /** 4311 * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function 4312 * @ndlp: pointer to a node-list data structure. 4313 * 4314 * This routine is the worker-thread handler for processing the @ndlp delayed 4315 * event(s), posted by the lpfc_els_retry_delay() routine. 
It simply retrieves 4316 * the last ELS command from the associated ndlp and invokes the proper ELS 4317 * function according to the delayed ELS command to retry the command. 4318 **/ 4319 void 4320 lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) 4321 { 4322 struct lpfc_vport *vport = ndlp->vport; 4323 uint32_t cmd, retry; 4324 4325 spin_lock_irq(&ndlp->lock); 4326 cmd = ndlp->nlp_last_elscmd; 4327 ndlp->nlp_last_elscmd = 0; 4328 4329 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { 4330 spin_unlock_irq(&ndlp->lock); 4331 return; 4332 } 4333 4334 ndlp->nlp_flag &= ~NLP_DELAY_TMO; 4335 spin_unlock_irq(&ndlp->lock); 4336 /* 4337 * If a discovery event readded nlp_delayfunc after timer 4338 * firing and before processing the timer, cancel the 4339 * nlp_delayfunc. 4340 */ 4341 del_timer_sync(&ndlp->nlp_delayfunc); 4342 retry = ndlp->nlp_retry; 4343 ndlp->nlp_retry = 0; 4344 4345 switch (cmd) { 4346 case ELS_CMD_FLOGI: 4347 lpfc_issue_els_flogi(vport, ndlp, retry); 4348 break; 4349 case ELS_CMD_PLOGI: 4350 if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) { 4351 ndlp->nlp_prev_state = ndlp->nlp_state; 4352 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 4353 } 4354 break; 4355 case ELS_CMD_ADISC: 4356 if (!lpfc_issue_els_adisc(vport, ndlp, retry)) { 4357 ndlp->nlp_prev_state = ndlp->nlp_state; 4358 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 4359 } 4360 break; 4361 case ELS_CMD_PRLI: 4362 case ELS_CMD_NVMEPRLI: 4363 if (!lpfc_issue_els_prli(vport, ndlp, retry)) { 4364 ndlp->nlp_prev_state = ndlp->nlp_state; 4365 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 4366 } 4367 break; 4368 case ELS_CMD_LOGO: 4369 if (!lpfc_issue_els_logo(vport, ndlp, retry)) { 4370 ndlp->nlp_prev_state = ndlp->nlp_state; 4371 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 4372 } 4373 break; 4374 case ELS_CMD_FDISC: 4375 if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)) 4376 lpfc_issue_els_fdisc(vport, ndlp, retry); 4377 break; 4378 } 4379 return; 4380 } 4381 4382 /** 4383 * lpfc_link_reset - Issue link reset 4384 * @vport: pointer to a virtual N_Port data structure. 4385 * 4386 * This routine performs link reset by sending INIT_LINK mailbox command. 4387 * For SLI-3 adapter, link attention interrupt is enabled before issuing 4388 * INIT_LINK mailbox command. 
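 * Within this file the reset is requested by lpfc_els_retry() when a PLOGI
 * to the NameServer reaches its final IOERR_SEQUENCE_TIMEOUT retry; the
 * INIT_LINK mailbox completes through lpfc_sli_def_mbox_cmpl with no further
 * ELS handling here.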
4389 * 4390 * Return code 4391 * 0 - Link reset initiated successfully 4392 * 1 - Failed to initiate link reset 4393 **/ 4394 int 4395 lpfc_link_reset(struct lpfc_vport *vport) 4396 { 4397 struct lpfc_hba *phba = vport->phba; 4398 LPFC_MBOXQ_t *mbox; 4399 uint32_t control; 4400 int rc; 4401 4402 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 4403 "2851 Attempt link reset\n"); 4404 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4405 if (!mbox) { 4406 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4407 "2852 Failed to allocate mbox memory"); 4408 return 1; 4409 } 4410 4411 /* Enable Link attention interrupts */ 4412 if (phba->sli_rev <= LPFC_SLI_REV3) { 4413 spin_lock_irq(&phba->hbalock); 4414 phba->sli.sli_flag |= LPFC_PROCESS_LA; 4415 control = readl(phba->HCregaddr); 4416 control |= HC_LAINT_ENA; 4417 writel(control, phba->HCregaddr); 4418 readl(phba->HCregaddr); /* flush */ 4419 spin_unlock_irq(&phba->hbalock); 4420 } 4421 4422 lpfc_init_link(phba, mbox, phba->cfg_topology, 4423 phba->cfg_link_speed); 4424 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4425 mbox->vport = vport; 4426 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 4427 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 4428 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4429 "2853 Failed to issue INIT_LINK " 4430 "mbox command, rc:x%x\n", rc); 4431 mempool_free(mbox, phba->mbox_mem_pool); 4432 return 1; 4433 } 4434 4435 return 0; 4436 } 4437 4438 /** 4439 * lpfc_els_retry - Make retry decision on an els command iocb 4440 * @phba: pointer to lpfc hba data structure. 4441 * @cmdiocb: pointer to lpfc command iocb data structure. 4442 * @rspiocb: pointer to lpfc response iocb data structure. 4443 * 4444 * This routine makes a retry decision on an ELS command IOCB, which has 4445 * failed. The following ELS IOCBs use this function for retrying the command 4446 * when a previously issued command responded with error status: FLOGI, PLOGI, 4447 * PRLI, ADISC and FDISC. Based on the ELS command type and the 4448 * returned error status, it makes the decision whether a retry shall be 4449 * issued for the command, and whether a retry shall be made immediately or 4450 * delayed. In the former case, the corresponding ELS command issuing-function 4451 * is called to retry the command. In the latter case, the ELS command shall 4452 * be posted to the ndlp delayed event and the delayed-function timer set on 4453 * the ndlp for the delayed command issuing. 4454 * 4455 * Return code 4456 * 0 - No retry of els command is made 4457 * 1 - Immediate or delayed retry of els command is made 4458 **/ 4459 static int 4460 lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 4461 struct lpfc_iocbq *rspiocb) 4462 { 4463 struct lpfc_vport *vport = cmdiocb->vport; 4464 IOCB_t *irsp = &rspiocb->iocb; 4465 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 4466 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 4467 uint32_t *elscmd; 4468 struct ls_rjt stat; 4469 int retry = 0, maxretry = lpfc_max_els_tries, delay = 0; 4470 int logerr = 0; 4471 uint32_t cmd = 0; 4472 uint32_t did; 4473 int link_reset = 0, rc; 4474 4475 4476 /* Note: context2 may be 0 for internal driver abort 4477 * of a delayed ELS command.
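 * When context2 is 0, cmd stays 0 and the retry decision below is made
 * from the ndlp/did and ulpStatus alone.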
4478 */ 4479 4480 if (pcmd && pcmd->virt) { 4481 elscmd = (uint32_t *) (pcmd->virt); 4482 cmd = *elscmd++; 4483 } 4484 4485 if (ndlp) 4486 did = ndlp->nlp_DID; 4487 else { 4488 /* We should only hit this case for retrying PLOGI */ 4489 did = irsp->un.elsreq64.remoteID; 4490 ndlp = lpfc_findnode_did(vport, did); 4491 if (!ndlp && (cmd != ELS_CMD_PLOGI)) 4492 return 0; 4493 } 4494 4495 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 4496 "Retry ELS: wd7:x%x wd4:x%x did:x%x", 4497 *(((uint32_t *)irsp) + 7), irsp->un.ulpWord[4], did); 4498 4499 switch (irsp->ulpStatus) { 4500 case IOSTAT_FCP_RSP_ERROR: 4501 break; 4502 case IOSTAT_REMOTE_STOP: 4503 if (phba->sli_rev == LPFC_SLI_REV4) { 4504 /* This IO was aborted by the target, we don't 4505 * know the rxid and because we did not send the 4506 * ABTS we cannot generate and RRQ. 4507 */ 4508 lpfc_set_rrq_active(phba, ndlp, 4509 cmdiocb->sli4_lxritag, 0, 0); 4510 } 4511 break; 4512 case IOSTAT_LOCAL_REJECT: 4513 switch ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK)) { 4514 case IOERR_LOOP_OPEN_FAILURE: 4515 if (cmd == ELS_CMD_FLOGI) { 4516 if (PCI_DEVICE_ID_HORNET == 4517 phba->pcidev->device) { 4518 phba->fc_topology = LPFC_TOPOLOGY_LOOP; 4519 phba->pport->fc_myDID = 0; 4520 phba->alpa_map[0] = 0; 4521 phba->alpa_map[1] = 0; 4522 } 4523 } 4524 if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0) 4525 delay = 1000; 4526 retry = 1; 4527 break; 4528 4529 case IOERR_ILLEGAL_COMMAND: 4530 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4531 "0124 Retry illegal cmd x%x " 4532 "retry:x%x delay:x%x\n", 4533 cmd, cmdiocb->retry, delay); 4534 retry = 1; 4535 /* All command's retry policy */ 4536 maxretry = 8; 4537 if (cmdiocb->retry > 2) 4538 delay = 1000; 4539 break; 4540 4541 case IOERR_NO_RESOURCES: 4542 logerr = 1; /* HBA out of resources */ 4543 retry = 1; 4544 if (cmdiocb->retry > 100) 4545 delay = 100; 4546 maxretry = 250; 4547 break; 4548 4549 case IOERR_ILLEGAL_FRAME: 4550 delay = 100; 4551 retry = 1; 4552 break; 4553 4554 case IOERR_INVALID_RPI: 4555 if (cmd == ELS_CMD_PLOGI && 4556 did == NameServer_DID) { 4557 /* Continue forever if plogi to */ 4558 /* the nameserver fails */ 4559 maxretry = 0; 4560 delay = 100; 4561 } 4562 retry = 1; 4563 break; 4564 4565 case IOERR_SEQUENCE_TIMEOUT: 4566 if (cmd == ELS_CMD_PLOGI && 4567 did == NameServer_DID && 4568 (cmdiocb->retry + 1) == maxretry) { 4569 /* Reset the Link */ 4570 link_reset = 1; 4571 break; 4572 } 4573 retry = 1; 4574 delay = 100; 4575 break; 4576 case IOERR_SLI_ABORTED: 4577 /* Retry ELS PLOGI command? 4578 * Possibly the rport just wasn't ready. 
4579 */ 4580 if (cmd == ELS_CMD_PLOGI) { 4581 /* No retry if state change */ 4582 if (ndlp && 4583 ndlp->nlp_state != NLP_STE_PLOGI_ISSUE) 4584 goto out_retry; 4585 retry = 1; 4586 maxretry = 2; 4587 } 4588 break; 4589 } 4590 break; 4591 4592 case IOSTAT_NPORT_RJT: 4593 case IOSTAT_FABRIC_RJT: 4594 if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) { 4595 retry = 1; 4596 break; 4597 } 4598 break; 4599 4600 case IOSTAT_NPORT_BSY: 4601 case IOSTAT_FABRIC_BSY: 4602 logerr = 1; /* Fabric / Remote NPort out of resources */ 4603 retry = 1; 4604 break; 4605 4606 case IOSTAT_LS_RJT: 4607 stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]); 4608 /* Added for Vendor specifc support 4609 * Just keep retrying for these Rsn / Exp codes 4610 */ 4611 if ((vport->fc_flag & FC_PT2PT) && 4612 cmd == ELS_CMD_NVMEPRLI) { 4613 switch (stat.un.b.lsRjtRsnCode) { 4614 case LSRJT_UNABLE_TPC: 4615 case LSRJT_INVALID_CMD: 4616 case LSRJT_LOGICAL_ERR: 4617 case LSRJT_CMD_UNSUPPORTED: 4618 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 4619 "0168 NVME PRLI LS_RJT " 4620 "reason %x port doesn't " 4621 "support NVME, disabling NVME\n", 4622 stat.un.b.lsRjtRsnCode); 4623 retry = 0; 4624 vport->fc_flag |= FC_PT2PT_NO_NVME; 4625 goto out_retry; 4626 } 4627 } 4628 switch (stat.un.b.lsRjtRsnCode) { 4629 case LSRJT_UNABLE_TPC: 4630 /* The driver has a VALID PLOGI but the rport has 4631 * rejected the PRLI - can't do it now. Delay 4632 * for 1 second and try again. 4633 * 4634 * However, if explanation is REQ_UNSUPPORTED there's 4635 * no point to retry PRLI. 4636 */ 4637 if ((cmd == ELS_CMD_PRLI || cmd == ELS_CMD_NVMEPRLI) && 4638 stat.un.b.lsRjtRsnCodeExp != 4639 LSEXP_REQ_UNSUPPORTED) { 4640 delay = 1000; 4641 maxretry = lpfc_max_els_tries + 1; 4642 retry = 1; 4643 break; 4644 } 4645 4646 /* Legacy bug fix code for targets with PLOGI delays. */ 4647 if (stat.un.b.lsRjtRsnCodeExp == 4648 LSEXP_CMD_IN_PROGRESS) { 4649 if (cmd == ELS_CMD_PLOGI) { 4650 delay = 1000; 4651 maxretry = 48; 4652 } 4653 retry = 1; 4654 break; 4655 } 4656 if (stat.un.b.lsRjtRsnCodeExp == 4657 LSEXP_CANT_GIVE_DATA) { 4658 if (cmd == ELS_CMD_PLOGI) { 4659 delay = 1000; 4660 maxretry = 48; 4661 } 4662 retry = 1; 4663 break; 4664 } 4665 if (cmd == ELS_CMD_PLOGI) { 4666 delay = 1000; 4667 maxretry = lpfc_max_els_tries + 1; 4668 retry = 1; 4669 break; 4670 } 4671 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 4672 (cmd == ELS_CMD_FDISC) && 4673 (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){ 4674 lpfc_printf_vlog(vport, KERN_ERR, 4675 LOG_TRACE_EVENT, 4676 "0125 FDISC Failed (x%x). " 4677 "Fabric out of resources\n", 4678 stat.un.lsRjtError); 4679 lpfc_vport_set_state(vport, 4680 FC_VPORT_NO_FABRIC_RSCS); 4681 } 4682 break; 4683 4684 case LSRJT_LOGICAL_BSY: 4685 if ((cmd == ELS_CMD_PLOGI) || 4686 (cmd == ELS_CMD_PRLI) || 4687 (cmd == ELS_CMD_NVMEPRLI)) { 4688 delay = 1000; 4689 maxretry = 48; 4690 } else if (cmd == ELS_CMD_FDISC) { 4691 /* FDISC retry policy */ 4692 maxretry = 48; 4693 if (cmdiocb->retry >= 32) 4694 delay = 1000; 4695 } 4696 retry = 1; 4697 break; 4698 4699 case LSRJT_LOGICAL_ERR: 4700 /* There are some cases where switches return this 4701 * error when they are not ready and should be returning 4702 * Logical Busy. We should delay every time. 
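 * (i.e. the FDISC / LSEXP_PORT_LOGIN_REQ case below retries with a one
 * second delay, capped at maxretry 3, rather than treating the reject as
 * final).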
4703 */ 4704 if (cmd == ELS_CMD_FDISC && 4705 stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) { 4706 maxretry = 3; 4707 delay = 1000; 4708 retry = 1; 4709 } else if (cmd == ELS_CMD_FLOGI && 4710 stat.un.b.lsRjtRsnCodeExp == 4711 LSEXP_NOTHING_MORE) { 4712 vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf; 4713 retry = 1; 4714 lpfc_printf_vlog(vport, KERN_ERR, 4715 LOG_TRACE_EVENT, 4716 "0820 FLOGI Failed (x%x). " 4717 "BBCredit Not Supported\n", 4718 stat.un.lsRjtError); 4719 } 4720 break; 4721 4722 case LSRJT_PROTOCOL_ERR: 4723 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 4724 (cmd == ELS_CMD_FDISC) && 4725 ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) || 4726 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID)) 4727 ) { 4728 lpfc_printf_vlog(vport, KERN_ERR, 4729 LOG_TRACE_EVENT, 4730 "0122 FDISC Failed (x%x). " 4731 "Fabric Detected Bad WWN\n", 4732 stat.un.lsRjtError); 4733 lpfc_vport_set_state(vport, 4734 FC_VPORT_FABRIC_REJ_WWN); 4735 } 4736 break; 4737 case LSRJT_VENDOR_UNIQUE: 4738 if ((stat.un.b.vendorUnique == 0x45) && 4739 (cmd == ELS_CMD_FLOGI)) { 4740 goto out_retry; 4741 } 4742 break; 4743 case LSRJT_CMD_UNSUPPORTED: 4744 /* lpfc nvmet returns this type of LS_RJT when it 4745 * receives an FCP PRLI because lpfc nvmet only 4746 * support NVME. ELS request is terminated for FCP4 4747 * on this rport. 4748 */ 4749 if (stat.un.b.lsRjtRsnCodeExp == 4750 LSEXP_REQ_UNSUPPORTED && cmd == ELS_CMD_PRLI) { 4751 spin_lock_irq(&ndlp->lock); 4752 ndlp->nlp_flag |= NLP_FCP_PRLI_RJT; 4753 spin_unlock_irq(&ndlp->lock); 4754 retry = 0; 4755 goto out_retry; 4756 } 4757 break; 4758 } 4759 break; 4760 4761 case IOSTAT_INTERMED_RSP: 4762 case IOSTAT_BA_RJT: 4763 break; 4764 4765 default: 4766 break; 4767 } 4768 4769 if (link_reset) { 4770 rc = lpfc_link_reset(vport); 4771 if (rc) { 4772 /* Do not give up. Retry PLOGI one more time and attempt 4773 * link reset if PLOGI fails again. 
4774 */ 4775 retry = 1; 4776 delay = 100; 4777 goto out_retry; 4778 } 4779 return 1; 4780 } 4781 4782 if (did == FDMI_DID) 4783 retry = 1; 4784 4785 if ((cmd == ELS_CMD_FLOGI) && 4786 (phba->fc_topology != LPFC_TOPOLOGY_LOOP) && 4787 !lpfc_error_lost_link(irsp)) { 4788 /* FLOGI retry policy */ 4789 retry = 1; 4790 /* retry FLOGI forever */ 4791 if (phba->link_flag != LS_LOOPBACK_MODE) 4792 maxretry = 0; 4793 else 4794 maxretry = 2; 4795 4796 if (cmdiocb->retry >= 100) 4797 delay = 5000; 4798 else if (cmdiocb->retry >= 32) 4799 delay = 1000; 4800 } else if ((cmd == ELS_CMD_FDISC) && !lpfc_error_lost_link(irsp)) { 4801 /* retry FDISCs every second up to devloss */ 4802 retry = 1; 4803 maxretry = vport->cfg_devloss_tmo; 4804 delay = 1000; 4805 } 4806 4807 cmdiocb->retry++; 4808 if (maxretry && (cmdiocb->retry >= maxretry)) { 4809 phba->fc_stat.elsRetryExceeded++; 4810 retry = 0; 4811 } 4812 4813 if ((vport->load_flag & FC_UNLOADING) != 0) 4814 retry = 0; 4815 4816 out_retry: 4817 if (retry) { 4818 if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) { 4819 /* Stop retrying PLOGI and FDISC if in FCF discovery */ 4820 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 4821 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4822 "2849 Stop retry ELS command " 4823 "x%x to remote NPORT x%x, " 4824 "Data: x%x x%x\n", cmd, did, 4825 cmdiocb->retry, delay); 4826 return 0; 4827 } 4828 } 4829 4830 /* Retry ELS command <elsCmd> to remote NPORT <did> */ 4831 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4832 "0107 Retry ELS command x%x to remote " 4833 "NPORT x%x Data: x%x x%x\n", 4834 cmd, did, cmdiocb->retry, delay); 4835 4836 if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) && 4837 ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) || 4838 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) != 4839 IOERR_NO_RESOURCES))) { 4840 /* Don't reset timer for no resources */ 4841 4842 /* If discovery / RSCN timer is running, reset it */ 4843 if (timer_pending(&vport->fc_disctmo) || 4844 (vport->fc_flag & FC_RSCN_MODE)) 4845 lpfc_set_disctmo(vport); 4846 } 4847 4848 phba->fc_stat.elsXmitRetry++; 4849 if (ndlp && delay) { 4850 phba->fc_stat.elsDelayRetry++; 4851 ndlp->nlp_retry = cmdiocb->retry; 4852 4853 /* delay is specified in milliseconds */ 4854 mod_timer(&ndlp->nlp_delayfunc, 4855 jiffies + msecs_to_jiffies(delay)); 4856 spin_lock_irq(&ndlp->lock); 4857 ndlp->nlp_flag |= NLP_DELAY_TMO; 4858 spin_unlock_irq(&ndlp->lock); 4859 4860 ndlp->nlp_prev_state = ndlp->nlp_state; 4861 if ((cmd == ELS_CMD_PRLI) || 4862 (cmd == ELS_CMD_NVMEPRLI)) 4863 lpfc_nlp_set_state(vport, ndlp, 4864 NLP_STE_PRLI_ISSUE); 4865 else if (cmd != ELS_CMD_ADISC) 4866 lpfc_nlp_set_state(vport, ndlp, 4867 NLP_STE_NPR_NODE); 4868 ndlp->nlp_last_elscmd = cmd; 4869 4870 return 1; 4871 } 4872 switch (cmd) { 4873 case ELS_CMD_FLOGI: 4874 lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry); 4875 return 1; 4876 case ELS_CMD_FDISC: 4877 lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry); 4878 return 1; 4879 case ELS_CMD_PLOGI: 4880 if (ndlp) { 4881 ndlp->nlp_prev_state = ndlp->nlp_state; 4882 lpfc_nlp_set_state(vport, ndlp, 4883 NLP_STE_PLOGI_ISSUE); 4884 } 4885 lpfc_issue_els_plogi(vport, did, cmdiocb->retry); 4886 return 1; 4887 case ELS_CMD_ADISC: 4888 ndlp->nlp_prev_state = ndlp->nlp_state; 4889 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 4890 lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry); 4891 return 1; 4892 case ELS_CMD_PRLI: 4893 case ELS_CMD_NVMEPRLI: 4894 ndlp->nlp_prev_state = ndlp->nlp_state; 4895 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 
4896 lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry); 4897 return 1; 4898 case ELS_CMD_LOGO: 4899 ndlp->nlp_prev_state = ndlp->nlp_state; 4900 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 4901 lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry); 4902 return 1; 4903 } 4904 } 4905 /* No retry ELS command <elsCmd> to remote NPORT <did> */ 4906 if (logerr) { 4907 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4908 "0137 No retry ELS command x%x to remote " 4909 "NPORT x%x: Out of Resources: Error:x%x/%x\n", 4910 cmd, did, irsp->ulpStatus, 4911 irsp->un.ulpWord[4]); 4912 } 4913 else { 4914 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4915 "0108 No retry ELS command x%x to remote " 4916 "NPORT x%x Retried:%d Error:x%x/%x\n", 4917 cmd, did, cmdiocb->retry, irsp->ulpStatus, 4918 irsp->un.ulpWord[4]); 4919 } 4920 return 0; 4921 } 4922 4923 /** 4924 * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb 4925 * @phba: pointer to lpfc hba data structure. 4926 * @buf_ptr1: pointer to the lpfc DMA buffer data structure. 4927 * 4928 * This routine releases the lpfc DMA (Direct Memory Access) buffer(s) 4929 * associated with a command IOCB back to the lpfc DMA buffer pool. It first 4930 * checks to see whether there is a lpfc DMA buffer associated with the 4931 * response of the command IOCB. If so, it will be released before releasing 4932 * the lpfc DMA buffer associated with the IOCB itself. 4933 * 4934 * Return code 4935 * 0 - Successfully released lpfc DMA buffer (currently, always return 0) 4936 **/ 4937 static int 4938 lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1) 4939 { 4940 struct lpfc_dmabuf *buf_ptr; 4941 4942 /* Free the response before processing the command. */ 4943 if (!list_empty(&buf_ptr1->list)) { 4944 list_remove_head(&buf_ptr1->list, buf_ptr, 4945 struct lpfc_dmabuf, 4946 list); 4947 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 4948 kfree(buf_ptr); 4949 } 4950 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys); 4951 kfree(buf_ptr1); 4952 return 0; 4953 } 4954 4955 /** 4956 * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl 4957 * @phba: pointer to lpfc hba data structure. 4958 * @buf_ptr: pointer to the lpfc dma buffer data structure. 4959 * 4960 * This routine releases the lpfc Direct Memory Access (DMA) buffer 4961 * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer 4962 * pool. 4963 * 4964 * Return code 4965 * 0 - Successfully released lpfc DMA buffer (currently, always return 0) 4966 **/ 4967 static int 4968 lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr) 4969 { 4970 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 4971 kfree(buf_ptr); 4972 return 0; 4973 } 4974 4975 /** 4976 * lpfc_els_free_iocb - Free a command iocb and its associated resources 4977 * @phba: pointer to lpfc hba data structure. 4978 * @elsiocb: pointer to lpfc els command iocb data structure. 4979 * 4980 * This routine frees a command IOCB and its associated resources. The 4981 * command IOCB data structure contains the reference to various associated 4982 * resources, these fields must be set to NULL if the associated reference 4983 * not present: 4984 * context1 - reference to ndlp 4985 * context2 - reference to cmd 4986 * context2->next - reference to rsp 4987 * context3 - reference to bpl 4988 * 4989 * It first properly decrements the reference count held on ndlp for the 4990 * IOCB completion callback function. 
If LPFC_DELAY_MEM_FREE flag is not 4991 * set, it invokes the lpfc_els_free_data() routine to release the Direct 4992 * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it 4993 * adds the DMA buffer the @phba data structure for the delayed release. 4994 * If reference to the Buffer Pointer List (BPL) is present, the 4995 * lpfc_els_free_bpl() routine is invoked to release the DMA memory 4996 * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is 4997 * invoked to release the IOCB data structure back to @phba IOCBQ list. 4998 * 4999 * Return code 5000 * 0 - Success (currently, always return 0) 5001 **/ 5002 int 5003 lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb) 5004 { 5005 struct lpfc_dmabuf *buf_ptr, *buf_ptr1; 5006 5007 /* The I/O iocb is complete. Clear the context1 data. */ 5008 elsiocb->context1 = NULL; 5009 5010 /* context2 = cmd, context2->next = rsp, context3 = bpl */ 5011 if (elsiocb->context2) { 5012 if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) { 5013 /* Firmware could still be in progress of DMAing 5014 * payload, so don't free data buffer till after 5015 * a hbeat. 5016 */ 5017 elsiocb->iocb_flag &= ~LPFC_DELAY_MEM_FREE; 5018 buf_ptr = elsiocb->context2; 5019 elsiocb->context2 = NULL; 5020 if (buf_ptr) { 5021 buf_ptr1 = NULL; 5022 spin_lock_irq(&phba->hbalock); 5023 if (!list_empty(&buf_ptr->list)) { 5024 list_remove_head(&buf_ptr->list, 5025 buf_ptr1, struct lpfc_dmabuf, 5026 list); 5027 INIT_LIST_HEAD(&buf_ptr1->list); 5028 list_add_tail(&buf_ptr1->list, 5029 &phba->elsbuf); 5030 phba->elsbuf_cnt++; 5031 } 5032 INIT_LIST_HEAD(&buf_ptr->list); 5033 list_add_tail(&buf_ptr->list, &phba->elsbuf); 5034 phba->elsbuf_cnt++; 5035 spin_unlock_irq(&phba->hbalock); 5036 } 5037 } else { 5038 buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2; 5039 lpfc_els_free_data(phba, buf_ptr1); 5040 elsiocb->context2 = NULL; 5041 } 5042 } 5043 5044 if (elsiocb->context3) { 5045 buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3; 5046 lpfc_els_free_bpl(phba, buf_ptr); 5047 elsiocb->context3 = NULL; 5048 } 5049 lpfc_sli_release_iocbq(phba, elsiocb); 5050 return 0; 5051 } 5052 5053 /** 5054 * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response 5055 * @phba: pointer to lpfc hba data structure. 5056 * @cmdiocb: pointer to lpfc command iocb data structure. 5057 * @rspiocb: pointer to lpfc response iocb data structure. 5058 * 5059 * This routine is the completion callback function to the Logout (LOGO) 5060 * Accept (ACC) Response ELS command. This routine is invoked to indicate 5061 * the completion of the LOGO process. It invokes the lpfc_nlp_not_used() to 5062 * release the ndlp if it has the last reference remaining (reference count 5063 * is 1). If succeeded (meaning ndlp released), it sets the IOCB context1 5064 * field to NULL to inform the following lpfc_els_free_iocb() routine no 5065 * ndlp reference count needs to be decremented. Otherwise, the ndlp 5066 * reference use-count shall be decremented by the lpfc_els_free_iocb() 5067 * routine. Finally, the lpfc_els_free_iocb() is invoked to release the 5068 * IOCB data structure. 
5069 **/ 5070 static void 5071 lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 5072 struct lpfc_iocbq *rspiocb) 5073 { 5074 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 5075 struct lpfc_vport *vport = cmdiocb->vport; 5076 IOCB_t *irsp; 5077 5078 irsp = &rspiocb->iocb; 5079 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5080 "ACC LOGO cmpl: status:x%x/x%x did:x%x", 5081 irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID); 5082 /* ACC to LOGO completes to NPort <nlp_DID> */ 5083 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5084 "0109 ACC to LOGO completes to NPort x%x refcnt %d " 5085 "Data: x%x x%x x%x\n", 5086 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag, 5087 ndlp->nlp_state, ndlp->nlp_rpi); 5088 5089 /* This clause allows the LOGO ACC to complete and free resources 5090 * for the Fabric Domain Controller. It does deliberately skip 5091 * the unreg_rpi and release rpi because some fabrics send RDP 5092 * requests after logging out from the initiator. 5093 */ 5094 if (ndlp->nlp_type & NLP_FABRIC && 5095 ((ndlp->nlp_DID & WELL_KNOWN_DID_MASK) != WELL_KNOWN_DID_MASK)) 5096 goto out; 5097 5098 if (ndlp->nlp_state == NLP_STE_NPR_NODE) { 5099 5100 /* If PLOGI is being retried, PLOGI completion will cleanup the 5101 * node. The NLP_NPR_2B_DISC flag needs to be retained to make 5102 * progress on nodes discovered from last RSCN. 5103 */ 5104 if ((ndlp->nlp_flag & NLP_DELAY_TMO) && 5105 (ndlp->nlp_last_elscmd == ELS_CMD_PLOGI)) 5106 goto out; 5107 5108 /* NPort Recovery mode or node is just allocated */ 5109 if (!lpfc_nlp_not_used(ndlp)) { 5110 /* A LOGO is completing and the node is in NPR state. 5111 * Just unregister the RPI because the node is still 5112 * required. 5113 */ 5114 lpfc_unreg_rpi(vport, ndlp); 5115 } else { 5116 /* Indicate the node has already released, should 5117 * not reference to it from within lpfc_els_free_iocb. 5118 */ 5119 cmdiocb->context1 = NULL; 5120 } 5121 } 5122 out: 5123 /* 5124 * The driver received a LOGO from the rport and has ACK'd it. 5125 * At this point, the driver is done so release the IOCB 5126 */ 5127 lpfc_els_free_iocb(phba, cmdiocb); 5128 lpfc_nlp_put(ndlp); 5129 } 5130 5131 /** 5132 * lpfc_mbx_cmpl_dflt_rpi - Completion callbk func for unreg dflt rpi mbox cmd 5133 * @phba: pointer to lpfc hba data structure. 5134 * @pmb: pointer to the driver internal queue element for mailbox command. 5135 * 5136 * This routine is the completion callback function for unregister default 5137 * RPI (Remote Port Index) mailbox command to the @phba. It simply releases 5138 * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and 5139 * decrements the ndlp reference count held for this completion callback 5140 * function. After that, it invokes the lpfc_nlp_not_used() to check 5141 * whether there is only one reference left on the ndlp. If so, it will 5142 * perform one more decrement and trigger the release of the ndlp. 
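 * (In the body below the final references are dropped with lpfc_nlp_put()
 * followed by lpfc_drop_node() rather than through lpfc_nlp_not_used().)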
5143 **/ 5144 void 5145 lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 5146 { 5147 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf); 5148 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; 5149 u32 mbx_flag = pmb->mbox_flag; 5150 u32 mbx_cmd = pmb->u.mb.mbxCommand; 5151 5152 pmb->ctx_buf = NULL; 5153 pmb->ctx_ndlp = NULL; 5154 5155 if (ndlp) { 5156 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, 5157 "0006 rpi x%x DID:%x flg:%x %d x%px " 5158 "mbx_cmd x%x mbx_flag x%x x%px\n", 5159 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, 5160 kref_read(&ndlp->kref), ndlp, mbx_cmd, 5161 mbx_flag, pmb); 5162 5163 /* This ends the default/temporary RPI cleanup logic for this 5164 * ndlp and the node and rpi needs to be released. Free the rpi 5165 * first on an UNREG_LOGIN and then release the final 5166 * references. 5167 */ 5168 spin_lock_irq(&ndlp->lock); 5169 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 5170 if (mbx_cmd == MBX_UNREG_LOGIN) 5171 ndlp->nlp_flag &= ~NLP_UNREG_INP; 5172 spin_unlock_irq(&ndlp->lock); 5173 lpfc_nlp_put(ndlp); 5174 lpfc_drop_node(ndlp->vport, ndlp); 5175 } 5176 5177 lpfc_mbuf_free(phba, mp->virt, mp->phys); 5178 kfree(mp); 5179 mempool_free(pmb, phba->mbox_mem_pool); 5180 return; 5181 } 5182 5183 /** 5184 * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd 5185 * @phba: pointer to lpfc hba data structure. 5186 * @cmdiocb: pointer to lpfc command iocb data structure. 5187 * @rspiocb: pointer to lpfc response iocb data structure. 5188 * 5189 * This routine is the completion callback function for ELS Response IOCB 5190 * command. In normal case, this callback function just properly sets the 5191 * nlp_flag bitmap in the ndlp data structure, if the mbox command reference 5192 * field in the command IOCB is not NULL, the referred mailbox command will 5193 * be send out, and then invokes the lpfc_els_free_iocb() routine to release 5194 * the IOCB. 5195 **/ 5196 static void 5197 lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 5198 struct lpfc_iocbq *rspiocb) 5199 { 5200 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 5201 struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL; 5202 struct Scsi_Host *shost = vport ? 
lpfc_shost_from_vport(vport) : NULL; 5203 IOCB_t *irsp; 5204 LPFC_MBOXQ_t *mbox = NULL; 5205 struct lpfc_dmabuf *mp = NULL; 5206 5207 irsp = &rspiocb->iocb; 5208 5209 if (!vport) { 5210 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5211 "3177 ELS response failed\n"); 5212 goto out; 5213 } 5214 if (cmdiocb->context_un.mbox) 5215 mbox = cmdiocb->context_un.mbox; 5216 5217 /* Check to see if link went down during discovery */ 5218 if (!ndlp || lpfc_els_chk_latt(vport)) { 5219 if (mbox) { 5220 mp = (struct lpfc_dmabuf *)mbox->ctx_buf; 5221 if (mp) { 5222 lpfc_mbuf_free(phba, mp->virt, mp->phys); 5223 kfree(mp); 5224 } 5225 mempool_free(mbox, phba->mbox_mem_pool); 5226 } 5227 goto out; 5228 } 5229 5230 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5231 "ELS rsp cmpl: status:x%x/x%x did:x%x", 5232 irsp->ulpStatus, irsp->un.ulpWord[4], 5233 cmdiocb->iocb.un.elsreq64.remoteID); 5234 /* ELS response tag <ulpIoTag> completes */ 5235 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5236 "0110 ELS response tag x%x completes " 5237 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%px\n", 5238 cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus, 5239 rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout, 5240 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5241 ndlp->nlp_rpi, kref_read(&ndlp->kref), mbox); 5242 if (mbox) { 5243 if ((rspiocb->iocb.ulpStatus == 0) && 5244 (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { 5245 if (!lpfc_unreg_rpi(vport, ndlp) && 5246 (!(vport->fc_flag & FC_PT2PT))) { 5247 if (ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) { 5248 lpfc_printf_vlog(vport, KERN_INFO, 5249 LOG_DISCOVERY, 5250 "0314 PLOGI recov " 5251 "DID x%x " 5252 "Data: x%x x%x x%x\n", 5253 ndlp->nlp_DID, 5254 ndlp->nlp_state, 5255 ndlp->nlp_rpi, 5256 ndlp->nlp_flag); 5257 mp = mbox->ctx_buf; 5258 if (mp) { 5259 lpfc_mbuf_free(phba, mp->virt, 5260 mp->phys); 5261 kfree(mp); 5262 } 5263 mempool_free(mbox, phba->mbox_mem_pool); 5264 goto out; 5265 } 5266 } 5267 5268 /* Increment reference count to ndlp to hold the 5269 * reference to ndlp for the callback function. 5270 */ 5271 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 5272 if (!mbox->ctx_ndlp) 5273 goto out; 5274 5275 mbox->vport = vport; 5276 if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) { 5277 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG; 5278 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 5279 } 5280 else { 5281 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; 5282 ndlp->nlp_prev_state = ndlp->nlp_state; 5283 lpfc_nlp_set_state(vport, ndlp, 5284 NLP_STE_REG_LOGIN_ISSUE); 5285 } 5286 5287 ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; 5288 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 5289 != MBX_NOT_FINISHED) 5290 goto out; 5291 5292 /* Decrement the ndlp reference count we 5293 * set for this failed mailbox command. 
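 * The reference was taken just above with lpfc_nlp_get() when mbox->ctx_ndlp
 * was set; because lpfc_sli_issue_mbox() returned MBX_NOT_FINISHED the
 * mailbox completion handler will never run to release it.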
5294 */ 5295 lpfc_nlp_put(ndlp); 5296 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 5297 5298 /* ELS rsp: Cannot issue reg_login for <NPortid> */ 5299 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5300 "0138 ELS rsp: Cannot issue reg_login for x%x " 5301 "Data: x%x x%x x%x\n", 5302 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5303 ndlp->nlp_rpi); 5304 } 5305 mp = (struct lpfc_dmabuf *)mbox->ctx_buf; 5306 if (mp) { 5307 lpfc_mbuf_free(phba, mp->virt, mp->phys); 5308 kfree(mp); 5309 } 5310 mempool_free(mbox, phba->mbox_mem_pool); 5311 } 5312 out: 5313 if (ndlp && shost) { 5314 spin_lock_irq(&ndlp->lock); 5315 if (mbox) 5316 ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN; 5317 ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI; 5318 spin_unlock_irq(&ndlp->lock); 5319 } 5320 5321 /* An SLI4 NPIV instance wants to drop the node at this point under 5322 * these conditions and release the RPI. 5323 */ 5324 if (phba->sli_rev == LPFC_SLI_REV4 && 5325 (vport && vport->port_type == LPFC_NPIV_PORT) && 5326 !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD) && 5327 ndlp->nlp_flag & NLP_RELEASE_RPI) { 5328 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); 5329 spin_lock_irq(&ndlp->lock); 5330 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; 5331 ndlp->nlp_flag &= ~NLP_RELEASE_RPI; 5332 spin_unlock_irq(&ndlp->lock); 5333 lpfc_drop_node(vport, ndlp); 5334 } 5335 5336 /* Release the originating I/O reference. */ 5337 lpfc_els_free_iocb(phba, cmdiocb); 5338 lpfc_nlp_put(ndlp); 5339 return; 5340 } 5341 5342 /** 5343 * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command 5344 * @vport: pointer to a host virtual N_Port data structure. 5345 * @flag: the els command code to be accepted. 5346 * @oldiocb: pointer to the original lpfc command iocb data structure. 5347 * @ndlp: pointer to a node-list data structure. 5348 * @mbox: pointer to the driver internal queue element for mailbox command. 5349 * 5350 * This routine prepares and issues an Accept (ACC) response IOCB 5351 * command. It uses the @flag to properly set up the IOCB field for the 5352 * specific ACC response command to be issued and invokes the 5353 * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a 5354 * @mbox pointer is passed in, it will be put into the context_un.mbox 5355 * field of the IOCB for the completion callback function to issue the 5356 * mailbox command to the HBA later when callback is invoked. 5357 * 5358 * Note that the ndlp reference count will be incremented by 1 for holding the 5359 * ndlp and the reference to ndlp will be stored into the context1 field of 5360 * the IOCB for the completion callback function to the corresponding 5361 * response ELS IOCB command. 
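 *
 * A caller in the unsolicited ELS handling path would typically invoke this
 * routine roughly as follows (illustrative sketch only; @mbox is optional
 * and may be NULL):
 *
 *	rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
 *
 * where @flag selects the ACC payload format handled below: a bare ACC, an
 * ACC carrying service parameters (ELS_CMD_FLOGI or ELS_CMD_PLOGI), a PRLO
 * ACC, or an RDF response (ELS_CMD_RDF).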
5362 * 5363 * Return code 5364 * 0 - Successfully issued acc response 5365 * 1 - Failed to issue acc response 5366 **/ 5367 int 5368 lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, 5369 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 5370 LPFC_MBOXQ_t *mbox) 5371 { 5372 struct lpfc_hba *phba = vport->phba; 5373 IOCB_t *icmd; 5374 IOCB_t *oldcmd; 5375 struct lpfc_iocbq *elsiocb; 5376 uint8_t *pcmd; 5377 struct serv_parm *sp; 5378 uint16_t cmdsize; 5379 int rc; 5380 ELS_PKT *els_pkt_ptr; 5381 struct fc_els_rdf_resp *rdf_resp; 5382 5383 oldcmd = &oldiocb->iocb; 5384 5385 switch (flag) { 5386 case ELS_CMD_ACC: 5387 cmdsize = sizeof(uint32_t); 5388 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5389 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5390 if (!elsiocb) { 5391 spin_lock_irq(&ndlp->lock); 5392 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 5393 spin_unlock_irq(&ndlp->lock); 5394 return 1; 5395 } 5396 5397 icmd = &elsiocb->iocb; 5398 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5399 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 5400 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5401 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5402 pcmd += sizeof(uint32_t); 5403 5404 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5405 "Issue ACC: did:x%x flg:x%x", 5406 ndlp->nlp_DID, ndlp->nlp_flag, 0); 5407 break; 5408 case ELS_CMD_FLOGI: 5409 case ELS_CMD_PLOGI: 5410 cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t)); 5411 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5412 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5413 if (!elsiocb) 5414 return 1; 5415 5416 icmd = &elsiocb->iocb; 5417 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5418 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 5419 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5420 5421 if (mbox) 5422 elsiocb->context_un.mbox = mbox; 5423 5424 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5425 pcmd += sizeof(uint32_t); 5426 sp = (struct serv_parm *)pcmd; 5427 5428 if (flag == ELS_CMD_FLOGI) { 5429 /* Copy the received service parameters back */ 5430 memcpy(sp, &phba->fc_fabparam, 5431 sizeof(struct serv_parm)); 5432 5433 /* Clear the F_Port bit */ 5434 sp->cmn.fPort = 0; 5435 5436 /* Mark all class service parameters as invalid */ 5437 sp->cls1.classValid = 0; 5438 sp->cls2.classValid = 0; 5439 sp->cls3.classValid = 0; 5440 sp->cls4.classValid = 0; 5441 5442 /* Copy our worldwide names */ 5443 memcpy(&sp->portName, &vport->fc_sparam.portName, 5444 sizeof(struct lpfc_name)); 5445 memcpy(&sp->nodeName, &vport->fc_sparam.nodeName, 5446 sizeof(struct lpfc_name)); 5447 } else { 5448 memcpy(pcmd, &vport->fc_sparam, 5449 sizeof(struct serv_parm)); 5450 5451 sp->cmn.valid_vendor_ver_level = 0; 5452 memset(sp->un.vendorVersion, 0, 5453 sizeof(sp->un.vendorVersion)); 5454 sp->cmn.bbRcvSizeMsb &= 0xF; 5455 5456 /* If our firmware supports this feature, convey that 5457 * info to the target using the vendor specific field. 
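			 * The feature in question is response suppression:
			 * when LPFC_SLI_SUPPRESS_RSP is set in the SLI flags,
			 * the vendor version descriptor is marked valid and
			 * filled with the Emulex vendor ID and the
			 * suppress-response flag.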
5458 */ 5459 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) { 5460 sp->cmn.valid_vendor_ver_level = 1; 5461 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID); 5462 sp->un.vv.flags = 5463 cpu_to_be32(LPFC_VV_SUPPRESS_RSP); 5464 } 5465 } 5466 5467 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5468 "Issue ACC FLOGI/PLOGI: did:x%x flg:x%x", 5469 ndlp->nlp_DID, ndlp->nlp_flag, 0); 5470 break; 5471 case ELS_CMD_PRLO: 5472 cmdsize = sizeof(uint32_t) + sizeof(PRLO); 5473 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5474 ndlp, ndlp->nlp_DID, ELS_CMD_PRLO); 5475 if (!elsiocb) 5476 return 1; 5477 5478 icmd = &elsiocb->iocb; 5479 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5480 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 5481 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5482 5483 memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt, 5484 sizeof(uint32_t) + sizeof(PRLO)); 5485 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC; 5486 els_pkt_ptr = (ELS_PKT *) pcmd; 5487 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED; 5488 5489 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5490 "Issue ACC PRLO: did:x%x flg:x%x", 5491 ndlp->nlp_DID, ndlp->nlp_flag, 0); 5492 break; 5493 case ELS_CMD_RDF: 5494 cmdsize = sizeof(*rdf_resp); 5495 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5496 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5497 if (!elsiocb) 5498 return 1; 5499 5500 icmd = &elsiocb->iocb; 5501 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5502 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 5503 pcmd = (((struct lpfc_dmabuf *)elsiocb->context2)->virt); 5504 rdf_resp = (struct fc_els_rdf_resp *)pcmd; 5505 memset(rdf_resp, 0, sizeof(*rdf_resp)); 5506 rdf_resp->acc_hdr.la_cmd = ELS_LS_ACC; 5507 5508 /* FC-LS-5 specifies desc_list_len shall be set to 12 */ 5509 rdf_resp->desc_list_len = cpu_to_be32(12); 5510 5511 /* FC-LS-5 specifies LS REQ Information descriptor */ 5512 rdf_resp->lsri.desc_tag = cpu_to_be32(1); 5513 rdf_resp->lsri.desc_len = cpu_to_be32(sizeof(u32)); 5514 rdf_resp->lsri.rqst_w0.cmd = ELS_RDF; 5515 break; 5516 default: 5517 return 1; 5518 } 5519 if (ndlp->nlp_flag & NLP_LOGO_ACC) { 5520 spin_lock_irq(&ndlp->lock); 5521 if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED || 5522 ndlp->nlp_flag & NLP_REG_LOGIN_SEND)) 5523 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 5524 spin_unlock_irq(&ndlp->lock); 5525 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc; 5526 } else { 5527 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5528 } 5529 5530 phba->fc_stat.elsXmitACC++; 5531 elsiocb->context1 = lpfc_nlp_get(ndlp); 5532 if (!elsiocb->context1) { 5533 lpfc_els_free_iocb(phba, elsiocb); 5534 return 1; 5535 } 5536 5537 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5538 if (rc == IOCB_ERROR) { 5539 lpfc_els_free_iocb(phba, elsiocb); 5540 lpfc_nlp_put(ndlp); 5541 return 1; 5542 } 5543 5544 /* Xmit ELS ACC response tag <ulpIoTag> */ 5545 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5546 "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, " 5547 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " 5548 "RPI: x%x, fc_flag x%x refcnt %d\n", 5549 rc, elsiocb->iotag, elsiocb->sli4_xritag, 5550 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5551 ndlp->nlp_rpi, vport->fc_flag, kref_read(&ndlp->kref)); 5552 return 0; 5553 } 5554 5555 /** 5556 * lpfc_els_rsp_reject - Prepare and issue a rjt response iocb command 5557 * @vport: pointer to a virtual N_Port data structure. 
5558 * @rejectError: reject response to issue 5559 * @oldiocb: pointer to the original lpfc command iocb data structure. 5560 * @ndlp: pointer to a node-list data structure. 5561 * @mbox: pointer to the driver internal queue element for mailbox command. 5562 * 5563 * This routine prepares and issue an Reject (RJT) response IOCB 5564 * command. If a @mbox pointer is passed in, it will be put into the 5565 * context_un.mbox field of the IOCB for the completion callback function 5566 * to issue to the HBA later. 5567 * 5568 * Note that the ndlp reference count will be incremented by 1 for holding the 5569 * ndlp and the reference to ndlp will be stored into the context1 field of 5570 * the IOCB for the completion callback function to the reject response 5571 * ELS IOCB command. 5572 * 5573 * Return code 5574 * 0 - Successfully issued reject response 5575 * 1 - Failed to issue reject response 5576 **/ 5577 int 5578 lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, 5579 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 5580 LPFC_MBOXQ_t *mbox) 5581 { 5582 int rc; 5583 struct lpfc_hba *phba = vport->phba; 5584 IOCB_t *icmd; 5585 IOCB_t *oldcmd; 5586 struct lpfc_iocbq *elsiocb; 5587 uint8_t *pcmd; 5588 uint16_t cmdsize; 5589 5590 cmdsize = 2 * sizeof(uint32_t); 5591 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 5592 ndlp->nlp_DID, ELS_CMD_LS_RJT); 5593 if (!elsiocb) 5594 return 1; 5595 5596 icmd = &elsiocb->iocb; 5597 oldcmd = &oldiocb->iocb; 5598 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5599 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 5600 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5601 5602 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; 5603 pcmd += sizeof(uint32_t); 5604 *((uint32_t *) (pcmd)) = rejectError; 5605 5606 if (mbox) 5607 elsiocb->context_un.mbox = mbox; 5608 5609 /* Xmit ELS RJT <err> response tag <ulpIoTag> */ 5610 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5611 "0129 Xmit ELS RJT x%x response tag x%x " 5612 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " 5613 "rpi x%x\n", 5614 rejectError, elsiocb->iotag, 5615 elsiocb->iocb.ulpContext, ndlp->nlp_DID, 5616 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); 5617 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5618 "Issue LS_RJT: did:x%x flg:x%x err:x%x", 5619 ndlp->nlp_DID, ndlp->nlp_flag, rejectError); 5620 5621 phba->fc_stat.elsXmitLSRJT++; 5622 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5623 elsiocb->context1 = lpfc_nlp_get(ndlp); 5624 if (!elsiocb->context1) { 5625 lpfc_els_free_iocb(phba, elsiocb); 5626 return 1; 5627 } 5628 5629 /* The NPIV instance is rejecting this unsolicited ELS. Make sure the 5630 * node's assigned RPI gets released provided this node is not already 5631 * registered with the transport. 5632 */ 5633 if (phba->sli_rev == LPFC_SLI_REV4 && 5634 vport->port_type == LPFC_NPIV_PORT && 5635 !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) { 5636 spin_lock_irq(&ndlp->lock); 5637 ndlp->nlp_flag |= NLP_RELEASE_RPI; 5638 spin_unlock_irq(&ndlp->lock); 5639 } 5640 5641 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5642 if (rc == IOCB_ERROR) { 5643 lpfc_els_free_iocb(phba, elsiocb); 5644 lpfc_nlp_put(ndlp); 5645 return 1; 5646 } 5647 5648 return 0; 5649 } 5650 5651 /** 5652 * lpfc_issue_els_edc_rsp - Exchange Diagnostic Capabilities with the fabric. 5653 * @vport: pointer to a host virtual N_Port data structure. 5654 * @cmdiocb: pointer to the original lpfc command iocb data structure. 
5655 * @ndlp: NPort to where rsp is directed 5656 * 5657 * This routine issues an EDC ACC RSP to the F-Port Controller to communicate 5658 * this N_Port's support of hardware signals in its Congestion 5659 * Capabilities Descriptor. 5660 * 5661 * Return code 5662 * 0 - Successfully issued edc rsp command 5663 * 1 - Failed to issue edc rsp command 5664 **/ 5665 static int 5666 lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5667 struct lpfc_nodelist *ndlp) 5668 { 5669 struct lpfc_hba *phba = vport->phba; 5670 struct lpfc_els_edc_rsp *edc_rsp; 5671 struct lpfc_iocbq *elsiocb; 5672 IOCB_t *icmd, *cmd; 5673 uint8_t *pcmd; 5674 int cmdsize, rc; 5675 5676 cmdsize = sizeof(struct lpfc_els_edc_rsp); 5677 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, cmdiocb->retry, 5678 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5679 if (!elsiocb) 5680 return 1; 5681 5682 icmd = &elsiocb->iocb; 5683 cmd = &cmdiocb->iocb; 5684 icmd->ulpContext = cmd->ulpContext; /* Xri / rx_id */ 5685 icmd->unsli3.rcvsli3.ox_id = cmd->unsli3.rcvsli3.ox_id; 5686 pcmd = (((struct lpfc_dmabuf *)elsiocb->context2)->virt); 5687 memset(pcmd, 0, cmdsize); 5688 5689 edc_rsp = (struct lpfc_els_edc_rsp *)pcmd; 5690 edc_rsp->edc_rsp.acc_hdr.la_cmd = ELS_LS_ACC; 5691 edc_rsp->edc_rsp.desc_list_len = cpu_to_be32( 5692 FC_TLV_DESC_LENGTH_FROM_SZ(struct lpfc_els_edc_rsp)); 5693 edc_rsp->edc_rsp.lsri.desc_tag = cpu_to_be32(ELS_DTAG_LS_REQ_INFO); 5694 edc_rsp->edc_rsp.lsri.desc_len = cpu_to_be32( 5695 FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_els_lsri_desc)); 5696 edc_rsp->edc_rsp.lsri.rqst_w0.cmd = ELS_EDC; 5697 lpfc_format_edc_cgn_desc(phba, &edc_rsp->cgn_desc); 5698 5699 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5700 "Issue EDC ACC: did:x%x flg:x%x refcnt %d", 5701 ndlp->nlp_DID, ndlp->nlp_flag, 5702 kref_read(&ndlp->kref)); 5703 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5704 5705 phba->fc_stat.elsXmitACC++; 5706 elsiocb->context1 = lpfc_nlp_get(ndlp); 5707 if (!elsiocb->context1) { 5708 lpfc_els_free_iocb(phba, elsiocb); 5709 return 1; 5710 } 5711 5712 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5713 if (rc == IOCB_ERROR) { 5714 lpfc_els_free_iocb(phba, elsiocb); 5715 lpfc_nlp_put(ndlp); 5716 return 1; 5717 } 5718 5719 /* Xmit ELS ACC response tag <ulpIoTag> */ 5720 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5721 "0152 Xmit EDC ACC response Status: x%x, IoTag: x%x, " 5722 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " 5723 "RPI: x%x, fc_flag x%x\n", 5724 rc, elsiocb->iotag, elsiocb->sli4_xritag, 5725 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5726 ndlp->nlp_rpi, vport->fc_flag); 5727 5728 return 0; 5729 } 5730 5731 /** 5732 * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd 5733 * @vport: pointer to a virtual N_Port data structure. 5734 * @oldiocb: pointer to the original lpfc command iocb data structure. 5735 * @ndlp: pointer to a node-list data structure. 5736 * 5737 * This routine prepares and issues an Accept (ACC) response to Address 5738 * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB 5739 * and invokes the lpfc_sli_issue_iocb() routine to send out the command. 5740 * 5741 * Note that the ndlp reference count will be incremented by 1 for holding the 5742 * ndlp and the reference to ndlp will be stored into the context1 field of 5743 * the IOCB for the completion callback function to the ADISC Accept response 5744 * ELS IOCB command. 
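 *
 * The ACC payload built below is simply the ELS_CMD_ACC word followed by an
 * ADISC parameter block carrying this port's identity, roughly (a sketch of
 * the code that follows; the driver uses memcpy where assignment is shown):
 *
 *	*(uint32_t *)pcmd = ELS_CMD_ACC;
 *	ap = (ADISC *)(pcmd + sizeof(uint32_t));
 *	ap->hardAL_PA = phba->fc_pref_ALPA;
 *	ap->portName = vport->fc_portname;
 *	ap->nodeName = vport->fc_nodename;
 *	ap->DID = be32_to_cpu(vport->fc_myDID);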
5745 * 5746 * Return code 5747 * 0 - Successfully issued acc adisc response 5748 * 1 - Failed to issue adisc acc response 5749 **/ 5750 int 5751 lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 5752 struct lpfc_nodelist *ndlp) 5753 { 5754 struct lpfc_hba *phba = vport->phba; 5755 ADISC *ap; 5756 IOCB_t *icmd, *oldcmd; 5757 struct lpfc_iocbq *elsiocb; 5758 uint8_t *pcmd; 5759 uint16_t cmdsize; 5760 int rc; 5761 5762 cmdsize = sizeof(uint32_t) + sizeof(ADISC); 5763 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 5764 ndlp->nlp_DID, ELS_CMD_ACC); 5765 if (!elsiocb) 5766 return 1; 5767 5768 icmd = &elsiocb->iocb; 5769 oldcmd = &oldiocb->iocb; 5770 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5771 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 5772 5773 /* Xmit ADISC ACC response tag <ulpIoTag> */ 5774 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5775 "0130 Xmit ADISC ACC response iotag x%x xri: " 5776 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n", 5777 elsiocb->iotag, elsiocb->iocb.ulpContext, 5778 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5779 ndlp->nlp_rpi); 5780 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5781 5782 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5783 pcmd += sizeof(uint32_t); 5784 5785 ap = (ADISC *) (pcmd); 5786 ap->hardAL_PA = phba->fc_pref_ALPA; 5787 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 5788 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 5789 ap->DID = be32_to_cpu(vport->fc_myDID); 5790 5791 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5792 "Issue ACC ADISC: did:x%x flg:x%x refcnt %d", 5793 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 5794 5795 phba->fc_stat.elsXmitACC++; 5796 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5797 elsiocb->context1 = lpfc_nlp_get(ndlp); 5798 if (!elsiocb->context1) { 5799 lpfc_els_free_iocb(phba, elsiocb); 5800 return 1; 5801 } 5802 5803 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5804 if (rc == IOCB_ERROR) { 5805 lpfc_els_free_iocb(phba, elsiocb); 5806 lpfc_nlp_put(ndlp); 5807 return 1; 5808 } 5809 5810 /* Xmit ELS ACC response tag <ulpIoTag> */ 5811 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5812 "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, " 5813 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " 5814 "RPI: x%x, fc_flag x%x\n", 5815 rc, elsiocb->iotag, elsiocb->sli4_xritag, 5816 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5817 ndlp->nlp_rpi, vport->fc_flag); 5818 return 0; 5819 } 5820 5821 /** 5822 * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd 5823 * @vport: pointer to a virtual N_Port data structure. 5824 * @oldiocb: pointer to the original lpfc command iocb data structure. 5825 * @ndlp: pointer to a node-list data structure. 5826 * 5827 * This routine prepares and issues an Accept (ACC) response to Process 5828 * Login (PRLI) ELS command. It simply prepares the payload of the IOCB 5829 * and invokes the lpfc_sli_issue_iocb() routine to send out the command. 5830 * 5831 * Note that the ndlp reference count will be incremented by 1 for holding the 5832 * ndlp and the reference to ndlp will be stored into the context1 field of 5833 * the IOCB for the completion callback function to the PRLI Accept response 5834 * ELS IOCB command. 
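 *
 * The accept format is selected from the FC4 type carried in word 1 of the
 * received PRLI payload, roughly (a sketch of the checks made below; any
 * other type is rejected by returning 1):
 *
 *	if (prli_fc4_req == PRLI_FCP_TYPE)
 *		cmdsize = sizeof(uint32_t) + sizeof(PRLI);
 *	else if (prli_fc4_req & PRLI_NVME_TYPE)
 *		cmdsize = sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli);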
5835 * 5836 * Return code 5837 * 0 - Successfully issued acc prli response 5838 * 1 - Failed to issue acc prli response 5839 **/ 5840 int 5841 lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 5842 struct lpfc_nodelist *ndlp) 5843 { 5844 struct lpfc_hba *phba = vport->phba; 5845 PRLI *npr; 5846 struct lpfc_nvme_prli *npr_nvme; 5847 lpfc_vpd_t *vpd; 5848 IOCB_t *icmd; 5849 IOCB_t *oldcmd; 5850 struct lpfc_iocbq *elsiocb; 5851 uint8_t *pcmd; 5852 uint16_t cmdsize; 5853 uint32_t prli_fc4_req, *req_payload; 5854 struct lpfc_dmabuf *req_buf; 5855 int rc; 5856 u32 elsrspcmd; 5857 5858 /* Need the incoming PRLI payload to determine if the ACC is for an 5859 * FC4 or NVME PRLI type. The PRLI type is at word 1. 5860 */ 5861 req_buf = (struct lpfc_dmabuf *)oldiocb->context2; 5862 req_payload = (((uint32_t *)req_buf->virt) + 1); 5863 5864 /* PRLI type payload is at byte 3 for FCP or NVME. */ 5865 prli_fc4_req = be32_to_cpu(*req_payload); 5866 prli_fc4_req = (prli_fc4_req >> 24) & 0xff; 5867 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5868 "6127 PRLI_ACC: Req Type x%x, Word1 x%08x\n", 5869 prli_fc4_req, *((uint32_t *)req_payload)); 5870 5871 if (prli_fc4_req == PRLI_FCP_TYPE) { 5872 cmdsize = sizeof(uint32_t) + sizeof(PRLI); 5873 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)); 5874 } else if (prli_fc4_req & PRLI_NVME_TYPE) { 5875 cmdsize = sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli); 5876 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_NVMEPRLI & ~ELS_RSP_MASK)); 5877 } else { 5878 return 1; 5879 } 5880 5881 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 5882 ndlp->nlp_DID, elsrspcmd); 5883 if (!elsiocb) 5884 return 1; 5885 5886 icmd = &elsiocb->iocb; 5887 oldcmd = &oldiocb->iocb; 5888 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5889 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 5890 5891 /* Xmit PRLI ACC response tag <ulpIoTag> */ 5892 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5893 "0131 Xmit PRLI ACC response tag x%x xri x%x, " 5894 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 5895 elsiocb->iotag, elsiocb->iocb.ulpContext, 5896 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5897 ndlp->nlp_rpi); 5898 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5899 memset(pcmd, 0, cmdsize); 5900 5901 *((uint32_t *)(pcmd)) = elsrspcmd; 5902 pcmd += sizeof(uint32_t); 5903 5904 /* For PRLI, remainder of payload is PRLI parameter page */ 5905 vpd = &phba->vpd; 5906 5907 if (prli_fc4_req == PRLI_FCP_TYPE) { 5908 /* 5909 * If the remote port is a target and our firmware version 5910 * is 3.20 or later, set the following bits for FC-TAPE 5911 * support. 
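		 * (FC-TAPE support here amounts to setting ConfmComplAllowed,
		 * Retry and TaskRetryIdReq in the FCP PRLI accept page built
		 * below.)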
5912 */ 5913 npr = (PRLI *) pcmd; 5914 if ((ndlp->nlp_type & NLP_FCP_TARGET) && 5915 (vpd->rev.feaLevelHigh >= 0x02)) { 5916 npr->ConfmComplAllowed = 1; 5917 npr->Retry = 1; 5918 npr->TaskRetryIdReq = 1; 5919 } 5920 npr->acceptRspCode = PRLI_REQ_EXECUTED; 5921 npr->estabImagePair = 1; 5922 npr->readXferRdyDis = 1; 5923 npr->ConfmComplAllowed = 1; 5924 npr->prliType = PRLI_FCP_TYPE; 5925 npr->initiatorFunc = 1; 5926 } else if (prli_fc4_req & PRLI_NVME_TYPE) { 5927 /* Respond with an NVME PRLI Type */ 5928 npr_nvme = (struct lpfc_nvme_prli *) pcmd; 5929 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); 5930 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ 5931 bf_set(prli_acc_rsp_code, npr_nvme, PRLI_REQ_EXECUTED); 5932 if (phba->nvmet_support) { 5933 bf_set(prli_tgt, npr_nvme, 1); 5934 bf_set(prli_disc, npr_nvme, 1); 5935 if (phba->cfg_nvme_enable_fb) { 5936 bf_set(prli_fba, npr_nvme, 1); 5937 5938 /* TBD. Target mode needs to post buffers 5939 * that support the configured first burst 5940 * byte size. 5941 */ 5942 bf_set(prli_fb_sz, npr_nvme, 5943 phba->cfg_nvmet_fb_size); 5944 } 5945 } else { 5946 bf_set(prli_init, npr_nvme, 1); 5947 } 5948 5949 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 5950 "6015 NVME issue PRLI ACC word1 x%08x " 5951 "word4 x%08x word5 x%08x flag x%x, " 5952 "fcp_info x%x nlp_type x%x\n", 5953 npr_nvme->word1, npr_nvme->word4, 5954 npr_nvme->word5, ndlp->nlp_flag, 5955 ndlp->nlp_fcp_info, ndlp->nlp_type); 5956 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); 5957 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); 5958 npr_nvme->word5 = cpu_to_be32(npr_nvme->word5); 5959 } else 5960 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 5961 "6128 Unknown FC_TYPE x%x x%x ndlp x%06x\n", 5962 prli_fc4_req, ndlp->nlp_fc4_type, 5963 ndlp->nlp_DID); 5964 5965 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5966 "Issue ACC PRLI: did:x%x flg:x%x", 5967 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 5968 5969 phba->fc_stat.elsXmitACC++; 5970 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5971 elsiocb->context1 = lpfc_nlp_get(ndlp); 5972 if (!elsiocb->context1) { 5973 lpfc_els_free_iocb(phba, elsiocb); 5974 return 1; 5975 } 5976 5977 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5978 if (rc == IOCB_ERROR) { 5979 lpfc_els_free_iocb(phba, elsiocb); 5980 lpfc_nlp_put(ndlp); 5981 return 1; 5982 } 5983 5984 return 0; 5985 } 5986 5987 /** 5988 * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command 5989 * @vport: pointer to a virtual N_Port data structure. 5990 * @format: rnid command format. 5991 * @oldiocb: pointer to the original lpfc command iocb data structure. 5992 * @ndlp: pointer to a node-list data structure. 5993 * 5994 * This routine issues a Request Node Identification Data (RNID) Accept 5995 * (ACC) response. It constructs the RNID ACC response command according to 5996 * the proper @format and then calls the lpfc_sli_issue_iocb() routine to 5997 * issue the response. 5998 * 5999 * Note that the ndlp reference count will be incremented by 1 for holding the 6000 * ndlp and the reference to ndlp will be stored into the context1 field of 6001 * the IOCB for the completion callback function. 
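 *
 * Only two formats are filled in below: format 0 returns just the common
 * identification data (this port's WWPN and WWNN), while RNID_TOPOLOGY_DISC
 * additionally returns an RNID_TOP_DISC block describing this port; any other
 * format is answered with zero common and specific lengths.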
6002 * 6003 * Return code 6004 * 0 - Successfully issued acc rnid response 6005 * 1 - Failed to issue acc rnid response 6006 **/ 6007 static int 6008 lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, 6009 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 6010 { 6011 struct lpfc_hba *phba = vport->phba; 6012 RNID *rn; 6013 IOCB_t *icmd, *oldcmd; 6014 struct lpfc_iocbq *elsiocb; 6015 uint8_t *pcmd; 6016 uint16_t cmdsize; 6017 int rc; 6018 6019 cmdsize = sizeof(uint32_t) + sizeof(uint32_t) 6020 + (2 * sizeof(struct lpfc_name)); 6021 if (format) 6022 cmdsize += sizeof(RNID_TOP_DISC); 6023 6024 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 6025 ndlp->nlp_DID, ELS_CMD_ACC); 6026 if (!elsiocb) 6027 return 1; 6028 6029 icmd = &elsiocb->iocb; 6030 oldcmd = &oldiocb->iocb; 6031 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 6032 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 6033 6034 /* Xmit RNID ACC response tag <ulpIoTag> */ 6035 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6036 "0132 Xmit RNID ACC response tag x%x xri x%x\n", 6037 elsiocb->iotag, elsiocb->iocb.ulpContext); 6038 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 6039 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 6040 pcmd += sizeof(uint32_t); 6041 6042 memset(pcmd, 0, sizeof(RNID)); 6043 rn = (RNID *) (pcmd); 6044 rn->Format = format; 6045 rn->CommonLen = (2 * sizeof(struct lpfc_name)); 6046 memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 6047 memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 6048 switch (format) { 6049 case 0: 6050 rn->SpecificLen = 0; 6051 break; 6052 case RNID_TOPOLOGY_DISC: 6053 rn->SpecificLen = sizeof(RNID_TOP_DISC); 6054 memcpy(&rn->un.topologyDisc.portName, 6055 &vport->fc_portname, sizeof(struct lpfc_name)); 6056 rn->un.topologyDisc.unitType = RNID_HBA; 6057 rn->un.topologyDisc.physPort = 0; 6058 rn->un.topologyDisc.attachedNodes = 0; 6059 break; 6060 default: 6061 rn->CommonLen = 0; 6062 rn->SpecificLen = 0; 6063 break; 6064 } 6065 6066 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6067 "Issue ACC RNID: did:x%x flg:x%x refcnt %d", 6068 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 6069 6070 phba->fc_stat.elsXmitACC++; 6071 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 6072 elsiocb->context1 = lpfc_nlp_get(ndlp); 6073 if (!elsiocb->context1) { 6074 lpfc_els_free_iocb(phba, elsiocb); 6075 return 1; 6076 } 6077 6078 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6079 if (rc == IOCB_ERROR) { 6080 lpfc_els_free_iocb(phba, elsiocb); 6081 lpfc_nlp_put(ndlp); 6082 return 1; 6083 } 6084 6085 return 0; 6086 } 6087 6088 /** 6089 * lpfc_els_clear_rrq - Clear the rq that this rrq describes. 6090 * @vport: pointer to a virtual N_Port data structure. 6091 * @iocb: pointer to the lpfc command iocb data structure. 6092 * @ndlp: pointer to a node-list data structure. 
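 *
 * This routine parses the RRQ payload of the unsolicited command held in
 * @iocb, selects the exchange ID that belongs to this port (the OX_ID when
 * the DID in the RRQ matches @vport's own DID, otherwise the RX_ID) and, if
 * lpfc_get_active_rrq() finds a matching active RRQ, clears it with
 * lpfc_clr_rrq_active().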
6093 * 6094 * Return 6095 **/ 6096 static void 6097 lpfc_els_clear_rrq(struct lpfc_vport *vport, 6098 struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp) 6099 { 6100 struct lpfc_hba *phba = vport->phba; 6101 uint8_t *pcmd; 6102 struct RRQ *rrq; 6103 uint16_t rxid; 6104 uint16_t xri; 6105 struct lpfc_node_rrq *prrq; 6106 6107 6108 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) iocb->context2)->virt); 6109 pcmd += sizeof(uint32_t); 6110 rrq = (struct RRQ *)pcmd; 6111 rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg); 6112 rxid = bf_get(rrq_rxid, rrq); 6113 6114 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6115 "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x" 6116 " x%x x%x\n", 6117 be32_to_cpu(bf_get(rrq_did, rrq)), 6118 bf_get(rrq_oxid, rrq), 6119 rxid, 6120 iocb->iotag, iocb->iocb.ulpContext); 6121 6122 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6123 "Clear RRQ: did:x%x flg:x%x exchg:x%.08x", 6124 ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg); 6125 if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq))) 6126 xri = bf_get(rrq_oxid, rrq); 6127 else 6128 xri = rxid; 6129 prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID); 6130 if (prrq) 6131 lpfc_clr_rrq_active(phba, xri, prrq); 6132 return; 6133 } 6134 6135 /** 6136 * lpfc_els_rsp_echo_acc - Issue echo acc response 6137 * @vport: pointer to a virtual N_Port data structure. 6138 * @data: pointer to echo data to return in the accept. 6139 * @oldiocb: pointer to the original lpfc command iocb data structure. 6140 * @ndlp: pointer to a node-list data structure. 6141 * 6142 * Return code 6143 * 0 - Successfully issued acc echo response 6144 * 1 - Failed to issue acc echo response 6145 **/ 6146 static int 6147 lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data, 6148 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 6149 { 6150 struct lpfc_hba *phba = vport->phba; 6151 struct lpfc_iocbq *elsiocb; 6152 uint8_t *pcmd; 6153 uint16_t cmdsize; 6154 int rc; 6155 6156 cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len; 6157 6158 /* The accumulated length can exceed the BPL_SIZE. 
For 6159 * now, use this as the limit 6160 */ 6161 if (cmdsize > LPFC_BPL_SIZE) 6162 cmdsize = LPFC_BPL_SIZE; 6163 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 6164 ndlp->nlp_DID, ELS_CMD_ACC); 6165 if (!elsiocb) 6166 return 1; 6167 6168 elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext; /* Xri / rx_id */ 6169 elsiocb->iocb.unsli3.rcvsli3.ox_id = oldiocb->iocb.unsli3.rcvsli3.ox_id; 6170 6171 /* Xmit ECHO ACC response tag <ulpIoTag> */ 6172 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6173 "2876 Xmit ECHO ACC response tag x%x xri x%x\n", 6174 elsiocb->iotag, elsiocb->iocb.ulpContext); 6175 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 6176 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 6177 pcmd += sizeof(uint32_t); 6178 memcpy(pcmd, data, cmdsize - sizeof(uint32_t)); 6179 6180 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6181 "Issue ACC ECHO: did:x%x flg:x%x refcnt %d", 6182 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 6183 6184 phba->fc_stat.elsXmitACC++; 6185 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 6186 elsiocb->context1 = lpfc_nlp_get(ndlp); 6187 if (!elsiocb->context1) { 6188 lpfc_els_free_iocb(phba, elsiocb); 6189 return 1; 6190 } 6191 6192 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6193 if (rc == IOCB_ERROR) { 6194 lpfc_els_free_iocb(phba, elsiocb); 6195 lpfc_nlp_put(ndlp); 6196 return 1; 6197 } 6198 6199 return 0; 6200 } 6201 6202 /** 6203 * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport 6204 * @vport: pointer to a host virtual N_Port data structure. 6205 * 6206 * This routine issues Address Discover (ADISC) ELS commands to those 6207 * N_Ports which are in node port recovery state and ADISC has not been issued 6208 * for the @vport. Each time an ELS ADISC IOCB is issued by invoking the 6209 * lpfc_issue_els_adisc() routine, the per @vport number of discover count 6210 * (num_disc_nodes) shall be incremented. If the num_disc_nodes reaches a 6211 * pre-configured threshold (cfg_discovery_threads), the @vport fc_flag will 6212 * be marked with FC_NLP_MORE bit and the process of issuing remaining ADISC 6213 * IOCBs quit for later pick up. On the other hand, after walking through 6214 * all the ndlps with the @vport and there is none ADISC IOCB issued, the 6215 * @vport fc_flag shall be cleared with FC_NLP_MORE bit indicating there is 6216 * no more ADISC need to be sent. 6217 * 6218 * Return code 6219 * The number of N_Ports with adisc issued. 6220 **/ 6221 int 6222 lpfc_els_disc_adisc(struct lpfc_vport *vport) 6223 { 6224 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6225 struct lpfc_nodelist *ndlp, *next_ndlp; 6226 int sentadisc = 0; 6227 6228 /* go thru NPR nodes and issue any remaining ELS ADISCs */ 6229 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 6230 6231 if (ndlp->nlp_state != NLP_STE_NPR_NODE || 6232 !(ndlp->nlp_flag & NLP_NPR_ADISC)) 6233 continue; 6234 6235 spin_lock_irq(&ndlp->lock); 6236 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 6237 spin_unlock_irq(&ndlp->lock); 6238 6239 if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) { 6240 /* This node was marked for ADISC but was not picked 6241 * for discovery. This is possible if the node was 6242 * missing in gidft response. 
6243 * 6244 * At time of marking node for ADISC, we skipped unreg 6245 * from backend 6246 */ 6247 lpfc_nlp_unreg_node(vport, ndlp); 6248 lpfc_unreg_rpi(vport, ndlp); 6249 continue; 6250 } 6251 6252 ndlp->nlp_prev_state = ndlp->nlp_state; 6253 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 6254 lpfc_issue_els_adisc(vport, ndlp, 0); 6255 sentadisc++; 6256 vport->num_disc_nodes++; 6257 if (vport->num_disc_nodes >= 6258 vport->cfg_discovery_threads) { 6259 spin_lock_irq(shost->host_lock); 6260 vport->fc_flag |= FC_NLP_MORE; 6261 spin_unlock_irq(shost->host_lock); 6262 break; 6263 } 6264 6265 } 6266 if (sentadisc == 0) { 6267 spin_lock_irq(shost->host_lock); 6268 vport->fc_flag &= ~FC_NLP_MORE; 6269 spin_unlock_irq(shost->host_lock); 6270 } 6271 return sentadisc; 6272 } 6273 6274 /** 6275 * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc 6276 * @vport: pointer to a host virtual N_Port data structure. 6277 * 6278 * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports 6279 * which are in node port recovery state, with a @vport. Each time an ELS 6280 * ADISC PLOGI IOCB is issued by invoking the lpfc_issue_els_plogi() routine, 6281 * the per @vport number of discover count (num_disc_nodes) shall be 6282 * incremented. If the num_disc_nodes reaches a pre-configured threshold 6283 * (cfg_discovery_threads), the @vport fc_flag will be marked with FC_NLP_MORE 6284 * bit set and quit the process of issuing remaining ADISC PLOGIN IOCBs for 6285 * later pick up. On the other hand, after walking through all the ndlps with 6286 * the @vport and there is none ADISC PLOGI IOCB issued, the @vport fc_flag 6287 * shall be cleared with the FC_NLP_MORE bit indicating there is no more ADISC 6288 * PLOGI need to be sent. 6289 * 6290 * Return code 6291 * The number of N_Ports with plogi issued. 
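 *
 * Note that when at least one PLOGI has been issued the discovery timeout is
 * (re)armed via lpfc_set_disctmo(); otherwise the FC_NLP_MORE flag is cleared
 * on the @vport.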
6292 **/ 6293 int 6294 lpfc_els_disc_plogi(struct lpfc_vport *vport) 6295 { 6296 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6297 struct lpfc_nodelist *ndlp, *next_ndlp; 6298 int sentplogi = 0; 6299 6300 /* go thru NPR nodes and issue any remaining ELS PLOGIs */ 6301 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 6302 if (ndlp->nlp_state == NLP_STE_NPR_NODE && 6303 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && 6304 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 && 6305 (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) { 6306 ndlp->nlp_prev_state = ndlp->nlp_state; 6307 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 6308 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 6309 sentplogi++; 6310 vport->num_disc_nodes++; 6311 if (vport->num_disc_nodes >= 6312 vport->cfg_discovery_threads) { 6313 spin_lock_irq(shost->host_lock); 6314 vport->fc_flag |= FC_NLP_MORE; 6315 spin_unlock_irq(shost->host_lock); 6316 break; 6317 } 6318 } 6319 } 6320 6321 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6322 "6452 Discover PLOGI %d flag x%x\n", 6323 sentplogi, vport->fc_flag); 6324 6325 if (sentplogi) { 6326 lpfc_set_disctmo(vport); 6327 } 6328 else { 6329 spin_lock_irq(shost->host_lock); 6330 vport->fc_flag &= ~FC_NLP_MORE; 6331 spin_unlock_irq(shost->host_lock); 6332 } 6333 return sentplogi; 6334 } 6335 6336 static uint32_t 6337 lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc, 6338 uint32_t word0) 6339 { 6340 6341 desc->tag = cpu_to_be32(RDP_LINK_SERVICE_DESC_TAG); 6342 desc->payload.els_req = word0; 6343 desc->length = cpu_to_be32(sizeof(desc->payload)); 6344 6345 return sizeof(struct fc_rdp_link_service_desc); 6346 } 6347 6348 static uint32_t 6349 lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc, 6350 uint8_t *page_a0, uint8_t *page_a2) 6351 { 6352 uint16_t wavelength; 6353 uint16_t temperature; 6354 uint16_t rx_power; 6355 uint16_t tx_bias; 6356 uint16_t tx_power; 6357 uint16_t vcc; 6358 uint16_t flag = 0; 6359 struct sff_trasnceiver_codes_byte4 *trasn_code_byte4; 6360 struct sff_trasnceiver_codes_byte5 *trasn_code_byte5; 6361 6362 desc->tag = cpu_to_be32(RDP_SFP_DESC_TAG); 6363 6364 trasn_code_byte4 = (struct sff_trasnceiver_codes_byte4 *) 6365 &page_a0[SSF_TRANSCEIVER_CODE_B4]; 6366 trasn_code_byte5 = (struct sff_trasnceiver_codes_byte5 *) 6367 &page_a0[SSF_TRANSCEIVER_CODE_B5]; 6368 6369 if ((trasn_code_byte4->fc_sw_laser) || 6370 (trasn_code_byte5->fc_sw_laser_sl) || 6371 (trasn_code_byte5->fc_sw_laser_sn)) { /* check if its short WL */ 6372 flag |= (SFP_FLAG_PT_SWLASER << SFP_FLAG_PT_SHIFT); 6373 } else if (trasn_code_byte4->fc_lw_laser) { 6374 wavelength = (page_a0[SSF_WAVELENGTH_B1] << 8) | 6375 page_a0[SSF_WAVELENGTH_B0]; 6376 if (wavelength == SFP_WAVELENGTH_LC1310) 6377 flag |= SFP_FLAG_PT_LWLASER_LC1310 << SFP_FLAG_PT_SHIFT; 6378 if (wavelength == SFP_WAVELENGTH_LL1550) 6379 flag |= SFP_FLAG_PT_LWLASER_LL1550 << SFP_FLAG_PT_SHIFT; 6380 } 6381 /* check if its SFP+ */ 6382 flag |= ((page_a0[SSF_IDENTIFIER] == SFF_PG0_IDENT_SFP) ? 6383 SFP_FLAG_CT_SFP_PLUS : SFP_FLAG_CT_UNKNOWN) 6384 << SFP_FLAG_CT_SHIFT; 6385 6386 /* check if its OPTICAL */ 6387 flag |= ((page_a0[SSF_CONNECTOR] == SFF_PG0_CONNECTOR_LC) ? 
6388 SFP_FLAG_IS_OPTICAL_PORT : 0) 6389 << SFP_FLAG_IS_OPTICAL_SHIFT; 6390 6391 temperature = (page_a2[SFF_TEMPERATURE_B1] << 8 | 6392 page_a2[SFF_TEMPERATURE_B0]); 6393 vcc = (page_a2[SFF_VCC_B1] << 8 | 6394 page_a2[SFF_VCC_B0]); 6395 tx_power = (page_a2[SFF_TXPOWER_B1] << 8 | 6396 page_a2[SFF_TXPOWER_B0]); 6397 tx_bias = (page_a2[SFF_TX_BIAS_CURRENT_B1] << 8 | 6398 page_a2[SFF_TX_BIAS_CURRENT_B0]); 6399 rx_power = (page_a2[SFF_RXPOWER_B1] << 8 | 6400 page_a2[SFF_RXPOWER_B0]); 6401 desc->sfp_info.temperature = cpu_to_be16(temperature); 6402 desc->sfp_info.rx_power = cpu_to_be16(rx_power); 6403 desc->sfp_info.tx_bias = cpu_to_be16(tx_bias); 6404 desc->sfp_info.tx_power = cpu_to_be16(tx_power); 6405 desc->sfp_info.vcc = cpu_to_be16(vcc); 6406 6407 desc->sfp_info.flags = cpu_to_be16(flag); 6408 desc->length = cpu_to_be32(sizeof(desc->sfp_info)); 6409 6410 return sizeof(struct fc_rdp_sfp_desc); 6411 } 6412 6413 static uint32_t 6414 lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc, 6415 READ_LNK_VAR *stat) 6416 { 6417 uint32_t type; 6418 6419 desc->tag = cpu_to_be32(RDP_LINK_ERROR_STATUS_DESC_TAG); 6420 6421 type = VN_PT_PHY_PF_PORT << VN_PT_PHY_SHIFT; 6422 6423 desc->info.port_type = cpu_to_be32(type); 6424 6425 desc->info.link_status.link_failure_cnt = 6426 cpu_to_be32(stat->linkFailureCnt); 6427 desc->info.link_status.loss_of_synch_cnt = 6428 cpu_to_be32(stat->lossSyncCnt); 6429 desc->info.link_status.loss_of_signal_cnt = 6430 cpu_to_be32(stat->lossSignalCnt); 6431 desc->info.link_status.primitive_seq_proto_err = 6432 cpu_to_be32(stat->primSeqErrCnt); 6433 desc->info.link_status.invalid_trans_word = 6434 cpu_to_be32(stat->invalidXmitWord); 6435 desc->info.link_status.invalid_crc_cnt = cpu_to_be32(stat->crcCnt); 6436 6437 desc->length = cpu_to_be32(sizeof(desc->info)); 6438 6439 return sizeof(struct fc_rdp_link_error_status_desc); 6440 } 6441 6442 static uint32_t 6443 lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat, 6444 struct lpfc_vport *vport) 6445 { 6446 uint32_t bbCredit; 6447 6448 desc->tag = cpu_to_be32(RDP_BBC_DESC_TAG); 6449 6450 bbCredit = vport->fc_sparam.cmn.bbCreditLsb | 6451 (vport->fc_sparam.cmn.bbCreditMsb << 8); 6452 desc->bbc_info.port_bbc = cpu_to_be32(bbCredit); 6453 if (vport->phba->fc_topology != LPFC_TOPOLOGY_LOOP) { 6454 bbCredit = vport->phba->fc_fabparam.cmn.bbCreditLsb | 6455 (vport->phba->fc_fabparam.cmn.bbCreditMsb << 8); 6456 desc->bbc_info.attached_port_bbc = cpu_to_be32(bbCredit); 6457 } else { 6458 desc->bbc_info.attached_port_bbc = 0; 6459 } 6460 6461 desc->bbc_info.rtt = 0; 6462 desc->length = cpu_to_be32(sizeof(desc->bbc_info)); 6463 6464 return sizeof(struct fc_rdp_bbc_desc); 6465 } 6466 6467 static uint32_t 6468 lpfc_rdp_res_oed_temp_desc(struct lpfc_hba *phba, 6469 struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2) 6470 { 6471 uint32_t flags = 0; 6472 6473 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6474 6475 desc->oed_info.hi_alarm = page_a2[SSF_TEMP_HIGH_ALARM]; 6476 desc->oed_info.lo_alarm = page_a2[SSF_TEMP_LOW_ALARM]; 6477 desc->oed_info.hi_warning = page_a2[SSF_TEMP_HIGH_WARNING]; 6478 desc->oed_info.lo_warning = page_a2[SSF_TEMP_LOW_WARNING]; 6479 6480 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TEMPERATURE) 6481 flags |= RDP_OET_HIGH_ALARM; 6482 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TEMPERATURE) 6483 flags |= RDP_OET_LOW_ALARM; 6484 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TEMPERATURE) 6485 flags |= RDP_OET_HIGH_WARNING; 6486 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TEMPERATURE) 6487 
flags |= RDP_OET_LOW_WARNING; 6488 6489 flags |= ((0xf & RDP_OED_TEMPERATURE) << RDP_OED_TYPE_SHIFT); 6490 desc->oed_info.function_flags = cpu_to_be32(flags); 6491 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6492 return sizeof(struct fc_rdp_oed_sfp_desc); 6493 } 6494 6495 static uint32_t 6496 lpfc_rdp_res_oed_voltage_desc(struct lpfc_hba *phba, 6497 struct fc_rdp_oed_sfp_desc *desc, 6498 uint8_t *page_a2) 6499 { 6500 uint32_t flags = 0; 6501 6502 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6503 6504 desc->oed_info.hi_alarm = page_a2[SSF_VOLTAGE_HIGH_ALARM]; 6505 desc->oed_info.lo_alarm = page_a2[SSF_VOLTAGE_LOW_ALARM]; 6506 desc->oed_info.hi_warning = page_a2[SSF_VOLTAGE_HIGH_WARNING]; 6507 desc->oed_info.lo_warning = page_a2[SSF_VOLTAGE_LOW_WARNING]; 6508 6509 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_VOLTAGE) 6510 flags |= RDP_OET_HIGH_ALARM; 6511 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_VOLTAGE) 6512 flags |= RDP_OET_LOW_ALARM; 6513 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_VOLTAGE) 6514 flags |= RDP_OET_HIGH_WARNING; 6515 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_VOLTAGE) 6516 flags |= RDP_OET_LOW_WARNING; 6517 6518 flags |= ((0xf & RDP_OED_VOLTAGE) << RDP_OED_TYPE_SHIFT); 6519 desc->oed_info.function_flags = cpu_to_be32(flags); 6520 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6521 return sizeof(struct fc_rdp_oed_sfp_desc); 6522 } 6523 6524 static uint32_t 6525 lpfc_rdp_res_oed_txbias_desc(struct lpfc_hba *phba, 6526 struct fc_rdp_oed_sfp_desc *desc, 6527 uint8_t *page_a2) 6528 { 6529 uint32_t flags = 0; 6530 6531 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6532 6533 desc->oed_info.hi_alarm = page_a2[SSF_BIAS_HIGH_ALARM]; 6534 desc->oed_info.lo_alarm = page_a2[SSF_BIAS_LOW_ALARM]; 6535 desc->oed_info.hi_warning = page_a2[SSF_BIAS_HIGH_WARNING]; 6536 desc->oed_info.lo_warning = page_a2[SSF_BIAS_LOW_WARNING]; 6537 6538 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXBIAS) 6539 flags |= RDP_OET_HIGH_ALARM; 6540 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXBIAS) 6541 flags |= RDP_OET_LOW_ALARM; 6542 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXBIAS) 6543 flags |= RDP_OET_HIGH_WARNING; 6544 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXBIAS) 6545 flags |= RDP_OET_LOW_WARNING; 6546 6547 flags |= ((0xf & RDP_OED_TXBIAS) << RDP_OED_TYPE_SHIFT); 6548 desc->oed_info.function_flags = cpu_to_be32(flags); 6549 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6550 return sizeof(struct fc_rdp_oed_sfp_desc); 6551 } 6552 6553 static uint32_t 6554 lpfc_rdp_res_oed_txpower_desc(struct lpfc_hba *phba, 6555 struct fc_rdp_oed_sfp_desc *desc, 6556 uint8_t *page_a2) 6557 { 6558 uint32_t flags = 0; 6559 6560 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6561 6562 desc->oed_info.hi_alarm = page_a2[SSF_TXPOWER_HIGH_ALARM]; 6563 desc->oed_info.lo_alarm = page_a2[SSF_TXPOWER_LOW_ALARM]; 6564 desc->oed_info.hi_warning = page_a2[SSF_TXPOWER_HIGH_WARNING]; 6565 desc->oed_info.lo_warning = page_a2[SSF_TXPOWER_LOW_WARNING]; 6566 6567 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXPOWER) 6568 flags |= RDP_OET_HIGH_ALARM; 6569 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXPOWER) 6570 flags |= RDP_OET_LOW_ALARM; 6571 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXPOWER) 6572 flags |= RDP_OET_HIGH_WARNING; 6573 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXPOWER) 6574 flags |= RDP_OET_LOW_WARNING; 6575 6576 flags |= ((0xf & RDP_OED_TXPOWER) << RDP_OED_TYPE_SHIFT); 6577 desc->oed_info.function_flags = cpu_to_be32(flags); 6578 desc->length = 
cpu_to_be32(sizeof(desc->oed_info)); 6579 return sizeof(struct fc_rdp_oed_sfp_desc); 6580 } 6581 6582 6583 static uint32_t 6584 lpfc_rdp_res_oed_rxpower_desc(struct lpfc_hba *phba, 6585 struct fc_rdp_oed_sfp_desc *desc, 6586 uint8_t *page_a2) 6587 { 6588 uint32_t flags = 0; 6589 6590 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6591 6592 desc->oed_info.hi_alarm = page_a2[SSF_RXPOWER_HIGH_ALARM]; 6593 desc->oed_info.lo_alarm = page_a2[SSF_RXPOWER_LOW_ALARM]; 6594 desc->oed_info.hi_warning = page_a2[SSF_RXPOWER_HIGH_WARNING]; 6595 desc->oed_info.lo_warning = page_a2[SSF_RXPOWER_LOW_WARNING]; 6596 6597 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_RXPOWER) 6598 flags |= RDP_OET_HIGH_ALARM; 6599 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_RXPOWER) 6600 flags |= RDP_OET_LOW_ALARM; 6601 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_RXPOWER) 6602 flags |= RDP_OET_HIGH_WARNING; 6603 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_RXPOWER) 6604 flags |= RDP_OET_LOW_WARNING; 6605 6606 flags |= ((0xf & RDP_OED_RXPOWER) << RDP_OED_TYPE_SHIFT); 6607 desc->oed_info.function_flags = cpu_to_be32(flags); 6608 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6609 return sizeof(struct fc_rdp_oed_sfp_desc); 6610 } 6611 6612 static uint32_t 6613 lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc, 6614 uint8_t *page_a0, struct lpfc_vport *vport) 6615 { 6616 desc->tag = cpu_to_be32(RDP_OPD_DESC_TAG); 6617 memcpy(desc->opd_info.vendor_name, &page_a0[SSF_VENDOR_NAME], 16); 6618 memcpy(desc->opd_info.model_number, &page_a0[SSF_VENDOR_PN], 16); 6619 memcpy(desc->opd_info.serial_number, &page_a0[SSF_VENDOR_SN], 16); 6620 memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 4); 6621 memcpy(desc->opd_info.date, &page_a0[SSF_DATE_CODE], 8); 6622 desc->length = cpu_to_be32(sizeof(desc->opd_info)); 6623 return sizeof(struct fc_rdp_opd_sfp_desc); 6624 } 6625 6626 static uint32_t 6627 lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat) 6628 { 6629 if (bf_get(lpfc_read_link_stat_gec2, stat) == 0) 6630 return 0; 6631 desc->tag = cpu_to_be32(RDP_FEC_DESC_TAG); 6632 6633 desc->info.CorrectedBlocks = 6634 cpu_to_be32(stat->fecCorrBlkCount); 6635 desc->info.UncorrectableBlocks = 6636 cpu_to_be32(stat->fecUncorrBlkCount); 6637 6638 desc->length = cpu_to_be32(sizeof(desc->info)); 6639 6640 return sizeof(struct fc_fec_rdp_desc); 6641 } 6642 6643 static uint32_t 6644 lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba) 6645 { 6646 uint16_t rdp_cap = 0; 6647 uint16_t rdp_speed; 6648 6649 desc->tag = cpu_to_be32(RDP_PORT_SPEED_DESC_TAG); 6650 6651 switch (phba->fc_linkspeed) { 6652 case LPFC_LINK_SPEED_1GHZ: 6653 rdp_speed = RDP_PS_1GB; 6654 break; 6655 case LPFC_LINK_SPEED_2GHZ: 6656 rdp_speed = RDP_PS_2GB; 6657 break; 6658 case LPFC_LINK_SPEED_4GHZ: 6659 rdp_speed = RDP_PS_4GB; 6660 break; 6661 case LPFC_LINK_SPEED_8GHZ: 6662 rdp_speed = RDP_PS_8GB; 6663 break; 6664 case LPFC_LINK_SPEED_10GHZ: 6665 rdp_speed = RDP_PS_10GB; 6666 break; 6667 case LPFC_LINK_SPEED_16GHZ: 6668 rdp_speed = RDP_PS_16GB; 6669 break; 6670 case LPFC_LINK_SPEED_32GHZ: 6671 rdp_speed = RDP_PS_32GB; 6672 break; 6673 case LPFC_LINK_SPEED_64GHZ: 6674 rdp_speed = RDP_PS_64GB; 6675 break; 6676 case LPFC_LINK_SPEED_128GHZ: 6677 rdp_speed = RDP_PS_128GB; 6678 break; 6679 case LPFC_LINK_SPEED_256GHZ: 6680 rdp_speed = RDP_PS_256GB; 6681 break; 6682 default: 6683 rdp_speed = RDP_PS_UNKNOWN; 6684 break; 6685 } 6686 6687 desc->info.port_speed.speed = cpu_to_be16(rdp_speed); 6688 6689 if (phba->lmt & LMT_256Gb) 6690 
rdp_cap |= RDP_PS_256GB; 6691 if (phba->lmt & LMT_128Gb) 6692 rdp_cap |= RDP_PS_128GB; 6693 if (phba->lmt & LMT_64Gb) 6694 rdp_cap |= RDP_PS_64GB; 6695 if (phba->lmt & LMT_32Gb) 6696 rdp_cap |= RDP_PS_32GB; 6697 if (phba->lmt & LMT_16Gb) 6698 rdp_cap |= RDP_PS_16GB; 6699 if (phba->lmt & LMT_10Gb) 6700 rdp_cap |= RDP_PS_10GB; 6701 if (phba->lmt & LMT_8Gb) 6702 rdp_cap |= RDP_PS_8GB; 6703 if (phba->lmt & LMT_4Gb) 6704 rdp_cap |= RDP_PS_4GB; 6705 if (phba->lmt & LMT_2Gb) 6706 rdp_cap |= RDP_PS_2GB; 6707 if (phba->lmt & LMT_1Gb) 6708 rdp_cap |= RDP_PS_1GB; 6709 6710 if (rdp_cap == 0) 6711 rdp_cap = RDP_CAP_UNKNOWN; 6712 if (phba->cfg_link_speed != LPFC_USER_LINK_SPEED_AUTO) 6713 rdp_cap |= RDP_CAP_USER_CONFIGURED; 6714 6715 desc->info.port_speed.capabilities = cpu_to_be16(rdp_cap); 6716 desc->length = cpu_to_be32(sizeof(desc->info)); 6717 return sizeof(struct fc_rdp_port_speed_desc); 6718 } 6719 6720 static uint32_t 6721 lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc, 6722 struct lpfc_vport *vport) 6723 { 6724 6725 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG); 6726 6727 memcpy(desc->port_names.wwnn, &vport->fc_nodename, 6728 sizeof(desc->port_names.wwnn)); 6729 6730 memcpy(desc->port_names.wwpn, &vport->fc_portname, 6731 sizeof(desc->port_names.wwpn)); 6732 6733 desc->length = cpu_to_be32(sizeof(desc->port_names)); 6734 return sizeof(struct fc_rdp_port_name_desc); 6735 } 6736 6737 static uint32_t 6738 lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc, 6739 struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 6740 { 6741 6742 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG); 6743 if (vport->fc_flag & FC_FABRIC) { 6744 memcpy(desc->port_names.wwnn, &vport->fabric_nodename, 6745 sizeof(desc->port_names.wwnn)); 6746 6747 memcpy(desc->port_names.wwpn, &vport->fabric_portname, 6748 sizeof(desc->port_names.wwpn)); 6749 } else { /* Point to Point */ 6750 memcpy(desc->port_names.wwnn, &ndlp->nlp_nodename, 6751 sizeof(desc->port_names.wwnn)); 6752 6753 memcpy(desc->port_names.wwpn, &ndlp->nlp_portname, 6754 sizeof(desc->port_names.wwpn)); 6755 } 6756 6757 desc->length = cpu_to_be32(sizeof(desc->port_names)); 6758 return sizeof(struct fc_rdp_port_name_desc); 6759 } 6760 6761 static void 6762 lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context, 6763 int status) 6764 { 6765 struct lpfc_nodelist *ndlp = rdp_context->ndlp; 6766 struct lpfc_vport *vport = ndlp->vport; 6767 struct lpfc_iocbq *elsiocb; 6768 struct ulp_bde64 *bpl; 6769 IOCB_t *icmd; 6770 uint8_t *pcmd; 6771 struct ls_rjt *stat; 6772 struct fc_rdp_res_frame *rdp_res; 6773 uint32_t cmdsize, len; 6774 uint16_t *flag_ptr; 6775 int rc; 6776 6777 if (status != SUCCESS) 6778 goto error; 6779 6780 /* This will change once we know the true size of the RDP payload */ 6781 cmdsize = sizeof(struct fc_rdp_res_frame); 6782 6783 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, 6784 lpfc_max_els_tries, rdp_context->ndlp, 6785 rdp_context->ndlp->nlp_DID, ELS_CMD_ACC); 6786 if (!elsiocb) 6787 goto free_rdp_context; 6788 6789 icmd = &elsiocb->iocb; 6790 icmd->ulpContext = rdp_context->rx_id; 6791 icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id; 6792 6793 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6794 "2171 Xmit RDP response tag x%x xri x%x, " 6795 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x", 6796 elsiocb->iotag, elsiocb->iocb.ulpContext, 6797 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 6798 ndlp->nlp_rpi); 6799 rdp_res = (struct fc_rdp_res_frame *) 6800 (((struct lpfc_dmabuf *) 
elsiocb->context2)->virt); 6801 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 6802 memset(pcmd, 0, sizeof(struct fc_rdp_res_frame)); 6803 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 6804 6805 /* Update Alarm and Warning */ 6806 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_ALARM_FLAGS); 6807 phba->sfp_alarm |= *flag_ptr; 6808 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_WARNING_FLAGS); 6809 phba->sfp_warning |= *flag_ptr; 6810 6811 /* For RDP payload */ 6812 len = 8; 6813 len += lpfc_rdp_res_link_service((struct fc_rdp_link_service_desc *) 6814 (len + pcmd), ELS_CMD_RDP); 6815 6816 len += lpfc_rdp_res_sfp_desc((struct fc_rdp_sfp_desc *)(len + pcmd), 6817 rdp_context->page_a0, rdp_context->page_a2); 6818 len += lpfc_rdp_res_speed((struct fc_rdp_port_speed_desc *)(len + pcmd), 6819 phba); 6820 len += lpfc_rdp_res_link_error((struct fc_rdp_link_error_status_desc *) 6821 (len + pcmd), &rdp_context->link_stat); 6822 len += lpfc_rdp_res_diag_port_names((struct fc_rdp_port_name_desc *) 6823 (len + pcmd), vport); 6824 len += lpfc_rdp_res_attach_port_names((struct fc_rdp_port_name_desc *) 6825 (len + pcmd), vport, ndlp); 6826 len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd), 6827 &rdp_context->link_stat); 6828 len += lpfc_rdp_res_bbc_desc((struct fc_rdp_bbc_desc *)(len + pcmd), 6829 &rdp_context->link_stat, vport); 6830 len += lpfc_rdp_res_oed_temp_desc(phba, 6831 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 6832 rdp_context->page_a2); 6833 len += lpfc_rdp_res_oed_voltage_desc(phba, 6834 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 6835 rdp_context->page_a2); 6836 len += lpfc_rdp_res_oed_txbias_desc(phba, 6837 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 6838 rdp_context->page_a2); 6839 len += lpfc_rdp_res_oed_txpower_desc(phba, 6840 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 6841 rdp_context->page_a2); 6842 len += lpfc_rdp_res_oed_rxpower_desc(phba, 6843 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 6844 rdp_context->page_a2); 6845 len += lpfc_rdp_res_opd_desc((struct fc_rdp_opd_sfp_desc *)(len + pcmd), 6846 rdp_context->page_a0, vport); 6847 6848 rdp_res->length = cpu_to_be32(len - 8); 6849 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 6850 6851 /* Now that we know the true size of the payload, update the BPL */ 6852 bpl = (struct ulp_bde64 *) 6853 (((struct lpfc_dmabuf *)(elsiocb->context3))->virt); 6854 bpl->tus.f.bdeSize = len; 6855 bpl->tus.f.bdeFlags = 0; 6856 bpl->tus.w = le32_to_cpu(bpl->tus.w); 6857 6858 phba->fc_stat.elsXmitACC++; 6859 elsiocb->context1 = lpfc_nlp_get(ndlp); 6860 if (!elsiocb->context1) { 6861 lpfc_els_free_iocb(phba, elsiocb); 6862 goto free_rdp_context; 6863 } 6864 6865 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6866 if (rc == IOCB_ERROR) { 6867 lpfc_els_free_iocb(phba, elsiocb); 6868 lpfc_nlp_put(ndlp); 6869 } 6870 6871 goto free_rdp_context; 6872 6873 error: 6874 cmdsize = 2 * sizeof(uint32_t); 6875 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, lpfc_max_els_tries, 6876 ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT); 6877 if (!elsiocb) 6878 goto free_rdp_context; 6879 6880 icmd = &elsiocb->iocb; 6881 icmd->ulpContext = rdp_context->rx_id; 6882 icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id; 6883 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 6884 6885 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; 6886 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t)); 6887 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 6888 6889 phba->fc_stat.elsXmitLSRJT++; 6890 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 6891 
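/* Take an ndlp reference in context1 for the LS_RJT completion; it is dropped again if the iocb cannot be issued. */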
elsiocb->context1 = lpfc_nlp_get(ndlp); 6892 if (!elsiocb->context1) { 6893 lpfc_els_free_iocb(phba, elsiocb); 6894 goto free_rdp_context; 6895 } 6896 6897 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6898 if (rc == IOCB_ERROR) { 6899 lpfc_els_free_iocb(phba, elsiocb); 6900 lpfc_nlp_put(ndlp); 6901 } 6902 6903 free_rdp_context: 6904 /* This reference put is for the original unsolicited RDP. If the 6905 * iocb prep failed, there is no reference to remove. 6906 */ 6907 lpfc_nlp_put(ndlp); 6908 kfree(rdp_context); 6909 } 6910 6911 static int 6912 lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context) 6913 { 6914 LPFC_MBOXQ_t *mbox = NULL; 6915 struct lpfc_dmabuf *mp; 6916 int rc; 6917 6918 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6919 if (!mbox) { 6920 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS, 6921 "7105 failed to allocate mailbox memory"); 6922 return 1; 6923 } 6924 6925 if (lpfc_sli4_dump_page_a0(phba, mbox)) 6926 goto prep_mbox_fail; 6927 mbox->vport = rdp_context->ndlp->vport; 6928 mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0; 6929 mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context; 6930 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 6931 if (rc == MBX_NOT_FINISHED) { 6932 mp = (struct lpfc_dmabuf *)mbox->ctx_buf; 6933 lpfc_mbuf_free(phba, mp->virt, mp->phys); 6934 goto issue_mbox_fail; 6935 } 6936 6937 return 0; 6938 6939 prep_mbox_fail: 6940 issue_mbox_fail: 6941 mempool_free(mbox, phba->mbox_mem_pool); 6942 return 1; 6943 } 6944 6945 /* 6946 * lpfc_els_rcv_rdp - Process an unsolicited RDP ELS. 6947 * @vport: pointer to a host virtual N_Port data structure. 6948 * @cmdiocb: pointer to lpfc command iocb data structure. 6949 * @ndlp: pointer to a node-list data structure. 6950 * 6951 * This routine processes an unsolicited RDP(Read Diagnostic Parameters) 6952 * IOCB. First, the payload of the unsolicited RDP is checked. 6953 * Then it will (1) send MBX_DUMP_MEMORY, Embedded DMP_LMSD sub command TYPE-3 6954 * for Page A0, (2) send MBX_DUMP_MEMORY, DMP_LMSD for Page A2, 6955 * (3) send MBX_READ_LNK_STAT to get link stat, (4) Call lpfc_els_rdp_cmpl 6956 * gather all data and send RDP response. 6957 * 6958 * Return code 6959 * 0 - Sent the acc response 6960 * 1 - Sent the reject response. 
6961 */ 6962 static int 6963 lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 6964 struct lpfc_nodelist *ndlp) 6965 { 6966 struct lpfc_hba *phba = vport->phba; 6967 struct lpfc_dmabuf *pcmd; 6968 uint8_t rjt_err, rjt_expl = LSEXP_NOTHING_MORE; 6969 struct fc_rdp_req_frame *rdp_req; 6970 struct lpfc_rdp_context *rdp_context; 6971 IOCB_t *cmd = NULL; 6972 struct ls_rjt stat; 6973 6974 if (phba->sli_rev < LPFC_SLI_REV4 || 6975 bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 6976 LPFC_SLI_INTF_IF_TYPE_2) { 6977 rjt_err = LSRJT_UNABLE_TPC; 6978 rjt_expl = LSEXP_REQ_UNSUPPORTED; 6979 goto error; 6980 } 6981 6982 if (phba->sli_rev < LPFC_SLI_REV4 || (phba->hba_flag & HBA_FCOE_MODE)) { 6983 rjt_err = LSRJT_UNABLE_TPC; 6984 rjt_expl = LSEXP_REQ_UNSUPPORTED; 6985 goto error; 6986 } 6987 6988 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 6989 rdp_req = (struct fc_rdp_req_frame *) pcmd->virt; 6990 6991 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6992 "2422 ELS RDP Request " 6993 "dec len %d tag x%x port_id %d len %d\n", 6994 be32_to_cpu(rdp_req->rdp_des_length), 6995 be32_to_cpu(rdp_req->nport_id_desc.tag), 6996 be32_to_cpu(rdp_req->nport_id_desc.nport_id), 6997 be32_to_cpu(rdp_req->nport_id_desc.length)); 6998 6999 if (sizeof(struct fc_rdp_nport_desc) != 7000 be32_to_cpu(rdp_req->rdp_des_length)) 7001 goto rjt_logerr; 7002 if (RDP_N_PORT_DESC_TAG != be32_to_cpu(rdp_req->nport_id_desc.tag)) 7003 goto rjt_logerr; 7004 if (RDP_NPORT_ID_SIZE != 7005 be32_to_cpu(rdp_req->nport_id_desc.length)) 7006 goto rjt_logerr; 7007 rdp_context = kzalloc(sizeof(struct lpfc_rdp_context), GFP_KERNEL); 7008 if (!rdp_context) { 7009 rjt_err = LSRJT_UNABLE_TPC; 7010 goto error; 7011 } 7012 7013 cmd = &cmdiocb->iocb; 7014 rdp_context->ndlp = lpfc_nlp_get(ndlp); 7015 if (!rdp_context->ndlp) { 7016 kfree(rdp_context); 7017 rjt_err = LSRJT_UNABLE_TPC; 7018 goto error; 7019 } 7020 rdp_context->ox_id = cmd->unsli3.rcvsli3.ox_id; 7021 rdp_context->rx_id = cmd->ulpContext; 7022 rdp_context->cmpl = lpfc_els_rdp_cmpl; 7023 if (lpfc_get_rdp_info(phba, rdp_context)) { 7024 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_ELS, 7025 "2423 Unable to send mailbox"); 7026 kfree(rdp_context); 7027 rjt_err = LSRJT_UNABLE_TPC; 7028 lpfc_nlp_put(ndlp); 7029 goto error; 7030 } 7031 7032 return 0; 7033 7034 rjt_logerr: 7035 rjt_err = LSRJT_LOGICAL_ERR; 7036 7037 error: 7038 memset(&stat, 0, sizeof(stat)); 7039 stat.un.b.lsRjtRsnCode = rjt_err; 7040 stat.un.b.lsRjtRsnCodeExp = rjt_expl; 7041 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 7042 return 1; 7043 } 7044 7045 7046 static void 7047 lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 7048 { 7049 MAILBOX_t *mb; 7050 IOCB_t *icmd; 7051 uint8_t *pcmd; 7052 struct lpfc_iocbq *elsiocb; 7053 struct lpfc_nodelist *ndlp; 7054 struct ls_rjt *stat; 7055 union lpfc_sli4_cfg_shdr *shdr; 7056 struct lpfc_lcb_context *lcb_context; 7057 struct fc_lcb_res_frame *lcb_res; 7058 uint32_t cmdsize, shdr_status, shdr_add_status; 7059 int rc; 7060 7061 mb = &pmb->u.mb; 7062 lcb_context = (struct lpfc_lcb_context *)pmb->ctx_ndlp; 7063 ndlp = lcb_context->ndlp; 7064 pmb->ctx_ndlp = NULL; 7065 pmb->ctx_buf = NULL; 7066 7067 shdr = (union lpfc_sli4_cfg_shdr *) 7068 &pmb->u.mqe.un.beacon_config.header.cfg_shdr; 7069 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 7070 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 7071 7072 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX, 7073 "0194 SET_BEACON_CONFIG mailbox " 7074 "completed 
with status x%x add_status x%x," 7075 " mbx status x%x\n", 7076 shdr_status, shdr_add_status, mb->mbxStatus); 7077 7078 if ((mb->mbxStatus != MBX_SUCCESS) || shdr_status || 7079 (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) || 7080 (shdr_add_status == ADD_STATUS_INVALID_REQUEST)) { 7081 mempool_free(pmb, phba->mbox_mem_pool); 7082 goto error; 7083 } 7084 7085 mempool_free(pmb, phba->mbox_mem_pool); 7086 cmdsize = sizeof(struct fc_lcb_res_frame); 7087 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 7088 lpfc_max_els_tries, ndlp, 7089 ndlp->nlp_DID, ELS_CMD_ACC); 7090 7091 /* Decrement the ndlp reference count from previous mbox command */ 7092 lpfc_nlp_put(ndlp); 7093 7094 if (!elsiocb) 7095 goto free_lcb_context; 7096 7097 lcb_res = (struct fc_lcb_res_frame *) 7098 (((struct lpfc_dmabuf *)elsiocb->context2)->virt); 7099 7100 memset(lcb_res, 0, sizeof(struct fc_lcb_res_frame)); 7101 icmd = &elsiocb->iocb; 7102 icmd->ulpContext = lcb_context->rx_id; 7103 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; 7104 7105 pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt); 7106 *((uint32_t *)(pcmd)) = ELS_CMD_ACC; 7107 lcb_res->lcb_sub_command = lcb_context->sub_command; 7108 lcb_res->lcb_type = lcb_context->type; 7109 lcb_res->capability = lcb_context->capability; 7110 lcb_res->lcb_frequency = lcb_context->frequency; 7111 lcb_res->lcb_duration = lcb_context->duration; 7112 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 7113 phba->fc_stat.elsXmitACC++; 7114 7115 elsiocb->context1 = lpfc_nlp_get(ndlp); 7116 if (!elsiocb->context1) { 7117 lpfc_els_free_iocb(phba, elsiocb); 7118 goto out; 7119 } 7120 7121 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7122 if (rc == IOCB_ERROR) { 7123 lpfc_els_free_iocb(phba, elsiocb); 7124 lpfc_nlp_put(ndlp); 7125 } 7126 out: 7127 kfree(lcb_context); 7128 return; 7129 7130 error: 7131 cmdsize = sizeof(struct fc_lcb_res_frame); 7132 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 7133 lpfc_max_els_tries, ndlp, 7134 ndlp->nlp_DID, ELS_CMD_LS_RJT); 7135 lpfc_nlp_put(ndlp); 7136 if (!elsiocb) 7137 goto free_lcb_context; 7138 7139 icmd = &elsiocb->iocb; 7140 icmd->ulpContext = lcb_context->rx_id; 7141 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; 7142 pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt); 7143 7144 *((uint32_t *)(pcmd)) = ELS_CMD_LS_RJT; 7145 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t)); 7146 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 7147 7148 if (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) 7149 stat->un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS; 7150 7151 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 7152 phba->fc_stat.elsXmitLSRJT++; 7153 elsiocb->context1 = lpfc_nlp_get(ndlp); 7154 if (!elsiocb->context1) { 7155 lpfc_els_free_iocb(phba, elsiocb); 7156 goto free_lcb_context; 7157 } 7158 7159 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7160 if (rc == IOCB_ERROR) { 7161 lpfc_els_free_iocb(phba, elsiocb); 7162 lpfc_nlp_put(ndlp); 7163 } 7164 free_lcb_context: 7165 kfree(lcb_context); 7166 } 7167 7168 static int 7169 lpfc_sli4_set_beacon(struct lpfc_vport *vport, 7170 struct lpfc_lcb_context *lcb_context, 7171 uint32_t beacon_state) 7172 { 7173 struct lpfc_hba *phba = vport->phba; 7174 union lpfc_sli4_cfg_shdr *cfg_shdr; 7175 LPFC_MBOXQ_t *mbox = NULL; 7176 uint32_t len; 7177 int rc; 7178 7179 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7180 if (!mbox) 7181 return 1; 7182 7183 cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr; 7184 len = sizeof(struct 
lpfc_mbx_set_beacon_config) - 7185 sizeof(struct lpfc_sli4_cfg_mhdr); 7186 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 7187 LPFC_MBOX_OPCODE_SET_BEACON_CONFIG, len, 7188 LPFC_SLI4_MBX_EMBED); 7189 mbox->ctx_ndlp = (void *)lcb_context; 7190 mbox->vport = phba->pport; 7191 mbox->mbox_cmpl = lpfc_els_lcb_rsp; 7192 bf_set(lpfc_mbx_set_beacon_port_num, &mbox->u.mqe.un.beacon_config, 7193 phba->sli4_hba.physical_port); 7194 bf_set(lpfc_mbx_set_beacon_state, &mbox->u.mqe.un.beacon_config, 7195 beacon_state); 7196 mbox->u.mqe.un.beacon_config.word5 = 0; /* Reserved */ 7197 7198 /* 7199 * Check bv1s bit before issuing the mailbox 7200 * if bv1s == 1, LCB V1 supported 7201 * else, LCB V0 supported 7202 */ 7203 7204 if (phba->sli4_hba.pc_sli4_params.bv1s) { 7205 /* COMMON_SET_BEACON_CONFIG_V1 */ 7206 cfg_shdr->request.word9 = BEACON_VERSION_V1; 7207 lcb_context->capability |= LCB_CAPABILITY_DURATION; 7208 bf_set(lpfc_mbx_set_beacon_port_type, 7209 &mbox->u.mqe.un.beacon_config, 0); 7210 bf_set(lpfc_mbx_set_beacon_duration_v1, 7211 &mbox->u.mqe.un.beacon_config, 7212 be16_to_cpu(lcb_context->duration)); 7213 } else { 7214 /* COMMON_SET_BEACON_CONFIG_V0 */ 7215 if (be16_to_cpu(lcb_context->duration) != 0) { 7216 mempool_free(mbox, phba->mbox_mem_pool); 7217 return 1; 7218 } 7219 cfg_shdr->request.word9 = BEACON_VERSION_V0; 7220 lcb_context->capability &= ~(LCB_CAPABILITY_DURATION); 7221 bf_set(lpfc_mbx_set_beacon_state, 7222 &mbox->u.mqe.un.beacon_config, beacon_state); 7223 bf_set(lpfc_mbx_set_beacon_port_type, 7224 &mbox->u.mqe.un.beacon_config, 1); 7225 bf_set(lpfc_mbx_set_beacon_duration, 7226 &mbox->u.mqe.un.beacon_config, 7227 be16_to_cpu(lcb_context->duration)); 7228 } 7229 7230 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 7231 if (rc == MBX_NOT_FINISHED) { 7232 mempool_free(mbox, phba->mbox_mem_pool); 7233 return 1; 7234 } 7235 7236 return 0; 7237 } 7238 7239 7240 /** 7241 * lpfc_els_rcv_lcb - Process an unsolicited LCB 7242 * @vport: pointer to a host virtual N_Port data structure. 7243 * @cmdiocb: pointer to lpfc command iocb data structure. 7244 * @ndlp: pointer to a node-list data structure. 7245 * 7246 * This routine processes an unsolicited LCB(LINK CABLE BEACON) IOCB. 7247 * First, the payload of the unsolicited LCB is checked. 7248 * Then based on Subcommand beacon will either turn on or off. 7249 * 7250 * Return code 7251 * 0 - Sent the acc response 7252 * 1 - Sent the reject response. 
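 * The beacon state is programmed via the SET_BEACON_CONFIG mailbox command built in lpfc_sli4_set_beacon(); the ACC or LS_RJT response is sent from its completion handler, lpfc_els_lcb_rsp().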
7253 **/ 7254 static int 7255 lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7256 struct lpfc_nodelist *ndlp) 7257 { 7258 struct lpfc_hba *phba = vport->phba; 7259 struct lpfc_dmabuf *pcmd; 7260 uint8_t *lp; 7261 struct fc_lcb_request_frame *beacon; 7262 struct lpfc_lcb_context *lcb_context; 7263 u8 state, rjt_err = 0; 7264 struct ls_rjt stat; 7265 7266 pcmd = (struct lpfc_dmabuf *)cmdiocb->context2; 7267 lp = (uint8_t *)pcmd->virt; 7268 beacon = (struct fc_lcb_request_frame *)pcmd->virt; 7269 7270 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7271 "0192 ELS LCB Data x%x x%x x%x x%x sub x%x " 7272 "type x%x frequency %x duration x%x\n", 7273 lp[0], lp[1], lp[2], 7274 beacon->lcb_command, 7275 beacon->lcb_sub_command, 7276 beacon->lcb_type, 7277 beacon->lcb_frequency, 7278 be16_to_cpu(beacon->lcb_duration)); 7279 7280 if (beacon->lcb_sub_command != LPFC_LCB_ON && 7281 beacon->lcb_sub_command != LPFC_LCB_OFF) { 7282 rjt_err = LSRJT_CMD_UNSUPPORTED; 7283 goto rjt; 7284 } 7285 7286 if (phba->sli_rev < LPFC_SLI_REV4 || 7287 phba->hba_flag & HBA_FCOE_MODE || 7288 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 7289 LPFC_SLI_INTF_IF_TYPE_2)) { 7290 rjt_err = LSRJT_CMD_UNSUPPORTED; 7291 goto rjt; 7292 } 7293 7294 lcb_context = kmalloc(sizeof(*lcb_context), GFP_KERNEL); 7295 if (!lcb_context) { 7296 rjt_err = LSRJT_UNABLE_TPC; 7297 goto rjt; 7298 } 7299 7300 state = (beacon->lcb_sub_command == LPFC_LCB_ON) ? 1 : 0; 7301 lcb_context->sub_command = beacon->lcb_sub_command; 7302 lcb_context->capability = 0; 7303 lcb_context->type = beacon->lcb_type; 7304 lcb_context->frequency = beacon->lcb_frequency; 7305 lcb_context->duration = beacon->lcb_duration; 7306 lcb_context->ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id; 7307 lcb_context->rx_id = cmdiocb->iocb.ulpContext; 7308 lcb_context->ndlp = lpfc_nlp_get(ndlp); 7309 if (!lcb_context->ndlp) { 7310 rjt_err = LSRJT_UNABLE_TPC; 7311 goto rjt_free; 7312 } 7313 7314 if (lpfc_sli4_set_beacon(vport, lcb_context, state)) { 7315 lpfc_printf_vlog(ndlp->vport, KERN_ERR, LOG_TRACE_EVENT, 7316 "0193 failed to send mail box"); 7317 lpfc_nlp_put(ndlp); 7318 rjt_err = LSRJT_UNABLE_TPC; 7319 goto rjt_free; 7320 } 7321 return 0; 7322 7323 rjt_free: 7324 kfree(lcb_context); 7325 rjt: 7326 memset(&stat, 0, sizeof(stat)); 7327 stat.un.b.lsRjtRsnCode = rjt_err; 7328 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 7329 return 1; 7330 } 7331 7332 7333 /** 7334 * lpfc_els_flush_rscn - Clean up any rscn activities with a vport 7335 * @vport: pointer to a host virtual N_Port data structure. 7336 * 7337 * This routine cleans up any Registration State Change Notification 7338 * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the 7339 * @vport together with the host_lock is used to prevent multiple thread 7340 * trying to access the RSCN array on a same @vport at the same time. 
7341 **/ 7342 void 7343 lpfc_els_flush_rscn(struct lpfc_vport *vport) 7344 { 7345 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7346 struct lpfc_hba *phba = vport->phba; 7347 int i; 7348 7349 spin_lock_irq(shost->host_lock); 7350 if (vport->fc_rscn_flush) { 7351 /* Another thread is walking fc_rscn_id_list on this vport */ 7352 spin_unlock_irq(shost->host_lock); 7353 return; 7354 } 7355 /* Indicate we are walking lpfc_els_flush_rscn on this vport */ 7356 vport->fc_rscn_flush = 1; 7357 spin_unlock_irq(shost->host_lock); 7358 7359 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 7360 lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]); 7361 vport->fc_rscn_id_list[i] = NULL; 7362 } 7363 spin_lock_irq(shost->host_lock); 7364 vport->fc_rscn_id_cnt = 0; 7365 vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY); 7366 spin_unlock_irq(shost->host_lock); 7367 lpfc_can_disctmo(vport); 7368 /* Indicate we are done walking this fc_rscn_id_list */ 7369 vport->fc_rscn_flush = 0; 7370 } 7371 7372 /** 7373 * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did 7374 * @vport: pointer to a host virtual N_Port data structure. 7375 * @did: remote destination port identifier. 7376 * 7377 * This routine checks whether there is any pending Registration State 7378 * Change Notification (RSCN) to a @did on @vport. 7379 * 7380 * Return code 7381 * Non-zero - The @did matched with a pending rscn 7382 * 0 - not able to match @did with a pending rscn 7383 **/ 7384 int 7385 lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did) 7386 { 7387 D_ID ns_did; 7388 D_ID rscn_did; 7389 uint32_t *lp; 7390 uint32_t payload_len, i; 7391 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7392 7393 ns_did.un.word = did; 7394 7395 /* Never match fabric nodes for RSCNs */ 7396 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 7397 return 0; 7398 7399 /* If we are doing a FULL RSCN rediscovery, match everything */ 7400 if (vport->fc_flag & FC_RSCN_DISCOVERY) 7401 return did; 7402 7403 spin_lock_irq(shost->host_lock); 7404 if (vport->fc_rscn_flush) { 7405 /* Another thread is walking fc_rscn_id_list on this vport */ 7406 spin_unlock_irq(shost->host_lock); 7407 return 0; 7408 } 7409 /* Indicate we are walking fc_rscn_id_list on this vport */ 7410 vport->fc_rscn_flush = 1; 7411 spin_unlock_irq(shost->host_lock); 7412 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 7413 lp = vport->fc_rscn_id_list[i]->virt; 7414 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); 7415 payload_len -= sizeof(uint32_t); /* take off word 0 */ 7416 while (payload_len) { 7417 rscn_did.un.word = be32_to_cpu(*lp++); 7418 payload_len -= sizeof(uint32_t); 7419 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) { 7420 case RSCN_ADDRESS_FORMAT_PORT: 7421 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 7422 && (ns_did.un.b.area == rscn_did.un.b.area) 7423 && (ns_did.un.b.id == rscn_did.un.b.id)) 7424 goto return_did_out; 7425 break; 7426 case RSCN_ADDRESS_FORMAT_AREA: 7427 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 7428 && (ns_did.un.b.area == rscn_did.un.b.area)) 7429 goto return_did_out; 7430 break; 7431 case RSCN_ADDRESS_FORMAT_DOMAIN: 7432 if (ns_did.un.b.domain == rscn_did.un.b.domain) 7433 goto return_did_out; 7434 break; 7435 case RSCN_ADDRESS_FORMAT_FABRIC: 7436 goto return_did_out; 7437 } 7438 } 7439 } 7440 /* Indicate we are done with walking fc_rscn_id_list on this vport */ 7441 vport->fc_rscn_flush = 0; 7442 return 0; 7443 return_did_out: 7444 /* Indicate we are done with walking fc_rscn_id_list on this
vport */ 7445 vport->fc_rscn_flush = 0; 7446 return did; 7447 } 7448 7449 /** 7450 * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn 7451 * @vport: pointer to a host virtual N_Port data structure. 7452 * 7453 * This routine sends recovery (NLP_EVT_DEVICE_RECOVERY) event to the 7454 * state machine for a @vport's nodes that are with pending RSCN (Registration 7455 * State Change Notification). 7456 * 7457 * Return code 7458 * 0 - Successful (currently alway return 0) 7459 **/ 7460 static int 7461 lpfc_rscn_recovery_check(struct lpfc_vport *vport) 7462 { 7463 struct lpfc_nodelist *ndlp = NULL; 7464 7465 /* Move all affected nodes by pending RSCNs to NPR state. */ 7466 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 7467 if ((ndlp->nlp_state == NLP_STE_UNUSED_NODE) || 7468 !lpfc_rscn_payload_check(vport, ndlp->nlp_DID)) 7469 continue; 7470 7471 /* NVME Target mode does not do RSCN Recovery. */ 7472 if (vport->phba->nvmet_support) 7473 continue; 7474 7475 /* If we are in the process of doing discovery on this 7476 * NPort, let it continue on its own. 7477 */ 7478 switch (ndlp->nlp_state) { 7479 case NLP_STE_PLOGI_ISSUE: 7480 case NLP_STE_ADISC_ISSUE: 7481 case NLP_STE_REG_LOGIN_ISSUE: 7482 case NLP_STE_PRLI_ISSUE: 7483 case NLP_STE_LOGO_ISSUE: 7484 continue; 7485 } 7486 7487 lpfc_disc_state_machine(vport, ndlp, NULL, 7488 NLP_EVT_DEVICE_RECOVERY); 7489 lpfc_cancel_retry_delay_tmo(vport, ndlp); 7490 } 7491 return 0; 7492 } 7493 7494 /** 7495 * lpfc_send_rscn_event - Send an RSCN event to management application 7496 * @vport: pointer to a host virtual N_Port data structure. 7497 * @cmdiocb: pointer to lpfc command iocb data structure. 7498 * 7499 * lpfc_send_rscn_event sends an RSCN netlink event to management 7500 * applications. 7501 */ 7502 static void 7503 lpfc_send_rscn_event(struct lpfc_vport *vport, 7504 struct lpfc_iocbq *cmdiocb) 7505 { 7506 struct lpfc_dmabuf *pcmd; 7507 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7508 uint32_t *payload_ptr; 7509 uint32_t payload_len; 7510 struct lpfc_rscn_event_header *rscn_event_data; 7511 7512 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 7513 payload_ptr = (uint32_t *) pcmd->virt; 7514 payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK); 7515 7516 rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) + 7517 payload_len, GFP_KERNEL); 7518 if (!rscn_event_data) { 7519 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 7520 "0147 Failed to allocate memory for RSCN event\n"); 7521 return; 7522 } 7523 rscn_event_data->event_type = FC_REG_RSCN_EVENT; 7524 rscn_event_data->payload_length = payload_len; 7525 memcpy(rscn_event_data->rscn_payload, payload_ptr, 7526 payload_len); 7527 7528 fc_host_post_vendor_event(shost, 7529 fc_get_event_number(), 7530 sizeof(struct lpfc_rscn_event_header) + payload_len, 7531 (char *)rscn_event_data, 7532 LPFC_NL_VENDOR_ID); 7533 7534 kfree(rscn_event_data); 7535 } 7536 7537 /** 7538 * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb 7539 * @vport: pointer to a host virtual N_Port data structure. 7540 * @cmdiocb: pointer to lpfc command iocb data structure. 7541 * @ndlp: pointer to a node-list data structure. 7542 * 7543 * This routine processes an unsolicited RSCN (Registration State Change 7544 * Notification) IOCB. First, the payload of the unsolicited RSCN is walked 7545 * to invoke fc_host_post_event() routine to the FC transport layer. 
If the 7546 * discover state machine is about to begin discovery, it just accepts the 7547 * RSCN and the discovery process will satisfy the RSCN. If this RSCN only 7548 * contains N_Port IDs for other vports on this HBA, it just accepts the 7549 * RSCN and ignore processing it. If the state machine is in the recovery 7550 * state, the fc_rscn_id_list of this @vport is walked and the 7551 * lpfc_rscn_recovery_check() routine is invoked to send recovery event for 7552 * all nodes that match RSCN payload. Otherwise, the lpfc_els_handle_rscn() 7553 * routine is invoked to handle the RSCN event. 7554 * 7555 * Return code 7556 * 0 - Just sent the acc response 7557 * 1 - Sent the acc response and waited for name server completion 7558 **/ 7559 static int 7560 lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7561 struct lpfc_nodelist *ndlp) 7562 { 7563 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7564 struct lpfc_hba *phba = vport->phba; 7565 struct lpfc_dmabuf *pcmd; 7566 uint32_t *lp, *datap; 7567 uint32_t payload_len, length, nportid, *cmd; 7568 int rscn_cnt; 7569 int rscn_id = 0, hba_id = 0; 7570 int i, tmo; 7571 7572 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 7573 lp = (uint32_t *) pcmd->virt; 7574 7575 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); 7576 payload_len -= sizeof(uint32_t); /* take off word 0 */ 7577 /* RSCN received */ 7578 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 7579 "0214 RSCN received Data: x%x x%x x%x x%x\n", 7580 vport->fc_flag, payload_len, *lp, 7581 vport->fc_rscn_id_cnt); 7582 7583 /* Send an RSCN event to the management application */ 7584 lpfc_send_rscn_event(vport, cmdiocb); 7585 7586 for (i = 0; i < payload_len/sizeof(uint32_t); i++) 7587 fc_host_post_event(shost, fc_get_event_number(), 7588 FCH_EVT_RSCN, lp[i]); 7589 7590 /* Check if RSCN is coming from a direct-connected remote NPort */ 7591 if (vport->fc_flag & FC_PT2PT) { 7592 /* If so, just ACC it, no other action needed for now */ 7593 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7594 "2024 pt2pt RSCN %08x Data: x%x x%x\n", 7595 *lp, vport->fc_flag, payload_len); 7596 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 7597 7598 /* Check to see if we need to NVME rescan this target 7599 * remoteport. 7600 */ 7601 if (ndlp->nlp_fc4_type & NLP_FC4_NVME && 7602 ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY)) 7603 lpfc_nvme_rescan_port(vport, ndlp); 7604 return 0; 7605 } 7606 7607 /* If we are about to begin discovery, just ACC the RSCN. 7608 * Discovery processing will satisfy it. 7609 */ 7610 if (vport->port_state <= LPFC_NS_QRY) { 7611 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 7612 "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x", 7613 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 7614 7615 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 7616 return 0; 7617 } 7618 7619 /* If this RSCN just contains NPortIDs for other vports on this HBA, 7620 * just ACC and ignore it. 
7621 */ 7622 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 7623 !(vport->cfg_peer_port_login)) { 7624 i = payload_len; 7625 datap = lp; 7626 while (i > 0) { 7627 nportid = *datap++; 7628 nportid = ((be32_to_cpu(nportid)) & Mask_DID); 7629 i -= sizeof(uint32_t); 7630 rscn_id++; 7631 if (lpfc_find_vport_by_did(phba, nportid)) 7632 hba_id++; 7633 } 7634 if (rscn_id == hba_id) { 7635 /* ALL NPortIDs in RSCN are on HBA */ 7636 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 7637 "0219 Ignore RSCN " 7638 "Data: x%x x%x x%x x%x\n", 7639 vport->fc_flag, payload_len, 7640 *lp, vport->fc_rscn_id_cnt); 7641 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 7642 "RCV RSCN vport: did:x%x/ste:x%x flg:x%x", 7643 ndlp->nlp_DID, vport->port_state, 7644 ndlp->nlp_flag); 7645 7646 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, 7647 ndlp, NULL); 7648 return 0; 7649 } 7650 } 7651 7652 spin_lock_irq(shost->host_lock); 7653 if (vport->fc_rscn_flush) { 7654 /* Another thread is walking fc_rscn_id_list on this vport */ 7655 vport->fc_flag |= FC_RSCN_DISCOVERY; 7656 spin_unlock_irq(shost->host_lock); 7657 /* Send back ACC */ 7658 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 7659 return 0; 7660 } 7661 /* Indicate we are walking fc_rscn_id_list on this vport */ 7662 vport->fc_rscn_flush = 1; 7663 spin_unlock_irq(shost->host_lock); 7664 /* Get the array count after successfully have the token */ 7665 rscn_cnt = vport->fc_rscn_id_cnt; 7666 /* If we are already processing an RSCN, save the received 7667 * RSCN payload buffer, cmdiocb->context2 to process later. 7668 */ 7669 if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) { 7670 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 7671 "RCV RSCN defer: did:x%x/ste:x%x flg:x%x", 7672 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 7673 7674 spin_lock_irq(shost->host_lock); 7675 vport->fc_flag |= FC_RSCN_DEFERRED; 7676 7677 /* Restart disctmo if its already running */ 7678 if (vport->fc_flag & FC_DISC_TMO) { 7679 tmo = ((phba->fc_ratov * 3) + 3); 7680 mod_timer(&vport->fc_disctmo, 7681 jiffies + msecs_to_jiffies(1000 * tmo)); 7682 } 7683 if ((rscn_cnt < FC_MAX_HOLD_RSCN) && 7684 !(vport->fc_flag & FC_RSCN_DISCOVERY)) { 7685 vport->fc_flag |= FC_RSCN_MODE; 7686 spin_unlock_irq(shost->host_lock); 7687 if (rscn_cnt) { 7688 cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt; 7689 length = be32_to_cpu(*cmd & ~ELS_CMD_MASK); 7690 } 7691 if ((rscn_cnt) && 7692 (payload_len + length <= LPFC_BPL_SIZE)) { 7693 *cmd &= ELS_CMD_MASK; 7694 *cmd |= cpu_to_be32(payload_len + length); 7695 memcpy(((uint8_t *)cmd) + length, lp, 7696 payload_len); 7697 } else { 7698 vport->fc_rscn_id_list[rscn_cnt] = pcmd; 7699 vport->fc_rscn_id_cnt++; 7700 /* If we zero, cmdiocb->context2, the calling 7701 * routine will not try to free it. 
7702 */ 7703 cmdiocb->context2 = NULL; 7704 } 7705 /* Deferred RSCN */ 7706 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 7707 "0235 Deferred RSCN " 7708 "Data: x%x x%x x%x\n", 7709 vport->fc_rscn_id_cnt, vport->fc_flag, 7710 vport->port_state); 7711 } else { 7712 vport->fc_flag |= FC_RSCN_DISCOVERY; 7713 spin_unlock_irq(shost->host_lock); 7714 /* ReDiscovery RSCN */ 7715 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 7716 "0234 ReDiscovery RSCN " 7717 "Data: x%x x%x x%x\n", 7718 vport->fc_rscn_id_cnt, vport->fc_flag, 7719 vport->port_state); 7720 } 7721 /* Indicate we are done walking fc_rscn_id_list on this vport */ 7722 vport->fc_rscn_flush = 0; 7723 /* Send back ACC */ 7724 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 7725 /* send RECOVERY event for ALL nodes that match RSCN payload */ 7726 lpfc_rscn_recovery_check(vport); 7727 return 0; 7728 } 7729 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 7730 "RCV RSCN: did:x%x/ste:x%x flg:x%x", 7731 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 7732 7733 spin_lock_irq(shost->host_lock); 7734 vport->fc_flag |= FC_RSCN_MODE; 7735 spin_unlock_irq(shost->host_lock); 7736 vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd; 7737 /* Indicate we are done walking fc_rscn_id_list on this vport */ 7738 vport->fc_rscn_flush = 0; 7739 /* 7740 * If we zero, cmdiocb->context2, the calling routine will 7741 * not try to free it. 7742 */ 7743 cmdiocb->context2 = NULL; 7744 lpfc_set_disctmo(vport); 7745 /* Send back ACC */ 7746 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 7747 /* send RECOVERY event for ALL nodes that match RSCN payload */ 7748 lpfc_rscn_recovery_check(vport); 7749 return lpfc_els_handle_rscn(vport); 7750 } 7751 7752 /** 7753 * lpfc_els_handle_rscn - Handle rscn for a vport 7754 * @vport: pointer to a host virtual N_Port data structure. 7755 * 7756 * This routine handles the Registration State Configuration Notification 7757 * (RSCN) for a @vport. If login to NameServer does not exist, a new ndlp shall 7758 * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise, 7759 * if the ndlp to NameServer exists, a Common Transport (CT) command to the 7760 * NameServer shall be issued. If CT command to the NameServer fails to be 7761 * issued, the lpfc_els_flush_rscn() routine shall be invoked to clean up any 7762 * RSCN activities with the @vport. 7763 * 7764 * Return code 7765 * 0 - Cleaned up rscn on the @vport 7766 * 1 - Wait for plogi to name server before proceed 7767 **/ 7768 int 7769 lpfc_els_handle_rscn(struct lpfc_vport *vport) 7770 { 7771 struct lpfc_nodelist *ndlp; 7772 struct lpfc_hba *phba = vport->phba; 7773 7774 /* Ignore RSCN if the port is being torn down. */ 7775 if (vport->load_flag & FC_UNLOADING) { 7776 lpfc_els_flush_rscn(vport); 7777 return 0; 7778 } 7779 7780 /* Start timer for RSCN processing */ 7781 lpfc_set_disctmo(vport); 7782 7783 /* RSCN processed */ 7784 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 7785 "0215 RSCN processed Data: x%x x%x x%x x%x x%x x%x\n", 7786 vport->fc_flag, 0, vport->fc_rscn_id_cnt, 7787 vport->port_state, vport->num_disc_nodes, 7788 vport->gidft_inp); 7789 7790 /* To process RSCN, first compare RSCN data with NameServer */ 7791 vport->fc_ns_retry = 0; 7792 vport->num_disc_nodes = 0; 7793 7794 ndlp = lpfc_findnode_did(vport, NameServer_DID); 7795 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { 7796 /* Good ndlp, issue CT Request to NameServer. Need to 7797 * know how many gidfts were issued. 
If none, then just 7798 * flush the RSCN. Otherwise, the outstanding requests 7799 * need to complete. 7800 */ 7801 if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_FT) { 7802 if (lpfc_issue_gidft(vport) > 0) 7803 return 1; 7804 } else if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_PT) { 7805 if (lpfc_issue_gidpt(vport) > 0) 7806 return 1; 7807 } else { 7808 return 1; 7809 } 7810 } else { 7811 /* Nameserver login in question. Revalidate. */ 7812 if (ndlp) { 7813 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE; 7814 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 7815 } else { 7816 ndlp = lpfc_nlp_init(vport, NameServer_DID); 7817 if (!ndlp) { 7818 lpfc_els_flush_rscn(vport); 7819 return 0; 7820 } 7821 ndlp->nlp_prev_state = ndlp->nlp_state; 7822 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 7823 } 7824 ndlp->nlp_type |= NLP_FABRIC; 7825 lpfc_issue_els_plogi(vport, NameServer_DID, 0); 7826 /* Wait for NameServer login cmpl before we can 7827 * continue 7828 */ 7829 return 1; 7830 } 7831 7832 lpfc_els_flush_rscn(vport); 7833 return 0; 7834 } 7835 7836 /** 7837 * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb 7838 * @vport: pointer to a host virtual N_Port data structure. 7839 * @cmdiocb: pointer to lpfc command iocb data structure. 7840 * @ndlp: pointer to a node-list data structure. 7841 * 7842 * This routine processes a Fabric Login (FLOGI) IOCB received as an ELS 7843 * unsolicited event. An unsolicited FLOGI can be received in a point-to- 7844 * point topology. As an unsolicited FLOGI should not be received in a loop 7845 * mode, any unsolicited FLOGI received in loop mode shall be ignored. The 7846 * lpfc_check_sparm() routine is invoked to check the parameters in the 7847 * unsolicited FLOGI. If parameter validation fails, the routine 7848 * lpfc_els_rsp_reject() shall be called with reject reason code set to 7849 * LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the 7850 * FLOGI shall be compared with the Port WWN of the @vport to determine who 7851 * will initiate PLOGI. The higher lexicographical value party shall have 7852 * higher priority (as the winning port) and will initiate PLOGI and 7853 * communicate Port_IDs (Addresses) for both nodes in PLOGI. The result 7854 * of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI 7855 * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI.
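 * In point-to-point mode the ACC may be deferred until after the driver has issued its own FLOGI (see the HBA_FLOGI_ISSUED handling below).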
7856 * 7857 * Return code 7858 * 0 - Successfully processed the unsolicited flogi 7859 * 1 - Failed to process the unsolicited flogi 7860 **/ 7861 static int 7862 lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7863 struct lpfc_nodelist *ndlp) 7864 { 7865 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7866 struct lpfc_hba *phba = vport->phba; 7867 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 7868 uint32_t *lp = (uint32_t *) pcmd->virt; 7869 IOCB_t *icmd = &cmdiocb->iocb; 7870 struct serv_parm *sp; 7871 LPFC_MBOXQ_t *mbox; 7872 uint32_t cmd, did; 7873 int rc; 7874 uint32_t fc_flag = 0; 7875 uint32_t port_state = 0; 7876 7877 cmd = *lp++; 7878 sp = (struct serv_parm *) lp; 7879 7880 /* FLOGI received */ 7881 7882 lpfc_set_disctmo(vport); 7883 7884 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 7885 /* We should never receive a FLOGI in loop mode, ignore it */ 7886 did = icmd->un.elsreq64.remoteID; 7887 7888 /* An FLOGI ELS command <elsCmd> was received from DID <did> in 7889 Loop Mode */ 7890 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 7891 "0113 An FLOGI ELS command x%x was " 7892 "received from DID x%x in Loop Mode\n", 7893 cmd, did); 7894 return 1; 7895 } 7896 7897 (void) lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1); 7898 7899 /* 7900 * If our portname is greater than the remote portname, 7901 * then we initiate Nport login. 7902 */ 7903 7904 rc = memcmp(&vport->fc_portname, &sp->portName, 7905 sizeof(struct lpfc_name)); 7906 7907 if (!rc) { 7908 if (phba->sli_rev < LPFC_SLI_REV4) { 7909 mbox = mempool_alloc(phba->mbox_mem_pool, 7910 GFP_KERNEL); 7911 if (!mbox) 7912 return 1; 7913 lpfc_linkdown(phba); 7914 lpfc_init_link(phba, mbox, 7915 phba->cfg_topology, 7916 phba->cfg_link_speed); 7917 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; 7918 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 7919 mbox->vport = vport; 7920 rc = lpfc_sli_issue_mbox(phba, mbox, 7921 MBX_NOWAIT); 7922 lpfc_set_loopback_flag(phba); 7923 if (rc == MBX_NOT_FINISHED) 7924 mempool_free(mbox, phba->mbox_mem_pool); 7925 return 1; 7926 } 7927 7928 /* abort the flogi coming back to ourselves 7929 * due to external loopback on the port. 7930 */ 7931 lpfc_els_abort_flogi(phba); 7932 return 0; 7933 7934 } else if (rc > 0) { /* greater than */ 7935 spin_lock_irq(shost->host_lock); 7936 vport->fc_flag |= FC_PT2PT_PLOGI; 7937 spin_unlock_irq(shost->host_lock); 7938 7939 /* If we have the high WWPN we can assign our own 7940 * myDID; otherwise, we have to WAIT for a PLOGI 7941 * from the remote NPort to find out what it 7942 * will be. 7943 */ 7944 vport->fc_myDID = PT2PT_LocalID; 7945 } else { 7946 vport->fc_myDID = PT2PT_RemoteID; 7947 } 7948 7949 /* 7950 * The vport state should go to LPFC_FLOGI only 7951 * AFTER we issue a FLOGI, not receive one. 7952 */ 7953 spin_lock_irq(shost->host_lock); 7954 fc_flag = vport->fc_flag; 7955 port_state = vport->port_state; 7956 vport->fc_flag |= FC_PT2PT; 7957 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 7958 7959 /* Acking an unsol FLOGI. Count 1 for link bounce 7960 * work-around. 7961 */ 7962 vport->rcv_flogi_cnt++; 7963 spin_unlock_irq(shost->host_lock); 7964 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7965 "3311 Rcv Flogi PS x%x new PS x%x " 7966 "fc_flag x%x new fc_flag x%x\n", 7967 port_state, vport->port_state, 7968 fc_flag, vport->fc_flag); 7969 7970 /* 7971 * We temporarily set fc_myDID to make it look like we are 7972 * a Fabric. This is done just so we end up with the right 7973 * did / sid on the FLOGI ACC rsp. 
7974 */ 7975 did = vport->fc_myDID; 7976 vport->fc_myDID = Fabric_DID; 7977 7978 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); 7979 7980 /* Defer ACC response until AFTER we issue a FLOGI */ 7981 if (!(phba->hba_flag & HBA_FLOGI_ISSUED)) { 7982 phba->defer_flogi_acc_rx_id = cmdiocb->iocb.ulpContext; 7983 phba->defer_flogi_acc_ox_id = 7984 cmdiocb->iocb.unsli3.rcvsli3.ox_id; 7985 7986 vport->fc_myDID = did; 7987 7988 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7989 "3344 Deferring FLOGI ACC: rx_id: x%x," 7990 " ox_id: x%x, hba_flag x%x\n", 7991 phba->defer_flogi_acc_rx_id, 7992 phba->defer_flogi_acc_ox_id, phba->hba_flag); 7993 7994 phba->defer_flogi_acc_flag = true; 7995 7996 return 0; 7997 } 7998 7999 /* Send back ACC */ 8000 lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, cmdiocb, ndlp, NULL); 8001 8002 /* Now lets put fc_myDID back to what its supposed to be */ 8003 vport->fc_myDID = did; 8004 8005 return 0; 8006 } 8007 8008 /** 8009 * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb 8010 * @vport: pointer to a host virtual N_Port data structure. 8011 * @cmdiocb: pointer to lpfc command iocb data structure. 8012 * @ndlp: pointer to a node-list data structure. 8013 * 8014 * This routine processes Request Node Identification Data (RNID) IOCB 8015 * received as an ELS unsolicited event. Only when the RNID specified format 8016 * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data) 8017 * present, this routine will invoke the lpfc_els_rsp_rnid_acc() routine to 8018 * Accept (ACC) the RNID ELS command. All the other RNID formats are 8019 * rejected by invoking the lpfc_els_rsp_reject() routine. 8020 * 8021 * Return code 8022 * 0 - Successfully processed rnid iocb (currently always return 0) 8023 **/ 8024 static int 8025 lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8026 struct lpfc_nodelist *ndlp) 8027 { 8028 struct lpfc_dmabuf *pcmd; 8029 uint32_t *lp; 8030 RNID *rn; 8031 struct ls_rjt stat; 8032 8033 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 8034 lp = (uint32_t *) pcmd->virt; 8035 8036 lp++; 8037 rn = (RNID *) lp; 8038 8039 /* RNID received */ 8040 8041 switch (rn->Format) { 8042 case 0: 8043 case RNID_TOPOLOGY_DISC: 8044 /* Send back ACC */ 8045 lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp); 8046 break; 8047 default: 8048 /* Reject this request because format not supported */ 8049 stat.un.b.lsRjtRsvd0 = 0; 8050 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8051 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8052 stat.un.b.vendorUnique = 0; 8053 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 8054 NULL); 8055 } 8056 return 0; 8057 } 8058 8059 /** 8060 * lpfc_els_rcv_echo - Process an unsolicited echo iocb 8061 * @vport: pointer to a host virtual N_Port data structure. 8062 * @cmdiocb: pointer to lpfc command iocb data structure. 8063 * @ndlp: pointer to a node-list data structure. 
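 *
 * This routine processes an ECHO IOCB received as an ELS unsolicited event. It skips the ELS command word of the received ECHO and passes the remaining payload to lpfc_els_rsp_echo_acc() to send back an ECHO Accept (ACC) response.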
8064 * 8065 * Return code 8066 * 0 - Successfully processed echo iocb (currently always return 0) 8067 **/ 8068 static int 8069 lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8070 struct lpfc_nodelist *ndlp) 8071 { 8072 uint8_t *pcmd; 8073 8074 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt); 8075 8076 /* skip over first word of echo command to find echo data */ 8077 pcmd += sizeof(uint32_t); 8078 8079 lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp); 8080 return 0; 8081 } 8082 8083 /** 8084 * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb 8085 * @vport: pointer to a host virtual N_Port data structure. 8086 * @cmdiocb: pointer to lpfc command iocb data structure. 8087 * @ndlp: pointer to a node-list data structure. 8088 * 8089 * This routine processes a Link Incident Report Registration(LIRR) IOCB 8090 * received as an ELS unsolicited event. Currently, this function just invokes 8091 * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally. 8092 * 8093 * Return code 8094 * 0 - Successfully processed lirr iocb (currently always return 0) 8095 **/ 8096 static int 8097 lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8098 struct lpfc_nodelist *ndlp) 8099 { 8100 struct ls_rjt stat; 8101 8102 /* For now, unconditionally reject this command */ 8103 stat.un.b.lsRjtRsvd0 = 0; 8104 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8105 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8106 stat.un.b.vendorUnique = 0; 8107 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 8108 return 0; 8109 } 8110 8111 /** 8112 * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb 8113 * @vport: pointer to a host virtual N_Port data structure. 8114 * @cmdiocb: pointer to lpfc command iocb data structure. 8115 * @ndlp: pointer to a node-list data structure. 8116 * 8117 * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB 8118 * received as an ELS unsolicited event. A request to RRQ shall only 8119 * be accepted if the Originator Nx_Port N_Port_ID or the Responder 8120 * Nx_Port N_Port_ID of the target Exchange is the same as the 8121 * N_Port_ID of the Nx_Port that makes the request. If the RRQ is 8122 * not accepted, an LS_RJT with reason code "Unable to perform 8123 * command request" and reason code explanation "Invalid Originator 8124 * S_ID" shall be returned. For now, we just unconditionally accept 8125 * RRQ from the target. 8126 **/ 8127 static void 8128 lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8129 struct lpfc_nodelist *ndlp) 8130 { 8131 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8132 if (vport->phba->sli_rev == LPFC_SLI_REV4) 8133 lpfc_els_clear_rrq(vport, cmdiocb, ndlp); 8134 } 8135 8136 /** 8137 * lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd 8138 * @phba: pointer to lpfc hba data structure. 8139 * @pmb: pointer to the driver internal queue element for mailbox command. 8140 * 8141 * This routine is the completion callback function for the MBX_READ_LNK_STAT 8142 * mailbox command. This callback function is to actually send the Accept 8143 * (ACC) response to a Read Link Status (RLS) unsolicited IOCB event. It 8144 * collects the link statistics from the completion of the MBX_READ_LNK_STAT 8145 * mailbox command, constructs the RLS response with the link statistics 8146 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC 8147 * response to the RLS. 
8148 * 8149 * Note that the ndlp reference count will be incremented by 1 for holding the 8150 * ndlp and the reference to ndlp will be stored into the context1 field of 8151 * the IOCB for the completion callback function to the RLS Accept Response 8152 * ELS IOCB command. 8153 * 8154 **/ 8155 static void 8156 lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 8157 { 8158 int rc = 0; 8159 MAILBOX_t *mb; 8160 IOCB_t *icmd; 8161 struct RLS_RSP *rls_rsp; 8162 uint8_t *pcmd; 8163 struct lpfc_iocbq *elsiocb; 8164 struct lpfc_nodelist *ndlp; 8165 uint16_t oxid; 8166 uint16_t rxid; 8167 uint32_t cmdsize; 8168 8169 mb = &pmb->u.mb; 8170 8171 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; 8172 rxid = (uint16_t)((unsigned long)(pmb->ctx_buf) & 0xffff); 8173 oxid = (uint16_t)(((unsigned long)(pmb->ctx_buf) >> 16) & 0xffff); 8174 pmb->ctx_buf = NULL; 8175 pmb->ctx_ndlp = NULL; 8176 8177 if (mb->mbxStatus) { 8178 mempool_free(pmb, phba->mbox_mem_pool); 8179 return; 8180 } 8181 8182 cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t); 8183 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 8184 lpfc_max_els_tries, ndlp, 8185 ndlp->nlp_DID, ELS_CMD_ACC); 8186 8187 /* Decrement the ndlp reference count from previous mbox command */ 8188 lpfc_nlp_put(ndlp); 8189 8190 if (!elsiocb) { 8191 mempool_free(pmb, phba->mbox_mem_pool); 8192 return; 8193 } 8194 8195 icmd = &elsiocb->iocb; 8196 icmd->ulpContext = rxid; 8197 icmd->unsli3.rcvsli3.ox_id = oxid; 8198 8199 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 8200 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 8201 pcmd += sizeof(uint32_t); /* Skip past command */ 8202 rls_rsp = (struct RLS_RSP *)pcmd; 8203 8204 rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt); 8205 rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt); 8206 rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt); 8207 rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt); 8208 rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord); 8209 rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt); 8210 mempool_free(pmb, phba->mbox_mem_pool); 8211 /* Xmit ELS RLS ACC response tag <ulpIoTag> */ 8212 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 8213 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, " 8214 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 8215 elsiocb->iotag, elsiocb->iocb.ulpContext, 8216 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 8217 ndlp->nlp_rpi); 8218 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 8219 phba->fc_stat.elsXmitACC++; 8220 elsiocb->context1 = lpfc_nlp_get(ndlp); 8221 if (!elsiocb->context1) { 8222 lpfc_els_free_iocb(phba, elsiocb); 8223 return; 8224 } 8225 8226 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 8227 if (rc == IOCB_ERROR) { 8228 lpfc_els_free_iocb(phba, elsiocb); 8229 lpfc_nlp_put(ndlp); 8230 } 8231 return; 8232 } 8233 8234 /** 8235 * lpfc_els_rcv_rls - Process an unsolicited rls iocb 8236 * @vport: pointer to a host virtual N_Port data structure. 8237 * @cmdiocb: pointer to lpfc command iocb data structure. 8238 * @ndlp: pointer to a node-list data structure. 8239 * 8240 * This routine processes Read Link Status (RLS) IOCB received as an 8241 * ELS unsolicited event. It first checks the remote port state. If the 8242 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE 8243 * state, it invokes the lpfc_els_rsl_reject() routine to send the reject 8244 * response. 
Otherwise, it issues the MBX_READ_LNK_STAT mailbox command 8245 * for reading the HBA link statistics. The callback function 8246 * lpfc_els_rsp_rls_acc(), set on the MBX_READ_LNK_STAT mailbox command, 8247 * then sends out the RLS Accept (ACC) response. 8248 * 8249 * Return codes 8250 * 0 - Successfully processed rls iocb (currently always returns 0) 8251 **/ 8252 static int 8253 lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8254 struct lpfc_nodelist *ndlp) 8255 { 8256 struct lpfc_hba *phba = vport->phba; 8257 LPFC_MBOXQ_t *mbox; 8258 struct ls_rjt stat; 8259 8260 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 8261 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) 8262 /* reject the unsolicited RLS request and done with it */ 8263 goto reject_out; 8264 8265 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC); 8266 if (mbox) { 8267 lpfc_read_lnk_stat(phba, mbox); 8268 mbox->ctx_buf = (void *)((unsigned long) 8269 ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) | 8270 cmdiocb->iocb.ulpContext)); /* rx_id */ 8271 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 8272 if (!mbox->ctx_ndlp) 8273 goto node_err; 8274 mbox->vport = vport; 8275 mbox->mbox_cmpl = lpfc_els_rsp_rls_acc; 8276 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 8277 != MBX_NOT_FINISHED) 8278 /* Mbox completion will send ELS Response */ 8279 return 0; 8280 /* Decrement reference count used for the failed mbox 8281 * command. 8282 */ 8283 lpfc_nlp_put(ndlp); 8284 node_err: 8285 mempool_free(mbox, phba->mbox_mem_pool); 8286 } 8287 reject_out: 8288 /* issue rejection response */ 8289 stat.un.b.lsRjtRsvd0 = 0; 8290 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8291 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8292 stat.un.b.vendorUnique = 0; 8293 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 8294 return 0; 8295 } 8296 8297 /** 8298 * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb 8299 * @vport: pointer to a host virtual N_Port data structure. 8300 * @cmdiocb: pointer to lpfc command iocb data structure. 8301 * @ndlp: pointer to a node-list data structure. 8302 * 8303 * This routine processes a Read Timeout Value (RTV) IOCB received as an 8304 * ELS unsolicited event. It first checks the remote port state. If the 8305 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE 8306 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject 8307 * response. Otherwise, it sends the Accept (ACC) response to a Read Timeout 8308 * Value (RTV) unsolicited IOCB event. 8309 * 8310 * Note that the ndlp reference count will be incremented by 1 for holding the 8311 * ndlp and the reference to ndlp will be stored into the context1 field of 8312 * the IOCB for the completion callback function to the RTV Accept Response 8313 * ELS IOCB command.
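 * The RTV ACC payload returns R_A_TOV in milliseconds, E_D_TOV, and the qtov word carrying the E_D_TOV resolution bit.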
8314 * 8315 * Return codes 8316 * 0 - Successfully processed rtv iocb (currently always returns 0) 8317 **/ 8318 static int 8319 lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8320 struct lpfc_nodelist *ndlp) 8321 { 8322 int rc = 0; 8323 struct lpfc_hba *phba = vport->phba; 8324 struct ls_rjt stat; 8325 struct RTV_RSP *rtv_rsp; 8326 uint8_t *pcmd; 8327 struct lpfc_iocbq *elsiocb; 8328 uint32_t cmdsize; 8329 8330 8331 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 8332 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) 8333 /* reject the unsolicited RTV request and done with it */ 8334 goto reject_out; 8335 8336 cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t); 8337 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 8338 lpfc_max_els_tries, ndlp, 8339 ndlp->nlp_DID, ELS_CMD_ACC); 8340 8341 if (!elsiocb) 8342 return 1; 8343 8344 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 8345 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 8346 pcmd += sizeof(uint32_t); /* Skip past command */ 8347 8348 /* use the command's xri in the response */ 8349 elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext; /* Xri / rx_id */ 8350 elsiocb->iocb.unsli3.rcvsli3.ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id; 8351 8352 rtv_rsp = (struct RTV_RSP *)pcmd; 8353 8354 /* populate RTV payload */ 8355 rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */ 8356 rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov); 8357 bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0); 8358 bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */ 8359 rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov); 8360 8361 /* Xmit ELS RTV ACC response tag <ulpIoTag> */ 8362 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 8363 "2875 Xmit ELS RTV ACC response tag x%x xri x%x, " 8364 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, " 8365 "Data: x%x x%x x%x\n", 8366 elsiocb->iotag, elsiocb->iocb.ulpContext, 8367 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 8368 ndlp->nlp_rpi, 8369 rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov); 8370 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 8371 phba->fc_stat.elsXmitACC++; 8372 elsiocb->context1 = lpfc_nlp_get(ndlp); 8373 if (!elsiocb->context1) { 8374 lpfc_els_free_iocb(phba, elsiocb); 8375 return 0; 8376 } 8377 8378 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 8379 if (rc == IOCB_ERROR) { 8380 lpfc_els_free_iocb(phba, elsiocb); 8381 lpfc_nlp_put(ndlp); 8382 } 8383 return 0; 8384 8385 reject_out: 8386 /* issue rejection response */ 8387 stat.un.b.lsRjtRsvd0 = 0; 8388 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8389 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8390 stat.un.b.vendorUnique = 0; 8391 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 8392 return 0; 8393 } 8394 8395 /* lpfc_issue_els_rrq - Issue an els rrq iocb 8396 * @vport: pointer to a host virtual N_Port data structure. 8397 * @ndlp: pointer to a node-list data structure. 8398 * @did: DID of the target. 8399 * @rrq: Pointer to the rrq struct. 8400 * 8401 * Build an ELS RRQ command and send it to the target. If the issue_iocb is 8402 * successful, the completion handler will clear the RRQ. 8403 * 8404 * Return codes 8405 * 0 - Successfully sent rrq els iocb. 8406 * 1 - Failed to send rrq els iocb.
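 * Note that an ndlp reference is taken for the RRQ IOCB and is released if the iocb cannot be issued.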
8407 **/ 8408 static int 8409 lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 8410 uint32_t did, struct lpfc_node_rrq *rrq) 8411 { 8412 struct lpfc_hba *phba = vport->phba; 8413 struct RRQ *els_rrq; 8414 struct lpfc_iocbq *elsiocb; 8415 uint8_t *pcmd; 8416 uint16_t cmdsize; 8417 int ret; 8418 8419 if (!ndlp) 8420 return 1; 8421 8422 /* If ndlp is not NULL, we will bump the reference count on it */ 8423 cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ)); 8424 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did, 8425 ELS_CMD_RRQ); 8426 if (!elsiocb) 8427 return 1; 8428 8429 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 8430 8431 /* For RRQ request, remainder of payload is Exchange IDs */ 8432 *((uint32_t *) (pcmd)) = ELS_CMD_RRQ; 8433 pcmd += sizeof(uint32_t); 8434 els_rrq = (struct RRQ *) pcmd; 8435 8436 bf_set(rrq_oxid, els_rrq, phba->sli4_hba.xri_ids[rrq->xritag]); 8437 bf_set(rrq_rxid, els_rrq, rrq->rxid); 8438 bf_set(rrq_did, els_rrq, vport->fc_myDID); 8439 els_rrq->rrq = cpu_to_be32(els_rrq->rrq); 8440 els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg); 8441 8442 8443 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 8444 "Issue RRQ: did:x%x", 8445 did, rrq->xritag, rrq->rxid); 8446 elsiocb->context_un.rrq = rrq; 8447 elsiocb->iocb_cmpl = lpfc_cmpl_els_rrq; 8448 8449 lpfc_nlp_get(ndlp); 8450 elsiocb->context1 = ndlp; 8451 8452 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 8453 if (ret == IOCB_ERROR) 8454 goto io_err; 8455 return 0; 8456 8457 io_err: 8458 lpfc_els_free_iocb(phba, elsiocb); 8459 lpfc_nlp_put(ndlp); 8460 return 1; 8461 } 8462 8463 /** 8464 * lpfc_send_rrq - Sends ELS RRQ if needed. 8465 * @phba: pointer to lpfc hba data structure. 8466 * @rrq: pointer to the active rrq. 8467 * 8468 * This routine will call the lpfc_issue_els_rrq if the rrq is 8469 * still active for the xri. If this function returns a failure then 8470 * the caller needs to clean up the RRQ by calling lpfc_clr_active_rrq. 8471 * 8472 * Returns 0 Success. 8473 * 1 Failure. 8474 **/ 8475 int 8476 lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq) 8477 { 8478 struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport, 8479 rrq->nlp_DID); 8480 if (!ndlp) 8481 return 1; 8482 8483 if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag)) 8484 return lpfc_issue_els_rrq(rrq->vport, ndlp, 8485 rrq->nlp_DID, rrq); 8486 else 8487 return 1; 8488 } 8489 8490 /** 8491 * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command 8492 * @vport: pointer to a host virtual N_Port data structure. 8493 * @cmdsize: size of the ELS command. 8494 * @oldiocb: pointer to the original lpfc command iocb data structure. 8495 * @ndlp: pointer to a node-list data structure. 8496 * 8497 * This routine issuees an Accept (ACC) Read Port List (RPL) ELS command. 8498 * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL. 8499 * 8500 * Note that the ndlp reference count will be incremented by 1 for holding the 8501 * ndlp and the reference to ndlp will be stored into the context1 field of 8502 * the IOCB for the completion callback function to the RPL Accept Response 8503 * ELS command. 
8504 * 8505 * Return code 8506 * 0 - Successfully issued ACC RPL ELS command 8507 * 1 - Failed to issue ACC RPL ELS command 8508 **/ 8509 static int 8510 lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize, 8511 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 8512 { 8513 int rc = 0; 8514 struct lpfc_hba *phba = vport->phba; 8515 IOCB_t *icmd, *oldcmd; 8516 RPL_RSP rpl_rsp; 8517 struct lpfc_iocbq *elsiocb; 8518 uint8_t *pcmd; 8519 8520 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 8521 ndlp->nlp_DID, ELS_CMD_ACC); 8522 8523 if (!elsiocb) 8524 return 1; 8525 8526 icmd = &elsiocb->iocb; 8527 oldcmd = &oldiocb->iocb; 8528 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 8529 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 8530 8531 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 8532 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 8533 pcmd += sizeof(uint16_t); 8534 *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize); 8535 pcmd += sizeof(uint16_t); 8536 8537 /* Setup the RPL ACC payload */ 8538 rpl_rsp.listLen = be32_to_cpu(1); 8539 rpl_rsp.index = 0; 8540 rpl_rsp.port_num_blk.portNum = 0; 8541 rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID); 8542 memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname, 8543 sizeof(struct lpfc_name)); 8544 memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t)); 8545 /* Xmit ELS RPL ACC response tag <ulpIoTag> */ 8546 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8547 "0120 Xmit ELS RPL ACC response tag x%x " 8548 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " 8549 "rpi x%x\n", 8550 elsiocb->iotag, elsiocb->iocb.ulpContext, 8551 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 8552 ndlp->nlp_rpi); 8553 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 8554 phba->fc_stat.elsXmitACC++; 8555 elsiocb->context1 = lpfc_nlp_get(ndlp); 8556 if (!elsiocb->context1) { 8557 lpfc_els_free_iocb(phba, elsiocb); 8558 return 1; 8559 } 8560 8561 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 8562 if (rc == IOCB_ERROR) { 8563 lpfc_els_free_iocb(phba, elsiocb); 8564 lpfc_nlp_put(ndlp); 8565 return 1; 8566 } 8567 8568 return 0; 8569 } 8570 8571 /** 8572 * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb 8573 * @vport: pointer to a host virtual N_Port data structure. 8574 * @cmdiocb: pointer to lpfc command iocb data structure. 8575 * @ndlp: pointer to a node-list data structure. 8576 * 8577 * This routine processes Read Port List (RPL) IOCB received as an ELS 8578 * unsolicited event. It first checks the remote port state. If the remote 8579 * port is not in NLP_STE_UNMAPPED_NODE and NLP_STE_MAPPED_NODE states, it 8580 * invokes the lpfc_els_rsp_reject() routine to send reject response. 8581 * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine 8582 * to accept the RPL. 
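 *
 * A rough sketch of the sizing decision made in the body (illustrative only):
 *
 *	if index == 0 and (maxsize == 0 or
 *	                   maxsize * sizeof(u32) >= sizeof(RPL_RSP))
 *		cmdsize = sizeof(u32) + sizeof(RPL_RSP)        full accept
 *	else
 *		cmdsize = sizeof(u32) + maxsize * sizeof(u32)  truncated accept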
8583 * 8584 * Return code 8585 * 0 - Successfully processed rpl iocb (currently always return 0) 8586 **/ 8587 static int 8588 lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8589 struct lpfc_nodelist *ndlp) 8590 { 8591 struct lpfc_dmabuf *pcmd; 8592 uint32_t *lp; 8593 uint32_t maxsize; 8594 uint16_t cmdsize; 8595 RPL *rpl; 8596 struct ls_rjt stat; 8597 8598 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 8599 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { 8600 /* issue rejection response */ 8601 stat.un.b.lsRjtRsvd0 = 0; 8602 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8603 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8604 stat.un.b.vendorUnique = 0; 8605 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 8606 NULL); 8607 /* rejected the unsolicited RPL request and done with it */ 8608 return 0; 8609 } 8610 8611 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 8612 lp = (uint32_t *) pcmd->virt; 8613 rpl = (RPL *) (lp + 1); 8614 maxsize = be32_to_cpu(rpl->maxsize); 8615 8616 /* We support only one port */ 8617 if ((rpl->index == 0) && 8618 ((maxsize == 0) || 8619 ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) { 8620 cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP); 8621 } else { 8622 cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t); 8623 } 8624 lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp); 8625 8626 return 0; 8627 } 8628 8629 /** 8630 * lpfc_els_rcv_farp - Process an unsolicited farp request els command 8631 * @vport: pointer to a virtual N_Port data structure. 8632 * @cmdiocb: pointer to lpfc command iocb data structure. 8633 * @ndlp: pointer to a node-list data structure. 8634 * 8635 * This routine processes Fibre Channel Address Resolution Protocol 8636 * (FARP) Request IOCB received as an ELS unsolicited event. Currently, 8637 * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such, 8638 * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the 8639 * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the 8640 * remote PortName is compared against the FC PortName stored in the @vport 8641 * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is 8642 * compared against the FC NodeName stored in the @vport data structure. 8643 * If any of these matches and the FARP_REQUEST_FARPR flag is set in the 8644 * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is 8645 * invoked to send out FARP Response to the remote node. Before sending the 8646 * FARP Response, however, the FARP_REQUEST_PLOGI flag is check in the FARP 8647 * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi() 8648 * routine is invoked to log into the remote port first. 
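 *
 * In outline (illustrative only, mirroring the checks in the body):
 *
 *	match = (FARP_MATCH_PORT set and RportName == vport portname) or
 *	        (FARP_MATCH_NODE set and RnodeName == vport nodename)
 *	if match and the ndlp is unmapped or mapped:
 *		if FARP_REQUEST_PLOGI is set, issue PLOGI first
 *		if FARP_REQUEST_FARPR is set, issue a FARPR to the requester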
8649 * 8650 * Return code 8651 * 0 - Either the FARP Match Mode not supported or successfully processed 8652 **/ 8653 static int 8654 lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8655 struct lpfc_nodelist *ndlp) 8656 { 8657 struct lpfc_dmabuf *pcmd; 8658 uint32_t *lp; 8659 IOCB_t *icmd; 8660 FARP *fp; 8661 uint32_t cnt, did; 8662 8663 icmd = &cmdiocb->iocb; 8664 did = icmd->un.elsreq64.remoteID; 8665 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 8666 lp = (uint32_t *) pcmd->virt; 8667 8668 lp++; 8669 fp = (FARP *) lp; 8670 /* FARP-REQ received from DID <did> */ 8671 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8672 "0601 FARP-REQ received from DID x%x\n", did); 8673 /* We will only support match on WWPN or WWNN */ 8674 if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) { 8675 return 0; 8676 } 8677 8678 cnt = 0; 8679 /* If this FARP command is searching for my portname */ 8680 if (fp->Mflags & FARP_MATCH_PORT) { 8681 if (memcmp(&fp->RportName, &vport->fc_portname, 8682 sizeof(struct lpfc_name)) == 0) 8683 cnt = 1; 8684 } 8685 8686 /* If this FARP command is searching for my nodename */ 8687 if (fp->Mflags & FARP_MATCH_NODE) { 8688 if (memcmp(&fp->RnodeName, &vport->fc_nodename, 8689 sizeof(struct lpfc_name)) == 0) 8690 cnt = 1; 8691 } 8692 8693 if (cnt) { 8694 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || 8695 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) { 8696 /* Log back into the node before sending the FARP. */ 8697 if (fp->Rflags & FARP_REQUEST_PLOGI) { 8698 ndlp->nlp_prev_state = ndlp->nlp_state; 8699 lpfc_nlp_set_state(vport, ndlp, 8700 NLP_STE_PLOGI_ISSUE); 8701 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 8702 } 8703 8704 /* Send a FARP response to that node */ 8705 if (fp->Rflags & FARP_REQUEST_FARPR) 8706 lpfc_issue_els_farpr(vport, did, 0); 8707 } 8708 } 8709 return 0; 8710 } 8711 8712 /** 8713 * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb 8714 * @vport: pointer to a host virtual N_Port data structure. 8715 * @cmdiocb: pointer to lpfc command iocb data structure. 8716 * @ndlp: pointer to a node-list data structure. 8717 * 8718 * This routine processes Fibre Channel Address Resolution Protocol 8719 * Response (FARPR) IOCB received as an ELS unsolicited event. It simply 8720 * invokes the lpfc_els_rsp_acc() routine to the remote node to accept 8721 * the FARP response request. 8722 * 8723 * Return code 8724 * 0 - Successfully processed FARPR IOCB (currently always return 0) 8725 **/ 8726 static int 8727 lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8728 struct lpfc_nodelist *ndlp) 8729 { 8730 struct lpfc_dmabuf *pcmd; 8731 uint32_t *lp; 8732 IOCB_t *icmd; 8733 uint32_t did; 8734 8735 icmd = &cmdiocb->iocb; 8736 did = icmd->un.elsreq64.remoteID; 8737 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 8738 lp = (uint32_t *) pcmd->virt; 8739 8740 lp++; 8741 /* FARP-RSP received from DID <did> */ 8742 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8743 "0600 FARP-RSP received from DID x%x\n", did); 8744 /* ACCEPT the Farp resp request */ 8745 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8746 8747 return 0; 8748 } 8749 8750 /** 8751 * lpfc_els_rcv_fan - Process an unsolicited fan iocb command 8752 * @vport: pointer to a host virtual N_Port data structure. 8753 * @cmdiocb: pointer to lpfc command iocb data structure. 8754 * @fan_ndlp: pointer to a node-list data structure. 
8755 * 8756 * This routine processes a Fabric Address Notification (FAN) IOCB 8757 * command received as an ELS unsolicited event. The FAN ELS command will 8758 * only be processed on a physical port (i.e., the @vport represents the 8759 * physical port). The fabric NodeName and PortName from the FAN IOCB are 8760 * compared against those in the phba data structure. If either of them is 8761 * different, the lpfc_issue_init_vfi() routine is invoked to re-initialize 8762 * the fabric connection (leading to a new FLOGI) and start discovery over. 8763 * Otherwise, if both are identical, the lpfc_issue_fabric_reglogin() routine 8764 * (or lpfc_issue_reg_vfi() on SLI-4) is invoked to register login to the fabric. 8765 * 8766 * Return code 8767 * 0 - Successfully processed fan iocb (currently always return 0). 8768 **/ 8769 static int 8770 lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8771 struct lpfc_nodelist *fan_ndlp) 8772 { 8773 struct lpfc_hba *phba = vport->phba; 8774 uint32_t *lp; 8775 FAN *fp; 8776 8777 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n"); 8778 lp = (uint32_t *)((struct lpfc_dmabuf *)cmdiocb->context2)->virt; 8779 fp = (FAN *) ++lp; 8780 /* FAN received; Fan does not have a reply sequence */ 8781 if ((vport == phba->pport) && 8782 (vport->port_state == LPFC_LOCAL_CFG_LINK)) { 8783 if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName, 8784 sizeof(struct lpfc_name))) || 8785 (memcmp(&phba->fc_fabparam.portName, &fp->FportName, 8786 sizeof(struct lpfc_name)))) { 8787 /* This port has switched fabrics. FLOGI is required */ 8788 lpfc_issue_init_vfi(vport); 8789 } else { 8790 /* FAN verified - skip FLOGI */ 8791 vport->fc_myDID = vport->fc_prevDID; 8792 if (phba->sli_rev < LPFC_SLI_REV4) 8793 lpfc_issue_fabric_reglogin(vport); 8794 else { 8795 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8796 "3138 Need register VFI: (x%x/%x)\n", 8797 vport->fc_prevDID, vport->fc_myDID); 8798 lpfc_issue_reg_vfi(vport); 8799 } 8800 } 8801 } 8802 return 0; 8803 } 8804 8805 /** 8806 * lpfc_els_rcv_edc - Process an unsolicited EDC iocb 8807 * @vport: pointer to a host virtual N_Port data structure. 8808 * @cmdiocb: pointer to lpfc command iocb data structure. 8809 * @ndlp: pointer to a node-list data structure.
8810 * 8811 * Return code 8812 * 0 - Successfully processed echo iocb (currently always return 0) 8813 **/ 8814 static int 8815 lpfc_els_rcv_edc(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8816 struct lpfc_nodelist *ndlp) 8817 { 8818 struct lpfc_hba *phba = vport->phba; 8819 struct fc_els_edc *edc_req; 8820 struct fc_tlv_desc *tlv; 8821 uint8_t *payload; 8822 uint32_t *ptr, dtag; 8823 const char *dtag_nm; 8824 int desc_cnt = 0, bytes_remain; 8825 bool rcv_cap_desc = false; 8826 8827 payload = ((struct lpfc_dmabuf *)cmdiocb->context2)->virt; 8828 8829 edc_req = (struct fc_els_edc *)payload; 8830 bytes_remain = be32_to_cpu(edc_req->desc_len); 8831 8832 ptr = (uint32_t *)payload; 8833 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 8834 "3319 Rcv EDC payload len %d: x%x x%x x%x\n", 8835 bytes_remain, be32_to_cpu(*ptr), 8836 be32_to_cpu(*(ptr + 1)), be32_to_cpu(*(ptr + 2))); 8837 8838 /* No signal support unless there is a congestion descriptor */ 8839 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 8840 phba->cgn_sig_freq = 0; 8841 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; 8842 8843 if (bytes_remain <= 0) 8844 goto out; 8845 8846 tlv = edc_req->desc; 8847 8848 /* 8849 * cycle through EDC diagnostic descriptors to find the 8850 * congestion signaling capability descriptor 8851 */ 8852 while (bytes_remain && !rcv_cap_desc) { 8853 if (bytes_remain < FC_TLV_DESC_HDR_SZ) { 8854 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 8855 "6464 Truncated TLV hdr on " 8856 "Diagnostic descriptor[%d]\n", 8857 desc_cnt); 8858 goto out; 8859 } 8860 8861 dtag = be32_to_cpu(tlv->desc_tag); 8862 switch (dtag) { 8863 case ELS_DTAG_LNK_FAULT_CAP: 8864 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 8865 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 8866 sizeof(struct fc_diag_lnkflt_desc)) { 8867 lpfc_printf_log( 8868 phba, KERN_WARNING, LOG_CGN_MGMT, 8869 "6465 Truncated Link Fault Diagnostic " 8870 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 8871 desc_cnt, bytes_remain, 8872 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 8873 sizeof(struct fc_diag_cg_sig_desc)); 8874 goto out; 8875 } 8876 /* No action for Link Fault descriptor for now */ 8877 break; 8878 case ELS_DTAG_CG_SIGNAL_CAP: 8879 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 8880 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 8881 sizeof(struct fc_diag_cg_sig_desc)) { 8882 lpfc_printf_log( 8883 phba, KERN_WARNING, LOG_CGN_MGMT, 8884 "6466 Truncated cgn signal Diagnostic " 8885 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 8886 desc_cnt, bytes_remain, 8887 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 8888 sizeof(struct fc_diag_cg_sig_desc)); 8889 goto out; 8890 } 8891 8892 phba->cgn_reg_fpin = phba->cgn_init_reg_fpin; 8893 phba->cgn_reg_signal = phba->cgn_init_reg_signal; 8894 8895 /* We start negotiation with lpfc_fabric_cgn_frequency. 8896 * When we process the EDC, we will settle on the 8897 * higher frequency. 
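 * lpfc_least_capable_settings(), called just below, then adjusts these
 * defaults based on the received congestion signaling capability
 * descriptor.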
8898 */ 8899 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency; 8900 8901 lpfc_least_capable_settings( 8902 phba, (struct fc_diag_cg_sig_desc *)tlv); 8903 rcv_cap_desc = true; 8904 break; 8905 default: 8906 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 8907 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 8908 "6467 unknown Diagnostic " 8909 "Descriptor[%d]: tag x%x (%s)\n", 8910 desc_cnt, dtag, dtag_nm); 8911 } 8912 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); 8913 tlv = fc_tlv_next_desc(tlv); 8914 desc_cnt++; 8915 } 8916 out: 8917 /* Need to send back an ACC */ 8918 lpfc_issue_els_edc_rsp(vport, cmdiocb, ndlp); 8919 8920 lpfc_config_cgn_signal(phba); 8921 return 0; 8922 } 8923 8924 /** 8925 * lpfc_els_timeout - Handler function for the els timer 8926 * @t: timer context used to obtain the vport. 8927 * 8928 * This routine is invoked by the ELS timer after timeout. It posts the ELS 8929 * timer timeout event by setting the WORKER_ELS_TMO bit in the work port 8930 * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake 8931 * up the worker thread. The worker thread then invokes the routine 8932 * lpfc_els_timeout_handler() to process the posted WORKER_ELS_TMO event. 8933 **/ 8934 void 8935 lpfc_els_timeout(struct timer_list *t) 8936 { 8937 struct lpfc_vport *vport = from_timer(vport, t, els_tmofunc); 8938 struct lpfc_hba *phba = vport->phba; 8939 uint32_t tmo_posted; 8940 unsigned long iflag; 8941 8942 spin_lock_irqsave(&vport->work_port_lock, iflag); 8943 tmo_posted = vport->work_port_events & WORKER_ELS_TMO; 8944 if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING))) 8945 vport->work_port_events |= WORKER_ELS_TMO; 8946 spin_unlock_irqrestore(&vport->work_port_lock, iflag); 8947 8948 if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING))) 8949 lpfc_worker_wake_up(phba); 8950 return; 8951 } 8952 8953 8954 /** 8955 * lpfc_els_timeout_handler - Process an els timeout event 8956 * @vport: pointer to a virtual N_Port data structure. 8957 * 8958 * This routine is the actual handler function that processes an ELS timeout 8959 * event. It walks the ELS ring and aborts all the IOCBs associated with the 8960 * @vport (except ABORT/CLOSE/FARP/FARPR/FDISC IOCBs) by 8961 * invoking the lpfc_sli_issue_abort_iotag() routine.
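 *
 * Roughly (illustrative only), each pass over the txcmplq does:
 *
 *	timeout = 2 * phba->fc_ratov
 *	for each ELS iocb owned by this vport (skipping libdfc, abort,
 *	close, FARP, FARPR and FDISC iocbs):
 *		if iocb->drvrTimeout > 0: decrement it (toward zero) and skip
 *		otherwise: move the iocb to a local abort_list
 *	then abort every entry on abort_list outside the ring locks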
8962 **/ 8963 void 8964 lpfc_els_timeout_handler(struct lpfc_vport *vport) 8965 { 8966 struct lpfc_hba *phba = vport->phba; 8967 struct lpfc_sli_ring *pring; 8968 struct lpfc_iocbq *tmp_iocb, *piocb; 8969 IOCB_t *cmd = NULL; 8970 struct lpfc_dmabuf *pcmd; 8971 uint32_t els_command = 0; 8972 uint32_t timeout; 8973 uint32_t remote_ID = 0xffffffff; 8974 LIST_HEAD(abort_list); 8975 8976 8977 timeout = (uint32_t)(phba->fc_ratov << 1); 8978 8979 pring = lpfc_phba_elsring(phba); 8980 if (unlikely(!pring)) 8981 return; 8982 8983 if (phba->pport->load_flag & FC_UNLOADING) 8984 return; 8985 8986 spin_lock_irq(&phba->hbalock); 8987 if (phba->sli_rev == LPFC_SLI_REV4) 8988 spin_lock(&pring->ring_lock); 8989 8990 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 8991 cmd = &piocb->iocb; 8992 8993 if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 || 8994 piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || 8995 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) 8996 continue; 8997 8998 if (piocb->vport != vport) 8999 continue; 9000 9001 pcmd = (struct lpfc_dmabuf *) piocb->context2; 9002 if (pcmd) 9003 els_command = *(uint32_t *) (pcmd->virt); 9004 9005 if (els_command == ELS_CMD_FARP || 9006 els_command == ELS_CMD_FARPR || 9007 els_command == ELS_CMD_FDISC) 9008 continue; 9009 9010 if (piocb->drvrTimeout > 0) { 9011 if (piocb->drvrTimeout >= timeout) 9012 piocb->drvrTimeout -= timeout; 9013 else 9014 piocb->drvrTimeout = 0; 9015 continue; 9016 } 9017 9018 remote_ID = 0xffffffff; 9019 if (cmd->ulpCommand != CMD_GEN_REQUEST64_CR) 9020 remote_ID = cmd->un.elsreq64.remoteID; 9021 else { 9022 struct lpfc_nodelist *ndlp; 9023 ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext); 9024 if (ndlp) 9025 remote_ID = ndlp->nlp_DID; 9026 } 9027 list_add_tail(&piocb->dlist, &abort_list); 9028 } 9029 if (phba->sli_rev == LPFC_SLI_REV4) 9030 spin_unlock(&pring->ring_lock); 9031 spin_unlock_irq(&phba->hbalock); 9032 9033 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { 9034 cmd = &piocb->iocb; 9035 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9036 "0127 ELS timeout Data: x%x x%x x%x " 9037 "x%x\n", els_command, 9038 remote_ID, cmd->ulpCommand, cmd->ulpIoTag); 9039 spin_lock_irq(&phba->hbalock); 9040 list_del_init(&piocb->dlist); 9041 lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL); 9042 spin_unlock_irq(&phba->hbalock); 9043 } 9044 9045 /* Make sure HBA is alive */ 9046 lpfc_issue_hb_tmo(phba); 9047 9048 if (!list_empty(&pring->txcmplq)) 9049 if (!(phba->pport->load_flag & FC_UNLOADING)) 9050 mod_timer(&vport->els_tmofunc, 9051 jiffies + msecs_to_jiffies(1000 * timeout)); 9052 } 9053 9054 /** 9055 * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport 9056 * @vport: pointer to a host virtual N_Port data structure. 9057 * 9058 * This routine is used to clean up all the outstanding ELS commands on a 9059 * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport() 9060 * routine. After that, it walks the ELS transmit queue to remove all the 9061 * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. For 9062 * the IOCBs with a non-NULL completion callback function, the callback 9063 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and 9064 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion 9065 * callback function, the IOCB will simply be released. 
Finally, it walks 9066 * the ELS transmit completion queue and issues an abort IOCB for each 9067 * transmit completion queue IOCB that is associated with the @vport and is 9068 * not an IOCB from libdfc (i.e., the management plane IOCBs that are not 9069 * part of the discovery state machine) by invoking the 9070 * lpfc_sli_issue_abort_iotag() routine. Note that this function only issues 9071 * the abort for each queued transmit completion IOCB; it does not guarantee 9072 * the IOCBs are aborted by the time this function returns. 9073 **/ 9074 void 9075 lpfc_els_flush_cmd(struct lpfc_vport *vport) 9076 { 9077 LIST_HEAD(abort_list); 9078 struct lpfc_hba *phba = vport->phba; 9079 struct lpfc_sli_ring *pring; 9080 struct lpfc_iocbq *tmp_iocb, *piocb; 9081 IOCB_t *cmd = NULL; 9082 unsigned long iflags = 0; 9083 9084 lpfc_fabric_abort_vport(vport); 9085 9086 /* 9087 * For SLI3, only the hbalock is required. But SLI4 needs to coordinate 9088 * with the ring insert operation. Because lpfc_sli_issue_abort_iotag 9089 * ultimately grabs the ring_lock, the driver must splice the list into 9090 * a working list and release the locks before calling the abort. 9091 */ 9092 spin_lock_irqsave(&phba->hbalock, iflags); 9093 pring = lpfc_phba_elsring(phba); 9094 9095 /* Bail out if we've no ELS wq, like in PCI error recovery case. */ 9096 if (unlikely(!pring)) { 9097 spin_unlock_irqrestore(&phba->hbalock, iflags); 9098 return; 9099 } 9100 9101 if (phba->sli_rev == LPFC_SLI_REV4) 9102 spin_lock(&pring->ring_lock); 9103 9104 /* First we need to issue aborts to outstanding cmds on txcmpl */ 9105 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 9106 if (piocb->iocb_flag & LPFC_IO_LIBDFC) 9107 continue; 9108 9109 if (piocb->vport != vport) 9110 continue; 9111 9112 if (piocb->iocb_flag & LPFC_DRIVER_ABORTED) 9113 continue; 9114 9115 /* On the ELS ring we can have ELS_REQUESTs or 9116 * GEN_REQUESTs waiting for a response. 9117 */ 9118 cmd = &piocb->iocb; 9119 if (cmd->ulpCommand == CMD_ELS_REQUEST64_CR) { 9120 list_add_tail(&piocb->dlist, &abort_list); 9121 9122 /* If the link is down when flushing ELS commands 9123 * the firmware will not complete them till after 9124 * the link comes back up. This may confuse 9125 * discovery for the new link up, so we need to 9126 * change the compl routine to just clean up the iocb 9127 * and avoid any retry logic. 9128 */ 9129 if (phba->link_state == LPFC_LINK_DOWN) 9130 piocb->iocb_cmpl = lpfc_cmpl_els_link_down; 9131 } 9132 if (cmd->ulpCommand == CMD_GEN_REQUEST64_CR) 9133 list_add_tail(&piocb->dlist, &abort_list); 9134 } 9135 9136 if (phba->sli_rev == LPFC_SLI_REV4) 9137 spin_unlock(&pring->ring_lock); 9138 spin_unlock_irqrestore(&phba->hbalock, iflags); 9139 9140 /* Abort each txcmpl iocb on the abort list and remove the dlist links.
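 * Only the hbalock is re-taken around each individual abort; the
 * ring_lock stays dropped because lpfc_sli_issue_abort_iotag() grabs it
 * itself on SLI4 (see the comment near the top of this function).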
*/ 9141 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { 9142 spin_lock_irqsave(&phba->hbalock, iflags); 9143 list_del_init(&piocb->dlist); 9144 lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL); 9145 spin_unlock_irqrestore(&phba->hbalock, iflags); 9146 } 9147 /* Make sure HBA is alive */ 9148 lpfc_issue_hb_tmo(phba); 9149 9150 if (!list_empty(&abort_list)) 9151 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9152 "3387 abort list for txq not empty\n"); 9153 INIT_LIST_HEAD(&abort_list); 9154 9155 spin_lock_irqsave(&phba->hbalock, iflags); 9156 if (phba->sli_rev == LPFC_SLI_REV4) 9157 spin_lock(&pring->ring_lock); 9158 9159 /* No need to abort the txq list, 9160 * just queue them up for lpfc_sli_cancel_iocbs 9161 */ 9162 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) { 9163 cmd = &piocb->iocb; 9164 9165 if (piocb->iocb_flag & LPFC_IO_LIBDFC) { 9166 continue; 9167 } 9168 9169 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */ 9170 if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN || 9171 cmd->ulpCommand == CMD_QUE_RING_BUF64_CN || 9172 cmd->ulpCommand == CMD_CLOSE_XRI_CN || 9173 cmd->ulpCommand == CMD_ABORT_XRI_CN) 9174 continue; 9175 9176 if (piocb->vport != vport) 9177 continue; 9178 9179 list_del_init(&piocb->list); 9180 list_add_tail(&piocb->list, &abort_list); 9181 } 9182 9183 /* The same holds true for any FLOGI/FDISC on the fabric_iocb_list */ 9184 if (vport == phba->pport) { 9185 list_for_each_entry_safe(piocb, tmp_iocb, 9186 &phba->fabric_iocb_list, list) { 9187 cmd = &piocb->iocb; 9188 list_del_init(&piocb->list); 9189 list_add_tail(&piocb->list, &abort_list); 9190 } 9191 } 9192 9193 if (phba->sli_rev == LPFC_SLI_REV4) 9194 spin_unlock(&pring->ring_lock); 9195 spin_unlock_irqrestore(&phba->hbalock, iflags); 9196 9197 /* Cancel all the IOCBs from the completions list */ 9198 lpfc_sli_cancel_iocbs(phba, &abort_list, 9199 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); 9200 9201 return; 9202 } 9203 9204 /** 9205 * lpfc_els_flush_all_cmd - Clean up all the outstanding els commands to a HBA 9206 * @phba: pointer to lpfc hba data structure. 9207 * 9208 * This routine is used to clean up all the outstanding ELS commands on a 9209 * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba() 9210 * routine. After that, it walks the ELS transmit queue to remove all the 9211 * IOCBs to the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For 9212 * the IOCBs with the completion callback function associated, the callback 9213 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and 9214 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without the completion 9215 * callback function associated, the IOCB will simply be released. Finally, 9216 * it walks the ELS transmit completion queue to issue an abort IOCB to any 9217 * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the 9218 * management plane IOCBs that are not part of the discovery state machine) 9219 * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine. 9220 **/ 9221 void 9222 lpfc_els_flush_all_cmd(struct lpfc_hba *phba) 9223 { 9224 struct lpfc_vport *vport; 9225 9226 spin_lock_irq(&phba->port_list_lock); 9227 list_for_each_entry(vport, &phba->port_list, listentry) 9228 lpfc_els_flush_cmd(vport); 9229 spin_unlock_irq(&phba->port_list_lock); 9230 9231 return; 9232 } 9233 9234 /** 9235 * lpfc_send_els_failure_event - Posts an ELS command failure event 9236 * @phba: Pointer to hba context object. 
9237 * @cmdiocbp: Pointer to command iocb which reported error. 9238 * @rspiocbp: Pointer to response iocb which reported error. 9239 * 9240 * This function sends an event when there is an ELS command 9241 * failure. 9242 **/ 9243 void 9244 lpfc_send_els_failure_event(struct lpfc_hba *phba, 9245 struct lpfc_iocbq *cmdiocbp, 9246 struct lpfc_iocbq *rspiocbp) 9247 { 9248 struct lpfc_vport *vport = cmdiocbp->vport; 9249 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 9250 struct lpfc_lsrjt_event lsrjt_event; 9251 struct lpfc_fabric_event_header fabric_event; 9252 struct ls_rjt stat; 9253 struct lpfc_nodelist *ndlp; 9254 uint32_t *pcmd; 9255 9256 ndlp = cmdiocbp->context1; 9257 if (!ndlp) 9258 return; 9259 9260 if (rspiocbp->iocb.ulpStatus == IOSTAT_LS_RJT) { 9261 lsrjt_event.header.event_type = FC_REG_ELS_EVENT; 9262 lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV; 9263 memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname, 9264 sizeof(struct lpfc_name)); 9265 memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename, 9266 sizeof(struct lpfc_name)); 9267 pcmd = (uint32_t *) (((struct lpfc_dmabuf *) 9268 cmdiocbp->context2)->virt); 9269 lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0; 9270 stat.un.lsRjtError = be32_to_cpu(rspiocbp->iocb.un.ulpWord[4]); 9271 lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode; 9272 lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp; 9273 fc_host_post_vendor_event(shost, 9274 fc_get_event_number(), 9275 sizeof(lsrjt_event), 9276 (char *)&lsrjt_event, 9277 LPFC_NL_VENDOR_ID); 9278 return; 9279 } 9280 if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) || 9281 (rspiocbp->iocb.ulpStatus == IOSTAT_FABRIC_BSY)) { 9282 fabric_event.event_type = FC_REG_FABRIC_EVENT; 9283 if (rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) 9284 fabric_event.subcategory = LPFC_EVENT_PORT_BUSY; 9285 else 9286 fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY; 9287 memcpy(fabric_event.wwpn, &ndlp->nlp_portname, 9288 sizeof(struct lpfc_name)); 9289 memcpy(fabric_event.wwnn, &ndlp->nlp_nodename, 9290 sizeof(struct lpfc_name)); 9291 fc_host_post_vendor_event(shost, 9292 fc_get_event_number(), 9293 sizeof(fabric_event), 9294 (char *)&fabric_event, 9295 LPFC_NL_VENDOR_ID); 9296 return; 9297 } 9298 9299 } 9300 9301 /** 9302 * lpfc_send_els_event - Posts unsolicited els event 9303 * @vport: Pointer to vport object. 9304 * @ndlp: Pointer FC node object. 9305 * @payload: ELS command code type. 9306 * 9307 * This function posts an event when there is an incoming 9308 * unsolicited ELS command. 
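 *
 * LOGO events use the larger struct lpfc_logo_event so that the WWPN
 * carried in the LOGO payload can be forwarded; every event is posted to
 * user space as an FC transport vendor event (LPFC_NL_VENDOR_ID) via
 * fc_host_post_vendor_event().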
9309 **/ 9310 static void 9311 lpfc_send_els_event(struct lpfc_vport *vport, 9312 struct lpfc_nodelist *ndlp, 9313 uint32_t *payload) 9314 { 9315 struct lpfc_els_event_header *els_data = NULL; 9316 struct lpfc_logo_event *logo_data = NULL; 9317 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 9318 9319 if (*payload == ELS_CMD_LOGO) { 9320 logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL); 9321 if (!logo_data) { 9322 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9323 "0148 Failed to allocate memory " 9324 "for LOGO event\n"); 9325 return; 9326 } 9327 els_data = &logo_data->header; 9328 } else { 9329 els_data = kmalloc(sizeof(struct lpfc_els_event_header), 9330 GFP_KERNEL); 9331 if (!els_data) { 9332 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9333 "0149 Failed to allocate memory " 9334 "for ELS event\n"); 9335 return; 9336 } 9337 } 9338 els_data->event_type = FC_REG_ELS_EVENT; 9339 switch (*payload) { 9340 case ELS_CMD_PLOGI: 9341 els_data->subcategory = LPFC_EVENT_PLOGI_RCV; 9342 break; 9343 case ELS_CMD_PRLO: 9344 els_data->subcategory = LPFC_EVENT_PRLO_RCV; 9345 break; 9346 case ELS_CMD_ADISC: 9347 els_data->subcategory = LPFC_EVENT_ADISC_RCV; 9348 break; 9349 case ELS_CMD_LOGO: 9350 els_data->subcategory = LPFC_EVENT_LOGO_RCV; 9351 /* Copy the WWPN in the LOGO payload */ 9352 memcpy(logo_data->logo_wwpn, &payload[2], 9353 sizeof(struct lpfc_name)); 9354 break; 9355 default: 9356 kfree(els_data); 9357 return; 9358 } 9359 memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name)); 9360 memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name)); 9361 if (*payload == ELS_CMD_LOGO) { 9362 fc_host_post_vendor_event(shost, 9363 fc_get_event_number(), 9364 sizeof(struct lpfc_logo_event), 9365 (char *)logo_data, 9366 LPFC_NL_VENDOR_ID); 9367 kfree(logo_data); 9368 } else { 9369 fc_host_post_vendor_event(shost, 9370 fc_get_event_number(), 9371 sizeof(struct lpfc_els_event_header), 9372 (char *)els_data, 9373 LPFC_NL_VENDOR_ID); 9374 kfree(els_data); 9375 } 9376 9377 return; 9378 } 9379 9380 9381 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_li_event_nm, fc_fpin_li_event_types, 9382 FC_FPIN_LI_EVT_TYPES_INIT); 9383 9384 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_deli_event_nm, fc_fpin_deli_event_types, 9385 FC_FPIN_DELI_EVT_TYPES_INIT); 9386 9387 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_event_nm, fc_fpin_congn_event_types, 9388 FC_FPIN_CONGN_EVT_TYPES_INIT); 9389 9390 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_severity_nm, 9391 fc_fpin_congn_severity_types, 9392 FC_FPIN_CONGN_SEVERITY_INIT); 9393 9394 9395 /** 9396 * lpfc_display_fpin_wwpn - Display WWPNs accessible by the attached port 9397 * @phba: Pointer to phba object. 9398 * @wwnlist: Pointer to list of WWPNs in FPIN payload 9399 * @cnt: count of WWPNs in FPIN payload 9400 * 9401 * This routine is called by LI and PC descriptors. 
9402 * Limit the number of WWPNs displayed to 6 log messages, 6 per log message 9403 */ 9404 static void 9405 lpfc_display_fpin_wwpn(struct lpfc_hba *phba, __be64 *wwnlist, u32 cnt) 9406 { 9407 char buf[LPFC_FPIN_WWPN_LINE_SZ]; 9408 __be64 wwn; 9409 u64 wwpn; 9410 int i, len; 9411 int line = 0; 9412 int wcnt = 0; 9413 bool endit = false; 9414 9415 len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ, "Accessible WWPNs:"); 9416 for (i = 0; i < cnt; i++) { 9417 /* Are we on the last WWPN */ 9418 if (i == (cnt - 1)) 9419 endit = true; 9420 9421 /* Extract the next WWPN from the payload */ 9422 wwn = *wwnlist++; 9423 wwpn = be64_to_cpu(wwn); 9424 len += scnprintf(buf + len, LPFC_FPIN_WWPN_LINE_SZ - len, 9425 " %016llx", wwpn); 9426 9427 /* Log a message if we are on the last WWPN 9428 * or if we hit the max allowed per message. 9429 */ 9430 wcnt++; 9431 if (wcnt == LPFC_FPIN_WWPN_LINE_CNT || endit) { 9432 buf[len] = 0; 9433 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9434 "4686 %s\n", buf); 9435 9436 /* Check if we reached the last WWPN */ 9437 if (endit) 9438 return; 9439 9440 /* Limit the number of log message displayed per FPIN */ 9441 line++; 9442 if (line == LPFC_FPIN_WWPN_NUM_LINE) { 9443 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9444 "4687 %d WWPNs Truncated\n", 9445 cnt - i - 1); 9446 return; 9447 } 9448 9449 /* Start over with next log message */ 9450 wcnt = 0; 9451 len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ, 9452 "Additional WWPNs:"); 9453 } 9454 } 9455 } 9456 9457 /** 9458 * lpfc_els_rcv_fpin_li - Process an FPIN Link Integrity Event. 9459 * @phba: Pointer to phba object. 9460 * @tlv: Pointer to the Link Integrity Notification Descriptor. 9461 * 9462 * This function processes a Link Integrity FPIN event by logging a message. 9463 **/ 9464 static void 9465 lpfc_els_rcv_fpin_li(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 9466 { 9467 struct fc_fn_li_desc *li = (struct fc_fn_li_desc *)tlv; 9468 const char *li_evt_str; 9469 u32 li_evt, cnt; 9470 9471 li_evt = be16_to_cpu(li->event_type); 9472 li_evt_str = lpfc_get_fpin_li_event_nm(li_evt); 9473 cnt = be32_to_cpu(li->pname_count); 9474 9475 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9476 "4680 FPIN Link Integrity %s (x%x) " 9477 "Detecting PN x%016llx Attached PN x%016llx " 9478 "Duration %d mSecs Count %d Port Cnt %d\n", 9479 li_evt_str, li_evt, 9480 be64_to_cpu(li->detecting_wwpn), 9481 be64_to_cpu(li->attached_wwpn), 9482 be32_to_cpu(li->event_threshold), 9483 be32_to_cpu(li->event_count), cnt); 9484 9485 lpfc_display_fpin_wwpn(phba, (__be64 *)&li->pname_list, cnt); 9486 } 9487 9488 /** 9489 * lpfc_els_rcv_fpin_del - Process an FPIN Delivery Event. 9490 * @phba: Pointer to hba object. 9491 * @tlv: Pointer to the Delivery Notification Descriptor TLV 9492 * 9493 * This function processes a Delivery FPIN event by logging a message. 
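 *
 * The six 32-bit words that follow the fixed descriptor fields (the header
 * of the discarded frame) are logged as DiscHdr0 through DiscHdr5.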
9494 **/ 9495 static void 9496 lpfc_els_rcv_fpin_del(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 9497 { 9498 struct fc_fn_deli_desc *del = (struct fc_fn_deli_desc *)tlv; 9499 const char *del_rsn_str; 9500 u32 del_rsn; 9501 __be32 *frame; 9502 9503 del_rsn = be16_to_cpu(del->deli_reason_code); 9504 del_rsn_str = lpfc_get_fpin_deli_event_nm(del_rsn); 9505 9506 /* Skip over desc_tag/desc_len header to payload */ 9507 frame = (__be32 *)(del + 1); 9508 9509 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9510 "4681 FPIN Delivery %s (x%x) " 9511 "Detecting PN x%016llx Attached PN x%016llx " 9512 "DiscHdr0 x%08x " 9513 "DiscHdr1 x%08x DiscHdr2 x%08x DiscHdr3 x%08x " 9514 "DiscHdr4 x%08x DiscHdr5 x%08x\n", 9515 del_rsn_str, del_rsn, 9516 be64_to_cpu(del->detecting_wwpn), 9517 be64_to_cpu(del->attached_wwpn), 9518 be32_to_cpu(frame[0]), 9519 be32_to_cpu(frame[1]), 9520 be32_to_cpu(frame[2]), 9521 be32_to_cpu(frame[3]), 9522 be32_to_cpu(frame[4]), 9523 be32_to_cpu(frame[5])); 9524 } 9525 9526 /** 9527 * lpfc_els_rcv_fpin_peer_cgn - Process a FPIN Peer Congestion Event. 9528 * @phba: Pointer to hba object. 9529 * @tlv: Pointer to the Peer Congestion Notification Descriptor TLV 9530 * 9531 * This function processes a Peer Congestion FPIN event by logging a message. 9532 **/ 9533 static void 9534 lpfc_els_rcv_fpin_peer_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 9535 { 9536 struct fc_fn_peer_congn_desc *pc = (struct fc_fn_peer_congn_desc *)tlv; 9537 const char *pc_evt_str; 9538 u32 pc_evt, cnt; 9539 9540 pc_evt = be16_to_cpu(pc->event_type); 9541 pc_evt_str = lpfc_get_fpin_congn_event_nm(pc_evt); 9542 cnt = be32_to_cpu(pc->pname_count); 9543 9544 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_ELS, 9545 "4684 FPIN Peer Congestion %s (x%x) " 9546 "Duration %d mSecs " 9547 "Detecting PN x%016llx Attached PN x%016llx " 9548 "Impacted Port Cnt %d\n", 9549 pc_evt_str, pc_evt, 9550 be32_to_cpu(pc->event_period), 9551 be64_to_cpu(pc->detecting_wwpn), 9552 be64_to_cpu(pc->attached_wwpn), 9553 cnt); 9554 9555 lpfc_display_fpin_wwpn(phba, (__be64 *)&pc->pname_list, cnt); 9556 } 9557 9558 /** 9559 * lpfc_els_rcv_fpin_cgn - Process an FPIN Congestion notification 9560 * @phba: Pointer to hba object. 9561 * @tlv: Pointer to the Congestion Notification Descriptor TLV 9562 * 9563 * This function processes an FPIN Congestion Notifiction. The notification 9564 * could be an Alarm or Warning. This routine feeds that data into driver's 9565 * running congestion algorithm. It also processes the FPIN by 9566 * logging a message. It returns 1 to indicate deliver this message 9567 * to the upper layer or 0 to indicate don't deliver it. 9568 **/ 9569 static int 9570 lpfc_els_rcv_fpin_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 9571 { 9572 struct lpfc_cgn_info *cp; 9573 struct fc_fn_congn_desc *cgn = (struct fc_fn_congn_desc *)tlv; 9574 const char *cgn_evt_str; 9575 u32 cgn_evt; 9576 const char *cgn_sev_str; 9577 u32 cgn_sev; 9578 uint16_t value; 9579 u32 crc; 9580 bool nm_log = false; 9581 int rc = 1; 9582 9583 cgn_evt = be16_to_cpu(cgn->event_type); 9584 cgn_evt_str = lpfc_get_fpin_congn_event_nm(cgn_evt); 9585 cgn_sev = cgn->severity; 9586 cgn_sev_str = lpfc_get_fpin_congn_severity_nm(cgn_sev); 9587 9588 /* The driver only takes action on a Credit Stall or Oversubscription 9589 * event type to engage the IO algorithm. The driver prints an 9590 * unmaskable message only for Lost Credit and Credit Stall. 
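 * Oversubscription is still fed into the congestion algorithm but is
 * logged at the normal, maskable level.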
9591 * TODO: Still need to have definition of host action on clear, 9592 * lost credit and device specific event types. 9593 */ 9594 switch (cgn_evt) { 9595 case FPIN_CONGN_LOST_CREDIT: 9596 nm_log = true; 9597 break; 9598 case FPIN_CONGN_CREDIT_STALL: 9599 nm_log = true; 9600 fallthrough; 9601 case FPIN_CONGN_OVERSUBSCRIPTION: 9602 if (cgn_evt == FPIN_CONGN_OVERSUBSCRIPTION) 9603 nm_log = false; 9604 switch (cgn_sev) { 9605 case FPIN_CONGN_SEVERITY_ERROR: 9606 /* Take action here for an Alarm event */ 9607 if (phba->cmf_active_mode != LPFC_CFG_OFF) { 9608 if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) { 9609 /* Track of alarm cnt for cgn_info */ 9610 atomic_inc(&phba->cgn_fabric_alarm_cnt); 9611 /* Track of alarm cnt for SYNC_WQE */ 9612 atomic_inc(&phba->cgn_sync_alarm_cnt); 9613 } 9614 goto cleanup; 9615 } 9616 break; 9617 case FPIN_CONGN_SEVERITY_WARNING: 9618 /* Take action here for a Warning event */ 9619 if (phba->cmf_active_mode != LPFC_CFG_OFF) { 9620 if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) { 9621 /* Track of warning cnt for cgn_info */ 9622 atomic_inc(&phba->cgn_fabric_warn_cnt); 9623 /* Track of warning cnt for SYNC_WQE */ 9624 atomic_inc(&phba->cgn_sync_warn_cnt); 9625 } 9626 cleanup: 9627 /* Save frequency in ms */ 9628 phba->cgn_fpin_frequency = 9629 be32_to_cpu(cgn->event_period); 9630 value = phba->cgn_fpin_frequency; 9631 if (phba->cgn_i) { 9632 cp = (struct lpfc_cgn_info *) 9633 phba->cgn_i->virt; 9634 if (phba->cgn_reg_fpin & 9635 LPFC_CGN_FPIN_ALARM) 9636 cp->cgn_alarm_freq = 9637 cpu_to_le16(value); 9638 if (phba->cgn_reg_fpin & 9639 LPFC_CGN_FPIN_WARN) 9640 cp->cgn_warn_freq = 9641 cpu_to_le16(value); 9642 crc = lpfc_cgn_calc_crc32 9643 (cp, 9644 LPFC_CGN_INFO_SZ, 9645 LPFC_CGN_CRC32_SEED); 9646 cp->cgn_info_crc = cpu_to_le32(crc); 9647 } 9648 9649 /* Don't deliver to upper layer since 9650 * driver took action on this tlv. 9651 */ 9652 rc = 0; 9653 } 9654 break; 9655 } 9656 break; 9657 } 9658 9659 /* Change the log level to unmaskable for the following event types. */ 9660 lpfc_printf_log(phba, (nm_log ? KERN_WARNING : KERN_INFO), 9661 LOG_CGN_MGMT | LOG_ELS, 9662 "4683 FPIN CONGESTION %s type %s (x%x) Event " 9663 "Duration %d mSecs\n", 9664 cgn_sev_str, cgn_evt_str, cgn_evt, 9665 be32_to_cpu(cgn->event_period)); 9666 return rc; 9667 } 9668 9669 void 9670 lpfc_els_rcv_fpin(struct lpfc_vport *vport, void *p, u32 fpin_length) 9671 { 9672 struct lpfc_hba *phba = vport->phba; 9673 struct fc_els_fpin *fpin = (struct fc_els_fpin *)p; 9674 struct fc_tlv_desc *tlv, *first_tlv, *current_tlv; 9675 const char *dtag_nm; 9676 int desc_cnt = 0, bytes_remain, cnt; 9677 u32 dtag, deliver = 0; 9678 int len; 9679 9680 /* FPINs handled only if we are in the right discovery state */ 9681 if (vport->port_state < LPFC_DISC_AUTH) 9682 return; 9683 9684 /* make sure there is the full fpin header */ 9685 if (fpin_length < sizeof(struct fc_els_fpin)) 9686 return; 9687 9688 /* Sanity check descriptor length. The desc_len value does not 9689 * include space for the ELS command and the desc_len fields. 
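 * So the frame must provide at least desc_len + sizeof(struct fc_els_fpin)
 * bytes, which is what the check below enforces.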
9690 */ 9691 len = be32_to_cpu(fpin->desc_len); 9692 if (fpin_length < len + sizeof(struct fc_els_fpin)) { 9693 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 9694 "4671 Bad ELS FPIN length %d: %d\n", 9695 len, fpin_length); 9696 return; 9697 } 9698 9699 tlv = (struct fc_tlv_desc *)&fpin->fpin_desc[0]; 9700 first_tlv = tlv; 9701 bytes_remain = fpin_length - offsetof(struct fc_els_fpin, fpin_desc); 9702 bytes_remain = min_t(u32, bytes_remain, be32_to_cpu(fpin->desc_len)); 9703 9704 /* process each descriptor separately */ 9705 while (bytes_remain >= FC_TLV_DESC_HDR_SZ && 9706 bytes_remain >= FC_TLV_DESC_SZ_FROM_LENGTH(tlv)) { 9707 dtag = be32_to_cpu(tlv->desc_tag); 9708 switch (dtag) { 9709 case ELS_DTAG_LNK_INTEGRITY: 9710 lpfc_els_rcv_fpin_li(phba, tlv); 9711 deliver = 1; 9712 break; 9713 case ELS_DTAG_DELIVERY: 9714 lpfc_els_rcv_fpin_del(phba, tlv); 9715 deliver = 1; 9716 break; 9717 case ELS_DTAG_PEER_CONGEST: 9718 lpfc_els_rcv_fpin_peer_cgn(phba, tlv); 9719 deliver = 1; 9720 break; 9721 case ELS_DTAG_CONGESTION: 9722 deliver = lpfc_els_rcv_fpin_cgn(phba, tlv); 9723 break; 9724 default: 9725 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 9726 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 9727 "4678 unknown FPIN descriptor[%d]: " 9728 "tag x%x (%s)\n", 9729 desc_cnt, dtag, dtag_nm); 9730 9731 /* If descriptor is bad, drop the rest of the data */ 9732 return; 9733 } 9734 lpfc_cgn_update_stat(phba, dtag); 9735 cnt = be32_to_cpu(tlv->desc_len); 9736 9737 /* Sanity check descriptor length. The desc_len value does not 9738 * include space for the desc_tag and the desc_len fields. 9739 */ 9740 len -= (cnt + sizeof(struct fc_tlv_desc)); 9741 if (len < 0) { 9742 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 9743 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 9744 "4672 Bad FPIN descriptor TLV length " 9745 "%d: %d %d %s\n", 9746 cnt, len, fpin_length, dtag_nm); 9747 return; 9748 } 9749 9750 current_tlv = tlv; 9751 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); 9752 tlv = fc_tlv_next_desc(tlv); 9753 9754 /* Format payload such that the FPIN delivered to the 9755 * upper layer is a single descriptor FPIN. 9756 */ 9757 if (desc_cnt) 9758 memcpy(first_tlv, current_tlv, 9759 (cnt + sizeof(struct fc_els_fpin))); 9760 9761 /* Adjust the length so that it only reflects a 9762 * single descriptor FPIN. 9763 */ 9764 fpin_length = cnt + sizeof(struct fc_els_fpin); 9765 fpin->desc_len = cpu_to_be32(fpin_length); 9766 fpin_length += sizeof(struct fc_els_fpin); /* the entire FPIN */ 9767 9768 /* Send every descriptor individually to the upper layer */ 9769 if (deliver) 9770 fc_host_fpin_rcv(lpfc_shost_from_vport(vport), 9771 fpin_length, (char *)fpin); 9772 desc_cnt++; 9773 } 9774 } 9775 9776 /** 9777 * lpfc_els_unsol_buffer - Process an unsolicited event data buffer 9778 * @phba: pointer to lpfc hba data structure. 9779 * @pring: pointer to a SLI ring. 9780 * @vport: pointer to a host virtual N_Port data structure. 9781 * @elsiocb: pointer to lpfc els command iocb data structure. 9782 * 9783 * This routine is used for processing the IOCB associated with a unsolicited 9784 * event. It first determines whether there is an existing ndlp that matches 9785 * the DID from the unsolicited IOCB. If not, it will create a new one with 9786 * the DID from the unsolicited IOCB. The ELS command from the unsolicited 9787 * IOCB is then used to invoke the proper routine and to set up proper state 9788 * of the discovery state machine. 
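 *
 * In outline (illustrative only):
 *
 *	validate the vport, iocb status, link state and unload/delay flags
 *	look up the ndlp for the remote DID, creating one if necessary
 *	switch on the ELS command:
 *		PLOGI/LOGO/PRLO/PRLI/ADISC/PDISC feed the discovery
 *		state machine; FLOGI/RSCN/RTV/RLS/RPL/FARP/FPIN/EDC and
 *		friends call their command-specific receive handlers;
 *		anything else is answered with LS_RJT (LSRJT_CMD_UNSUPPORTED)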
9789 **/ 9790 static void 9791 lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 9792 struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb) 9793 { 9794 struct lpfc_nodelist *ndlp; 9795 struct ls_rjt stat; 9796 uint32_t *payload, payload_len; 9797 uint32_t cmd, did, newnode; 9798 uint8_t rjt_exp, rjt_err = 0, init_link = 0; 9799 IOCB_t *icmd = &elsiocb->iocb; 9800 LPFC_MBOXQ_t *mbox; 9801 9802 if (!vport || !(elsiocb->context2)) 9803 goto dropit; 9804 9805 newnode = 0; 9806 payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt; 9807 payload_len = elsiocb->iocb.unsli3.rcvsli3.acc_len; 9808 cmd = *payload; 9809 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0) 9810 lpfc_post_buffer(phba, pring, 1); 9811 9812 did = icmd->un.rcvels.remoteID; 9813 if (icmd->ulpStatus) { 9814 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 9815 "RCV Unsol ELS: status:x%x/x%x did:x%x", 9816 icmd->ulpStatus, icmd->un.ulpWord[4], did); 9817 goto dropit; 9818 } 9819 9820 /* Check to see if link went down during discovery */ 9821 if (lpfc_els_chk_latt(vport)) 9822 goto dropit; 9823 9824 /* Ignore traffic received during vport shutdown. */ 9825 if (vport->load_flag & FC_UNLOADING) 9826 goto dropit; 9827 9828 /* If NPort discovery is delayed drop incoming ELS */ 9829 if ((vport->fc_flag & FC_DISC_DELAYED) && 9830 (cmd != ELS_CMD_PLOGI)) 9831 goto dropit; 9832 9833 ndlp = lpfc_findnode_did(vport, did); 9834 if (!ndlp) { 9835 /* Cannot find existing Fabric ndlp, so allocate a new one */ 9836 ndlp = lpfc_nlp_init(vport, did); 9837 if (!ndlp) 9838 goto dropit; 9839 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 9840 newnode = 1; 9841 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 9842 ndlp->nlp_type |= NLP_FABRIC; 9843 } else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 9844 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 9845 newnode = 1; 9846 } 9847 9848 phba->fc_stat.elsRcvFrame++; 9849 9850 /* 9851 * Do not process any unsolicited ELS commands 9852 * if the ndlp is in DEV_LOSS 9853 */ 9854 spin_lock_irq(&ndlp->lock); 9855 if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) { 9856 spin_unlock_irq(&ndlp->lock); 9857 if (newnode) 9858 lpfc_nlp_put(ndlp); 9859 goto dropit; 9860 } 9861 spin_unlock_irq(&ndlp->lock); 9862 9863 elsiocb->context1 = lpfc_nlp_get(ndlp); 9864 if (!elsiocb->context1) 9865 goto dropit; 9866 elsiocb->vport = vport; 9867 9868 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) { 9869 cmd &= ELS_CMD_MASK; 9870 } 9871 /* ELS command <elsCmd> received from NPORT <did> */ 9872 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 9873 "0112 ELS command x%x received from NPORT x%x " 9874 "refcnt %d Data: x%x x%x x%x x%x\n", 9875 cmd, did, kref_read(&ndlp->kref), vport->port_state, 9876 vport->fc_flag, vport->fc_myDID, vport->fc_prevDID); 9877 9878 /* reject till our FLOGI completes or PLOGI assigned DID via PT2PT */ 9879 if ((vport->port_state < LPFC_FABRIC_CFG_LINK) && 9880 (cmd != ELS_CMD_FLOGI) && 9881 !((cmd == ELS_CMD_PLOGI) && (vport->fc_flag & FC_PT2PT))) { 9882 rjt_err = LSRJT_LOGICAL_BSY; 9883 rjt_exp = LSEXP_NOTHING_MORE; 9884 goto lsrjt; 9885 } 9886 9887 switch (cmd) { 9888 case ELS_CMD_PLOGI: 9889 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 9890 "RCV PLOGI: did:x%x/ste:x%x flg:x%x", 9891 did, vport->port_state, ndlp->nlp_flag); 9892 9893 phba->fc_stat.elsRcvPLOGI++; 9894 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); 9895 if (phba->sli_rev == LPFC_SLI_REV4 && 9896 (phba->pport->fc_flag & FC_PT2PT)) { 9897 vport->fc_prevDID = vport->fc_myDID; 9898 /* Our DID needs 
to be updated before registering 9899 * the vfi. This is done in lpfc_rcv_plogi but 9900 * that is called after the reg_vfi. 9901 */ 9902 vport->fc_myDID = elsiocb->iocb.un.rcvels.parmRo; 9903 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 9904 "3312 Remote port assigned DID x%x " 9905 "%x\n", vport->fc_myDID, 9906 vport->fc_prevDID); 9907 } 9908 9909 lpfc_send_els_event(vport, ndlp, payload); 9910 9911 /* If Nport discovery is delayed, reject PLOGIs */ 9912 if (vport->fc_flag & FC_DISC_DELAYED) { 9913 rjt_err = LSRJT_UNABLE_TPC; 9914 rjt_exp = LSEXP_NOTHING_MORE; 9915 break; 9916 } 9917 9918 if (vport->port_state < LPFC_DISC_AUTH) { 9919 if (!(phba->pport->fc_flag & FC_PT2PT) || 9920 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { 9921 rjt_err = LSRJT_UNABLE_TPC; 9922 rjt_exp = LSEXP_NOTHING_MORE; 9923 break; 9924 } 9925 } 9926 9927 spin_lock_irq(&ndlp->lock); 9928 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE; 9929 spin_unlock_irq(&ndlp->lock); 9930 9931 lpfc_disc_state_machine(vport, ndlp, elsiocb, 9932 NLP_EVT_RCV_PLOGI); 9933 9934 break; 9935 case ELS_CMD_FLOGI: 9936 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 9937 "RCV FLOGI: did:x%x/ste:x%x flg:x%x", 9938 did, vport->port_state, ndlp->nlp_flag); 9939 9940 phba->fc_stat.elsRcvFLOGI++; 9941 9942 /* If the driver believes fabric discovery is done and is ready, 9943 * bounce the link. There is some descrepancy. 9944 */ 9945 if (vport->port_state >= LPFC_LOCAL_CFG_LINK && 9946 vport->fc_flag & FC_PT2PT && 9947 vport->rcv_flogi_cnt >= 1) { 9948 rjt_err = LSRJT_LOGICAL_BSY; 9949 rjt_exp = LSEXP_NOTHING_MORE; 9950 init_link++; 9951 goto lsrjt; 9952 } 9953 9954 lpfc_els_rcv_flogi(vport, elsiocb, ndlp); 9955 if (newnode) 9956 lpfc_disc_state_machine(vport, ndlp, NULL, 9957 NLP_EVT_DEVICE_RM); 9958 break; 9959 case ELS_CMD_LOGO: 9960 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 9961 "RCV LOGO: did:x%x/ste:x%x flg:x%x", 9962 did, vport->port_state, ndlp->nlp_flag); 9963 9964 phba->fc_stat.elsRcvLOGO++; 9965 lpfc_send_els_event(vport, ndlp, payload); 9966 if (vport->port_state < LPFC_DISC_AUTH) { 9967 rjt_err = LSRJT_UNABLE_TPC; 9968 rjt_exp = LSEXP_NOTHING_MORE; 9969 break; 9970 } 9971 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO); 9972 if (newnode) 9973 lpfc_disc_state_machine(vport, ndlp, NULL, 9974 NLP_EVT_DEVICE_RM); 9975 break; 9976 case ELS_CMD_PRLO: 9977 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 9978 "RCV PRLO: did:x%x/ste:x%x flg:x%x", 9979 did, vport->port_state, ndlp->nlp_flag); 9980 9981 phba->fc_stat.elsRcvPRLO++; 9982 lpfc_send_els_event(vport, ndlp, payload); 9983 if (vport->port_state < LPFC_DISC_AUTH) { 9984 rjt_err = LSRJT_UNABLE_TPC; 9985 rjt_exp = LSEXP_NOTHING_MORE; 9986 break; 9987 } 9988 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO); 9989 break; 9990 case ELS_CMD_LCB: 9991 phba->fc_stat.elsRcvLCB++; 9992 lpfc_els_rcv_lcb(vport, elsiocb, ndlp); 9993 break; 9994 case ELS_CMD_RDP: 9995 phba->fc_stat.elsRcvRDP++; 9996 lpfc_els_rcv_rdp(vport, elsiocb, ndlp); 9997 break; 9998 case ELS_CMD_RSCN: 9999 phba->fc_stat.elsRcvRSCN++; 10000 lpfc_els_rcv_rscn(vport, elsiocb, ndlp); 10001 if (newnode) 10002 lpfc_disc_state_machine(vport, ndlp, NULL, 10003 NLP_EVT_DEVICE_RM); 10004 break; 10005 case ELS_CMD_ADISC: 10006 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10007 "RCV ADISC: did:x%x/ste:x%x flg:x%x", 10008 did, vport->port_state, ndlp->nlp_flag); 10009 10010 lpfc_send_els_event(vport, ndlp, payload); 10011 phba->fc_stat.elsRcvADISC++; 10012 if (vport->port_state < 
LPFC_DISC_AUTH) { 10013 rjt_err = LSRJT_UNABLE_TPC; 10014 rjt_exp = LSEXP_NOTHING_MORE; 10015 break; 10016 } 10017 lpfc_disc_state_machine(vport, ndlp, elsiocb, 10018 NLP_EVT_RCV_ADISC); 10019 break; 10020 case ELS_CMD_PDISC: 10021 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10022 "RCV PDISC: did:x%x/ste:x%x flg:x%x", 10023 did, vport->port_state, ndlp->nlp_flag); 10024 10025 phba->fc_stat.elsRcvPDISC++; 10026 if (vport->port_state < LPFC_DISC_AUTH) { 10027 rjt_err = LSRJT_UNABLE_TPC; 10028 rjt_exp = LSEXP_NOTHING_MORE; 10029 break; 10030 } 10031 lpfc_disc_state_machine(vport, ndlp, elsiocb, 10032 NLP_EVT_RCV_PDISC); 10033 break; 10034 case ELS_CMD_FARPR: 10035 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10036 "RCV FARPR: did:x%x/ste:x%x flg:x%x", 10037 did, vport->port_state, ndlp->nlp_flag); 10038 10039 phba->fc_stat.elsRcvFARPR++; 10040 lpfc_els_rcv_farpr(vport, elsiocb, ndlp); 10041 break; 10042 case ELS_CMD_FARP: 10043 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10044 "RCV FARP: did:x%x/ste:x%x flg:x%x", 10045 did, vport->port_state, ndlp->nlp_flag); 10046 10047 phba->fc_stat.elsRcvFARP++; 10048 lpfc_els_rcv_farp(vport, elsiocb, ndlp); 10049 break; 10050 case ELS_CMD_FAN: 10051 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10052 "RCV FAN: did:x%x/ste:x%x flg:x%x", 10053 did, vport->port_state, ndlp->nlp_flag); 10054 10055 phba->fc_stat.elsRcvFAN++; 10056 lpfc_els_rcv_fan(vport, elsiocb, ndlp); 10057 break; 10058 case ELS_CMD_PRLI: 10059 case ELS_CMD_NVMEPRLI: 10060 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10061 "RCV PRLI: did:x%x/ste:x%x flg:x%x", 10062 did, vport->port_state, ndlp->nlp_flag); 10063 10064 phba->fc_stat.elsRcvPRLI++; 10065 if ((vport->port_state < LPFC_DISC_AUTH) && 10066 (vport->fc_flag & FC_FABRIC)) { 10067 rjt_err = LSRJT_UNABLE_TPC; 10068 rjt_exp = LSEXP_NOTHING_MORE; 10069 break; 10070 } 10071 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI); 10072 break; 10073 case ELS_CMD_LIRR: 10074 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10075 "RCV LIRR: did:x%x/ste:x%x flg:x%x", 10076 did, vport->port_state, ndlp->nlp_flag); 10077 10078 phba->fc_stat.elsRcvLIRR++; 10079 lpfc_els_rcv_lirr(vport, elsiocb, ndlp); 10080 if (newnode) 10081 lpfc_disc_state_machine(vport, ndlp, NULL, 10082 NLP_EVT_DEVICE_RM); 10083 break; 10084 case ELS_CMD_RLS: 10085 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10086 "RCV RLS: did:x%x/ste:x%x flg:x%x", 10087 did, vport->port_state, ndlp->nlp_flag); 10088 10089 phba->fc_stat.elsRcvRLS++; 10090 lpfc_els_rcv_rls(vport, elsiocb, ndlp); 10091 if (newnode) 10092 lpfc_disc_state_machine(vport, ndlp, NULL, 10093 NLP_EVT_DEVICE_RM); 10094 break; 10095 case ELS_CMD_RPL: 10096 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10097 "RCV RPL: did:x%x/ste:x%x flg:x%x", 10098 did, vport->port_state, ndlp->nlp_flag); 10099 10100 phba->fc_stat.elsRcvRPL++; 10101 lpfc_els_rcv_rpl(vport, elsiocb, ndlp); 10102 if (newnode) 10103 lpfc_disc_state_machine(vport, ndlp, NULL, 10104 NLP_EVT_DEVICE_RM); 10105 break; 10106 case ELS_CMD_RNID: 10107 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10108 "RCV RNID: did:x%x/ste:x%x flg:x%x", 10109 did, vport->port_state, ndlp->nlp_flag); 10110 10111 phba->fc_stat.elsRcvRNID++; 10112 lpfc_els_rcv_rnid(vport, elsiocb, ndlp); 10113 if (newnode) 10114 lpfc_disc_state_machine(vport, ndlp, NULL, 10115 NLP_EVT_DEVICE_RM); 10116 break; 10117 case ELS_CMD_RTV: 10118 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10119 "RCV RTV: 
did:x%x/ste:x%x flg:x%x", 10120 did, vport->port_state, ndlp->nlp_flag); 10121 phba->fc_stat.elsRcvRTV++; 10122 lpfc_els_rcv_rtv(vport, elsiocb, ndlp); 10123 if (newnode) 10124 lpfc_disc_state_machine(vport, ndlp, NULL, 10125 NLP_EVT_DEVICE_RM); 10126 break; 10127 case ELS_CMD_RRQ: 10128 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10129 "RCV RRQ: did:x%x/ste:x%x flg:x%x", 10130 did, vport->port_state, ndlp->nlp_flag); 10131 10132 phba->fc_stat.elsRcvRRQ++; 10133 lpfc_els_rcv_rrq(vport, elsiocb, ndlp); 10134 if (newnode) 10135 lpfc_disc_state_machine(vport, ndlp, NULL, 10136 NLP_EVT_DEVICE_RM); 10137 break; 10138 case ELS_CMD_ECHO: 10139 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10140 "RCV ECHO: did:x%x/ste:x%x flg:x%x", 10141 did, vport->port_state, ndlp->nlp_flag); 10142 10143 phba->fc_stat.elsRcvECHO++; 10144 lpfc_els_rcv_echo(vport, elsiocb, ndlp); 10145 if (newnode) 10146 lpfc_disc_state_machine(vport, ndlp, NULL, 10147 NLP_EVT_DEVICE_RM); 10148 break; 10149 case ELS_CMD_REC: 10150 /* receive this due to exchange closed */ 10151 rjt_err = LSRJT_UNABLE_TPC; 10152 rjt_exp = LSEXP_INVALID_OX_RX; 10153 break; 10154 case ELS_CMD_FPIN: 10155 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10156 "RCV FPIN: did:x%x/ste:x%x flg:x%x", 10157 did, vport->port_state, ndlp->nlp_flag); 10158 10159 lpfc_els_rcv_fpin(vport, (struct fc_els_fpin *)payload, 10160 payload_len); 10161 10162 /* There are no replies, so no rjt codes */ 10163 break; 10164 case ELS_CMD_EDC: 10165 lpfc_els_rcv_edc(vport, elsiocb, ndlp); 10166 break; 10167 case ELS_CMD_RDF: 10168 phba->fc_stat.elsRcvRDF++; 10169 /* Accept RDF only from fabric controller */ 10170 if (did != Fabric_Cntl_DID) { 10171 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 10172 "1115 Received RDF from invalid DID " 10173 "x%x\n", did); 10174 rjt_err = LSRJT_PROTOCOL_ERR; 10175 rjt_exp = LSEXP_NOTHING_MORE; 10176 goto lsrjt; 10177 } 10178 10179 lpfc_els_rcv_rdf(vport, elsiocb, ndlp); 10180 break; 10181 default: 10182 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10183 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x", 10184 cmd, did, vport->port_state); 10185 10186 /* Unsupported ELS command, reject */ 10187 rjt_err = LSRJT_CMD_UNSUPPORTED; 10188 rjt_exp = LSEXP_NOTHING_MORE; 10189 10190 /* Unknown ELS command <elsCmd> received from NPORT <did> */ 10191 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10192 "0115 Unknown ELS command x%x " 10193 "received from NPORT x%x\n", cmd, did); 10194 if (newnode) 10195 lpfc_disc_state_machine(vport, ndlp, NULL, 10196 NLP_EVT_DEVICE_RM); 10197 break; 10198 } 10199 10200 lsrjt: 10201 /* check if need to LS_RJT received ELS cmd */ 10202 if (rjt_err) { 10203 memset(&stat, 0, sizeof(stat)); 10204 stat.un.b.lsRjtRsnCode = rjt_err; 10205 stat.un.b.lsRjtRsnCodeExp = rjt_exp; 10206 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp, 10207 NULL); 10208 /* Remove the reference from above for new nodes. */ 10209 if (newnode) 10210 lpfc_disc_state_machine(vport, ndlp, NULL, 10211 NLP_EVT_DEVICE_RM); 10212 } 10213 10214 /* Release the reference on this elsiocb, not the ndlp. */ 10215 lpfc_nlp_put(elsiocb->context1); 10216 elsiocb->context1 = NULL; 10217 10218 /* Special case. Driver received an unsolicited command that 10219 * unsupportable given the driver's current state. Reset the 10220 * link and start over. 
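 * The reset is done by taking the link down (lpfc_linkdown()) and
 * re-issuing INIT_LINK through a no-wait mailbox command, as coded just
 * below.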
10221 */ 10222 if (init_link) { 10223 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10224 if (!mbox) 10225 return; 10226 lpfc_linkdown(phba); 10227 lpfc_init_link(phba, mbox, 10228 phba->cfg_topology, 10229 phba->cfg_link_speed); 10230 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; 10231 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 10232 mbox->vport = vport; 10233 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == 10234 MBX_NOT_FINISHED) 10235 mempool_free(mbox, phba->mbox_mem_pool); 10236 } 10237 10238 return; 10239 10240 dropit: 10241 if (vport && !(vport->load_flag & FC_UNLOADING)) 10242 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10243 "0111 Dropping received ELS cmd " 10244 "Data: x%x x%x x%x\n", 10245 icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout); 10246 phba->fc_stat.elsRcvDrop++; 10247 } 10248 10249 /** 10250 * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring 10251 * @phba: pointer to lpfc hba data structure. 10252 * @pring: pointer to a SLI ring. 10253 * @elsiocb: pointer to lpfc els iocb data structure. 10254 * 10255 * This routine is used to process an unsolicited event received from a SLI 10256 * (Service Level Interface) ring. The actual processing of the data buffer 10257 * associated with the unsolicited event is done by invoking the routine 10258 * lpfc_els_unsol_buffer() after properly set up the iocb buffer from the 10259 * SLI ring on which the unsolicited event was received. 10260 **/ 10261 void 10262 lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10263 struct lpfc_iocbq *elsiocb) 10264 { 10265 struct lpfc_vport *vport = phba->pport; 10266 IOCB_t *icmd = &elsiocb->iocb; 10267 dma_addr_t paddr; 10268 struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2; 10269 struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3; 10270 10271 elsiocb->context1 = NULL; 10272 elsiocb->context2 = NULL; 10273 elsiocb->context3 = NULL; 10274 10275 if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) { 10276 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); 10277 } else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT && 10278 (icmd->un.ulpWord[4] & IOERR_PARAM_MASK) == 10279 IOERR_RCV_BUFFER_WAITING) { 10280 phba->fc_stat.NoRcvBuf++; 10281 /* Not enough posted buffers; Try posting more buffers */ 10282 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) 10283 lpfc_post_buffer(phba, pring, 0); 10284 return; 10285 } 10286 10287 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 10288 (icmd->ulpCommand == CMD_IOCB_RCV_ELS64_CX || 10289 icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 10290 if (icmd->unsli3.rcvsli3.vpi == 0xffff) 10291 vport = phba->pport; 10292 else 10293 vport = lpfc_find_vport_by_vpid(phba, 10294 icmd->unsli3.rcvsli3.vpi); 10295 } 10296 10297 /* If there are no BDEs associated 10298 * with this IOCB, there is nothing to do. 10299 */ 10300 if (icmd->ulpBdeCount == 0) 10301 return; 10302 10303 /* type of ELS cmd is first 32bit word 10304 * in packet 10305 */ 10306 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 10307 elsiocb->context2 = bdeBuf1; 10308 } else { 10309 paddr = getPaddr(icmd->un.cont64[0].addrHigh, 10310 icmd->un.cont64[0].addrLow); 10311 elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring, 10312 paddr); 10313 } 10314 10315 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb); 10316 /* 10317 * The different unsolicited event handlers would tell us 10318 * if they are done with "mp" by setting context2 to NULL. 
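 * Any buffer still attached to context2 at this point was not consumed
 * by a handler and is returned to the pool via lpfc_in_buf_free() below.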
10319 */ 10320 if (elsiocb->context2) { 10321 lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2); 10322 elsiocb->context2 = NULL; 10323 } 10324 10325 /* RCV_ELS64_CX provide for 2 BDEs - process 2nd if included */ 10326 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) && 10327 icmd->ulpBdeCount == 2) { 10328 elsiocb->context2 = bdeBuf2; 10329 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb); 10330 /* free mp if we are done with it */ 10331 if (elsiocb->context2) { 10332 lpfc_in_buf_free(phba, elsiocb->context2); 10333 elsiocb->context2 = NULL; 10334 } 10335 } 10336 } 10337 10338 static void 10339 lpfc_start_fdmi(struct lpfc_vport *vport) 10340 { 10341 struct lpfc_nodelist *ndlp; 10342 10343 /* If this is the first time, allocate an ndlp and initialize 10344 * it. Otherwise, make sure the node is enabled and then do the 10345 * login. 10346 */ 10347 ndlp = lpfc_findnode_did(vport, FDMI_DID); 10348 if (!ndlp) { 10349 ndlp = lpfc_nlp_init(vport, FDMI_DID); 10350 if (ndlp) { 10351 ndlp->nlp_type |= NLP_FABRIC; 10352 } else { 10353 return; 10354 } 10355 } 10356 10357 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 10358 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 10359 } 10360 10361 /** 10362 * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr 10363 * @phba: pointer to lpfc hba data structure. 10364 * @vport: pointer to a virtual N_Port data structure. 10365 * 10366 * This routine issues a Port Login (PLOGI) to the Name Server with 10367 * State Change Request (SCR) for a @vport. This routine will create an 10368 * ndlp for the Name Server associated to the @vport if such node does 10369 * not already exist. The PLOGI to Name Server is issued by invoking the 10370 * lpfc_issue_els_plogi() routine. If Fabric-Device Management Interface 10371 * (FDMI) is configured to the @vport, a FDMI node will be created and 10372 * the PLOGI to FDMI is issued by invoking lpfc_issue_els_plogi() routine. 10373 **/ 10374 void 10375 lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) 10376 { 10377 struct lpfc_nodelist *ndlp; 10378 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 10379 10380 /* 10381 * If lpfc_delay_discovery parameter is set and the clean address 10382 * bit is cleared and fc fabric parameters chenged, delay FC NPort 10383 * discovery. 
10384 */ 10385 spin_lock_irq(shost->host_lock); 10386 if (vport->fc_flag & FC_DISC_DELAYED) { 10387 spin_unlock_irq(shost->host_lock); 10388 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10389 "3334 Delay fc port discovery for %d secs\n", 10390 phba->fc_ratov); 10391 mod_timer(&vport->delayed_disc_tmo, 10392 jiffies + msecs_to_jiffies(1000 * phba->fc_ratov)); 10393 return; 10394 } 10395 spin_unlock_irq(shost->host_lock); 10396 10397 ndlp = lpfc_findnode_did(vport, NameServer_DID); 10398 if (!ndlp) { 10399 ndlp = lpfc_nlp_init(vport, NameServer_DID); 10400 if (!ndlp) { 10401 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 10402 lpfc_disc_start(vport); 10403 return; 10404 } 10405 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10406 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10407 "0251 NameServer login: no memory\n"); 10408 return; 10409 } 10410 } 10411 10412 ndlp->nlp_type |= NLP_FABRIC; 10413 10414 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 10415 10416 if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) { 10417 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10418 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10419 "0252 Cannot issue NameServer login\n"); 10420 return; 10421 } 10422 10423 if ((phba->cfg_enable_SmartSAN || 10424 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) && 10425 (vport->load_flag & FC_ALLOW_FDMI)) 10426 lpfc_start_fdmi(vport); 10427 } 10428 10429 /** 10430 * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport 10431 * @phba: pointer to lpfc hba data structure. 10432 * @pmb: pointer to the driver internal queue element for mailbox command. 10433 * 10434 * This routine is the completion callback function to register new vport 10435 * mailbox command. If the new vport mailbox command completes successfully, 10436 * the fabric registration login shall be performed on physical port (the 10437 * new vport created is actually a physical port, with VPI 0) or the port 10438 * login to Name Server for State Change Request (SCR) will be performed 10439 * on virtual port (real virtual port, with VPI greater than 0). 
10440 **/ 10441 static void 10442 lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 10443 { 10444 struct lpfc_vport *vport = pmb->vport; 10445 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 10446 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; 10447 MAILBOX_t *mb = &pmb->u.mb; 10448 int rc; 10449 10450 spin_lock_irq(shost->host_lock); 10451 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 10452 spin_unlock_irq(shost->host_lock); 10453 10454 if (mb->mbxStatus) { 10455 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10456 "0915 Register VPI failed : Status: x%x" 10457 " upd bit: x%x \n", mb->mbxStatus, 10458 mb->un.varRegVpi.upd); 10459 if (phba->sli_rev == LPFC_SLI_REV4 && 10460 mb->un.varRegVpi.upd) 10461 goto mbox_err_exit ; 10462 10463 switch (mb->mbxStatus) { 10464 case 0x11: /* unsupported feature */ 10465 case 0x9603: /* max_vpi exceeded */ 10466 case 0x9602: /* Link event since CLEAR_LA */ 10467 /* giving up on vport registration */ 10468 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10469 spin_lock_irq(shost->host_lock); 10470 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 10471 spin_unlock_irq(shost->host_lock); 10472 lpfc_can_disctmo(vport); 10473 break; 10474 /* If reg_vpi fail with invalid VPI status, re-init VPI */ 10475 case 0x20: 10476 spin_lock_irq(shost->host_lock); 10477 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 10478 spin_unlock_irq(shost->host_lock); 10479 lpfc_init_vpi(phba, pmb, vport->vpi); 10480 pmb->vport = vport; 10481 pmb->mbox_cmpl = lpfc_init_vpi_cmpl; 10482 rc = lpfc_sli_issue_mbox(phba, pmb, 10483 MBX_NOWAIT); 10484 if (rc == MBX_NOT_FINISHED) { 10485 lpfc_printf_vlog(vport, KERN_ERR, 10486 LOG_TRACE_EVENT, 10487 "2732 Failed to issue INIT_VPI" 10488 " mailbox command\n"); 10489 } else { 10490 lpfc_nlp_put(ndlp); 10491 return; 10492 } 10493 fallthrough; 10494 default: 10495 /* Try to recover from this error */ 10496 if (phba->sli_rev == LPFC_SLI_REV4) 10497 lpfc_sli4_unreg_all_rpis(vport); 10498 lpfc_mbx_unreg_vpi(vport); 10499 spin_lock_irq(shost->host_lock); 10500 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 10501 spin_unlock_irq(shost->host_lock); 10502 if (mb->mbxStatus == MBX_NOT_FINISHED) 10503 break; 10504 if ((vport->port_type == LPFC_PHYSICAL_PORT) && 10505 !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) { 10506 if (phba->sli_rev == LPFC_SLI_REV4) 10507 lpfc_issue_init_vfi(vport); 10508 else 10509 lpfc_initial_flogi(vport); 10510 } else { 10511 lpfc_initial_fdisc(vport); 10512 } 10513 break; 10514 } 10515 } else { 10516 spin_lock_irq(shost->host_lock); 10517 vport->vpi_state |= LPFC_VPI_REGISTERED; 10518 spin_unlock_irq(shost->host_lock); 10519 if (vport == phba->pport) { 10520 if (phba->sli_rev < LPFC_SLI_REV4) 10521 lpfc_issue_fabric_reglogin(vport); 10522 else { 10523 /* 10524 * If the physical port is instantiated using 10525 * FDISC, do not start vport discovery. 10526 */ 10527 if (vport->port_state != LPFC_FDISC) 10528 lpfc_start_fdiscs(phba); 10529 lpfc_do_scr_ns_plogi(phba, vport); 10530 } 10531 } else { 10532 lpfc_do_scr_ns_plogi(phba, vport); 10533 } 10534 } 10535 mbox_err_exit: 10536 /* Now, we decrement the ndlp reference count held for this 10537 * callback function 10538 */ 10539 lpfc_nlp_put(ndlp); 10540 10541 mempool_free(pmb, phba->mbox_mem_pool); 10542 return; 10543 } 10544 10545 /** 10546 * lpfc_register_new_vport - Register a new vport with a HBA 10547 * @phba: pointer to lpfc hba data structure. 10548 * @vport: pointer to a host virtual N_Port data structure. 
10549 * @ndlp: pointer to a node-list data structure. 10550 * 10551 * This routine registers the @vport as a new virtual port with a HBA. 10552 * It is done through a registering vpi mailbox command. 10553 **/ 10554 void 10555 lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport, 10556 struct lpfc_nodelist *ndlp) 10557 { 10558 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 10559 LPFC_MBOXQ_t *mbox; 10560 10561 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10562 if (mbox) { 10563 lpfc_reg_vpi(vport, mbox); 10564 mbox->vport = vport; 10565 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 10566 if (!mbox->ctx_ndlp) { 10567 mempool_free(mbox, phba->mbox_mem_pool); 10568 goto mbox_err_exit; 10569 } 10570 10571 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport; 10572 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 10573 == MBX_NOT_FINISHED) { 10574 /* mailbox command not success, decrement ndlp 10575 * reference count for this command 10576 */ 10577 lpfc_nlp_put(ndlp); 10578 mempool_free(mbox, phba->mbox_mem_pool); 10579 10580 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10581 "0253 Register VPI: Can't send mbox\n"); 10582 goto mbox_err_exit; 10583 } 10584 } else { 10585 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10586 "0254 Register VPI: no memory\n"); 10587 goto mbox_err_exit; 10588 } 10589 return; 10590 10591 mbox_err_exit: 10592 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10593 spin_lock_irq(shost->host_lock); 10594 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 10595 spin_unlock_irq(shost->host_lock); 10596 return; 10597 } 10598 10599 /** 10600 * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer 10601 * @phba: pointer to lpfc hba data structure. 10602 * 10603 * This routine cancels the retry delay timers to all the vports. 10604 **/ 10605 void 10606 lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba) 10607 { 10608 struct lpfc_vport **vports; 10609 struct lpfc_nodelist *ndlp; 10610 uint32_t link_state; 10611 int i; 10612 10613 /* Treat this failure as linkdown for all vports */ 10614 link_state = phba->link_state; 10615 lpfc_linkdown(phba); 10616 phba->link_state = link_state; 10617 10618 vports = lpfc_create_vport_work_array(phba); 10619 10620 if (vports) { 10621 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 10622 ndlp = lpfc_findnode_did(vports[i], Fabric_DID); 10623 if (ndlp) 10624 lpfc_cancel_retry_delay_tmo(vports[i], ndlp); 10625 lpfc_els_flush_cmd(vports[i]); 10626 } 10627 lpfc_destroy_vport_work_array(phba, vports); 10628 } 10629 } 10630 10631 /** 10632 * lpfc_retry_pport_discovery - Start timer to retry FLOGI. 10633 * @phba: pointer to lpfc hba data structure. 10634 * 10635 * This routine abort all pending discovery commands and 10636 * start a timer to retry FLOGI for the physical port 10637 * discovery. 
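 *
 * The retry is driven through the Fabric_DID node's nlp_delayfunc timer:
 * NLP_DELAY_TMO is set, nlp_last_elscmd is set to ELS_CMD_FLOGI, and the
 * physical port state is moved back to LPFC_FLOGI, so the delayed-command
 * path reissues the FLOGI about one second later.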
10638 **/ 10639 void 10640 lpfc_retry_pport_discovery(struct lpfc_hba *phba) 10641 { 10642 struct lpfc_nodelist *ndlp; 10643 10644 /* Cancel all the vports' retry delay timers */ 10645 lpfc_cancel_all_vport_retry_delay_timer(phba); 10646 10647 /* If the fabric requires FLOGI, then re-instantiate the physical login */ 10648 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); 10649 if (!ndlp) 10650 return; 10651 10652 mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); 10653 spin_lock_irq(&ndlp->lock); 10654 ndlp->nlp_flag |= NLP_DELAY_TMO; 10655 spin_unlock_irq(&ndlp->lock); 10656 ndlp->nlp_last_elscmd = ELS_CMD_FLOGI; 10657 phba->pport->port_state = LPFC_FLOGI; 10658 return; 10659 } 10660 10661 /** 10662 * lpfc_fabric_login_reqd - Check if FLOGI required. 10663 * @phba: pointer to lpfc hba data structure. 10664 * @cmdiocb: pointer to FDISC command iocb. 10665 * @rspiocb: pointer to FDISC response iocb. 10666 * 10667 * This routine checks if a FLOGI is required for FDISC 10668 * to succeed. 10669 **/ 10670 static int 10671 lpfc_fabric_login_reqd(struct lpfc_hba *phba, 10672 struct lpfc_iocbq *cmdiocb, 10673 struct lpfc_iocbq *rspiocb) 10674 { 10675 10676 if ((rspiocb->iocb.ulpStatus != IOSTAT_FABRIC_RJT) || 10677 (rspiocb->iocb.un.ulpWord[4] != RJT_LOGIN_REQUIRED)) 10678 return 0; 10679 else 10680 return 1; 10681 } 10682 10683 /** 10684 * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command 10685 * @phba: pointer to lpfc hba data structure. 10686 * @cmdiocb: pointer to lpfc command iocb data structure. 10687 * @rspiocb: pointer to lpfc response iocb data structure. 10688 * 10689 * This routine is the completion callback function to a Fabric Discover 10690 * (FDISC) ELS command. Since all the FDISC ELS commands are issued 10691 * single threaded, each FDISC completion callback function will reset 10692 * the discovery timer for all vports so that the timers do not time out 10693 * unnecessarily. The function checks the FDISC IOCB status. If an error is 10694 * detected, the vport will be set to FC_VPORT_FAILED state. Otherwise, the 10695 * vport will be set to FC_VPORT_ACTIVE state. It then checks whether the DID 10696 * assigned to the vport has been changed with the completion of the FDISC 10697 * command. If so, both RPI (Remote Port Index) and VPI (Virtual Port Index) 10698 * are unregistered from the HBA, and then the lpfc_register_new_vport() 10699 * routine is invoked to register the new vport with the HBA. Otherwise, the 10700 * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name 10701 * Server for State Change Request (SCR). 10702 **/ 10703 static void 10704 lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 10705 struct lpfc_iocbq *rspiocb) 10706 { 10707 struct lpfc_vport *vport = cmdiocb->vport; 10708 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 10709 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 10710 struct lpfc_nodelist *np; 10711 struct lpfc_nodelist *next_np; 10712 IOCB_t *irsp = &rspiocb->iocb; 10713 struct lpfc_iocbq *piocb; 10714 struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp; 10715 struct serv_parm *sp; 10716 uint8_t fabric_param_changed; 10717 10718 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 10719 "0123 FDISC completes. x%x/x%x prevDID: x%x\n", 10720 irsp->ulpStatus, irsp->un.ulpWord[4], 10721 vport->fc_prevDID); 10722 /* Since all FDISCs are being single threaded, we 10723 * must reset the discovery timer for ALL vports 10724 * waiting to send FDISC when one completes.
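 * The list walked below is phba->fabric_iocb_list, so every vport that
 * still has an FDISC queued gets its discovery timer refreshed.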
10725 */ 10726 list_for_each_entry(piocb, &phba->fabric_iocb_list, list) { 10727 lpfc_set_disctmo(piocb->vport); 10728 } 10729 10730 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 10731 "FDISC cmpl: status:x%x/x%x prevdid:x%x", 10732 irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID); 10733 10734 if (irsp->ulpStatus) { 10735 10736 if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) { 10737 lpfc_retry_pport_discovery(phba); 10738 goto out; 10739 } 10740 10741 /* Check for retry */ 10742 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) 10743 goto out; 10744 /* FDISC failed */ 10745 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10746 "0126 FDISC failed. (x%x/x%x)\n", 10747 irsp->ulpStatus, irsp->un.ulpWord[4]); 10748 goto fdisc_failed; 10749 } 10750 10751 lpfc_check_nlp_post_devloss(vport, ndlp); 10752 10753 spin_lock_irq(shost->host_lock); 10754 vport->fc_flag &= ~FC_VPORT_CVL_RCVD; 10755 vport->fc_flag &= ~FC_VPORT_LOGO_RCVD; 10756 vport->fc_flag |= FC_FABRIC; 10757 if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP) 10758 vport->fc_flag |= FC_PUBLIC_LOOP; 10759 spin_unlock_irq(shost->host_lock); 10760 10761 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID; 10762 lpfc_vport_set_state(vport, FC_VPORT_ACTIVE); 10763 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 10764 if (!prsp) 10765 goto out; 10766 sp = prsp->virt + sizeof(uint32_t); 10767 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp); 10768 memcpy(&vport->fabric_portname, &sp->portName, 10769 sizeof(struct lpfc_name)); 10770 memcpy(&vport->fabric_nodename, &sp->nodeName, 10771 sizeof(struct lpfc_name)); 10772 if (fabric_param_changed && 10773 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 10774 /* If our NportID changed, we need to ensure all 10775 * remaining NPORTs get unreg_login'ed so we can 10776 * issue unreg_vpi. 10777 */ 10778 list_for_each_entry_safe(np, next_np, 10779 &vport->fc_nodes, nlp_listp) { 10780 if ((np->nlp_state != NLP_STE_NPR_NODE) || 10781 !(np->nlp_flag & NLP_NPR_ADISC)) 10782 continue; 10783 spin_lock_irq(&ndlp->lock); 10784 np->nlp_flag &= ~NLP_NPR_ADISC; 10785 spin_unlock_irq(&ndlp->lock); 10786 lpfc_unreg_rpi(vport, np); 10787 } 10788 lpfc_cleanup_pending_mbox(vport); 10789 10790 if (phba->sli_rev == LPFC_SLI_REV4) 10791 lpfc_sli4_unreg_all_rpis(vport); 10792 10793 lpfc_mbx_unreg_vpi(vport); 10794 spin_lock_irq(shost->host_lock); 10795 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 10796 if (phba->sli_rev == LPFC_SLI_REV4) 10797 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 10798 else 10799 vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG; 10800 spin_unlock_irq(shost->host_lock); 10801 } else if ((phba->sli_rev == LPFC_SLI_REV4) && 10802 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 10803 /* 10804 * Driver needs to re-reg VPI in order for f/w 10805 * to update the MAC address. 10806 */ 10807 lpfc_register_new_vport(phba, vport, ndlp); 10808 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 10809 goto out; 10810 } 10811 10812 if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI) 10813 lpfc_issue_init_vpi(vport); 10814 else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) 10815 lpfc_register_new_vport(phba, vport, ndlp); 10816 else 10817 lpfc_do_scr_ns_plogi(phba, vport); 10818 10819 /* The FDISC completed successfully. Move the fabric ndlp to 10820 * UNMAPPED state and register with the transport. 
10821 */ 10822 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 10823 goto out; 10824 10825 fdisc_failed: 10826 if (vport->fc_vport && 10827 (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS)) 10828 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10829 /* Cancel discovery timer */ 10830 lpfc_can_disctmo(vport); 10831 out: 10832 lpfc_els_free_iocb(phba, cmdiocb); 10833 lpfc_nlp_put(ndlp); 10834 } 10835 10836 /** 10837 * lpfc_issue_els_fdisc - Issue a fdisc iocb command 10838 * @vport: pointer to a virtual N_Port data structure. 10839 * @ndlp: pointer to a node-list data structure. 10840 * @retry: number of retries to the command IOCB. 10841 * 10842 * This routine prepares and issues a Fabric Discover (FDISC) IOCB to 10843 * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb() 10844 * routine to issue the IOCB, which makes sure only one outstanding fabric 10845 * IOCB will be sent off HBA at any given time. 10846 * 10847 * Note that the ndlp reference count will be incremented by 1 for holding the 10848 * ndlp and the reference to ndlp will be stored into the context1 field of 10849 * the IOCB for the completion callback function to the FDISC ELS command. 10850 * 10851 * Return code 10852 * 0 - Successfully issued fdisc iocb command 10853 * 1 - Failed to issue fdisc iocb command 10854 **/ 10855 static int 10856 lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 10857 uint8_t retry) 10858 { 10859 struct lpfc_hba *phba = vport->phba; 10860 IOCB_t *icmd; 10861 struct lpfc_iocbq *elsiocb; 10862 struct serv_parm *sp; 10863 uint8_t *pcmd; 10864 uint16_t cmdsize; 10865 int did = ndlp->nlp_DID; 10866 int rc; 10867 10868 vport->port_state = LPFC_FDISC; 10869 vport->fc_myDID = 0; 10870 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 10871 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 10872 ELS_CMD_FDISC); 10873 if (!elsiocb) { 10874 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10875 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10876 "0255 Issue FDISC: no IOCB\n"); 10877 return 1; 10878 } 10879 10880 icmd = &elsiocb->iocb; 10881 icmd->un.elsreq64.myID = 0; 10882 icmd->un.elsreq64.fl = 1; 10883 10884 /* 10885 * SLI3 ports require a different context type value than SLI4. 10886 * Catch SLI3 ports here and override the prep. 
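 * For SLI3 the context type is carried in ulpCt_h/ulpCt_l, which are
 * forced to 1/0 below.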
10887 */ 10888 if (phba->sli_rev == LPFC_SLI_REV3) { 10889 icmd->ulpCt_h = 1; 10890 icmd->ulpCt_l = 0; 10891 } 10892 10893 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 10894 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC; 10895 pcmd += sizeof(uint32_t); /* CSP Word 1 */ 10896 memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm)); 10897 sp = (struct serv_parm *) pcmd; 10898 /* Setup CSPs accordingly for Fabric */ 10899 sp->cmn.e_d_tov = 0; 10900 sp->cmn.w2.r_a_tov = 0; 10901 sp->cmn.virtual_fabric_support = 0; 10902 sp->cls1.classValid = 0; 10903 sp->cls2.seqDelivery = 1; 10904 sp->cls3.seqDelivery = 1; 10905 10906 pcmd += sizeof(uint32_t); /* CSP Word 2 */ 10907 pcmd += sizeof(uint32_t); /* CSP Word 3 */ 10908 pcmd += sizeof(uint32_t); /* CSP Word 4 */ 10909 pcmd += sizeof(uint32_t); /* Port Name */ 10910 memcpy(pcmd, &vport->fc_portname, 8); 10911 pcmd += sizeof(uint32_t); /* Node Name */ 10912 pcmd += sizeof(uint32_t); /* Node Name */ 10913 memcpy(pcmd, &vport->fc_nodename, 8); 10914 sp->cmn.valid_vendor_ver_level = 0; 10915 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); 10916 lpfc_set_disctmo(vport); 10917 10918 phba->fc_stat.elsXmitFDISC++; 10919 elsiocb->iocb_cmpl = lpfc_cmpl_els_fdisc; 10920 10921 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 10922 "Issue FDISC: did:x%x", 10923 did, 0, 0); 10924 10925 elsiocb->context1 = lpfc_nlp_get(ndlp); 10926 if (!elsiocb->context1) { 10927 lpfc_els_free_iocb(phba, elsiocb); 10928 goto err_out; 10929 } 10930 10931 rc = lpfc_issue_fabric_iocb(phba, elsiocb); 10932 if (rc == IOCB_ERROR) { 10933 lpfc_els_free_iocb(phba, elsiocb); 10934 lpfc_nlp_put(ndlp); 10935 goto err_out; 10936 } 10937 10938 lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING); 10939 return 0; 10940 10941 err_out: 10942 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10943 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10944 "0256 Issue FDISC: Cannot send IOCB\n"); 10945 return 1; 10946 } 10947 10948 /** 10949 * lpfc_cmpl_els_npiv_logo - Completion function with vport logo 10950 * @phba: pointer to lpfc hba data structure. 10951 * @cmdiocb: pointer to lpfc command iocb data structure. 10952 * @rspiocb: pointer to lpfc response iocb data structure. 10953 * 10954 * This routine is the completion callback function to the issuing of a LOGO 10955 * ELS command off a vport. It frees the command IOCB and then decrement the 10956 * reference count held on ndlp for this completion function, indicating that 10957 * the reference to the ndlp is no long needed. Note that the 10958 * lpfc_els_free_iocb() routine decrements the ndlp reference held for this 10959 * callback function and an additional explicit ndlp reference decrementation 10960 * will trigger the actual release of the ndlp. 
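 *
 * On IOSTAT_SUCCESS the vport's FC_NDISC_ACTIVE and FC_FABRIC flags are
 * cleared and the discovery timer is cancelled. If NLP_WAIT_FOR_LOGO is set
 * in save_flags, the waiter on logo_waitq (typically lpfc_vport_delete())
 * is woken before the iocb and ndlp references are released.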
10961 **/ 10962 static void 10963 lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 10964 struct lpfc_iocbq *rspiocb) 10965 { 10966 struct lpfc_vport *vport = cmdiocb->vport; 10967 IOCB_t *irsp; 10968 struct lpfc_nodelist *ndlp; 10969 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 10970 10971 ndlp = (struct lpfc_nodelist *)cmdiocb->context1; 10972 irsp = &rspiocb->iocb; 10973 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 10974 "LOGO npiv cmpl: status:x%x/x%x did:x%x", 10975 irsp->ulpStatus, irsp->un.ulpWord[4], irsp->un.rcvels.remoteID); 10976 10977 /* NPIV LOGO completes to NPort <nlp_DID> */ 10978 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 10979 "2928 NPIV LOGO completes to NPort x%x " 10980 "Data: x%x x%x x%x x%x x%x x%x x%x\n", 10981 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 10982 irsp->ulpTimeout, vport->num_disc_nodes, 10983 kref_read(&ndlp->kref), ndlp->nlp_flag, 10984 ndlp->fc4_xpt_flags); 10985 10986 if (irsp->ulpStatus == IOSTAT_SUCCESS) { 10987 spin_lock_irq(shost->host_lock); 10988 vport->fc_flag &= ~FC_NDISC_ACTIVE; 10989 vport->fc_flag &= ~FC_FABRIC; 10990 spin_unlock_irq(shost->host_lock); 10991 lpfc_can_disctmo(vport); 10992 } 10993 10994 if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) { 10995 /* Wake up lpfc_vport_delete if waiting...*/ 10996 if (ndlp->logo_waitq) 10997 wake_up(ndlp->logo_waitq); 10998 spin_lock_irq(&ndlp->lock); 10999 ndlp->nlp_flag &= ~(NLP_ISSUE_LOGO | NLP_LOGO_SND); 11000 ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO; 11001 spin_unlock_irq(&ndlp->lock); 11002 } 11003 11004 /* Safe to release resources now. */ 11005 lpfc_els_free_iocb(phba, cmdiocb); 11006 lpfc_nlp_put(ndlp); 11007 } 11008 11009 /** 11010 * lpfc_issue_els_npiv_logo - Issue a logo off a vport 11011 * @vport: pointer to a virtual N_Port data structure. 11012 * @ndlp: pointer to a node-list data structure. 11013 * 11014 * This routine issues a LOGO ELS command to an @ndlp off a @vport. 11015 * 11016 * Note that the ndlp reference count will be incremented by 1 for holding the 11017 * ndlp and the reference to ndlp will be stored into the context1 field of 11018 * the IOCB for the completion callback function to the LOGO ELS command. 
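 *
 * A minimal usage sketch (illustrative only, not a verbatim caller):
 *
 *	if (lpfc_issue_els_npiv_logo(vport, ndlp))
 *		handle the failure here; no LOGO went out
 *	else
 *		completion is reported through lpfc_cmpl_els_npiv_logo()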
11019 * 11020 * Return codes 11021 * 0 - Successfully issued logo off the @vport 11022 * 1 - Failed to issue logo off the @vport 11023 **/ 11024 int 11025 lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 11026 { 11027 int rc = 0; 11028 struct lpfc_hba *phba = vport->phba; 11029 struct lpfc_iocbq *elsiocb; 11030 uint8_t *pcmd; 11031 uint16_t cmdsize; 11032 11033 cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name); 11034 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID, 11035 ELS_CMD_LOGO); 11036 if (!elsiocb) 11037 return 1; 11038 11039 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 11040 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 11041 pcmd += sizeof(uint32_t); 11042 11043 /* Fill in LOGO payload */ 11044 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 11045 pcmd += sizeof(uint32_t); 11046 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 11047 11048 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 11049 "Issue LOGO npiv did:x%x flg:x%x", 11050 ndlp->nlp_DID, ndlp->nlp_flag, 0); 11051 11052 elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo; 11053 spin_lock_irq(&ndlp->lock); 11054 ndlp->nlp_flag |= NLP_LOGO_SND; 11055 spin_unlock_irq(&ndlp->lock); 11056 elsiocb->context1 = lpfc_nlp_get(ndlp); 11057 if (!elsiocb->context1) { 11058 lpfc_els_free_iocb(phba, elsiocb); 11059 goto err; 11060 } 11061 11062 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 11063 if (rc == IOCB_ERROR) { 11064 lpfc_els_free_iocb(phba, elsiocb); 11065 lpfc_nlp_put(ndlp); 11066 goto err; 11067 } 11068 return 0; 11069 11070 err: 11071 spin_lock_irq(&ndlp->lock); 11072 ndlp->nlp_flag &= ~NLP_LOGO_SND; 11073 spin_unlock_irq(&ndlp->lock); 11074 return 1; 11075 } 11076 11077 /** 11078 * lpfc_fabric_block_timeout - Handler function to the fabric block timer 11079 * @t: timer context used to obtain the lpfc hba. 11080 * 11081 * This routine is invoked by the fabric iocb block timer after 11082 * timeout. It posts the fabric iocb block timeout event by setting the 11083 * WORKER_FABRIC_BLOCK_TMO bit to work port event bitmap and then invokes 11084 * lpfc_worker_wake_up() routine to wake up the worker thread. It is for 11085 * the worker thread to invoke the lpfc_unblock_fabric_iocbs() on the 11086 * posted event WORKER_FABRIC_BLOCK_TMO. 11087 **/ 11088 void 11089 lpfc_fabric_block_timeout(struct timer_list *t) 11090 { 11091 struct lpfc_hba *phba = from_timer(phba, t, fabric_block_timer); 11092 unsigned long iflags; 11093 uint32_t tmo_posted; 11094 11095 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 11096 tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO; 11097 if (!tmo_posted) 11098 phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO; 11099 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); 11100 11101 if (!tmo_posted) 11102 lpfc_worker_wake_up(phba); 11103 return; 11104 } 11105 11106 /** 11107 * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list 11108 * @phba: pointer to lpfc hba data structure. 11109 * 11110 * This routine issues one fabric iocb from the driver internal list to 11111 * the HBA. It first checks whether it's ready to issue one fabric iocb to 11112 * the HBA (whether there is no outstanding fabric iocb). If so, it shall 11113 * remove one pending fabric iocb from the driver internal list and invokes 11114 * lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA. 
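 *
 * If lpfc_sli_issue_iocb() returns IOCB_ERROR, the iocb is completed in
 * place with IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED, the fabric iocb count
 * is dropped, and the routine loops to try the next queued iocb.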
11115 **/ 11116 static void 11117 lpfc_resume_fabric_iocbs(struct lpfc_hba *phba) 11118 { 11119 struct lpfc_iocbq *iocb; 11120 unsigned long iflags; 11121 int ret; 11122 IOCB_t *cmd; 11123 11124 repeat: 11125 iocb = NULL; 11126 spin_lock_irqsave(&phba->hbalock, iflags); 11127 /* Post any pending iocb to the SLI layer */ 11128 if (atomic_read(&phba->fabric_iocb_count) == 0) { 11129 list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb), 11130 list); 11131 if (iocb) 11132 /* Increment fabric iocb count to hold the position */ 11133 atomic_inc(&phba->fabric_iocb_count); 11134 } 11135 spin_unlock_irqrestore(&phba->hbalock, iflags); 11136 if (iocb) { 11137 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl; 11138 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb; 11139 iocb->iocb_flag |= LPFC_IO_FABRIC; 11140 11141 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD, 11142 "Fabric sched1: ste:x%x", 11143 iocb->vport->port_state, 0, 0); 11144 11145 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); 11146 11147 if (ret == IOCB_ERROR) { 11148 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl; 11149 iocb->fabric_iocb_cmpl = NULL; 11150 iocb->iocb_flag &= ~LPFC_IO_FABRIC; 11151 cmd = &iocb->iocb; 11152 cmd->ulpStatus = IOSTAT_LOCAL_REJECT; 11153 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED; 11154 iocb->iocb_cmpl(phba, iocb, iocb); 11155 11156 atomic_dec(&phba->fabric_iocb_count); 11157 goto repeat; 11158 } 11159 } 11160 } 11161 11162 /** 11163 * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command 11164 * @phba: pointer to lpfc hba data structure. 11165 * 11166 * This routine unblocks the issuing fabric iocb command. The function 11167 * will clear the fabric iocb block bit and then invoke the routine 11168 * lpfc_resume_fabric_iocbs() to issue one of the pending fabric iocb 11169 * from the driver internal fabric iocb list. 11170 **/ 11171 void 11172 lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba) 11173 { 11174 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 11175 11176 lpfc_resume_fabric_iocbs(phba); 11177 return; 11178 } 11179 11180 /** 11181 * lpfc_block_fabric_iocbs - Block issuing fabric iocb command 11182 * @phba: pointer to lpfc hba data structure. 11183 * 11184 * This routine blocks the issuing fabric iocb for a specified amount of 11185 * time (currently 100 ms). This is done by set the fabric iocb block bit 11186 * and set up a timeout timer for 100ms. When the block bit is set, no more 11187 * fabric iocb will be issued out of the HBA. 11188 **/ 11189 static void 11190 lpfc_block_fabric_iocbs(struct lpfc_hba *phba) 11191 { 11192 int blocked; 11193 11194 blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 11195 /* Start a timer to unblock fabric iocbs after 100ms */ 11196 if (!blocked) 11197 mod_timer(&phba->fabric_block_timer, 11198 jiffies + msecs_to_jiffies(100)); 11199 11200 return; 11201 } 11202 11203 /** 11204 * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb 11205 * @phba: pointer to lpfc hba data structure. 11206 * @cmdiocb: pointer to lpfc command iocb data structure. 11207 * @rspiocb: pointer to lpfc response iocb data structure. 11208 * 11209 * This routine is the callback function that is put to the fabric iocb's 11210 * callback function pointer (iocb->iocb_cmpl). The original iocb's callback 11211 * function pointer has been stored in iocb->fabric_iocb_cmpl. 
This callback 11212 * function first restores and invokes the original iocb's callback function 11213 * and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next 11214 * fabric bound iocb from the driver internal fabric iocb list onto the wire. 11215 **/ 11216 static void 11217 lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11218 struct lpfc_iocbq *rspiocb) 11219 { 11220 struct ls_rjt stat; 11221 11222 BUG_ON((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC); 11223 11224 switch (rspiocb->iocb.ulpStatus) { 11225 case IOSTAT_NPORT_RJT: 11226 case IOSTAT_FABRIC_RJT: 11227 if (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) { 11228 lpfc_block_fabric_iocbs(phba); 11229 } 11230 break; 11231 11232 case IOSTAT_NPORT_BSY: 11233 case IOSTAT_FABRIC_BSY: 11234 lpfc_block_fabric_iocbs(phba); 11235 break; 11236 11237 case IOSTAT_LS_RJT: 11238 stat.un.lsRjtError = 11239 be32_to_cpu(rspiocb->iocb.un.ulpWord[4]); 11240 if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) || 11241 (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY)) 11242 lpfc_block_fabric_iocbs(phba); 11243 break; 11244 } 11245 11246 BUG_ON(atomic_read(&phba->fabric_iocb_count) == 0); 11247 11248 cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl; 11249 cmdiocb->fabric_iocb_cmpl = NULL; 11250 cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC; 11251 cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb); 11252 11253 atomic_dec(&phba->fabric_iocb_count); 11254 if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) { 11255 /* Post any pending iocbs to HBA */ 11256 lpfc_resume_fabric_iocbs(phba); 11257 } 11258 } 11259 11260 /** 11261 * lpfc_issue_fabric_iocb - Issue a fabric iocb command 11262 * @phba: pointer to lpfc hba data structure. 11263 * @iocb: pointer to lpfc command iocb data structure. 11264 * 11265 * This routine is used as the top-level API for issuing a fabric iocb command 11266 * such as FLOGI and FDISC. To accommodate certain switch fabrics, this driver 11267 * function makes sure that only one fabric bound iocb will be outstanding at 11268 * any given time. As such, this function will first check to see whether there 11269 * is already an outstanding fabric iocb on the wire. If so, it will put the 11270 * newly issued iocb onto the driver internal fabric iocb list, waiting to be 11271 * issued later. Otherwise, it will issue the iocb on the wire and update the 11272 * fabric iocb count to indicate that there is one fabric iocb on the wire. 11273 * 11274 * Note that this implementation can potentially send fabric IOCBs out of 11275 * order. The problem is that the construction of the "ready" boolean does 11276 * not include the condition that the internal fabric IOCB list is empty. As 11277 * such, a fabric IOCB issued by this routine might "jump" 11278 * ahead of the fabric IOCBs in the internal list.
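 *
 * For example, if a queued FDISC has just completed but
 * lpfc_resume_fabric_iocbs() has not yet pulled the next entry off the
 * internal list, a fresh fabric IOCB arriving here can observe
 * fabric_iocb_count == 0 and go straight to the wire ahead of the queued
 * entries.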
11279 * 11280 * Return code 11281 * IOCB_SUCCESS - either fabric iocb put on the list or issued successfully 11282 * IOCB_ERROR - failed to issue fabric iocb 11283 **/ 11284 static int 11285 lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb) 11286 { 11287 unsigned long iflags; 11288 int ready; 11289 int ret; 11290 11291 BUG_ON(atomic_read(&phba->fabric_iocb_count) > 1); 11292 11293 spin_lock_irqsave(&phba->hbalock, iflags); 11294 ready = atomic_read(&phba->fabric_iocb_count) == 0 && 11295 !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 11296 11297 if (ready) 11298 /* Increment fabric iocb count to hold the position */ 11299 atomic_inc(&phba->fabric_iocb_count); 11300 spin_unlock_irqrestore(&phba->hbalock, iflags); 11301 if (ready) { 11302 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl; 11303 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb; 11304 iocb->iocb_flag |= LPFC_IO_FABRIC; 11305 11306 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD, 11307 "Fabric sched2: ste:x%x", 11308 iocb->vport->port_state, 0, 0); 11309 11310 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); 11311 11312 if (ret == IOCB_ERROR) { 11313 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl; 11314 iocb->fabric_iocb_cmpl = NULL; 11315 iocb->iocb_flag &= ~LPFC_IO_FABRIC; 11316 atomic_dec(&phba->fabric_iocb_count); 11317 } 11318 } else { 11319 spin_lock_irqsave(&phba->hbalock, iflags); 11320 list_add_tail(&iocb->list, &phba->fabric_iocb_list); 11321 spin_unlock_irqrestore(&phba->hbalock, iflags); 11322 ret = IOCB_SUCCESS; 11323 } 11324 return ret; 11325 } 11326 11327 /** 11328 * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list 11329 * @vport: pointer to a virtual N_Port data structure. 11330 * 11331 * This routine aborts all the IOCBs associated with a @vport from the 11332 * driver internal fabric IOCB list. The list contains fabric IOCBs to be 11333 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB 11334 * list, removes each IOCB associated with the @vport off the list, set the 11335 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function 11336 * associated with the IOCB. 11337 **/ 11338 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport) 11339 { 11340 LIST_HEAD(completions); 11341 struct lpfc_hba *phba = vport->phba; 11342 struct lpfc_iocbq *tmp_iocb, *piocb; 11343 11344 spin_lock_irq(&phba->hbalock); 11345 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, 11346 list) { 11347 11348 if (piocb->vport != vport) 11349 continue; 11350 11351 list_move_tail(&piocb->list, &completions); 11352 } 11353 spin_unlock_irq(&phba->hbalock); 11354 11355 /* Cancel all the IOCBs from the completions list */ 11356 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 11357 IOERR_SLI_ABORTED); 11358 } 11359 11360 /** 11361 * lpfc_fabric_abort_nport - Abort a ndlp's iocbs from driver fabric iocb list 11362 * @ndlp: pointer to a node-list data structure. 11363 * 11364 * This routine aborts all the IOCBs associated with an @ndlp from the 11365 * driver internal fabric IOCB list. The list contains fabric IOCBs to be 11366 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB 11367 * list, removes each IOCB associated with the @ndlp off the list, set the 11368 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function 11369 * associated with the IOCB. 
11370 **/ 11371 void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp) 11372 { 11373 LIST_HEAD(completions); 11374 struct lpfc_hba *phba = ndlp->phba; 11375 struct lpfc_iocbq *tmp_iocb, *piocb; 11376 struct lpfc_sli_ring *pring; 11377 11378 pring = lpfc_phba_elsring(phba); 11379 11380 if (unlikely(!pring)) 11381 return; 11382 11383 spin_lock_irq(&phba->hbalock); 11384 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, 11385 list) { 11386 if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) { 11387 11388 list_move_tail(&piocb->list, &completions); 11389 } 11390 } 11391 spin_unlock_irq(&phba->hbalock); 11392 11393 /* Cancel all the IOCBs from the completions list */ 11394 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 11395 IOERR_SLI_ABORTED); 11396 } 11397 11398 /** 11399 * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list 11400 * @phba: pointer to lpfc hba data structure. 11401 * 11402 * This routine aborts all the IOCBs currently on the driver internal 11403 * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS 11404 * IOCB ring. This function takes the entire IOCB list off the fabric IOCB 11405 * list, removes IOCBs off the list, set the status field to 11406 * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with 11407 * the IOCB. 11408 **/ 11409 void lpfc_fabric_abort_hba(struct lpfc_hba *phba) 11410 { 11411 LIST_HEAD(completions); 11412 11413 spin_lock_irq(&phba->hbalock); 11414 list_splice_init(&phba->fabric_iocb_list, &completions); 11415 spin_unlock_irq(&phba->hbalock); 11416 11417 /* Cancel all the IOCBs from the completions list */ 11418 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 11419 IOERR_SLI_ABORTED); 11420 } 11421 11422 /** 11423 * lpfc_sli4_vport_delete_els_xri_aborted -Remove all ndlp references for vport 11424 * @vport: pointer to lpfc vport data structure. 11425 * 11426 * This routine is invoked by the vport cleanup for deletions and the cleanup 11427 * for an ndlp on removal. 11428 **/ 11429 void 11430 lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport) 11431 { 11432 struct lpfc_hba *phba = vport->phba; 11433 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 11434 struct lpfc_nodelist *ndlp = NULL; 11435 unsigned long iflag = 0; 11436 11437 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag); 11438 list_for_each_entry_safe(sglq_entry, sglq_next, 11439 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { 11440 if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport) { 11441 lpfc_nlp_put(sglq_entry->ndlp); 11442 ndlp = sglq_entry->ndlp; 11443 sglq_entry->ndlp = NULL; 11444 11445 /* If the xri on the abts_els_sgl list is for the Fport 11446 * node and the vport is unloading, the xri aborted wcqe 11447 * likely isn't coming back. Just release the sgl. 11448 */ 11449 if ((vport->load_flag & FC_UNLOADING) && 11450 ndlp->nlp_DID == Fabric_DID) { 11451 list_del(&sglq_entry->list); 11452 sglq_entry->state = SGL_FREED; 11453 list_add_tail(&sglq_entry->list, 11454 &phba->sli4_hba.lpfc_els_sgl_list); 11455 } 11456 } 11457 } 11458 spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag); 11459 return; 11460 } 11461 11462 /** 11463 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort 11464 * @phba: pointer to lpfc hba data structure. 11465 * @axri: pointer to the els xri abort wcqe structure. 11466 * 11467 * This routine is invoked by the worker thread to process a SLI4 slow-path 11468 * ELS aborted xri. 
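 *
 * If the XRI is found on the aborted-ELS sgl list, the sglq is moved back to
 * the free list and marked SGL_FREED, an RRQ is set active for the node that
 * owned it, and the worker thread is woken if the ELS ring still has transmit
 * work queued. Otherwise the active sglq, if any, is marked SGL_XRI_ABORTED.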
11469 **/ 11470 void 11471 lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba, 11472 struct sli4_wcqe_xri_aborted *axri) 11473 { 11474 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); 11475 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); 11476 uint16_t lxri = 0; 11477 11478 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 11479 unsigned long iflag = 0; 11480 struct lpfc_nodelist *ndlp; 11481 struct lpfc_sli_ring *pring; 11482 11483 pring = lpfc_phba_elsring(phba); 11484 11485 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag); 11486 list_for_each_entry_safe(sglq_entry, sglq_next, 11487 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { 11488 if (sglq_entry->sli4_xritag == xri) { 11489 list_del(&sglq_entry->list); 11490 ndlp = sglq_entry->ndlp; 11491 sglq_entry->ndlp = NULL; 11492 list_add_tail(&sglq_entry->list, 11493 &phba->sli4_hba.lpfc_els_sgl_list); 11494 sglq_entry->state = SGL_FREED; 11495 spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, 11496 iflag); 11497 11498 if (ndlp) { 11499 lpfc_set_rrq_active(phba, ndlp, 11500 sglq_entry->sli4_lxritag, 11501 rxid, 1); 11502 lpfc_nlp_put(ndlp); 11503 } 11504 11505 /* Check if TXQ queue needs to be serviced */ 11506 if (pring && !list_empty(&pring->txq)) 11507 lpfc_worker_wake_up(phba); 11508 return; 11509 } 11510 } 11511 spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag); 11512 lxri = lpfc_sli4_xri_inrange(phba, xri); 11513 if (lxri == NO_XRI) 11514 return; 11515 11516 spin_lock_irqsave(&phba->hbalock, iflag); 11517 sglq_entry = __lpfc_get_active_sglq(phba, lxri); 11518 if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) { 11519 spin_unlock_irqrestore(&phba->hbalock, iflag); 11520 return; 11521 } 11522 sglq_entry->state = SGL_XRI_ABORTED; 11523 spin_unlock_irqrestore(&phba->hbalock, iflag); 11524 return; 11525 } 11526 11527 /* lpfc_sli_abts_recover_port - Recover a port that failed a BLS_ABORT req. 11528 * @vport: pointer to virtual port object. 11529 * @ndlp: nodelist pointer for the impacted node. 11530 * 11531 * The driver calls this routine in response to an SLI4 XRI ABORT CQE 11532 * or an SLI3 ASYNC_STATUS_CN event from the port. For either event, 11533 * the driver is required to send a LOGO to the remote node before it 11534 * attempts to recover its login to the remote node. 11535 */ 11536 void 11537 lpfc_sli_abts_recover_port(struct lpfc_vport *vport, 11538 struct lpfc_nodelist *ndlp) 11539 { 11540 struct Scsi_Host *shost; 11541 struct lpfc_hba *phba; 11542 unsigned long flags = 0; 11543 11544 shost = lpfc_shost_from_vport(vport); 11545 phba = vport->phba; 11546 if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) { 11547 lpfc_printf_log(phba, KERN_INFO, 11548 LOG_SLI, "3093 No rport recovery needed. " 11549 "rport in state 0x%x\n", ndlp->nlp_state); 11550 return; 11551 } 11552 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11553 "3094 Start rport recovery on shost id 0x%x " 11554 "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x " 11555 "flags 0x%x\n", 11556 shost->host_no, ndlp->nlp_DID, 11557 vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state, 11558 ndlp->nlp_flag); 11559 /* 11560 * The rport is not responding. Remove the FCP-2 flag to prevent 11561 * an ADISC in the follow-up recovery code. 
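 * NLP_ISSUE_LOGO is also set so the recovery path sends the required LOGO
 * first, and the RPI is unregistered below via lpfc_unreg_rpi().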
11562 */ 11563 spin_lock_irqsave(&ndlp->lock, flags); 11564 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 11565 ndlp->nlp_flag |= NLP_ISSUE_LOGO; 11566 spin_unlock_irqrestore(&ndlp->lock, flags); 11567 lpfc_unreg_rpi(vport, ndlp); 11568 } 11569 11570 static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport) 11571 { 11572 bitmap_zero(vport->vmid_priority_range, LPFC_VMID_MAX_PRIORITY_RANGE); 11573 } 11574 11575 static void 11576 lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max) 11577 { 11578 u32 i; 11579 11580 if ((min > max) || (max > LPFC_VMID_MAX_PRIORITY_RANGE)) 11581 return; 11582 11583 for (i = min; i <= max; i++) 11584 set_bit(i, vport->vmid_priority_range); 11585 } 11586 11587 static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid) 11588 { 11589 set_bit(ctcl_vmid, vport->vmid_priority_range); 11590 } 11591 11592 u32 lpfc_vmid_get_cs_ctl(struct lpfc_vport *vport) 11593 { 11594 u32 i; 11595 11596 i = find_first_bit(vport->vmid_priority_range, 11597 LPFC_VMID_MAX_PRIORITY_RANGE); 11598 11599 if (i == LPFC_VMID_MAX_PRIORITY_RANGE) 11600 return 0; 11601 11602 clear_bit(i, vport->vmid_priority_range); 11603 return i; 11604 } 11605 11606 #define MAX_PRIORITY_DESC 255 11607 11608 static void 11609 lpfc_cmpl_els_qfpa(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11610 struct lpfc_iocbq *rspiocb) 11611 { 11612 struct lpfc_vport *vport = cmdiocb->vport; 11613 struct priority_range_desc *desc; 11614 struct lpfc_dmabuf *prsp = NULL; 11615 struct lpfc_vmid_priority_range *vmid_range = NULL; 11616 u32 *data; 11617 struct lpfc_dmabuf *dmabuf = cmdiocb->context2; 11618 IOCB_t *irsp = &rspiocb->iocb; 11619 u8 *pcmd, max_desc; 11620 u32 len, i; 11621 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)cmdiocb->context1; 11622 11623 prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list); 11624 if (!prsp) 11625 goto out; 11626 11627 pcmd = prsp->virt; 11628 data = (u32 *)pcmd; 11629 if (data[0] == ELS_CMD_LS_RJT) { 11630 lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI, 11631 "3277 QFPA LS_RJT x%x x%x\n", 11632 data[0], data[1]); 11633 goto out; 11634 } 11635 if (irsp->ulpStatus) { 11636 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI, 11637 "6529 QFPA failed with status x%x x%x\n", 11638 irsp->ulpStatus, irsp->un.ulpWord[4]); 11639 goto out; 11640 } 11641 11642 if (!vport->qfpa_res) { 11643 max_desc = FCELSSIZE / sizeof(*vport->qfpa_res); 11644 vport->qfpa_res = kcalloc(max_desc, sizeof(*vport->qfpa_res), 11645 GFP_KERNEL); 11646 if (!vport->qfpa_res) 11647 goto out; 11648 } 11649 11650 len = *((u32 *)(pcmd + 4)); 11651 len = be32_to_cpu(len); 11652 memcpy(vport->qfpa_res, pcmd, len + 8); 11653 len = len / LPFC_PRIORITY_RANGE_DESC_SIZE; 11654 11655 desc = (struct priority_range_desc *)(pcmd + 8); 11656 vmid_range = vport->vmid_priority.vmid_range; 11657 if (!vmid_range) { 11658 vmid_range = kcalloc(MAX_PRIORITY_DESC, sizeof(*vmid_range), 11659 GFP_KERNEL); 11660 if (!vmid_range) { 11661 kfree(vport->qfpa_res); 11662 goto out; 11663 } 11664 vport->vmid_priority.vmid_range = vmid_range; 11665 } 11666 vport->vmid_priority.num_descriptors = len; 11667 11668 for (i = 0; i < len; i++, vmid_range++, desc++) { 11669 lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS, 11670 "6539 vmid values low=%d, high=%d, qos=%d, " 11671 "local ve id=%d\n", desc->lo_range, 11672 desc->hi_range, desc->qos_priority, 11673 desc->local_ve_id); 11674 11675 vmid_range->low = desc->lo_range << 1; 11676 if (desc->local_ve_id == QFPA_ODD_ONLY) 11677 vmid_range->low++; 11678 if (desc->qos_priority) 
11679 vport->vmid_flag |= LPFC_VMID_QOS_ENABLED; 11680 vmid_range->qos = desc->qos_priority; 11681 11682 vmid_range->high = desc->hi_range << 1; 11683 if ((desc->local_ve_id == QFPA_ODD_ONLY) || 11684 (desc->local_ve_id == QFPA_EVEN_ODD)) 11685 vmid_range->high++; 11686 } 11687 lpfc_init_cs_ctl_bitmap(vport); 11688 for (i = 0; i < vport->vmid_priority.num_descriptors; i++) { 11689 lpfc_vmid_set_cs_ctl_range(vport, 11690 vport->vmid_priority.vmid_range[i].low, 11691 vport->vmid_priority.vmid_range[i].high); 11692 } 11693 11694 vport->vmid_flag |= LPFC_VMID_QFPA_CMPL; 11695 out: 11696 lpfc_els_free_iocb(phba, cmdiocb); 11697 lpfc_nlp_put(ndlp); 11698 } 11699 11700 int lpfc_issue_els_qfpa(struct lpfc_vport *vport) 11701 { 11702 struct lpfc_hba *phba = vport->phba; 11703 struct lpfc_nodelist *ndlp; 11704 struct lpfc_iocbq *elsiocb; 11705 u8 *pcmd; 11706 int ret; 11707 11708 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); 11709 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 11710 return -ENXIO; 11711 11712 elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_QFPA_SIZE, 2, ndlp, 11713 ndlp->nlp_DID, ELS_CMD_QFPA); 11714 if (!elsiocb) 11715 return -ENOMEM; 11716 11717 pcmd = (u8 *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt); 11718 11719 *((u32 *)(pcmd)) = ELS_CMD_QFPA; 11720 pcmd += 4; 11721 11722 elsiocb->iocb_cmpl = lpfc_cmpl_els_qfpa; 11723 11724 elsiocb->context1 = lpfc_nlp_get(ndlp); 11725 if (!elsiocb->context1) { 11726 lpfc_els_free_iocb(vport->phba, elsiocb); 11727 return -ENXIO; 11728 } 11729 11730 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 2); 11731 if (ret != IOCB_SUCCESS) { 11732 lpfc_els_free_iocb(phba, elsiocb); 11733 lpfc_nlp_put(ndlp); 11734 return -EIO; 11735 } 11736 vport->vmid_flag &= ~LPFC_VMID_QOS_ENABLED; 11737 return 0; 11738 } 11739 11740 int 11741 lpfc_vmid_uvem(struct lpfc_vport *vport, 11742 struct lpfc_vmid *vmid, bool instantiated) 11743 { 11744 struct lpfc_vem_id_desc *vem_id_desc; 11745 struct lpfc_nodelist *ndlp; 11746 struct lpfc_iocbq *elsiocb; 11747 struct instantiated_ve_desc *inst_desc; 11748 struct lpfc_vmid_context *vmid_context; 11749 u8 *pcmd; 11750 u32 *len; 11751 int ret = 0; 11752 11753 ndlp = lpfc_findnode_did(vport, Fabric_DID); 11754 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 11755 return -ENXIO; 11756 11757 vmid_context = kmalloc(sizeof(*vmid_context), GFP_KERNEL); 11758 if (!vmid_context) 11759 return -ENOMEM; 11760 elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_UVEM_SIZE, 2, 11761 ndlp, Fabric_DID, ELS_CMD_UVEM); 11762 if (!elsiocb) 11763 goto out; 11764 11765 lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS, 11766 "3427 Host vmid %s %d\n", 11767 vmid->host_vmid, instantiated); 11768 vmid_context->vmp = vmid; 11769 vmid_context->nlp = ndlp; 11770 vmid_context->instantiated = instantiated; 11771 elsiocb->vmid_tag.vmid_context = vmid_context; 11772 pcmd = (u8 *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt); 11773 11774 if (uuid_is_null((uuid_t *)vport->lpfc_vmid_host_uuid)) 11775 memcpy(vport->lpfc_vmid_host_uuid, vmid->host_vmid, 11776 LPFC_COMPRESS_VMID_SIZE); 11777 11778 *((u32 *)(pcmd)) = ELS_CMD_UVEM; 11779 len = (u32 *)(pcmd + 4); 11780 *len = cpu_to_be32(LPFC_UVEM_SIZE - 8); 11781 11782 vem_id_desc = (struct lpfc_vem_id_desc *)(pcmd + 8); 11783 vem_id_desc->tag = be32_to_cpu(VEM_ID_DESC_TAG); 11784 vem_id_desc->length = be32_to_cpu(LPFC_UVEM_VEM_ID_DESC_SIZE); 11785 memcpy(vem_id_desc->vem_id, vport->lpfc_vmid_host_uuid, 11786 LPFC_COMPRESS_VMID_SIZE); 11787 11788 inst_desc = (struct instantiated_ve_desc *)(pcmd + 
32); 11789 inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG); 11790 inst_desc->length = be32_to_cpu(LPFC_UVEM_VE_MAP_DESC_SIZE); 11791 memcpy(inst_desc->global_vem_id, vmid->host_vmid, 11792 LPFC_COMPRESS_VMID_SIZE); 11793 11794 bf_set(lpfc_instantiated_nport_id, inst_desc, vport->fc_myDID); 11795 bf_set(lpfc_instantiated_local_id, inst_desc, 11796 vmid->un.cs_ctl_vmid); 11797 if (instantiated) { 11798 inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG); 11799 } else { 11800 inst_desc->tag = be32_to_cpu(DEINSTANTIATED_VE_DESC_TAG); 11801 lpfc_vmid_put_cs_ctl(vport, vmid->un.cs_ctl_vmid); 11802 } 11803 inst_desc->word6 = cpu_to_be32(inst_desc->word6); 11804 11805 elsiocb->iocb_cmpl = lpfc_cmpl_els_uvem; 11806 11807 elsiocb->context1 = lpfc_nlp_get(ndlp); 11808 if (!elsiocb->context1) { 11809 lpfc_els_free_iocb(vport->phba, elsiocb); 11810 goto out; 11811 } 11812 11813 ret = lpfc_sli_issue_iocb(vport->phba, LPFC_ELS_RING, elsiocb, 0); 11814 if (ret != IOCB_SUCCESS) { 11815 lpfc_els_free_iocb(vport->phba, elsiocb); 11816 lpfc_nlp_put(ndlp); 11817 goto out; 11818 } 11819 11820 return 0; 11821 out: 11822 kfree(vmid_context); 11823 return -EIO; 11824 } 11825 11826 static void 11827 lpfc_cmpl_els_uvem(struct lpfc_hba *phba, struct lpfc_iocbq *icmdiocb, 11828 struct lpfc_iocbq *rspiocb) 11829 { 11830 struct lpfc_vport *vport = icmdiocb->vport; 11831 struct lpfc_dmabuf *prsp = NULL; 11832 struct lpfc_vmid_context *vmid_context = 11833 icmdiocb->vmid_tag.vmid_context; 11834 struct lpfc_nodelist *ndlp = icmdiocb->context1; 11835 u8 *pcmd; 11836 u32 *data; 11837 IOCB_t *irsp = &rspiocb->iocb; 11838 struct lpfc_dmabuf *dmabuf = icmdiocb->context2; 11839 struct lpfc_vmid *vmid; 11840 11841 vmid = vmid_context->vmp; 11842 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 11843 ndlp = NULL; 11844 11845 prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list); 11846 if (!prsp) 11847 goto out; 11848 pcmd = prsp->virt; 11849 data = (u32 *)pcmd; 11850 if (data[0] == ELS_CMD_LS_RJT) { 11851 lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI, 11852 "4532 UVEM LS_RJT %x %x\n", data[0], data[1]); 11853 goto out; 11854 } 11855 if (irsp->ulpStatus) { 11856 lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI, 11857 "4533 UVEM error status %x: %x\n", 11858 irsp->ulpStatus, irsp->un.ulpWord[4]); 11859 goto out; 11860 } 11861 spin_lock(&phba->hbalock); 11862 /* Set IN USE flag */ 11863 vport->vmid_flag |= LPFC_VMID_IN_USE; 11864 phba->pport->vmid_flag |= LPFC_VMID_IN_USE; 11865 spin_unlock(&phba->hbalock); 11866 11867 if (vmid_context->instantiated) { 11868 write_lock(&vport->vmid_lock); 11869 vmid->flag |= LPFC_VMID_REGISTERED; 11870 vmid->flag &= ~LPFC_VMID_REQ_REGISTER; 11871 write_unlock(&vport->vmid_lock); 11872 } 11873 11874 out: 11875 kfree(vmid_context); 11876 lpfc_els_free_iocb(phba, icmdiocb); 11877 lpfc_nlp_put(ndlp); 11878 } 11879
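/*
 * Illustrative only: a rough sketch of how the CS_CTL/VMID helpers above fit
 * together. The vmid object, its allocation, and the error handling are
 * assumptions made for the example; this is not a verbatim driver code path.
 *
 *	Query the fabric for the supported priority ranges:
 *		if (lpfc_issue_els_qfpa(vport))
 *			bail out; no QFPA went out
 *
 *	Once lpfc_cmpl_els_qfpa() has filled vport->vmid_priority and the
 *	CS_CTL bitmap, take a value and register the VEM instance:
 *		vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport);
 *		lpfc_vmid_uvem(vport, vmid, true);
 *
 *	Deinstantiating (lpfc_vmid_uvem(vport, vmid, false)) returns the
 *	CS_CTL value to the bitmap through lpfc_vmid_put_cs_ctl().
 */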