/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
/* See Fibre Channel protocol T11 FC-LS for details */
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
			  struct lpfc_iocbq *);
static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
				  struct lpfc_iocbq *);
static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
				struct lpfc_nodelist *ndlp, uint8_t retry);
static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
				  struct lpfc_iocbq *iocb);
static void lpfc_cmpl_els_edc(struct lpfc_hba *phba,
			      struct lpfc_iocbq *cmdiocb,
			      struct lpfc_iocbq *rspiocb);
static void lpfc_cmpl_els_uvem(struct lpfc_hba *, struct lpfc_iocbq *,
			       struct lpfc_iocbq *);

static int lpfc_max_els_tries = 3;

static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport);
static void lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max);
static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid);

/**
 * lpfc_els_chk_latt - Check host link attention event for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine checks whether there is an outstanding host link
 * attention event during the discovery process with the @vport. It is done
 * by reading the HBA's Host Attention (HA) register. If there is any host
 * link attention events during this @vport's discovery process, the @vport
 * shall be marked as FC_ABORT_DISCOVERY, a host link attention clear shall
 * be issued if the link state is not already in host link cleared state,
 * and a return code shall indicate whether the host link attention event
 * had happened.
 *
 * Note that, if either the host link is in state LPFC_LINK_DOWN or @vport
 * state in LPFC_VPORT_READY, the request for checking host link attention
 * event will be ignored and a return code shall indicate no host link
 * attention event had happened.
 *
 * Return codes
 *   0 - no host link attention event happened
 *   1 - host link attention event happened
 **/
int
lpfc_els_chk_latt(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	uint32_t ha_copy;

	if (vport->port_state >= LPFC_VPORT_READY ||
	    phba->link_state == LPFC_LINK_DOWN ||
	    phba->sli_rev > LPFC_SLI_REV3)
		return 0;

	/* Read the HBA Host Attention Register */
	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		return 1;

	if (!(ha_copy & HA_LATT))
		return 0;

	/* Pending Link Event during Discovery */
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0237 Pending Link Event during "
			 "Discovery: State x%x\n",
			 phba->pport->port_state);

	/* CLEAR_LA should re-enable link attention events and
	 * we should then immediately take a LATT event. The
	 * LATT processing should call lpfc_linkdown() which
	 * will cleanup any left over in-progress discovery
	 * events.
	 */
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_ABORT_DISCOVERY;
	spin_unlock_irq(shost->host_lock);

	if (phba->link_state != LPFC_CLEAR_LA)
		lpfc_issue_clear_la(phba, vport);

	return 1;
}

/**
 * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
 * @vport: pointer to a host virtual N_Port data structure.
 * @expectRsp: flag indicating whether response is expected.
 * @cmdSize: size of the ELS command.
 * @retry: number of retries to the command IOCB when it fails.
 * @ndlp: pointer to a node-list data structure.
 * @did: destination identifier.
 * @elscmd: the ELS command code.
 *
 * This routine is used for allocating a lpfc-IOCB data structure from
 * the driver lpfc-IOCB free-list and prepare the IOCB with the parameters
 * passed into the routine for discovery state machine to issue an Extended
 * Link Service (ELS) commands. It is a generic lpfc-IOCB allocation
 * and preparation routine that is used by all the discovery state machine
 * routines and the ELS command-specific fields will be later set up by
 * the individual discovery machine routines after calling this routine
 * allocating and preparing a generic IOCB data structure. It fills in the
 * Buffer Descriptor Entries (BDEs), allocates buffers for both command
 * payload and response payload (if expected). The reference count on the
 * ndlp is incremented by 1 and the reference to the ndlp is put into
 * context1 of the IOCB data structure for this IOCB to hold the ndlp
 * reference for the command's callback function to access later.
 *
 * Return code
 *   Pointer to the newly allocated/prepared els iocb data structure
 *   NULL - when els iocb data structure allocation/preparation failed
 **/
struct lpfc_iocbq *
lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
		   uint16_t cmdSize, uint8_t retry,
		   struct lpfc_nodelist *ndlp, uint32_t did,
		   uint32_t elscmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
	struct ulp_bde64 *bpl;
	IOCB_t *icmd;


	if (!lpfc_is_link_up(phba))
		return NULL;

	/* Allocate buffer for command iocb */
	elsiocb = lpfc_sli_get_iocbq(phba);

	if (elsiocb == NULL)
		return NULL;

	/*
	 * If this command is for fabric controller and HBA running
	 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
	 */
	if ((did == Fabric_DID) &&
	    (phba->hba_flag & HBA_FIP_SUPPORT) &&
	    ((elscmd == ELS_CMD_FLOGI) ||
	     (elscmd == ELS_CMD_FDISC) ||
	     (elscmd == ELS_CMD_LOGO)))
		switch (elscmd) {
		case ELS_CMD_FLOGI:
			elsiocb->iocb_flag |=
				((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		case ELS_CMD_FDISC:
			elsiocb->iocb_flag |=
				((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		case ELS_CMD_LOGO:
			elsiocb->iocb_flag |=
				((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		}
	else
		elsiocb->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;

	icmd = &elsiocb->iocb;

	/* fill in BDEs for command */
	/* Allocate buffer for command payload */
	pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (pcmd)
		pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
	if (!pcmd || !pcmd->virt)
		goto els_iocb_free_pcmb_exit;

	INIT_LIST_HEAD(&pcmd->list);

	/* Allocate buffer for response payload */
	if (expectRsp) {
		prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (prsp)
			prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
						     &prsp->phys);
		if (!prsp || !prsp->virt)
			goto els_iocb_free_prsp_exit;
		INIT_LIST_HEAD(&prsp->list);
	} else
		prsp = NULL;

	/* Allocate buffer for Buffer ptr list */
	pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (pbuflist)
		pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
						 &pbuflist->phys);
	if (!pbuflist || !pbuflist->virt)
		goto els_iocb_free_pbuf_exit;

	INIT_LIST_HEAD(&pbuflist->list);

	if (expectRsp) {
		icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
		icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
		icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
		icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));

		icmd->un.elsreq64.remoteID = did;	/* DID */
		icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
		if (elscmd == ELS_CMD_FLOGI)
			icmd->ulpTimeout = FF_DEF_RATOV * 2;
		else if (elscmd == ELS_CMD_LOGO)
			icmd->ulpTimeout = phba->fc_ratov;
		else
			icmd->ulpTimeout = phba->fc_ratov * 2;
	} else {
		icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
		icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
		icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
		icmd->un.xseq64.bdl.bdeSize = sizeof(struct ulp_bde64);
		icmd->un.xseq64.xmit_els_remoteID = did;	/* DID */
		icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
	}
	icmd->ulpBdeCount = 1;
	icmd->ulpLe = 1;
	icmd->ulpClass = CLASS3;

	/*
	 * If we have NPIV enabled, we want to send ELS traffic by VPI.
	 * For SLI4, since the driver controls VPIs we also want to include
	 * all ELS pt2pt protocol traffic as well.
	 */
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
	    ((phba->sli_rev == LPFC_SLI_REV4) &&
	     (vport->fc_flag & FC_PT2PT))) {

		if (expectRsp) {
			icmd->un.elsreq64.myID = vport->fc_myDID;

			/* For ELS_REQUEST64_CR, use the VPI by default */
			icmd->ulpContext = phba->vpi_ids[vport->vpi];
		}

		icmd->ulpCt_h = 0;
		/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
		if (elscmd == ELS_CMD_ECHO)
			icmd->ulpCt_l = 0; /* context = invalid RPI */
		else
			icmd->ulpCt_l = 1; /* context = VPI */
	}

	bpl = (struct ulp_bde64 *) pbuflist->virt;
	bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
	bpl->tus.f.bdeSize = cmdSize;
	bpl->tus.f.bdeFlags = 0;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

	if (expectRsp) {
		bpl++;
		bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
		bpl->tus.f.bdeSize = FCELSSIZE;
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
	}

	elsiocb->context2 = pcmd;
	elsiocb->context3 = pbuflist;
	elsiocb->retry = retry;
	elsiocb->vport = vport;
	elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;

	if (prsp) {
		list_add(&prsp->list, &pcmd->list);
	}
	if (expectRsp) {
		/* Xmit ELS command <elsCmd> to remote NPORT <did> */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "0116 Xmit ELS command x%x to remote "
				 "NPORT x%x I/O tag: x%x, port state:x%x "
				 "rpi x%x fc_flag:x%x nlp_flag:x%x vport:x%p\n",
				 elscmd, did, elsiocb->iotag,
				 vport->port_state, ndlp->nlp_rpi,
				 vport->fc_flag, ndlp->nlp_flag, vport);
	} else {
		/* Xmit ELS response <elsCmd> to remote NPORT <did> */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "0117 Xmit ELS response x%x to remote "
				 "NPORT x%x I/O tag: x%x, size: x%x "
				 "port_state x%x rpi x%x fc_flag x%x\n",
				 elscmd, ndlp->nlp_DID, elsiocb->iotag,
				 cmdSize, vport->port_state,
				 ndlp->nlp_rpi, vport->fc_flag);
	}
	return elsiocb;

els_iocb_free_pbuf_exit:
	if (expectRsp)
		lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
	kfree(pbuflist);

els_iocb_free_prsp_exit:
	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
	kfree(prsp);

els_iocb_free_pcmb_exit:
	kfree(pcmd);
	lpfc_sli_release_iocbq(phba, elsiocb);
	return NULL;
}

/**
 * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a fabric registration login for a @vport. An
 * active ndlp node with Fabric_DID must already exist for this @vport.
 * The routine invokes two mailbox commands to carry out fabric registration
 * login through the HBA firmware: the first mailbox command requests the
 * HBA to perform link configuration for the @vport; and the second mailbox
 * command requests the HBA to perform the actual fabric registration login
 * with the @vport.
 *
 * Return code
 *   0 - successfully issued fabric registration login for @vport
 *   -ENXIO -- failed to issue fabric registration login for @vport
 **/
int
lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct serv_parm *sp;
	int rc;
	int err = 0;

	sp = &phba->fc_fabparam;
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		err = 1;
		goto fail;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		err = 2;
		goto fail;
	}

	vport->port_state = LPFC_FABRIC_CFG_LINK;
	lpfc_config_link(phba, mbox);
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->vport = vport;

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		err = 3;
		goto fail_free_mbox;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		err = 4;
		goto fail;
	}
	rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
			  ndlp->nlp_rpi);
	if (rc) {
		err = 5;
		goto fail_free_mbox;
	}

	mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
	mbox->vport = vport;
	/* increment the reference count on ndlp to hold reference
	 * for the callback routine.
	 */
	mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
	if (!mbox->ctx_ndlp) {
		err = 6;
		goto fail_no_ndlp;
	}

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		err = 7;
		goto fail_issue_reg_login;
	}

	return 0;

fail_issue_reg_login:
	/* decrement the reference count on ndlp just incremented
	 * for the failed mbox command.
	 */
	lpfc_nlp_put(ndlp);
fail_no_ndlp:
	mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
fail_free_mbox:
	mempool_free(mbox, phba->mbox_mem_pool);

fail:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0249 Cannot issue Register Fabric login: Err %d\n",
			 err);
	return -ENXIO;
}

/**
 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
 * the @vport. This mailbox command is necessary for SLI4 port only.
 *
 * Return code
 *   0 - successfully issued REG_VFI for @vport
 *   A failure code otherwise.
 **/
int
lpfc_issue_reg_vfi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mboxq = NULL;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *dmabuf = NULL;
	int rc = 0;

	/* move forward in case of SLI4 FC port loopback test and pt2pt mode */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    !(phba->link_flag & LS_LOOPBACK_MODE) &&
	    !(vport->fc_flag & FC_PT2PT)) {
		ndlp = lpfc_findnode_did(vport, Fabric_DID);
		if (!ndlp) {
			rc = -ENODEV;
			goto fail;
		}
	}

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		rc = -ENOMEM;
		goto fail;
	}

	/* Supply CSP's only if we are fabric connect or pt-to-pt connect */
	if ((vport->fc_flag & FC_FABRIC) || (vport->fc_flag & FC_PT2PT)) {
		dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (!dmabuf) {
			rc = -ENOMEM;
			goto fail;
		}
		dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
		if (!dmabuf->virt) {
			rc = -ENOMEM;
			goto fail;
		}
		memcpy(dmabuf->virt, &phba->fc_fabparam,
		       sizeof(struct serv_parm));
	}

	vport->port_state = LPFC_FABRIC_CFG_LINK;
	if (dmabuf)
		lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
	else
		lpfc_reg_vfi(mboxq, vport, 0);

	mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
	mboxq->vport = vport;
	mboxq->ctx_buf = dmabuf;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = -ENXIO;
		goto fail;
	}
	return 0;

fail:
	if (mboxq)
		mempool_free(mboxq, phba->mbox_mem_pool);
	if (dmabuf) {
		if (dmabuf->virt)
			lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}

	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0289 Issue Register VFI failed: Err %d\n", rc);
	return rc;
}

/**
 * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for
 * the @vport. This mailbox command is necessary for SLI4 port only.
 *
 * Return code
 *   0 - successfully issued UNREG_VFI for @vport
 *   A failure code otherwise.
 **/
int
lpfc_issue_unreg_vfi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct Scsi_Host *shost;
	LPFC_MBOXQ_t *mboxq;
	int rc;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2556 UNREG_VFI mbox allocation failed"
				"HBA state x%x\n", phba->pport->port_state);
		return -ENOMEM;
	}

	lpfc_unreg_vfi(mboxq, vport);
	mboxq->vport = vport;
	mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl;

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2557 UNREG_VFI issue mbox failed rc x%x "
				"HBA state x%x\n",
				rc, phba->pport->port_state);
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EIO;
	}

	shost = lpfc_shost_from_vport(vport);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VFI_REGISTERED;
	spin_unlock_irq(shost->host_lock);
	return 0;
}

/**
 * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
 * @vport: pointer to a host virtual N_Port data structure.
 * @sp: pointer to service parameter data structure.
 *
 * This routine is called from the FLOGI/FDISC completion handlers.
 * lpfc_check_clean_addr_bit returns 1 when the FCID, Fabric portname, or
 * Fabric nodename has changed in the completion service parameters;
 * otherwise it returns 0. This function also sets a flag in the vport data
 * structure to delay N_Port discovery after the FLOGI/FDISC completion if
 * the Clean Address bit in the FLOGI/FDISC response is cleared and the
 * FCID, Fabric portname, or Fabric nodename has changed in the completion
 * service parameters.
 *
 * Return code
 *   0 - FCID, Fabric nodename, and Fabric portname are unchanged.
 *   1 - FCID, Fabric nodename, or Fabric portname has changed.
 *
 **/
static uint8_t
lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
			  struct serv_parm *sp)
{
	struct lpfc_hba *phba = vport->phba;
	uint8_t fabric_param_changed = 0;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if ((vport->fc_prevDID != vport->fc_myDID) ||
	    memcmp(&vport->fabric_portname, &sp->portName,
		   sizeof(struct lpfc_name)) ||
	    memcmp(&vport->fabric_nodename, &sp->nodeName,
		   sizeof(struct lpfc_name)) ||
	    (vport->vport_flag & FAWWPN_PARAM_CHG)) {
		fabric_param_changed = 1;
		vport->vport_flag &= ~FAWWPN_PARAM_CHG;
	}
	/*
	 * Word 1 Bit 31 in common service parameter is overloaded.
	 * Word 1 Bit 31 in FLOGI request is multiple NPort request
	 * Word 1 Bit 31 in FLOGI response is clean address bit
	 *
	 * If fabric parameter is changed and clean address bit is
	 * cleared delay nport discovery if
	 * - vport->fc_prevDID != 0 (not initial discovery) OR
	 * - lpfc_delay_discovery module parameter is set.
	 */
	if (fabric_param_changed && !sp->cmn.clean_address_bit &&
	    (vport->fc_prevDID || phba->cfg_delay_discovery)) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_DISC_DELAYED;
		spin_unlock_irq(shost->host_lock);
	}

	return fabric_param_changed;
}


/**
 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 * @irsp: pointer to the IOCB within the lpfc response IOCB.
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into a fabric
 * port in a fabric topology. It properly sets up the parameters to the @ndlp
 * from the IOCB response. It also checks the newly assigned N_Port ID to the
 * @vport against the previously assigned N_Port ID. If it is different from
 * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine
 * is invoked on all the remaining nodes with the @vport to unregister the
 * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin()
 * is invoked to register login to the fabric.
 *
 * Return code
 *   0 - Success (currently, always return 0)
 **/
static int
lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   struct serv_parm *sp, IOCB_t *irsp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *np;
	struct lpfc_nodelist *next_np;
	uint8_t fabric_param_changed;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_FABRIC;
	spin_unlock_irq(shost->host_lock);

	phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
	if (sp->cmn.edtovResolution)	/* E_D_TOV ticks are in nanoseconds */
		phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;

	phba->fc_edtovResol = sp->cmn.edtovResolution;
	phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;

	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_PUBLIC_LOOP;
		spin_unlock_irq(shost->host_lock);
	}

	vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
	ndlp->nlp_class_sup = 0;
	if (sp->cls1.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS1;
	if (sp->cls2.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS2;
	if (sp->cls3.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS3;
	if (sp->cls4.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS4;
	ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
			     sp->cmn.bbRcvSizeLsb;

	fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
	if (fabric_param_changed) {
		/* Reset FDMI attribute masks based on config parameter */
		if (phba->cfg_enable_SmartSAN ||
		    (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
			/* Setup appropriate attribute masks */
			vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
			if (phba->cfg_enable_SmartSAN)
				vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
			else
				vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
		} else {
			vport->fdmi_hba_mask = 0;
			vport->fdmi_port_mask = 0;
		}

	}
	memcpy(&vport->fabric_portname, &sp->portName,
	       sizeof(struct lpfc_name));
	memcpy(&vport->fabric_nodename, &sp->nodeName,
	       sizeof(struct lpfc_name));
	memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));

	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
		if (sp->cmn.response_multiple_NPort) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_ELS | LOG_VPORT,
					 "1816 FLOGI NPIV supported, "
					 "response data 0x%x\n",
					 sp->cmn.response_multiple_NPort);
			spin_lock_irq(&phba->hbalock);
			phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
			spin_unlock_irq(&phba->hbalock);
		} else {
			/* Because we asked f/w for NPIV it still expects us
			 * to call reg_vnpid at least for the physical host
			 */
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_ELS | LOG_VPORT,
					 "1817 Fabric does not support NPIV "
					 "- configuring single port mode.\n");
			spin_lock_irq(&phba->hbalock);
			phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
			spin_unlock_irq(&phba->hbalock);
		}
	}

	/*
	 * For FC we need to do some special processing because of the SLI
	 * Port's default settings of the Common Service Parameters.
	 */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)) {
		/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
		if (fabric_param_changed)
			lpfc_unregister_fcf_prep(phba);

		/* This should just update the VFI CSPs*/
		if (vport->fc_flag & FC_VFI_REGISTERED)
			lpfc_issue_reg_vfi(vport);
	}

	if (fabric_param_changed &&
	    !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {

		/* If our NportID changed, we need to ensure all
		 * remaining NPORTs get unreg_login'ed.
		 */
		list_for_each_entry_safe(np, next_np,
					 &vport->fc_nodes, nlp_listp) {
			if ((np->nlp_state != NLP_STE_NPR_NODE) ||
			    !(np->nlp_flag & NLP_NPR_ADISC))
				continue;
			spin_lock_irq(&np->lock);
			np->nlp_flag &= ~NLP_NPR_ADISC;
			spin_unlock_irq(&np->lock);
			lpfc_unreg_rpi(vport, np);
		}
		lpfc_cleanup_pending_mbox(vport);

		if (phba->sli_rev == LPFC_SLI_REV4) {
			lpfc_sli4_unreg_all_rpis(vport);
			lpfc_mbx_unreg_vpi(vport);
			spin_lock_irq(shost->host_lock);
			vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
			spin_unlock_irq(shost->host_lock);
		}

		/*
		 * For SLI3 and SLI4, the VPI needs to be reregistered in
		 * response to this fabric parameter change event.
		 */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
		   !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
		/*
		 * Driver needs to re-reg VPI in order for f/w
		 * to update the MAC address.
		 */
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		lpfc_register_new_vport(phba, vport, ndlp);
		return 0;
	}

	if (phba->sli_rev < LPFC_SLI_REV4) {
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
		    vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
			lpfc_register_new_vport(phba, vport, ndlp);
		else
			lpfc_issue_fabric_reglogin(vport);
	} else {
		ndlp->nlp_type |= NLP_FABRIC;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
		    (vport->vpi_state & LPFC_VPI_REGISTERED)) {
			lpfc_start_fdiscs(phba);
			lpfc_do_scr_ns_plogi(phba, vport);
		} else if (vport->fc_flag & FC_VFI_REGISTERED)
			lpfc_issue_init_vpi(vport);
		else {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
					 "3135 Need register VFI: (x%x/%x)\n",
					 vport->fc_prevDID, vport->fc_myDID);
			lpfc_issue_reg_vfi(vport);
		}
	}
	return 0;
}

/**
 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into an N_Port
 * in a point-to-point topology. First, the @vport's N_Port Name is compared
 * with the received N_Port Name: if the @vport's N_Port Name is greater than
 * the received N_Port Name lexicographically, this node shall assign local
 * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and
 * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise,
 * this node shall just wait for the remote node to issue PLOGI and assign
 * N_Port IDs.
 *
 * Return code
 *   0 - Success
 *   -ENXIO - Fail
 **/
static int
lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  struct serv_parm *sp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
	vport->fc_flag |= FC_PT2PT;
	spin_unlock_irq(shost->host_lock);

	/* If we are pt2pt with another NPort, force NPIV off! */
	phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

	/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
	if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
		lpfc_unregister_fcf_prep(phba);

		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_VFI_REGISTERED;
		spin_unlock_irq(shost->host_lock);
		phba->fc_topology_changed = 0;
	}

	rc = memcmp(&vport->fc_portname, &sp->portName,
		    sizeof(vport->fc_portname));

	if (rc >= 0) {
		/* This side will initiate the PLOGI */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_PT2PT_PLOGI;
		spin_unlock_irq(shost->host_lock);

		/*
		 * N_Port ID cannot be 0, set our Id to LocalID
		 * the other side will be RemoteID.
		 */

		/* not equal */
		if (rc)
			vport->fc_myDID = PT2PT_LocalID;

		/* Decrement ndlp reference count indicating that ndlp can be
		 * safely released when other references to it are done.
		 */
		lpfc_nlp_put(ndlp);

		ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
		if (!ndlp) {
			/*
			 * Cannot find existing Fabric ndlp, so allocate a
			 * new one
			 */
			ndlp = lpfc_nlp_init(vport, PT2PT_RemoteID);
			if (!ndlp)
				goto fail;
		}

		memcpy(&ndlp->nlp_portname, &sp->portName,
		       sizeof(struct lpfc_name));
		memcpy(&ndlp->nlp_nodename, &sp->nodeName,
		       sizeof(struct lpfc_name));
		/* Set state will put ndlp onto node list if not already done */
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(&ndlp->lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(&ndlp->lock);

		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!mbox)
			goto fail;

		lpfc_config_link(phba, mbox);

		mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
		mbox->vport = vport;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
			goto fail;
		}
	} else {
		/* This side will wait for the PLOGI, decrement ndlp reference
		 * count indicating that ndlp can be released when other
		 * references to it are done.
		 */
		lpfc_nlp_put(ndlp);

		/* Start discovery - this should just do CLEAR_LA */
		lpfc_disc_start(vport);
	}

	return 0;
fail:
	return -ENXIO;
}

/**
 * lpfc_cmpl_els_flogi - Completion callback function for flogi
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is the top-level completion callback function for issuing
 * a Fabric Login (FLOGI) command. If the response IOCB reported error,
 * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If
 * retry has been made (either immediately or delayed with lpfc_els_retry()
 * returning 1), the command IOCB will be released and function returned.
 * If the retry attempt has been given up (possibly reach the maximum
 * number of retries), one additional decrement of ndlp reference shall be
 * invoked before going out after releasing the command IOCB. This will
 * actually release the remote node (Note, lpfc_els_free_iocb() will also
 * invoke one decrement of ndlp reference count). If no error reported in
 * the IOCB status, the command Port ID field is used to determine whether
 * this is a point-to-point topology or a fabric topology: if the Port ID
 * field is assigned, it is a fabric topology; otherwise, it is a
 * point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or
 * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the
 * specific topology completion conditions.
 **/
static void
lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		    struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	IOCB_t *irsp = &rspiocb->iocb;
	struct lpfc_nodelist *ndlp = cmdiocb->context1;
	struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
	struct serv_parm *sp;
	uint16_t fcf_index;
	int rc;

	/* Check to see if link went down during discovery */
	if (lpfc_els_chk_latt(vport)) {
		/* One additional decrement on node reference count to
		 * trigger the release of the node
		 */
		lpfc_nlp_put(ndlp);
		goto out;
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			      "FLOGI cmpl: status:x%x/x%x state:x%x",
			      irsp->ulpStatus, irsp->un.ulpWord[4],
			      vport->port_state);

	if (irsp->ulpStatus) {
		/*
		 * In case of FIP mode, perform roundrobin FCF failover
		 * due to new FCF discovery
		 */
		if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
		    (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
			if (phba->link_state < LPFC_LINK_UP)
				goto stop_rr_fcf_flogi;
			if ((phba->fcoe_cvl_eventtag_attn ==
			     phba->fcoe_cvl_eventtag) &&
			    (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
			    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
			     IOERR_SLI_ABORTED))
				goto stop_rr_fcf_flogi;
			else
				phba->fcoe_cvl_eventtag_attn =
					phba->fcoe_cvl_eventtag;
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
					"2611 FLOGI failed on FCF (x%x), "
					"status:x%x/x%x, tmo:x%x, perform "
					"roundrobin FCF failover\n",
					phba->fcf.current_rec.fcf_indx,
					irsp->ulpStatus, irsp->un.ulpWord[4],
					irsp->ulpTimeout);
			lpfc_sli4_set_fcf_flogi_fail(phba,
					phba->fcf.current_rec.fcf_indx);
			fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
			rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
			if (rc)
				goto out;
		}

stop_rr_fcf_flogi:
		/* FLOGI failure */
		if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
		      ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
		       IOERR_LOOP_OPEN_FAILURE)))
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "2858 FLOGI failure Status:x%x/x%x TMO"
					 ":x%x Data x%x x%x\n",
					 irsp->ulpStatus, irsp->un.ulpWord[4],
					 irsp->ulpTimeout, phba->hba_flag,
					 phba->fcf.fcf_flag);

		/* Check for retry */
		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
			goto out;

		lpfc_printf_vlog(vport, KERN_WARNING, LOG_TRACE_EVENT,
				 "0150 FLOGI failure Status:x%x/x%x "
				 "xri x%x TMO:x%x refcnt %d\n",
				 irsp->ulpStatus, irsp->un.ulpWord[4],
				 cmdiocb->sli4_xritag, irsp->ulpTimeout,
				 kref_read(&ndlp->kref));

		/* If this is not a loop open failure, bail out */
		if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
		      ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
		       IOERR_LOOP_OPEN_FAILURE)))
			goto flogifail;

		/* FLOGI failed, so there is no fabric */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
		spin_unlock_irq(shost->host_lock);

		/* If private loop, then allow max outstanding els to be
		 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
		 * alpa map would take too long otherwise.
		 */
		if (phba->alpa_map[0] == 0)
			vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
		if ((phba->sli_rev == LPFC_SLI_REV4) &&
		    (!(vport->fc_flag & FC_VFI_REGISTERED) ||
		     (vport->fc_prevDID != vport->fc_myDID) ||
		     phba->fc_topology_changed)) {
			if (vport->fc_flag & FC_VFI_REGISTERED) {
				if (phba->fc_topology_changed) {
					lpfc_unregister_fcf_prep(phba);
					spin_lock_irq(shost->host_lock);
					vport->fc_flag &= ~FC_VFI_REGISTERED;
					spin_unlock_irq(shost->host_lock);
					phba->fc_topology_changed = 0;
				} else {
					lpfc_sli4_unreg_all_rpis(vport);
				}
			}

			/* Do not register VFI if the driver aborted FLOGI */
			if (!lpfc_error_lost_link(irsp))
				lpfc_issue_reg_vfi(vport);

			lpfc_nlp_put(ndlp);
			goto out;
		}
		goto flogifail;
	}
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
	vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
	spin_unlock_irq(shost->host_lock);

	/*
	 * The FLogI succeeded. Sync the data for the CPU before
	 * accessing it.
	 */
	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
	if (!prsp)
		goto out;
	sp = prsp->virt + sizeof(uint32_t);

	/* FLOGI completes successfully */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0101 FLOGI completes successfully, I/O tag:x%x, "
			 "xri x%x Data: x%x x%x x%x x%x x%x x%x x%x %d\n",
			 cmdiocb->iotag, cmdiocb->sli4_xritag,
			 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
			 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
			 vport->port_state, vport->fc_flag,
			 sp->cmn.priority_tagging, kref_read(&ndlp->kref));

	if (sp->cmn.priority_tagging)
		vport->vmid_flag |= LPFC_VMID_ISSUE_QFPA;

	if (vport->port_state == LPFC_FLOGI) {
		/*
		 * If Common Service Parameters indicate Nport
		 * we are point to point, if Fport we are Fabric.
		 */
		if (sp->cmn.fPort)
			rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
		else if (!(phba->hba_flag & HBA_FCOE_MODE))
			rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
		else {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				"2831 FLOGI response with cleared Fabric "
				"bit fcf_index 0x%x "
				"Switch Name %02x%02x%02x%02x%02x%02x%02x%02x "
				"Fabric Name "
				"%02x%02x%02x%02x%02x%02x%02x%02x\n",
				phba->fcf.current_rec.fcf_indx,
				phba->fcf.current_rec.switch_name[0],
				phba->fcf.current_rec.switch_name[1],
				phba->fcf.current_rec.switch_name[2],
				phba->fcf.current_rec.switch_name[3],
				phba->fcf.current_rec.switch_name[4],
				phba->fcf.current_rec.switch_name[5],
				phba->fcf.current_rec.switch_name[6],
				phba->fcf.current_rec.switch_name[7],
				phba->fcf.current_rec.fabric_name[0],
				phba->fcf.current_rec.fabric_name[1],
				phba->fcf.current_rec.fabric_name[2],
				phba->fcf.current_rec.fabric_name[3],
				phba->fcf.current_rec.fabric_name[4],
				phba->fcf.current_rec.fabric_name[5],
				phba->fcf.current_rec.fabric_name[6],
				phba->fcf.current_rec.fabric_name[7]);

			lpfc_nlp_put(ndlp);
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
			spin_unlock_irq(&phba->hbalock);
			phba->fcf.fcf_redisc_attempted = 0; /* reset */
			goto out;
		}
		if (!rc) {
			/* Mark the FCF discovery process done */
			if (phba->hba_flag & HBA_FIP_SUPPORT)
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
						 LOG_ELS,
						 "2769 FLOGI to FCF (x%x) "
						 "completed successfully\n",
						 phba->fcf.current_rec.fcf_indx);
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
			spin_unlock_irq(&phba->hbalock);
			phba->fcf.fcf_redisc_attempted = 0; /* reset */
			goto out;
		}
	} else if (vport->port_state > LPFC_FLOGI &&
		   vport->fc_flag & FC_PT2PT) {
		/*
		 * In a p2p topology, it is possible that discovery has
		 * already progressed, and this completion can be ignored.
		 * Recheck the indicated topology.
		 */
		if (!sp->cmn.fPort)
			goto out;
	}

flogifail:
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
	spin_unlock_irq(&phba->hbalock);

	if (!lpfc_error_lost_link(irsp)) {
		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(vport);

		/* Start discovery */
		lpfc_disc_start(vport);
	} else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
		    (((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
		      IOERR_SLI_ABORTED) &&
		     ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
		      IOERR_SLI_DOWN))) &&
		   (phba->link_state != LPFC_CLEAR_LA)) {
		/* If FLOGI failed enable link interrupt. */
		lpfc_issue_clear_la(phba, vport);
	}
out:
	phba->hba_flag &= ~HBA_FLOGI_OUTSTANDING;
	lpfc_els_free_iocb(phba, cmdiocb);
	lpfc_nlp_put(ndlp);
}

/**
 * lpfc_cmpl_els_link_down - Completion callback function for ELS command
 *                           aborted during a link down
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 */
static void
lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	IOCB_t *irsp;
	uint32_t *pcmd;
	uint32_t cmd;

	pcmd = (uint32_t *)(((struct lpfc_dmabuf *)cmdiocb->context2)->virt);
	cmd = *pcmd;
	irsp = &rspiocb->iocb;

	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"6445 ELS completes after LINK_DOWN: "
			" Status %x/%x cmd x%x flg x%x\n",
			irsp->ulpStatus, irsp->un.ulpWord[4], cmd,
			cmdiocb->iocb_flag);

	if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) {
		cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
		atomic_dec(&phba->fabric_iocb_count);
	}
	lpfc_els_free_iocb(phba, cmdiocb);
}

/**
 * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @retry: number of retries to the command IOCB.
 *
 * This routine issues a Fabric Login (FLOGI) Request ELS command
 * for a @vport. The initiator service parameters are put into the payload
 * of the FLOGI Request IOCB and the top-level callback function pointer
 * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback
 * function field. The lpfc_issue_fabric_iocb routine is invoked to send
 * out FLOGI ELS command with one outstanding fabric IOCB at a time.
 *
 * Note that the ndlp reference count will be incremented by 1 for holding the
 * ndlp and the reference to ndlp will be stored into the context1 field of
 * the IOCB for the completion callback function to the FLOGI ELS command.
 *
 * Return code
 *   0 - successfully issued flogi iocb for @vport
 *   1 - failed to issue flogi iocb for @vport
 **/
static int
lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		     uint8_t retry)
{
	struct lpfc_hba *phba = vport->phba;
	struct serv_parm *sp;
	IOCB_t *icmd;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_iocbq defer_flogi_acc;
	uint8_t *pcmd;
	uint16_t cmdsize;
	uint32_t tmo, did;
	int rc;

	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
				     ndlp->nlp_DID, ELS_CMD_FLOGI);

	if (!elsiocb)
		return 1;

	icmd = &elsiocb->iocb;
	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);

	/* For FLOGI request, remainder of payload is service parameters */
	*((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
	pcmd += sizeof(uint32_t);
	memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
	sp = (struct serv_parm *) pcmd;

	/* Setup CSPs accordingly for Fabric */
	sp->cmn.e_d_tov = 0;
	sp->cmn.w2.r_a_tov = 0;
	sp->cmn.virtual_fabric_support = 0;
	sp->cls1.classValid = 0;
	if (sp->cmn.fcphLow < FC_PH3)
		sp->cmn.fcphLow = FC_PH3;
	if (sp->cmn.fcphHigh < FC_PH3)
		sp->cmn.fcphHigh = FC_PH3;

	/* Determine if switch supports priority tagging */
	if (phba->cfg_vmid_priority_tagging) {
		sp->cmn.priority_tagging = 1;
		/* lpfc_vmid_host_uuid is combination of wwpn and wwnn */
		if (uuid_is_null((uuid_t *)vport->lpfc_vmid_host_uuid)) {
			memcpy(vport->lpfc_vmid_host_uuid, phba->wwpn,
			       sizeof(phba->wwpn));
			memcpy(&vport->lpfc_vmid_host_uuid[8], phba->wwnn,
			       sizeof(phba->wwnn));
		}
	}

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
		    LPFC_SLI_INTF_IF_TYPE_0) {
			elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
			elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
			/* FLOGI needs to be 3 for WQE FCFI */
			/* Set the fcfi to the fcfi we registered with */
			elsiocb->iocb.ulpContext = phba->fcf.fcfi;
		}
		/* Can't do SLI4 class2 without support sequence coalescing */
		sp->cls2.classValid = 0;
		sp->cls2.seqDelivery = 0;
	} else {
		/* Historical, setting sequential-delivery bit for SLI3 */
		sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0;
		sp->cls3.seqDelivery = (sp->cls3.classValid) ? 1 : 0;
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
			sp->cmn.request_multiple_Nport = 1;
			/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
			icmd->ulpCt_h = 1;
			icmd->ulpCt_l = 0;
		} else
			sp->cmn.request_multiple_Nport = 0;
	}

	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
		icmd->un.elsreq64.myID = 0;
		icmd->un.elsreq64.fl = 1;
	}

	tmo = phba->fc_ratov;
	phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
	lpfc_set_disctmo(vport);
	phba->fc_ratov = tmo;

	phba->fc_stat.elsXmitFLOGI++;
	elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			      "Issue FLOGI: opt:x%x",
			      phba->sli3_options, 0, 0);

	elsiocb->context1 = lpfc_nlp_get(ndlp);
	if (!elsiocb->context1) {
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
	}

	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
	if (rc == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
		lpfc_nlp_put(ndlp);
		return 1;
	}

	phba->hba_flag |= (HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING);

	/* Check for a deferred FLOGI ACC condition */
	if (phba->defer_flogi_acc_flag) {
		did = vport->fc_myDID;
		vport->fc_myDID = Fabric_DID;

		memset(&defer_flogi_acc, 0, sizeof(struct lpfc_iocbq));

		defer_flogi_acc.iocb.ulpContext = phba->defer_flogi_acc_rx_id;
		defer_flogi_acc.iocb.unsli3.rcvsli3.ox_id =
						phba->defer_flogi_acc_ox_id;

		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "3354 Xmit deferred FLOGI ACC: rx_id: x%x,"
				 " ox_id: x%x, hba_flag x%x\n",
				 phba->defer_flogi_acc_rx_id,
				 phba->defer_flogi_acc_ox_id, phba->hba_flag);

		/* Send deferred FLOGI ACC */
		lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, &defer_flogi_acc,
				 ndlp, NULL);

		phba->defer_flogi_acc_flag = false;

		vport->fc_myDID = did;
	}

	return 0;
}

/**
 * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs
 * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq
 * list and issues an abort IOCB command on each outstanding IOCB that
 * contains an active Fabric_DID ndlp. Note that this function is to issue
 * the abort IOCB command on all the outstanding IOCBs, thus when this
 * function returns, it does not guarantee all the IOCBs are actually aborted.
 *
 * Return code
 *   0 - Successfully issued abort iocb on all outstanding flogis (Always 0)
 **/
int
lpfc_els_abort_flogi(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	struct lpfc_nodelist *ndlp;
	IOCB_t *icmd;

	/* Abort outstanding I/O on NPort <nlp_DID> */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"0201 Abort outstanding I/O on NPort x%x\n",
			Fabric_DID);

	pring = lpfc_phba_elsring(phba);
	if (unlikely(!pring))
		return -EIO;

	/*
	 * Check the txcmplq for an iocb that matches the nport the driver is
	 * searching for.
	 */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		icmd = &iocb->iocb;
		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
			ndlp = (struct lpfc_nodelist *)(iocb->context1);
			if (ndlp && ndlp->nlp_DID == Fabric_DID) {
				if ((phba->pport->fc_flag & FC_PT2PT) &&
				    !(phba->pport->fc_flag & FC_PT2PT_PLOGI))
					iocb->fabric_iocb_cmpl =
						lpfc_ignore_els_cmpl;
				lpfc_sli_issue_abort_iotag(phba, pring, iocb,
							   NULL);
			}
		}
	}
	/* Make sure HBA is alive */
	lpfc_issue_hb_tmo(phba);

	spin_unlock_irq(&phba->hbalock);

	return 0;
}

/**
 * lpfc_initial_flogi - Issue an initial fabric login for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues an initial Fabric Login (FLOGI) for the @vport
 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
 * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and
 * put it into the @vport's ndlp list. If an inactive ndlp found on the list,
 * it will just be enabled and made active. The lpfc_issue_els_flogi() routine
 * is then invoked with the @vport and the ndlp to perform the FLOGI for the
 * @vport.
 *
 * Return code
 *   0 - failed to issue initial flogi for @vport
 *   1 - successfully issued initial flogi for @vport
 **/
int
lpfc_initial_flogi(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	vport->port_state = LPFC_FLOGI;
	lpfc_set_disctmo(vport);

	/* First look for the Fabric ndlp */
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = lpfc_nlp_init(vport, Fabric_DID);
		if (!ndlp)
			return 0;
		/* Set the node type */
		ndlp->nlp_type |= NLP_FABRIC;

		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	}

	if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
		/* This decrement of reference count to node shall kick off
		 * the release of the node.
		 */
		lpfc_nlp_put(ndlp);
		return 0;
	}
	return 1;
}

/**
 * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues an initial Fabric Discover (FDISC) for the @vport
 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
 * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and
 * put it into the @vport's ndlp list. If an inactive ndlp found on the list,
 * it will just be enabled and made active. The lpfc_issue_els_fdisc() routine
 * is then invoked with the @vport and the ndlp to perform the FDISC for the
 * @vport.
 *
 * Return code
 *   0 - failed to issue initial fdisc for @vport
 *   1 - successfully issued initial fdisc for @vport
 **/
int
lpfc_initial_fdisc(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	/* First look for the Fabric ndlp */
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = lpfc_nlp_init(vport, Fabric_DID);
		if (!ndlp)
			return 0;

		/* NPIV is only supported in Fabrics. */
		ndlp->nlp_type |= NLP_FABRIC;

		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	}

	if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
		/* decrement node reference count to trigger the release of
		 * the node.
		 */
		lpfc_nlp_put(ndlp);
		return 0;
	}
	return 1;
}

/**
 * lpfc_more_plogi - Check and issue remaining plogis for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine checks whether there are more remaining Port Logins
 * (PLOGI) to be issued for the @vport. If so, it will invoke the routine
 * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes
 * to issue ELS PLOGIs up to the configured discover threads with the
 * @vport (@vport->cfg_discovery_threads). The function also decrements
 * the @vport's num_disc_nodes by 1 if it is not already 0.
 **/
void
lpfc_more_plogi(struct lpfc_vport *vport)
{
	if (vport->num_disc_nodes)
		vport->num_disc_nodes--;

	/* Continue discovery with <num_disc_nodes> PLOGIs to go */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0232 Continue discovery with %d PLOGIs to go "
			 "Data: x%x x%x x%x\n",
			 vport->num_disc_nodes, vport->fc_plogi_cnt,
			 vport->fc_flag, vport->port_state);
	/* Check to see if there are more PLOGIs to be sent */
	if (vport->fc_flag & FC_NLP_MORE)
		/* go thru NPR nodes and issue any remaining ELS PLOGIs */
		lpfc_els_disc_plogi(vport);

	return;
}

/**
 * lpfc_plogi_confirm_nport - Confirm plogi wwpn matches stored ndlp
 * @phba: pointer to lpfc hba data structure.
 * @prsp: pointer to response IOCB payload.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine checks and indicates whether the WWPN of an N_Port, retrieved
 * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_Port.
 * The following cases are considered N_Port confirmed:
 * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on vport list and matches
 * the WWPN of the N_Port logged into; 3) The @ndlp is not on vport list but
 * it does not have WWPN assigned either. If the WWPN is confirmed, the
 * pointer to the @ndlp will be returned. If the WWPN is not confirmed:
 * 1) if there is a node on vport list other than the @ndlp with the same
 * WWPN of the N_Port PLOGI logged into, the lpfc_unreg_rpi() will be invoked
 * on that node to release the RPI associated with the node; 2) if there is
 * no node found on vport list with the same WWPN of the N_Port PLOGI logged
 * into, a new node shall be allocated (or activated). In either case, the
 * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall
 * be released and the new_ndlp shall be put on to the vport node list and
 * its pointer returned as the confirmed node.
 *
 * Note that before the @ndlp got "released", the keepDID from not-matching
 * or inactive "new_ndlp" on the vport node list is assigned to the nlp_DID
 * of the @ndlp. This is because the release of @ndlp is actually to put it
 * into an inactive state on the vport node list and the vport node list
 * management algorithm does not allow two node with a same DID.
 *
 * Return code
 *   pointer to the PLOGI N_Port @ndlp
 **/
static struct lpfc_nodelist *
lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
			 struct lpfc_nodelist *ndlp)
{
	struct lpfc_vport *vport = ndlp->vport;
	struct lpfc_nodelist *new_ndlp;
	struct serv_parm *sp;
	uint8_t name[sizeof(struct lpfc_name)];
	uint32_t keepDID = 0, keep_nlp_flag = 0;
	uint32_t keep_new_nlp_flag = 0;
	uint16_t keep_nlp_state;
	u32 keep_nlp_fc4_type = 0;
	struct lpfc_nvme_rport *keep_nrport = NULL;
	unsigned long *active_rrqs_xri_bitmap = NULL;

	/* Fabric nodes can have the same WWPN so we don't bother searching
	 * by WWPN. Just return the ndlp that was given to us.
	 */
	if (ndlp->nlp_type & NLP_FABRIC)
		return ndlp;

	sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t));
	memset(name, 0, sizeof(struct lpfc_name));

	/* Now we find out if the NPort we are logging into, matches the WWPN
	 * we have for that ndlp. If not, we have some work to do.
	 */
	new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);

	/* return immediately if the WWPN matches ndlp */
	if (!new_ndlp || (new_ndlp == ndlp))
		return ndlp;

	/*
	 * Unregister from backend if not done yet. Could have been skipped
	 * due to ADISC
	 */
	lpfc_nlp_unreg_node(vport, new_ndlp);

	if (phba->sli_rev == LPFC_SLI_REV4) {
		active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool,
						       GFP_KERNEL);
		if (active_rrqs_xri_bitmap)
			memset(active_rrqs_xri_bitmap, 0,
			       phba->cfg_rrq_xri_bitmap_sz);
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE,
			 "3178 PLOGI confirm: ndlp x%x x%x x%x: "
			 "new_ndlp x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_fc4_type,
			 (new_ndlp ? new_ndlp->nlp_DID : 0),
			 (new_ndlp ? new_ndlp->nlp_flag : 0),
			 (new_ndlp ? new_ndlp->nlp_fc4_type : 0));

	keepDID = new_ndlp->nlp_DID;

	if (phba->sli_rev == LPFC_SLI_REV4 && active_rrqs_xri_bitmap)
		memcpy(active_rrqs_xri_bitmap, new_ndlp->active_rrqs_xri_bitmap,
		       phba->cfg_rrq_xri_bitmap_sz);

	/* At this point in this routine, we know new_ndlp will be
	 * returned. however, any previous GID_FTs that were done
	 * would have updated nlp_fc4_type in ndlp, so we must ensure
	 * new_ndlp has the right value.
1701 */ 1702 if (vport->fc_flag & FC_FABRIC) { 1703 keep_nlp_fc4_type = new_ndlp->nlp_fc4_type; 1704 new_ndlp->nlp_fc4_type = ndlp->nlp_fc4_type; 1705 } 1706 1707 lpfc_unreg_rpi(vport, new_ndlp); 1708 new_ndlp->nlp_DID = ndlp->nlp_DID; 1709 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state; 1710 if (phba->sli_rev == LPFC_SLI_REV4) 1711 memcpy(new_ndlp->active_rrqs_xri_bitmap, 1712 ndlp->active_rrqs_xri_bitmap, 1713 phba->cfg_rrq_xri_bitmap_sz); 1714 1715 /* Lock both ndlps */ 1716 spin_lock_irq(&ndlp->lock); 1717 spin_lock_irq(&new_ndlp->lock); 1718 keep_new_nlp_flag = new_ndlp->nlp_flag; 1719 keep_nlp_flag = ndlp->nlp_flag; 1720 new_ndlp->nlp_flag = ndlp->nlp_flag; 1721 1722 /* if new_ndlp had NLP_UNREG_INP set, keep it */ 1723 if (keep_new_nlp_flag & NLP_UNREG_INP) 1724 new_ndlp->nlp_flag |= NLP_UNREG_INP; 1725 else 1726 new_ndlp->nlp_flag &= ~NLP_UNREG_INP; 1727 1728 /* if new_ndlp had NLP_RPI_REGISTERED set, keep it */ 1729 if (keep_new_nlp_flag & NLP_RPI_REGISTERED) 1730 new_ndlp->nlp_flag |= NLP_RPI_REGISTERED; 1731 else 1732 new_ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; 1733 1734 /* 1735 * Retain the DROPPED flag. This will take care of the init 1736 * refcount when affecting the state change 1737 */ 1738 if (keep_new_nlp_flag & NLP_DROPPED) 1739 new_ndlp->nlp_flag |= NLP_DROPPED; 1740 else 1741 new_ndlp->nlp_flag &= ~NLP_DROPPED; 1742 1743 ndlp->nlp_flag = keep_new_nlp_flag; 1744 1745 /* if ndlp had NLP_UNREG_INP set, keep it */ 1746 if (keep_nlp_flag & NLP_UNREG_INP) 1747 ndlp->nlp_flag |= NLP_UNREG_INP; 1748 else 1749 ndlp->nlp_flag &= ~NLP_UNREG_INP; 1750 1751 /* if ndlp had NLP_RPI_REGISTERED set, keep it */ 1752 if (keep_nlp_flag & NLP_RPI_REGISTERED) 1753 ndlp->nlp_flag |= NLP_RPI_REGISTERED; 1754 else 1755 ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; 1756 1757 /* 1758 * Retain the DROPPED flag. This will take care of the init 1759 * refcount when affecting the state change 1760 */ 1761 if (keep_nlp_flag & NLP_DROPPED) 1762 ndlp->nlp_flag |= NLP_DROPPED; 1763 else 1764 ndlp->nlp_flag &= ~NLP_DROPPED; 1765 1766 spin_unlock_irq(&new_ndlp->lock); 1767 spin_unlock_irq(&ndlp->lock); 1768 1769 /* Set nlp_states accordingly */ 1770 keep_nlp_state = new_ndlp->nlp_state; 1771 lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state); 1772 1773 /* interchange the nvme remoteport structs */ 1774 keep_nrport = new_ndlp->nrport; 1775 new_ndlp->nrport = ndlp->nrport; 1776 1777 /* Move this back to NPR state */ 1778 if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) { 1779 /* The new_ndlp is replacing ndlp totally, so we need 1780 * to put ndlp on UNUSED list and try to free it. 1781 */ 1782 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1783 "3179 PLOGI confirm NEW: %x %x\n", 1784 new_ndlp->nlp_DID, keepDID); 1785 1786 /* Two ndlps cannot have the same did on the nodelist. 1787 * Note: for this case, ndlp has a NULL WWPN so setting 1788 * the nlp_fc4_type isn't required. 1789 */ 1790 ndlp->nlp_DID = keepDID; 1791 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state); 1792 if (phba->sli_rev == LPFC_SLI_REV4 && 1793 active_rrqs_xri_bitmap) 1794 memcpy(ndlp->active_rrqs_xri_bitmap, 1795 active_rrqs_xri_bitmap, 1796 phba->cfg_rrq_xri_bitmap_sz); 1797 1798 } else { 1799 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1800 "3180 PLOGI confirm SWAP: %x %x\n", 1801 new_ndlp->nlp_DID, keepDID); 1802 1803 lpfc_unreg_rpi(vport, ndlp); 1804 1805 /* Two ndlps cannot have the same did and the fc4 1806 * type must be transferred because the ndlp is in 1807 * flight. 
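 * ndlp takes over the DID (and, on fabric topologies, the fc4 type) that
 * new_ndlp originally held so that no two entries on the vport node list
 * share the same DID.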
1808 */ 1809 ndlp->nlp_DID = keepDID; 1810 ndlp->nlp_fc4_type = keep_nlp_fc4_type; 1811 1812 if (phba->sli_rev == LPFC_SLI_REV4 && 1813 active_rrqs_xri_bitmap) 1814 memcpy(ndlp->active_rrqs_xri_bitmap, 1815 active_rrqs_xri_bitmap, 1816 phba->cfg_rrq_xri_bitmap_sz); 1817 1818 /* Since we are switching over to the new_ndlp, 1819 * reset the old ndlp state 1820 */ 1821 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || 1822 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) 1823 keep_nlp_state = NLP_STE_NPR_NODE; 1824 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state); 1825 ndlp->nrport = keep_nrport; 1826 } 1827 1828 /* 1829 * If ndlp is not associated with any rport we can drop it here else 1830 * let dev_loss_tmo_callbk trigger DEVICE_RM event 1831 */ 1832 if (!ndlp->rport && (ndlp->nlp_state == NLP_STE_NPR_NODE)) 1833 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 1834 1835 if (phba->sli_rev == LPFC_SLI_REV4 && 1836 active_rrqs_xri_bitmap) 1837 mempool_free(active_rrqs_xri_bitmap, 1838 phba->active_rrq_pool); 1839 1840 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE, 1841 "3173 PLOGI confirm exit: new_ndlp x%x x%x x%x\n", 1842 new_ndlp->nlp_DID, new_ndlp->nlp_flag, 1843 new_ndlp->nlp_fc4_type); 1844 1845 return new_ndlp; 1846 } 1847 1848 /** 1849 * lpfc_end_rscn - Check and handle more rscn for a vport 1850 * @vport: pointer to a host virtual N_Port data structure. 1851 * 1852 * This routine checks whether more Registration State Change 1853 * Notifications (RSCNs) came in while the discovery state machine was in 1854 * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be 1855 * invoked to handle the additional RSCNs for the @vport. Otherwise, the 1856 * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of 1857 * handling the RSCNs. 1858 **/ 1859 void 1860 lpfc_end_rscn(struct lpfc_vport *vport) 1861 { 1862 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1863 1864 if (vport->fc_flag & FC_RSCN_MODE) { 1865 /* 1866 * Check to see if more RSCNs came in while we were 1867 * processing this one. 1868 */ 1869 if (vport->fc_rscn_id_cnt || 1870 (vport->fc_flag & FC_RSCN_DISCOVERY) != 0) 1871 lpfc_els_handle_rscn(vport); 1872 else { 1873 spin_lock_irq(shost->host_lock); 1874 vport->fc_flag &= ~FC_RSCN_MODE; 1875 spin_unlock_irq(shost->host_lock); 1876 } 1877 } 1878 } 1879 1880 /** 1881 * lpfc_cmpl_els_rrq - Completion handled for els RRQs. 1882 * @phba: pointer to lpfc hba data structure. 1883 * @cmdiocb: pointer to lpfc command iocb data structure. 1884 * @rspiocb: pointer to lpfc response iocb data structure. 1885 * 1886 * This routine will call the clear rrq function to free the rrq and 1887 * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not 1888 * exist then the clear_rrq is still called because the rrq needs to 1889 * be freed. 
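 * On completion the command IOCB is released and the ndlp reference taken
 * when the RRQ was issued is dropped.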
1890 **/ 1891 1892 static void 1893 lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1894 struct lpfc_iocbq *rspiocb) 1895 { 1896 struct lpfc_vport *vport = cmdiocb->vport; 1897 IOCB_t *irsp; 1898 struct lpfc_nodelist *ndlp = cmdiocb->context1; 1899 struct lpfc_node_rrq *rrq; 1900 1901 /* we pass cmdiocb to state machine which needs rspiocb as well */ 1902 rrq = cmdiocb->context_un.rrq; 1903 cmdiocb->context_un.rsp_iocb = rspiocb; 1904 1905 irsp = &rspiocb->iocb; 1906 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1907 "RRQ cmpl: status:x%x/x%x did:x%x", 1908 irsp->ulpStatus, irsp->un.ulpWord[4], 1909 irsp->un.elsreq64.remoteID); 1910 1911 /* rrq completes to NPort <nlp_DID> */ 1912 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1913 "2880 RRQ completes to DID x%x " 1914 "Data: x%x x%x x%x x%x x%x\n", 1915 irsp->un.elsreq64.remoteID, 1916 irsp->ulpStatus, irsp->un.ulpWord[4], 1917 irsp->ulpTimeout, rrq->xritag, rrq->rxid); 1918 1919 if (irsp->ulpStatus) { 1920 /* Check for retry */ 1921 /* RRQ failed Don't print the vport to vport rjts */ 1922 if (irsp->ulpStatus != IOSTAT_LS_RJT || 1923 (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) && 1924 ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) || 1925 (phba)->pport->cfg_log_verbose & LOG_ELS) 1926 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1927 "2881 RRQ failure DID:%06X Status:" 1928 "x%x/x%x\n", 1929 ndlp->nlp_DID, irsp->ulpStatus, 1930 irsp->un.ulpWord[4]); 1931 } 1932 1933 lpfc_clr_rrq_active(phba, rrq->xritag, rrq); 1934 lpfc_els_free_iocb(phba, cmdiocb); 1935 lpfc_nlp_put(ndlp); 1936 return; 1937 } 1938 /** 1939 * lpfc_cmpl_els_plogi - Completion callback function for plogi 1940 * @phba: pointer to lpfc hba data structure. 1941 * @cmdiocb: pointer to lpfc command iocb data structure. 1942 * @rspiocb: pointer to lpfc response iocb data structure. 1943 * 1944 * This routine is the completion callback function for issuing the Port 1945 * Login (PLOGI) command. For PLOGI completion, there must be an active 1946 * ndlp on the vport node list that matches the remote node ID from the 1947 * PLOGI response IOCB. If such ndlp does not exist, the PLOGI is simply 1948 * ignored and command IOCB released. The PLOGI response IOCB status is 1949 * checked for error conditions. If there is error status reported, PLOGI 1950 * retry shall be attempted by invoking the lpfc_els_retry() routine. 1951 * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on 1952 * the ndlp and the NLP_EVT_CMPL_PLOGI state to the Discover State Machine 1953 * (DSM) is set for this PLOGI completion. Finally, it checks whether 1954 * there are additional N_Port nodes with the vport that need to perform 1955 * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue addition 1956 * PLOGIs. 
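 * Once the last outstanding PLOGI of a discovery cycle completes, the
 * FC_NDISC_ACTIVE flag is cleared, the discovery timer is cancelled and any
 * pending RSCN handling is completed via lpfc_end_rscn().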
1957 **/ 1958 static void 1959 lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1960 struct lpfc_iocbq *rspiocb) 1961 { 1962 struct lpfc_vport *vport = cmdiocb->vport; 1963 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1964 IOCB_t *irsp; 1965 struct lpfc_nodelist *ndlp, *free_ndlp; 1966 struct lpfc_dmabuf *prsp; 1967 int disc; 1968 struct serv_parm *sp = NULL; 1969 1970 /* we pass cmdiocb to state machine which needs rspiocb as well */ 1971 cmdiocb->context_un.rsp_iocb = rspiocb; 1972 1973 irsp = &rspiocb->iocb; 1974 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1975 "PLOGI cmpl: status:x%x/x%x did:x%x", 1976 irsp->ulpStatus, irsp->un.ulpWord[4], 1977 irsp->un.elsreq64.remoteID); 1978 1979 ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID); 1980 if (!ndlp) { 1981 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1982 "0136 PLOGI completes to NPort x%x " 1983 "with no ndlp. Data: x%x x%x x%x\n", 1984 irsp->un.elsreq64.remoteID, 1985 irsp->ulpStatus, irsp->un.ulpWord[4], 1986 irsp->ulpIoTag); 1987 goto out_freeiocb; 1988 } 1989 1990 /* Since ndlp can be freed in the disc state machine, note if this node 1991 * is being used during discovery. 1992 */ 1993 spin_lock_irq(&ndlp->lock); 1994 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 1995 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 1996 spin_unlock_irq(&ndlp->lock); 1997 1998 /* PLOGI completes to NPort <nlp_DID> */ 1999 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2000 "0102 PLOGI completes to NPort x%06x " 2001 "Data: x%x x%x x%x x%x x%x\n", 2002 ndlp->nlp_DID, ndlp->nlp_fc4_type, 2003 irsp->ulpStatus, irsp->un.ulpWord[4], 2004 disc, vport->num_disc_nodes); 2005 2006 /* Check to see if link went down during discovery */ 2007 if (lpfc_els_chk_latt(vport)) { 2008 spin_lock_irq(&ndlp->lock); 2009 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2010 spin_unlock_irq(&ndlp->lock); 2011 goto out; 2012 } 2013 2014 if (irsp->ulpStatus) { 2015 /* Check for retry */ 2016 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2017 /* ELS command is being retried */ 2018 if (disc) { 2019 spin_lock_irq(&ndlp->lock); 2020 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2021 spin_unlock_irq(&ndlp->lock); 2022 } 2023 goto out; 2024 } 2025 /* PLOGI failed Don't print the vport to vport rjts */ 2026 if (irsp->ulpStatus != IOSTAT_LS_RJT || 2027 (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) && 2028 ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) || 2029 (phba)->pport->cfg_log_verbose & LOG_ELS) 2030 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2031 "2753 PLOGI failure DID:%06X Status:x%x/x%x\n", 2032 ndlp->nlp_DID, irsp->ulpStatus, 2033 irsp->un.ulpWord[4]); 2034 2035 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2036 if (!lpfc_error_lost_link(irsp)) 2037 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2038 NLP_EVT_CMPL_PLOGI); 2039 2040 /* If a PLOGI collision occurred, the node needs to continue 2041 * with the reglogin process. 2042 */ 2043 spin_lock_irq(&ndlp->lock); 2044 if ((ndlp->nlp_flag & (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI)) && 2045 ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) { 2046 spin_unlock_irq(&ndlp->lock); 2047 goto out; 2048 } 2049 spin_unlock_irq(&ndlp->lock); 2050 2051 /* No PLOGI collision and the node is not registered with the 2052 * scsi or nvme transport. It is no longer an active node. Just 2053 * start the device remove process. 
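		 * With no transport registration remaining, the DEVICE_RM
		 * event lets the node state machine release the ndlp.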
2054 */ 2055 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { 2056 spin_lock_irq(&ndlp->lock); 2057 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2058 spin_unlock_irq(&ndlp->lock); 2059 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2060 NLP_EVT_DEVICE_RM); 2061 } 2062 } else { 2063 /* Good status, call state machine */ 2064 prsp = list_entry(((struct lpfc_dmabuf *) 2065 cmdiocb->context2)->list.next, 2066 struct lpfc_dmabuf, list); 2067 ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp); 2068 2069 sp = (struct serv_parm *)((u8 *)prsp->virt + 2070 sizeof(u32)); 2071 2072 ndlp->vmid_support = 0; 2073 if ((phba->cfg_vmid_app_header && sp->cmn.app_hdr_support) || 2074 (phba->cfg_vmid_priority_tagging && 2075 sp->cmn.priority_tagging)) { 2076 lpfc_printf_log(phba, KERN_DEBUG, LOG_ELS, 2077 "4018 app_hdr_support %d tagging %d DID x%x\n", 2078 sp->cmn.app_hdr_support, 2079 sp->cmn.priority_tagging, 2080 ndlp->nlp_DID); 2081 /* if the dest port supports VMID, mark it in ndlp */ 2082 ndlp->vmid_support = 1; 2083 } 2084 2085 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2086 NLP_EVT_CMPL_PLOGI); 2087 } 2088 2089 if (disc && vport->num_disc_nodes) { 2090 /* Check to see if there are more PLOGIs to be sent */ 2091 lpfc_more_plogi(vport); 2092 2093 if (vport->num_disc_nodes == 0) { 2094 spin_lock_irq(shost->host_lock); 2095 vport->fc_flag &= ~FC_NDISC_ACTIVE; 2096 spin_unlock_irq(shost->host_lock); 2097 2098 lpfc_can_disctmo(vport); 2099 lpfc_end_rscn(vport); 2100 } 2101 } 2102 2103 out: 2104 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, 2105 "PLOGI Cmpl PUT: did:x%x refcnt %d", 2106 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2107 2108 out_freeiocb: 2109 /* Release the reference on the original I/O request. */ 2110 free_ndlp = (struct lpfc_nodelist *)cmdiocb->context1; 2111 2112 lpfc_els_free_iocb(phba, cmdiocb); 2113 lpfc_nlp_put(free_ndlp); 2114 return; 2115 } 2116 2117 /** 2118 * lpfc_issue_els_plogi - Issue an plogi iocb command for a vport 2119 * @vport: pointer to a host virtual N_Port data structure. 2120 * @did: destination port identifier. 2121 * @retry: number of retries to the command IOCB. 2122 * 2123 * This routine issues a Port Login (PLOGI) command to a remote N_Port 2124 * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port, 2125 * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list. 2126 * This routine constructs the proper fields of the PLOGI IOCB and invokes 2127 * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command. 2128 * 2129 * Note that the ndlp reference count will be incremented by 1 for holding 2130 * the ndlp and the reference to ndlp will be stored into the context1 field 2131 * of the IOCB for the completion callback function to the PLOGI ELS command. 2132 * 2133 * Return code 2134 * 0 - Successfully issued a plogi for @vport 2135 * 1 - failed to issue a plogi for @vport 2136 **/ 2137 int 2138 lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) 2139 { 2140 struct lpfc_hba *phba = vport->phba; 2141 struct serv_parm *sp; 2142 struct lpfc_nodelist *ndlp; 2143 struct lpfc_iocbq *elsiocb; 2144 uint8_t *pcmd; 2145 uint16_t cmdsize; 2146 int ret; 2147 2148 ndlp = lpfc_findnode_did(vport, did); 2149 if (!ndlp) 2150 return 1; 2151 2152 /* Defer the processing of the issue PLOGI until after the 2153 * outstanding UNREG_RPI mbox command completes, unless we 2154 * are going offline. 
This logic does not apply for Fabric DIDs 2155 */ 2156 if ((ndlp->nlp_flag & NLP_UNREG_INP) && 2157 ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) && 2158 !(vport->fc_flag & FC_OFFLINE_MODE)) { 2159 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2160 "4110 Issue PLOGI x%x deferred " 2161 "on NPort x%x rpi x%x Data: x%px\n", 2162 ndlp->nlp_defer_did, ndlp->nlp_DID, 2163 ndlp->nlp_rpi, ndlp); 2164 2165 /* We can only defer 1st PLOGI */ 2166 if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING) 2167 ndlp->nlp_defer_did = did; 2168 return 0; 2169 } 2170 2171 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 2172 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 2173 ELS_CMD_PLOGI); 2174 if (!elsiocb) 2175 return 1; 2176 2177 spin_lock_irq(&ndlp->lock); 2178 ndlp->nlp_flag &= ~NLP_FCP_PRLI_RJT; 2179 spin_unlock_irq(&ndlp->lock); 2180 2181 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2182 2183 /* For PLOGI request, remainder of payload is service parameters */ 2184 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI; 2185 pcmd += sizeof(uint32_t); 2186 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); 2187 sp = (struct serv_parm *) pcmd; 2188 2189 /* 2190 * If we are a N-port connected to a Fabric, fix-up paramm's so logins 2191 * to device on remote loops work. 2192 */ 2193 if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP)) 2194 sp->cmn.altBbCredit = 1; 2195 2196 if (sp->cmn.fcphLow < FC_PH_4_3) 2197 sp->cmn.fcphLow = FC_PH_4_3; 2198 2199 if (sp->cmn.fcphHigh < FC_PH3) 2200 sp->cmn.fcphHigh = FC_PH3; 2201 2202 sp->cmn.valid_vendor_ver_level = 0; 2203 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); 2204 sp->cmn.bbRcvSizeMsb &= 0xF; 2205 2206 /* Check if the destination port supports VMID */ 2207 ndlp->vmid_support = 0; 2208 if (vport->vmid_priority_tagging) 2209 sp->cmn.priority_tagging = 1; 2210 else if (phba->cfg_vmid_app_header && 2211 bf_get(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags)) 2212 sp->cmn.app_hdr_support = 1; 2213 2214 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2215 "Issue PLOGI: did:x%x", 2216 did, 0, 0); 2217 2218 /* If our firmware supports this feature, convey that 2219 * information to the target using the vendor specific field. 2220 */ 2221 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) { 2222 sp->cmn.valid_vendor_ver_level = 1; 2223 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID); 2224 sp->un.vv.flags = cpu_to_be32(LPFC_VV_SUPPRESS_RSP); 2225 } 2226 2227 phba->fc_stat.elsXmitPLOGI++; 2228 elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi; 2229 2230 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2231 "Issue PLOGI: did:x%x refcnt %d", 2232 did, kref_read(&ndlp->kref), 0); 2233 elsiocb->context1 = lpfc_nlp_get(ndlp); 2234 if (!elsiocb->context1) { 2235 lpfc_els_free_iocb(phba, elsiocb); 2236 return 1; 2237 } 2238 2239 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2240 if (ret) { 2241 lpfc_els_free_iocb(phba, elsiocb); 2242 lpfc_nlp_put(ndlp); 2243 return 1; 2244 } 2245 2246 return 0; 2247 } 2248 2249 /** 2250 * lpfc_cmpl_els_prli - Completion callback function for prli 2251 * @phba: pointer to lpfc hba data structure. 2252 * @cmdiocb: pointer to lpfc command iocb data structure. 2253 * @rspiocb: pointer to lpfc response iocb data structure. 2254 * 2255 * This routine is the completion callback function for a Process Login 2256 * (PRLI) ELS command. The PRLI response IOCB status is checked for error 2257 * status. 
If there is error status reported, PRLI retry shall be attempted 2258 * by invoking the lpfc_els_retry() routine. Otherwise, the state 2259 * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this 2260 * ndlp to mark the PRLI completion. 2261 **/ 2262 static void 2263 lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2264 struct lpfc_iocbq *rspiocb) 2265 { 2266 struct lpfc_vport *vport = cmdiocb->vport; 2267 IOCB_t *irsp; 2268 struct lpfc_nodelist *ndlp; 2269 char *mode; 2270 u32 loglevel; 2271 2272 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2273 cmdiocb->context_un.rsp_iocb = rspiocb; 2274 2275 irsp = &(rspiocb->iocb); 2276 ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 2277 spin_lock_irq(&ndlp->lock); 2278 ndlp->nlp_flag &= ~NLP_PRLI_SND; 2279 2280 /* Driver supports multiple FC4 types. Counters matter. */ 2281 vport->fc_prli_sent--; 2282 ndlp->fc4_prli_sent--; 2283 spin_unlock_irq(&ndlp->lock); 2284 2285 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2286 "PRLI cmpl: status:x%x/x%x did:x%x", 2287 irsp->ulpStatus, irsp->un.ulpWord[4], 2288 ndlp->nlp_DID); 2289 2290 /* PRLI completes to NPort <nlp_DID> */ 2291 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2292 "0103 PRLI completes to NPort x%06x " 2293 "Data: x%x x%x x%x x%x\n", 2294 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 2295 vport->num_disc_nodes, ndlp->fc4_prli_sent); 2296 2297 /* Check to see if link went down during discovery */ 2298 if (lpfc_els_chk_latt(vport)) 2299 goto out; 2300 2301 if (irsp->ulpStatus) { 2302 /* Check for retry */ 2303 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2304 /* ELS command is being retried */ 2305 goto out; 2306 } 2307 2308 /* If we don't send GFT_ID to Fabric, a PRLI error 2309 * could be expected. 2310 */ 2311 if ((vport->fc_flag & FC_FABRIC) || 2312 (vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH)) { 2313 mode = KERN_ERR; 2314 loglevel = LOG_TRACE_EVENT; 2315 } else { 2316 mode = KERN_INFO; 2317 loglevel = LOG_ELS; 2318 } 2319 2320 /* PRLI failed */ 2321 lpfc_printf_vlog(vport, mode, loglevel, 2322 "2754 PRLI failure DID:%06X Status:x%x/x%x, " 2323 "data: x%x\n", 2324 ndlp->nlp_DID, irsp->ulpStatus, 2325 irsp->un.ulpWord[4], ndlp->fc4_prli_sent); 2326 2327 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2328 if (!lpfc_error_lost_link(irsp)) 2329 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2330 NLP_EVT_CMPL_PRLI); 2331 2332 /* 2333 * For P2P topology, retain the node so that PLOGI can be 2334 * attempted on it again. 2335 */ 2336 if (vport->fc_flag & FC_PT2PT) 2337 goto out; 2338 2339 /* As long as this node is not registered with the SCSI 2340 * or NVMe transport and no other PRLIs are outstanding, 2341 * it is no longer an active node. Otherwise devloss 2342 * handles the final cleanup. 2343 */ 2344 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && 2345 !ndlp->fc4_prli_sent) { 2346 spin_lock_irq(&ndlp->lock); 2347 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2348 spin_unlock_irq(&ndlp->lock); 2349 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2350 NLP_EVT_DEVICE_RM); 2351 } 2352 } else { 2353 /* Good status, call state machine. However, if another 2354 * PRLI is outstanding, don't call the state machine 2355 * because final disposition to Mapped or Unmapped is 2356 * completed there. 
2357 */ 2358 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2359 NLP_EVT_CMPL_PRLI); 2360 } 2361 2362 out: 2363 lpfc_els_free_iocb(phba, cmdiocb); 2364 lpfc_nlp_put(ndlp); 2365 return; 2366 } 2367 2368 /** 2369 * lpfc_issue_els_prli - Issue a prli iocb command for a vport 2370 * @vport: pointer to a host virtual N_Port data structure. 2371 * @ndlp: pointer to a node-list data structure. 2372 * @retry: number of retries to the command IOCB. 2373 * 2374 * This routine issues a Process Login (PRLI) ELS command for the 2375 * @vport. The PRLI service parameters are set up in the payload of the 2376 * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine 2377 * is put to the IOCB completion callback func field before invoking the 2378 * routine lpfc_sli_issue_iocb() to send out PRLI command. 2379 * 2380 * Note that the ndlp reference count will be incremented by 1 for holding the 2381 * ndlp and the reference to ndlp will be stored into the context1 field of 2382 * the IOCB for the completion callback function to the PRLI ELS command. 2383 * 2384 * Return code 2385 * 0 - successfully issued prli iocb command for @vport 2386 * 1 - failed to issue prli iocb command for @vport 2387 **/ 2388 int 2389 lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2390 uint8_t retry) 2391 { 2392 int rc = 0; 2393 struct lpfc_hba *phba = vport->phba; 2394 PRLI *npr; 2395 struct lpfc_nvme_prli *npr_nvme; 2396 struct lpfc_iocbq *elsiocb; 2397 uint8_t *pcmd; 2398 uint16_t cmdsize; 2399 u32 local_nlp_type, elscmd; 2400 2401 /* 2402 * If we are in RSCN mode, the FC4 types supported from a 2403 * previous GFT_ID command may not be accurate. So, if we 2404 * are a NVME Initiator, always look for the possibility of 2405 * the remote NPort beng a NVME Target. 2406 */ 2407 if (phba->sli_rev == LPFC_SLI_REV4 && 2408 vport->fc_flag & FC_RSCN_MODE && 2409 vport->nvmei_support) 2410 ndlp->nlp_fc4_type |= NLP_FC4_NVME; 2411 local_nlp_type = ndlp->nlp_fc4_type; 2412 2413 /* This routine will issue 1 or 2 PRLIs, so zero all the ndlp 2414 * fields here before any of them can complete. 2415 */ 2416 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); 2417 ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR); 2418 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 2419 ndlp->nlp_flag &= ~(NLP_FIRSTBURST | NLP_NPR_2B_DISC); 2420 ndlp->nvme_fb_size = 0; 2421 2422 send_next_prli: 2423 if (local_nlp_type & NLP_FC4_FCP) { 2424 /* Payload is 4 + 16 = 20 x14 bytes. */ 2425 cmdsize = (sizeof(uint32_t) + sizeof(PRLI)); 2426 elscmd = ELS_CMD_PRLI; 2427 } else if (local_nlp_type & NLP_FC4_NVME) { 2428 /* Payload is 4 + 20 = 24 x18 bytes. */ 2429 cmdsize = (sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli)); 2430 elscmd = ELS_CMD_NVMEPRLI; 2431 } else { 2432 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2433 "3083 Unknown FC_TYPE x%x ndlp x%06x\n", 2434 ndlp->nlp_fc4_type, ndlp->nlp_DID); 2435 return 1; 2436 } 2437 2438 /* SLI3 ports don't support NVME. If this rport is a strict NVME 2439 * FC4 type, implicitly LOGO. 
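	 * No LOGO ELS is put on the wire; the DEVICE_RM event below removes
	 * the node, which acts as an implicit logout.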
2440 */ 2441 if (phba->sli_rev == LPFC_SLI_REV3 && 2442 ndlp->nlp_fc4_type == NLP_FC4_NVME) { 2443 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2444 "3088 Rport fc4 type 0x%x not supported by SLI3 adapter\n", 2445 ndlp->nlp_type); 2446 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 2447 return 1; 2448 } 2449 2450 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2451 ndlp->nlp_DID, elscmd); 2452 if (!elsiocb) 2453 return 1; 2454 2455 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2456 2457 /* For PRLI request, remainder of payload is service parameters */ 2458 memset(pcmd, 0, cmdsize); 2459 2460 if (local_nlp_type & NLP_FC4_FCP) { 2461 /* Remainder of payload is FCP PRLI parameter page. 2462 * Note: this data structure is defined as 2463 * BE/LE in the structure definition so no 2464 * byte swap call is made. 2465 */ 2466 *((uint32_t *)(pcmd)) = ELS_CMD_PRLI; 2467 pcmd += sizeof(uint32_t); 2468 npr = (PRLI *)pcmd; 2469 2470 /* 2471 * If our firmware version is 3.20 or later, 2472 * set the following bits for FC-TAPE support. 2473 */ 2474 if (phba->vpd.rev.feaLevelHigh >= 0x02) { 2475 npr->ConfmComplAllowed = 1; 2476 npr->Retry = 1; 2477 npr->TaskRetryIdReq = 1; 2478 } 2479 npr->estabImagePair = 1; 2480 npr->readXferRdyDis = 1; 2481 if (vport->cfg_first_burst_size) 2482 npr->writeXferRdyDis = 1; 2483 2484 /* For FCP support */ 2485 npr->prliType = PRLI_FCP_TYPE; 2486 npr->initiatorFunc = 1; 2487 elsiocb->iocb_flag |= LPFC_PRLI_FCP_REQ; 2488 2489 /* Remove FCP type - processed. */ 2490 local_nlp_type &= ~NLP_FC4_FCP; 2491 } else if (local_nlp_type & NLP_FC4_NVME) { 2492 /* Remainder of payload is NVME PRLI parameter page. 2493 * This data structure is the newer definition that 2494 * uses bf macros so a byte swap is required. 2495 */ 2496 *((uint32_t *)(pcmd)) = ELS_CMD_NVMEPRLI; 2497 pcmd += sizeof(uint32_t); 2498 npr_nvme = (struct lpfc_nvme_prli *)pcmd; 2499 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); 2500 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ 2501 if (phba->nsler) { 2502 bf_set(prli_nsler, npr_nvme, 1); 2503 bf_set(prli_conf, npr_nvme, 1); 2504 } 2505 2506 /* Only initiators request first burst. */ 2507 if ((phba->cfg_nvme_enable_fb) && 2508 !phba->nvmet_support) 2509 bf_set(prli_fba, npr_nvme, 1); 2510 2511 if (phba->nvmet_support) { 2512 bf_set(prli_tgt, npr_nvme, 1); 2513 bf_set(prli_disc, npr_nvme, 1); 2514 } else { 2515 bf_set(prli_init, npr_nvme, 1); 2516 bf_set(prli_conf, npr_nvme, 1); 2517 } 2518 2519 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); 2520 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); 2521 elsiocb->iocb_flag |= LPFC_PRLI_NVME_REQ; 2522 2523 /* Remove NVME type - processed. */ 2524 local_nlp_type &= ~NLP_FC4_NVME; 2525 } 2526 2527 phba->fc_stat.elsXmitPRLI++; 2528 elsiocb->iocb_cmpl = lpfc_cmpl_els_prli; 2529 spin_lock_irq(&ndlp->lock); 2530 ndlp->nlp_flag |= NLP_PRLI_SND; 2531 2532 /* The vport counters are used for lpfc_scan_finished, but 2533 * the ndlp is used to track outstanding PRLIs for different 2534 * FC4 types. 
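	 * Both counters are decremented again in lpfc_cmpl_els_prli() when
	 * each PRLI completes.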
2535 */ 2536 vport->fc_prli_sent++; 2537 ndlp->fc4_prli_sent++; 2538 spin_unlock_irq(&ndlp->lock); 2539 2540 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2541 "Issue PRLI: did:x%x refcnt %d", 2542 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2543 elsiocb->context1 = lpfc_nlp_get(ndlp); 2544 if (!elsiocb->context1) { 2545 lpfc_els_free_iocb(phba, elsiocb); 2546 goto err; 2547 } 2548 2549 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2550 if (rc == IOCB_ERROR) { 2551 lpfc_els_free_iocb(phba, elsiocb); 2552 lpfc_nlp_put(ndlp); 2553 goto err; 2554 } 2555 2556 2557 /* The driver supports 2 FC4 types. Make sure 2558 * a PRLI is issued for all types before exiting. 2559 */ 2560 if (phba->sli_rev == LPFC_SLI_REV4 && 2561 local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME)) 2562 goto send_next_prli; 2563 else 2564 return 0; 2565 2566 err: 2567 spin_lock_irq(&ndlp->lock); 2568 ndlp->nlp_flag &= ~NLP_PRLI_SND; 2569 spin_unlock_irq(&ndlp->lock); 2570 return 1; 2571 } 2572 2573 /** 2574 * lpfc_rscn_disc - Perform rscn discovery for a vport 2575 * @vport: pointer to a host virtual N_Port data structure. 2576 * 2577 * This routine performs Registration State Change Notification (RSCN) 2578 * discovery for a @vport. If the @vport's node port recovery count is not 2579 * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all 2580 * the nodes that need recovery. If none of the PLOGI were needed through 2581 * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be 2582 * invoked to check and handle possible more RSCN came in during the period 2583 * of processing the current ones. 2584 **/ 2585 static void 2586 lpfc_rscn_disc(struct lpfc_vport *vport) 2587 { 2588 lpfc_can_disctmo(vport); 2589 2590 /* RSCN discovery */ 2591 /* go thru NPR nodes and issue ELS PLOGIs */ 2592 if (vport->fc_npr_cnt) 2593 if (lpfc_els_disc_plogi(vport)) 2594 return; 2595 2596 lpfc_end_rscn(vport); 2597 } 2598 2599 /** 2600 * lpfc_adisc_done - Complete the adisc phase of discovery 2601 * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs. 2602 * 2603 * This function is called when the final ADISC is completed during discovery. 2604 * This function handles clearing link attention or issuing reg_vpi depending 2605 * on whether npiv is enabled. This function also kicks off the PLOGI phase of 2606 * discovery. 2607 * This function is called with no locks held. 2608 **/ 2609 static void 2610 lpfc_adisc_done(struct lpfc_vport *vport) 2611 { 2612 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2613 struct lpfc_hba *phba = vport->phba; 2614 2615 /* 2616 * For NPIV, cmpl_reg_vpi will set port_state to READY, 2617 * and continue discovery. 2618 */ 2619 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 2620 !(vport->fc_flag & FC_RSCN_MODE) && 2621 (phba->sli_rev < LPFC_SLI_REV4)) { 2622 2623 /* 2624 * If link is down, clear_la and reg_vpi will be done after 2625 * flogi following a link up event 2626 */ 2627 if (!lpfc_is_link_up(phba)) 2628 return; 2629 2630 /* The ADISCs are complete. Doesn't matter if they 2631 * succeeded or failed because the ADISC completion 2632 * routine guarantees to call the state machine and 2633 * the RPI is either unregistered (failed ADISC response) 2634 * or the RPI is still valid and the node is marked 2635 * mapped for a target. The exchanges should be in the 2636 * correct state. This code is specific to SLI3. 
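		 * CLEAR_LA re-enables link attention processing and REG_VPI
		 * registers the virtual port; its completion moves the port
		 * to READY and continues discovery.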
2637 */ 2638 lpfc_issue_clear_la(phba, vport); 2639 lpfc_issue_reg_vpi(phba, vport); 2640 return; 2641 } 2642 /* 2643 * For SLI2, we need to set port_state to READY 2644 * and continue discovery. 2645 */ 2646 if (vport->port_state < LPFC_VPORT_READY) { 2647 /* If we get here, there is nothing to ADISC */ 2648 lpfc_issue_clear_la(phba, vport); 2649 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) { 2650 vport->num_disc_nodes = 0; 2651 /* go thru NPR list, issue ELS PLOGIs */ 2652 if (vport->fc_npr_cnt) 2653 lpfc_els_disc_plogi(vport); 2654 if (!vport->num_disc_nodes) { 2655 spin_lock_irq(shost->host_lock); 2656 vport->fc_flag &= ~FC_NDISC_ACTIVE; 2657 spin_unlock_irq(shost->host_lock); 2658 lpfc_can_disctmo(vport); 2659 lpfc_end_rscn(vport); 2660 } 2661 } 2662 vport->port_state = LPFC_VPORT_READY; 2663 } else 2664 lpfc_rscn_disc(vport); 2665 } 2666 2667 /** 2668 * lpfc_more_adisc - Issue more adisc as needed 2669 * @vport: pointer to a host virtual N_Port data structure. 2670 * 2671 * This routine determines whether there are more ndlps on a @vport 2672 * node list need to have Address Discover (ADISC) issued. If so, it will 2673 * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's 2674 * remaining nodes which need to have ADISC sent. 2675 **/ 2676 void 2677 lpfc_more_adisc(struct lpfc_vport *vport) 2678 { 2679 if (vport->num_disc_nodes) 2680 vport->num_disc_nodes--; 2681 /* Continue discovery with <num_disc_nodes> ADISCs to go */ 2682 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2683 "0210 Continue discovery with %d ADISCs to go " 2684 "Data: x%x x%x x%x\n", 2685 vport->num_disc_nodes, vport->fc_adisc_cnt, 2686 vport->fc_flag, vport->port_state); 2687 /* Check to see if there are more ADISCs to be sent */ 2688 if (vport->fc_flag & FC_NLP_MORE) { 2689 lpfc_set_disctmo(vport); 2690 /* go thru NPR nodes and issue any remaining ELS ADISCs */ 2691 lpfc_els_disc_adisc(vport); 2692 } 2693 if (!vport->num_disc_nodes) 2694 lpfc_adisc_done(vport); 2695 return; 2696 } 2697 2698 /** 2699 * lpfc_cmpl_els_adisc - Completion callback function for adisc 2700 * @phba: pointer to lpfc hba data structure. 2701 * @cmdiocb: pointer to lpfc command iocb data structure. 2702 * @rspiocb: pointer to lpfc response iocb data structure. 2703 * 2704 * This routine is the completion function for issuing the Address Discover 2705 * (ADISC) command. It first checks to see whether link went down during 2706 * the discovery process. If so, the node will be marked as node port 2707 * recovery for issuing discover IOCB by the link attention handler and 2708 * exit. Otherwise, the response status is checked. If error was reported 2709 * in the response status, the ADISC command shall be retried by invoking 2710 * the lpfc_els_retry() routine. Otherwise, if no error was reported in 2711 * the response status, the state machine is invoked to set transition 2712 * with respect to NLP_EVT_CMPL_ADISC event. 
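 * In either case, if this ADISC was part of a discovery batch and nodes
 * remain, lpfc_more_adisc() is invoked to issue the remaining ADISCs or to
 * finish the ADISC phase of discovery.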
2713 **/ 2714 static void 2715 lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2716 struct lpfc_iocbq *rspiocb) 2717 { 2718 struct lpfc_vport *vport = cmdiocb->vport; 2719 IOCB_t *irsp; 2720 struct lpfc_nodelist *ndlp; 2721 int disc; 2722 2723 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2724 cmdiocb->context_un.rsp_iocb = rspiocb; 2725 2726 irsp = &(rspiocb->iocb); 2727 ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 2728 2729 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2730 "ADISC cmpl: status:x%x/x%x did:x%x", 2731 irsp->ulpStatus, irsp->un.ulpWord[4], 2732 ndlp->nlp_DID); 2733 2734 /* Since ndlp can be freed in the disc state machine, note if this node 2735 * is being used during discovery. 2736 */ 2737 spin_lock_irq(&ndlp->lock); 2738 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 2739 ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC); 2740 spin_unlock_irq(&ndlp->lock); 2741 /* ADISC completes to NPort <nlp_DID> */ 2742 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2743 "0104 ADISC completes to NPort x%x " 2744 "Data: x%x x%x x%x x%x x%x\n", 2745 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 2746 irsp->ulpTimeout, disc, vport->num_disc_nodes); 2747 /* Check to see if link went down during discovery */ 2748 if (lpfc_els_chk_latt(vport)) { 2749 spin_lock_irq(&ndlp->lock); 2750 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2751 spin_unlock_irq(&ndlp->lock); 2752 goto out; 2753 } 2754 2755 if (irsp->ulpStatus) { 2756 /* Check for retry */ 2757 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2758 /* ELS command is being retried */ 2759 if (disc) { 2760 spin_lock_irq(&ndlp->lock); 2761 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2762 spin_unlock_irq(&ndlp->lock); 2763 lpfc_set_disctmo(vport); 2764 } 2765 goto out; 2766 } 2767 /* ADISC failed */ 2768 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2769 "2755 ADISC failure DID:%06X Status:x%x/x%x\n", 2770 ndlp->nlp_DID, irsp->ulpStatus, 2771 irsp->un.ulpWord[4]); 2772 2773 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2774 NLP_EVT_CMPL_ADISC); 2775 2776 /* As long as this node is not registered with the SCSI or NVMe 2777 * transport, it is no longer an active node. Otherwise 2778 * devloss handles the final cleanup. 2779 */ 2780 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { 2781 spin_lock_irq(&ndlp->lock); 2782 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2783 spin_unlock_irq(&ndlp->lock); 2784 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2785 NLP_EVT_DEVICE_RM); 2786 } 2787 } else 2788 /* Good status, call state machine */ 2789 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2790 NLP_EVT_CMPL_ADISC); 2791 2792 /* Check to see if there are more ADISCs to be sent */ 2793 if (disc && vport->num_disc_nodes) 2794 lpfc_more_adisc(vport); 2795 out: 2796 lpfc_els_free_iocb(phba, cmdiocb); 2797 lpfc_nlp_put(ndlp); 2798 return; 2799 } 2800 2801 /** 2802 * lpfc_issue_els_adisc - Issue an address discover iocb to an node on a vport 2803 * @vport: pointer to a virtual N_Port data structure. 2804 * @ndlp: pointer to a node-list data structure. 2805 * @retry: number of retries to the command IOCB. 2806 * 2807 * This routine issues an Address Discover (ADISC) for an @ndlp on a 2808 * @vport. It prepares the payload of the ADISC ELS command, updates the 2809 * and states of the ndlp, and invokes the lpfc_sli_issue_iocb() routine 2810 * to issue the ADISC ELS command. 
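 * The NLP_ADISC_SND flag is set on the @ndlp while the ADISC is outstanding
 * and is cleared again in the completion or error path.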
2811 * 2812 * Note that the ndlp reference count will be incremented by 1 for holding the 2813 * ndlp and the reference to ndlp will be stored into the context1 field of 2814 * the IOCB for the completion callback function to the ADISC ELS command. 2815 * 2816 * Return code 2817 * 0 - successfully issued adisc 2818 * 1 - failed to issue adisc 2819 **/ 2820 int 2821 lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2822 uint8_t retry) 2823 { 2824 int rc = 0; 2825 struct lpfc_hba *phba = vport->phba; 2826 ADISC *ap; 2827 struct lpfc_iocbq *elsiocb; 2828 uint8_t *pcmd; 2829 uint16_t cmdsize; 2830 2831 cmdsize = (sizeof(uint32_t) + sizeof(ADISC)); 2832 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2833 ndlp->nlp_DID, ELS_CMD_ADISC); 2834 if (!elsiocb) 2835 return 1; 2836 2837 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2838 2839 /* For ADISC request, remainder of payload is service parameters */ 2840 *((uint32_t *) (pcmd)) = ELS_CMD_ADISC; 2841 pcmd += sizeof(uint32_t); 2842 2843 /* Fill in ADISC payload */ 2844 ap = (ADISC *) pcmd; 2845 ap->hardAL_PA = phba->fc_pref_ALPA; 2846 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 2847 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 2848 ap->DID = be32_to_cpu(vport->fc_myDID); 2849 2850 phba->fc_stat.elsXmitADISC++; 2851 elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc; 2852 spin_lock_irq(&ndlp->lock); 2853 ndlp->nlp_flag |= NLP_ADISC_SND; 2854 spin_unlock_irq(&ndlp->lock); 2855 elsiocb->context1 = lpfc_nlp_get(ndlp); 2856 if (!elsiocb->context1) { 2857 lpfc_els_free_iocb(phba, elsiocb); 2858 goto err; 2859 } 2860 2861 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2862 "Issue ADISC: did:x%x refcnt %d", 2863 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2864 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2865 if (rc == IOCB_ERROR) { 2866 lpfc_els_free_iocb(phba, elsiocb); 2867 lpfc_nlp_put(ndlp); 2868 goto err; 2869 } 2870 2871 return 0; 2872 2873 err: 2874 spin_lock_irq(&ndlp->lock); 2875 ndlp->nlp_flag &= ~NLP_ADISC_SND; 2876 spin_unlock_irq(&ndlp->lock); 2877 return 1; 2878 } 2879 2880 /** 2881 * lpfc_cmpl_els_logo - Completion callback function for logo 2882 * @phba: pointer to lpfc hba data structure. 2883 * @cmdiocb: pointer to lpfc command iocb data structure. 2884 * @rspiocb: pointer to lpfc response iocb data structure. 2885 * 2886 * This routine is the completion function for issuing the ELS Logout (LOGO) 2887 * command. If no error status was reported from the LOGO response, the 2888 * state machine of the associated ndlp shall be invoked for transition with 2889 * respect to NLP_EVT_CMPL_LOGO event. 
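 * A failed LOGO is never retried; for FCP or NVME target nodes the routine
 * restarts discovery so that a fresh PLOGI can recover the login.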
2890 **/ 2891 static void 2892 lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2893 struct lpfc_iocbq *rspiocb) 2894 { 2895 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 2896 struct lpfc_vport *vport = ndlp->vport; 2897 IOCB_t *irsp; 2898 unsigned long flags; 2899 uint32_t skip_recovery = 0; 2900 int wake_up_waiter = 0; 2901 2902 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2903 cmdiocb->context_un.rsp_iocb = rspiocb; 2904 2905 irsp = &(rspiocb->iocb); 2906 spin_lock_irq(&ndlp->lock); 2907 ndlp->nlp_flag &= ~NLP_LOGO_SND; 2908 if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) { 2909 wake_up_waiter = 1; 2910 ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO; 2911 } 2912 spin_unlock_irq(&ndlp->lock); 2913 2914 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2915 "LOGO cmpl: status:x%x/x%x did:x%x", 2916 irsp->ulpStatus, irsp->un.ulpWord[4], 2917 ndlp->nlp_DID); 2918 2919 /* LOGO completes to NPort <nlp_DID> */ 2920 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2921 "0105 LOGO completes to NPort x%x " 2922 "refcnt %d nflags x%x Data: x%x x%x x%x x%x\n", 2923 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag, 2924 irsp->ulpStatus, irsp->un.ulpWord[4], 2925 irsp->ulpTimeout, vport->num_disc_nodes); 2926 2927 if (lpfc_els_chk_latt(vport)) { 2928 skip_recovery = 1; 2929 goto out; 2930 } 2931 2932 /* The LOGO will not be retried on failure. A LOGO was 2933 * issued to the remote rport and a ACC or RJT or no Answer are 2934 * all acceptable. Note the failure and move forward with 2935 * discovery. The PLOGI will retry. 2936 */ 2937 if (irsp->ulpStatus) { 2938 /* LOGO failed */ 2939 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2940 "2756 LOGO failure, No Retry DID:%06X Status:x%x/x%x\n", 2941 ndlp->nlp_DID, irsp->ulpStatus, 2942 irsp->un.ulpWord[4]); 2943 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2944 if (lpfc_error_lost_link(irsp)) { 2945 skip_recovery = 1; 2946 goto out; 2947 } 2948 } 2949 2950 /* Call state machine. This will unregister the rpi if needed. */ 2951 lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO); 2952 2953 /* The driver sets this flag for an NPIV instance that doesn't want to 2954 * log into the remote port. 2955 */ 2956 if (ndlp->nlp_flag & NLP_TARGET_REMOVE) { 2957 spin_lock_irq(&ndlp->lock); 2958 if (phba->sli_rev == LPFC_SLI_REV4) 2959 ndlp->nlp_flag |= NLP_RELEASE_RPI; 2960 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2961 spin_unlock_irq(&ndlp->lock); 2962 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2963 NLP_EVT_DEVICE_RM); 2964 lpfc_els_free_iocb(phba, cmdiocb); 2965 lpfc_nlp_put(ndlp); 2966 2967 /* Presume the node was released. */ 2968 return; 2969 } 2970 2971 out: 2972 /* Driver is done with the IO. */ 2973 lpfc_els_free_iocb(phba, cmdiocb); 2974 lpfc_nlp_put(ndlp); 2975 2976 /* At this point, the LOGO processing is complete. NOTE: For a 2977 * pt2pt topology, we are assuming the NPortID will only change 2978 * on link up processing. For a LOGO / PLOGI initiated by the 2979 * Initiator, we are assuming the NPortID is not going to change. 2980 */ 2981 2982 if (wake_up_waiter && ndlp->logo_waitq) 2983 wake_up(ndlp->logo_waitq); 2984 /* 2985 * If the node is a target, the handling attempts to recover the port. 2986 * For any other port type, the rpi is unregistered as an implicit 2987 * LOGO. 
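	 * skip_recovery is set when a link attention event was seen or the
	 * LOGO completed with a lost-link error; in those cases recovery is
	 * not started here.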
2988 */ 2989 if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) && 2990 skip_recovery == 0) { 2991 lpfc_cancel_retry_delay_tmo(vport, ndlp); 2992 spin_lock_irqsave(&ndlp->lock, flags); 2993 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2994 spin_unlock_irqrestore(&ndlp->lock, flags); 2995 2996 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2997 "3187 LOGO completes to NPort x%x: Start " 2998 "Recovery Data: x%x x%x x%x x%x\n", 2999 ndlp->nlp_DID, irsp->ulpStatus, 3000 irsp->un.ulpWord[4], irsp->ulpTimeout, 3001 vport->num_disc_nodes); 3002 lpfc_disc_start(vport); 3003 return; 3004 } 3005 3006 /* Cleanup path for failed REG_RPI handling. If REG_RPI fails, the 3007 * driver sends a LOGO to the rport to cleanup. For fabric and 3008 * initiator ports cleanup the node as long as it the node is not 3009 * register with the transport. 3010 */ 3011 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { 3012 spin_lock_irq(&ndlp->lock); 3013 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 3014 spin_unlock_irq(&ndlp->lock); 3015 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 3016 NLP_EVT_DEVICE_RM); 3017 } 3018 } 3019 3020 /** 3021 * lpfc_issue_els_logo - Issue a logo to an node on a vport 3022 * @vport: pointer to a virtual N_Port data structure. 3023 * @ndlp: pointer to a node-list data structure. 3024 * @retry: number of retries to the command IOCB. 3025 * 3026 * This routine constructs and issues an ELS Logout (LOGO) iocb command 3027 * to a remote node, referred by an @ndlp on a @vport. It constructs the 3028 * payload of the IOCB, properly sets up the @ndlp state, and invokes the 3029 * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command. 3030 * 3031 * Note that the ndlp reference count will be incremented by 1 for holding the 3032 * ndlp and the reference to ndlp will be stored into the context1 field of 3033 * the IOCB for the completion callback function to the LOGO ELS command. 
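 * If a LOGO is already outstanding on the @ndlp (NLP_LOGO_SND is set), the
 * routine returns success without issuing a second one.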
3034 * 3035 * Callers of this routine are expected to unregister the RPI first 3036 * 3037 * Return code 3038 * 0 - successfully issued logo 3039 * 1 - failed to issue logo 3040 **/ 3041 int 3042 lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 3043 uint8_t retry) 3044 { 3045 struct lpfc_hba *phba = vport->phba; 3046 struct lpfc_iocbq *elsiocb; 3047 uint8_t *pcmd; 3048 uint16_t cmdsize; 3049 int rc; 3050 3051 spin_lock_irq(&ndlp->lock); 3052 if (ndlp->nlp_flag & NLP_LOGO_SND) { 3053 spin_unlock_irq(&ndlp->lock); 3054 return 0; 3055 } 3056 spin_unlock_irq(&ndlp->lock); 3057 3058 cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name); 3059 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3060 ndlp->nlp_DID, ELS_CMD_LOGO); 3061 if (!elsiocb) 3062 return 1; 3063 3064 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3065 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 3066 pcmd += sizeof(uint32_t); 3067 3068 /* Fill in LOGO payload */ 3069 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 3070 pcmd += sizeof(uint32_t); 3071 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 3072 3073 phba->fc_stat.elsXmitLOGO++; 3074 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo; 3075 spin_lock_irq(&ndlp->lock); 3076 ndlp->nlp_flag |= NLP_LOGO_SND; 3077 ndlp->nlp_flag &= ~NLP_ISSUE_LOGO; 3078 spin_unlock_irq(&ndlp->lock); 3079 elsiocb->context1 = lpfc_nlp_get(ndlp); 3080 if (!elsiocb->context1) { 3081 lpfc_els_free_iocb(phba, elsiocb); 3082 goto err; 3083 } 3084 3085 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3086 "Issue LOGO: did:x%x refcnt %d", 3087 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3088 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3089 if (rc == IOCB_ERROR) { 3090 lpfc_els_free_iocb(phba, elsiocb); 3091 lpfc_nlp_put(ndlp); 3092 goto err; 3093 } 3094 3095 spin_lock_irq(&ndlp->lock); 3096 ndlp->nlp_prev_state = ndlp->nlp_state; 3097 spin_unlock_irq(&ndlp->lock); 3098 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 3099 return 0; 3100 3101 err: 3102 spin_lock_irq(&ndlp->lock); 3103 ndlp->nlp_flag &= ~NLP_LOGO_SND; 3104 spin_unlock_irq(&ndlp->lock); 3105 return 1; 3106 } 3107 3108 /** 3109 * lpfc_cmpl_els_cmd - Completion callback function for generic els command 3110 * @phba: pointer to lpfc hba data structure. 3111 * @cmdiocb: pointer to lpfc command iocb data structure. 3112 * @rspiocb: pointer to lpfc response iocb data structure. 3113 * 3114 * This routine is a generic completion callback function for ELS commands. 3115 * Specifically, it is the callback function which does not need to perform 3116 * any command specific operations. It is currently used by the ELS command 3117 * issuing routines for RSCN, lpfc_issue_els_rscn, and the ELS Fibre Channel 3118 * Address Resolution Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). 3119 * Other than certain debug loggings, this callback function simply invokes the 3120 * lpfc_els_chk_latt() routine to check whether link went down during the 3121 * discovery process. 
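 * The command IOCB and the ndlp reference taken when the command was issued
 * are released before returning.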
3122 **/ 3123 static void 3124 lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3125 struct lpfc_iocbq *rspiocb) 3126 { 3127 struct lpfc_vport *vport = cmdiocb->vport; 3128 struct lpfc_nodelist *free_ndlp; 3129 IOCB_t *irsp; 3130 3131 irsp = &rspiocb->iocb; 3132 3133 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3134 "ELS cmd cmpl: status:x%x/x%x did:x%x", 3135 irsp->ulpStatus, irsp->un.ulpWord[4], 3136 irsp->un.elsreq64.remoteID); 3137 3138 /* ELS cmd tag <ulpIoTag> completes */ 3139 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3140 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n", 3141 irsp->ulpIoTag, irsp->ulpStatus, 3142 irsp->un.ulpWord[4], irsp->ulpTimeout); 3143 3144 /* Check to see if link went down during discovery */ 3145 lpfc_els_chk_latt(vport); 3146 3147 free_ndlp = (struct lpfc_nodelist *)cmdiocb->context1; 3148 3149 lpfc_els_free_iocb(phba, cmdiocb); 3150 lpfc_nlp_put(free_ndlp); 3151 } 3152 3153 /** 3154 * lpfc_reg_fab_ctrl_node - RPI register the fabric controller node. 3155 * @vport: pointer to lpfc_vport data structure. 3156 * @fc_ndlp: pointer to the fabric controller (0xfffffd) node. 3157 * 3158 * This routine registers the rpi assigned to the fabric controller 3159 * NPort_ID (0xfffffd) with the port and moves the node to UNMAPPED 3160 * state triggering a registration with the SCSI transport. 3161 * 3162 * This routine is single out because the fabric controller node 3163 * does not receive a PLOGI. This routine is consumed by the 3164 * SCR and RDF ELS commands. Callers are expected to qualify 3165 * with SLI4 first. 3166 **/ 3167 static int 3168 lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp) 3169 { 3170 int rc = 0; 3171 struct lpfc_hba *phba = vport->phba; 3172 struct lpfc_nodelist *ns_ndlp; 3173 LPFC_MBOXQ_t *mbox; 3174 struct lpfc_dmabuf *mp; 3175 3176 if (fc_ndlp->nlp_flag & NLP_RPI_REGISTERED) 3177 return rc; 3178 3179 ns_ndlp = lpfc_findnode_did(vport, NameServer_DID); 3180 if (!ns_ndlp) 3181 return -ENODEV; 3182 3183 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 3184 "0935 %s: Reg FC RPI x%x on FC DID x%x NSSte: x%x\n", 3185 __func__, fc_ndlp->nlp_rpi, fc_ndlp->nlp_DID, 3186 ns_ndlp->nlp_state); 3187 if (ns_ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 3188 return -ENODEV; 3189 3190 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3191 if (!mbox) { 3192 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3193 "0936 %s: no memory for reg_login " 3194 "Data: x%x x%x x%x x%x\n", __func__, 3195 fc_ndlp->nlp_DID, fc_ndlp->nlp_state, 3196 fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi); 3197 return -ENOMEM; 3198 } 3199 rc = lpfc_reg_rpi(phba, vport->vpi, fc_ndlp->nlp_DID, 3200 (u8 *)&vport->fc_sparam, mbox, fc_ndlp->nlp_rpi); 3201 if (rc) { 3202 rc = -EACCES; 3203 goto out; 3204 } 3205 3206 fc_ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; 3207 mbox->mbox_cmpl = lpfc_mbx_cmpl_fc_reg_login; 3208 mbox->ctx_ndlp = lpfc_nlp_get(fc_ndlp); 3209 if (!mbox->ctx_ndlp) { 3210 rc = -ENOMEM; 3211 goto out_mem; 3212 } 3213 3214 mbox->vport = vport; 3215 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 3216 if (rc == MBX_NOT_FINISHED) { 3217 rc = -ENODEV; 3218 lpfc_nlp_put(fc_ndlp); 3219 goto out_mem; 3220 } 3221 /* Success path. Exit. 
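	 * The node now waits in REG_LOGIN_ISSUE until
	 * lpfc_mbx_cmpl_fc_reg_login completes the registration.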
*/ 3222 lpfc_nlp_set_state(vport, fc_ndlp, 3223 NLP_STE_REG_LOGIN_ISSUE); 3224 return 0; 3225 3226 out_mem: 3227 fc_ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 3228 mp = (struct lpfc_dmabuf *)mbox->ctx_buf; 3229 lpfc_mbuf_free(phba, mp->virt, mp->phys); 3230 kfree(mp); 3231 3232 out: 3233 mempool_free(mbox, phba->mbox_mem_pool); 3234 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3235 "0938 %s: failed to format reg_login " 3236 "Data: x%x x%x x%x x%x\n", __func__, 3237 fc_ndlp->nlp_DID, fc_ndlp->nlp_state, 3238 fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi); 3239 return rc; 3240 } 3241 3242 /** 3243 * lpfc_cmpl_els_disc_cmd - Completion callback function for Discovery ELS cmd 3244 * @phba: pointer to lpfc hba data structure. 3245 * @cmdiocb: pointer to lpfc command iocb data structure. 3246 * @rspiocb: pointer to lpfc response iocb data structure. 3247 * 3248 * This routine is a generic completion callback function for Discovery ELS cmd. 3249 * Currently used by the ELS command issuing routines for the ELS State Change 3250 * Request (SCR), lpfc_issue_els_scr() and the ELS RDF, lpfc_issue_els_rdf(). 3251 * These commands will be retried once only for ELS timeout errors. 3252 **/ 3253 static void 3254 lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3255 struct lpfc_iocbq *rspiocb) 3256 { 3257 struct lpfc_vport *vport = cmdiocb->vport; 3258 IOCB_t *irsp; 3259 struct lpfc_els_rdf_rsp *prdf; 3260 struct lpfc_dmabuf *pcmd, *prsp; 3261 u32 *pdata; 3262 u32 cmd; 3263 struct lpfc_nodelist *ndlp = cmdiocb->context1; 3264 3265 irsp = &rspiocb->iocb; 3266 3267 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3268 "ELS cmd cmpl: status:x%x/x%x did:x%x", 3269 irsp->ulpStatus, irsp->un.ulpWord[4], 3270 irsp->un.elsreq64.remoteID); 3271 /* ELS cmd tag <ulpIoTag> completes */ 3272 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3273 "0217 ELS cmd tag x%x completes Data: x%x x%x x%x " 3274 "x%x\n", 3275 irsp->ulpIoTag, irsp->ulpStatus, 3276 irsp->un.ulpWord[4], irsp->ulpTimeout, 3277 cmdiocb->retry); 3278 3279 pcmd = (struct lpfc_dmabuf *)cmdiocb->context2; 3280 if (!pcmd) 3281 goto out; 3282 3283 pdata = (u32 *)pcmd->virt; 3284 if (!pdata) 3285 goto out; 3286 cmd = *pdata; 3287 3288 /* Only 1 retry for ELS Timeout only */ 3289 if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT && 3290 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == 3291 IOERR_SEQUENCE_TIMEOUT)) { 3292 cmdiocb->retry++; 3293 if (cmdiocb->retry <= 1) { 3294 switch (cmd) { 3295 case ELS_CMD_SCR: 3296 lpfc_issue_els_scr(vport, cmdiocb->retry); 3297 break; 3298 case ELS_CMD_EDC: 3299 lpfc_issue_els_edc(vport, cmdiocb->retry); 3300 break; 3301 case ELS_CMD_RDF: 3302 cmdiocb->context1 = NULL; /* save ndlp refcnt */ 3303 lpfc_issue_els_rdf(vport, cmdiocb->retry); 3304 break; 3305 } 3306 goto out; 3307 } 3308 phba->fc_stat.elsRetryExceeded++; 3309 } 3310 if (cmd == ELS_CMD_EDC) { 3311 /* must be called before checking uplStatus and returning */ 3312 lpfc_cmpl_els_edc(phba, cmdiocb, rspiocb); 3313 return; 3314 } 3315 if (irsp->ulpStatus) { 3316 /* ELS discovery cmd completes with error */ 3317 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 3318 "4203 ELS cmd x%x error: x%x x%X\n", cmd, 3319 irsp->ulpStatus, irsp->un.ulpWord[4]); 3320 goto out; 3321 } 3322 3323 /* The RDF response doesn't have any impact on the running driver 3324 * but the notification descriptors are dumped here for support. 
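	 * Each granted descriptor tag is logged together with the congestion
	 * signal and FPIN registrations currently held by the driver.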
3325 */ 3326 if (cmd == ELS_CMD_RDF) { 3327 int i; 3328 3329 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 3330 if (!prsp) 3331 goto out; 3332 3333 prdf = (struct lpfc_els_rdf_rsp *)prsp->virt; 3334 if (!prdf) 3335 goto out; 3336 3337 for (i = 0; i < ELS_RDF_REG_TAG_CNT && 3338 i < be32_to_cpu(prdf->reg_d1.reg_desc.count); i++) 3339 lpfc_printf_vlog(vport, KERN_INFO, 3340 LOG_ELS | LOG_CGN_MGMT, 3341 "4677 Fabric RDF Notification Grant " 3342 "Data: 0x%08x Reg: %x %x\n", 3343 be32_to_cpu( 3344 prdf->reg_d1.desc_tags[i]), 3345 phba->cgn_reg_signal, 3346 phba->cgn_reg_fpin); 3347 } 3348 3349 out: 3350 /* Check to see if link went down during discovery */ 3351 lpfc_els_chk_latt(vport); 3352 lpfc_els_free_iocb(phba, cmdiocb); 3353 lpfc_nlp_put(ndlp); 3354 return; 3355 } 3356 3357 /** 3358 * lpfc_issue_els_scr - Issue a scr to an node on a vport 3359 * @vport: pointer to a host virtual N_Port data structure. 3360 * @retry: retry counter for the command IOCB. 3361 * 3362 * This routine issues a State Change Request (SCR) to a fabric node 3363 * on a @vport. The remote node is Fabric Controller (0xfffffd). It 3364 * first search the @vport node list to find the matching ndlp. If no such 3365 * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An 3366 * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb() 3367 * routine is invoked to send the SCR IOCB. 3368 * 3369 * Note that the ndlp reference count will be incremented by 1 for holding the 3370 * ndlp and the reference to ndlp will be stored into the context1 field of 3371 * the IOCB for the completion callback function to the SCR ELS command. 3372 * 3373 * Return code 3374 * 0 - Successfully issued scr command 3375 * 1 - Failed to issue scr command 3376 **/ 3377 int 3378 lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry) 3379 { 3380 int rc = 0; 3381 struct lpfc_hba *phba = vport->phba; 3382 struct lpfc_iocbq *elsiocb; 3383 uint8_t *pcmd; 3384 uint16_t cmdsize; 3385 struct lpfc_nodelist *ndlp; 3386 3387 cmdsize = (sizeof(uint32_t) + sizeof(SCR)); 3388 3389 ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID); 3390 if (!ndlp) { 3391 ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID); 3392 if (!ndlp) 3393 return 1; 3394 lpfc_enqueue_node(vport, ndlp); 3395 } 3396 3397 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3398 ndlp->nlp_DID, ELS_CMD_SCR); 3399 if (!elsiocb) 3400 return 1; 3401 3402 if (phba->sli_rev == LPFC_SLI_REV4) { 3403 rc = lpfc_reg_fab_ctrl_node(vport, ndlp); 3404 if (rc) { 3405 lpfc_els_free_iocb(phba, elsiocb); 3406 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3407 "0937 %s: Failed to reg fc node, rc %d\n", 3408 __func__, rc); 3409 return 1; 3410 } 3411 } 3412 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3413 3414 *((uint32_t *) (pcmd)) = ELS_CMD_SCR; 3415 pcmd += sizeof(uint32_t); 3416 3417 /* For SCR, remainder of payload is SCR parameter page */ 3418 memset(pcmd, 0, sizeof(SCR)); 3419 ((SCR *) pcmd)->Function = SCR_FUNC_FULL; 3420 3421 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3422 "Issue SCR: did:x%x", 3423 ndlp->nlp_DID, 0, 0); 3424 3425 phba->fc_stat.elsXmitSCR++; 3426 elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd; 3427 elsiocb->context1 = lpfc_nlp_get(ndlp); 3428 if (!elsiocb->context1) { 3429 lpfc_els_free_iocb(phba, elsiocb); 3430 return 1; 3431 } 3432 3433 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3434 "Issue SCR: did:x%x refcnt %d", 3435 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3436 3437 rc = lpfc_sli_issue_iocb(phba, 
LPFC_ELS_RING, elsiocb, 0); 3438 if (rc == IOCB_ERROR) { 3439 lpfc_els_free_iocb(phba, elsiocb); 3440 lpfc_nlp_put(ndlp); 3441 return 1; 3442 } 3443 3444 return 0; 3445 } 3446 3447 /** 3448 * lpfc_issue_els_rscn - Issue an RSCN to the Fabric Controller (Fabric) 3449 * or the other nport (pt2pt). 3450 * @vport: pointer to a host virtual N_Port data structure. 3451 * @retry: number of retries to the command IOCB. 3452 * 3453 * This routine issues a RSCN to the Fabric Controller (DID 0xFFFFFD) 3454 * when connected to a fabric, or to the remote port when connected 3455 * in point-to-point mode. When sent to the Fabric Controller, it will 3456 * replay the RSCN to registered recipients. 3457 * 3458 * Note that the ndlp reference count will be incremented by 1 for holding the 3459 * ndlp and the reference to ndlp will be stored into the context1 field of 3460 * the IOCB for the completion callback function to the RSCN ELS command. 3461 * 3462 * Return code 3463 * 0 - Successfully issued RSCN command 3464 * 1 - Failed to issue RSCN command 3465 **/ 3466 int 3467 lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry) 3468 { 3469 int rc = 0; 3470 struct lpfc_hba *phba = vport->phba; 3471 struct lpfc_iocbq *elsiocb; 3472 struct lpfc_nodelist *ndlp; 3473 struct { 3474 struct fc_els_rscn rscn; 3475 struct fc_els_rscn_page portid; 3476 } *event; 3477 uint32_t nportid; 3478 uint16_t cmdsize = sizeof(*event); 3479 3480 /* Not supported for private loop */ 3481 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP && 3482 !(vport->fc_flag & FC_PUBLIC_LOOP)) 3483 return 1; 3484 3485 if (vport->fc_flag & FC_PT2PT) { 3486 /* find any mapped nport - that would be the other nport */ 3487 ndlp = lpfc_findnode_mapped(vport); 3488 if (!ndlp) 3489 return 1; 3490 } else { 3491 nportid = FC_FID_FCTRL; 3492 /* find the fabric controller node */ 3493 ndlp = lpfc_findnode_did(vport, nportid); 3494 if (!ndlp) { 3495 /* if one didn't exist, make one */ 3496 ndlp = lpfc_nlp_init(vport, nportid); 3497 if (!ndlp) 3498 return 1; 3499 lpfc_enqueue_node(vport, ndlp); 3500 } 3501 } 3502 3503 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3504 ndlp->nlp_DID, ELS_CMD_RSCN_XMT); 3505 3506 if (!elsiocb) 3507 return 1; 3508 3509 event = ((struct lpfc_dmabuf *)elsiocb->context2)->virt; 3510 3511 event->rscn.rscn_cmd = ELS_RSCN; 3512 event->rscn.rscn_page_len = sizeof(struct fc_els_rscn_page); 3513 event->rscn.rscn_plen = cpu_to_be16(cmdsize); 3514 3515 nportid = vport->fc_myDID; 3516 /* appears that page flags must be 0 for fabric to broadcast RSCN */ 3517 event->portid.rscn_page_flags = 0; 3518 event->portid.rscn_fid[0] = (nportid & 0x00FF0000) >> 16; 3519 event->portid.rscn_fid[1] = (nportid & 0x0000FF00) >> 8; 3520 event->portid.rscn_fid[2] = nportid & 0x000000FF; 3521 3522 phba->fc_stat.elsXmitRSCN++; 3523 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; 3524 elsiocb->context1 = lpfc_nlp_get(ndlp); 3525 if (!elsiocb->context1) { 3526 lpfc_els_free_iocb(phba, elsiocb); 3527 return 1; 3528 } 3529 3530 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3531 "Issue RSCN: did:x%x", 3532 ndlp->nlp_DID, 0, 0); 3533 3534 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3535 if (rc == IOCB_ERROR) { 3536 lpfc_els_free_iocb(phba, elsiocb); 3537 lpfc_nlp_put(ndlp); 3538 return 1; 3539 } 3540 3541 return 0; 3542 } 3543 3544 /** 3545 * lpfc_issue_els_farpr - Issue a farp to an node on a vport 3546 * @vport: pointer to a host virtual N_Port data structure. 3547 * @nportid: N_Port identifier to the remote node. 
3548 * @retry: number of retries to the command IOCB. 3549 * 3550 * This routine issues a Fibre Channel Address Resolution Response 3551 * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid) 3552 * is passed into the function. It first search the @vport node list to find 3553 * the matching ndlp. If no such ndlp is found, a new ndlp shall be created 3554 * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the 3555 * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command. 3556 * 3557 * Note that the ndlp reference count will be incremented by 1 for holding the 3558 * ndlp and the reference to ndlp will be stored into the context1 field of 3559 * the IOCB for the completion callback function to the FARPR ELS command. 3560 * 3561 * Return code 3562 * 0 - Successfully issued farpr command 3563 * 1 - Failed to issue farpr command 3564 **/ 3565 static int 3566 lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) 3567 { 3568 int rc = 0; 3569 struct lpfc_hba *phba = vport->phba; 3570 struct lpfc_iocbq *elsiocb; 3571 FARP *fp; 3572 uint8_t *pcmd; 3573 uint32_t *lp; 3574 uint16_t cmdsize; 3575 struct lpfc_nodelist *ondlp; 3576 struct lpfc_nodelist *ndlp; 3577 3578 cmdsize = (sizeof(uint32_t) + sizeof(FARP)); 3579 3580 ndlp = lpfc_findnode_did(vport, nportid); 3581 if (!ndlp) { 3582 ndlp = lpfc_nlp_init(vport, nportid); 3583 if (!ndlp) 3584 return 1; 3585 lpfc_enqueue_node(vport, ndlp); 3586 } 3587 3588 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3589 ndlp->nlp_DID, ELS_CMD_RNID); 3590 if (!elsiocb) 3591 return 1; 3592 3593 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3594 3595 *((uint32_t *) (pcmd)) = ELS_CMD_FARPR; 3596 pcmd += sizeof(uint32_t); 3597 3598 /* Fill in FARPR payload */ 3599 fp = (FARP *) (pcmd); 3600 memset(fp, 0, sizeof(FARP)); 3601 lp = (uint32_t *) pcmd; 3602 *lp++ = be32_to_cpu(nportid); 3603 *lp++ = be32_to_cpu(vport->fc_myDID); 3604 fp->Rflags = 0; 3605 fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE); 3606 3607 memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name)); 3608 memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 3609 ondlp = lpfc_findnode_did(vport, nportid); 3610 if (ondlp) { 3611 memcpy(&fp->OportName, &ondlp->nlp_portname, 3612 sizeof(struct lpfc_name)); 3613 memcpy(&fp->OnodeName, &ondlp->nlp_nodename, 3614 sizeof(struct lpfc_name)); 3615 } 3616 3617 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3618 "Issue FARPR: did:x%x", 3619 ndlp->nlp_DID, 0, 0); 3620 3621 phba->fc_stat.elsXmitFARPR++; 3622 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; 3623 elsiocb->context1 = lpfc_nlp_get(ndlp); 3624 if (!elsiocb->context1) { 3625 lpfc_els_free_iocb(phba, elsiocb); 3626 return 1; 3627 } 3628 3629 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3630 if (rc == IOCB_ERROR) { 3631 /* The additional lpfc_nlp_put will cause the following 3632 * lpfc_els_free_iocb routine to trigger the release of 3633 * the node. 3634 */ 3635 lpfc_els_free_iocb(phba, elsiocb); 3636 lpfc_nlp_put(ndlp); 3637 return 1; 3638 } 3639 /* This will cause the callback-function lpfc_cmpl_els_cmd to 3640 * trigger the release of the node. 3641 */ 3642 /* Don't release reference count as RDF is likely outstanding */ 3643 return 0; 3644 } 3645 3646 /** 3647 * lpfc_issue_els_rdf - Register for diagnostic functions from the fabric. 3648 * @vport: pointer to a host virtual N_Port data structure. 
3649 * @retry: retry counter for the command IOCB. 3650 * 3651 * This routine issues an ELS RDF to the Fabric Controller to register 3652 * for diagnostic functions. 3653 * 3654 * Note that the ndlp reference count will be incremented by 1 for holding the 3655 * ndlp and the reference to ndlp will be stored into the context1 field of 3656 * the IOCB for the completion callback function to the RDF ELS command. 3657 * 3658 * Return code 3659 * 0 - Successfully issued rdf command 3660 * 1 - Failed to issue rdf command 3661 **/ 3662 int 3663 lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry) 3664 { 3665 struct lpfc_hba *phba = vport->phba; 3666 struct lpfc_iocbq *elsiocb; 3667 struct lpfc_els_rdf_req *prdf; 3668 struct lpfc_nodelist *ndlp; 3669 uint16_t cmdsize; 3670 int rc; 3671 3672 cmdsize = sizeof(*prdf); 3673 3674 ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID); 3675 if (!ndlp) { 3676 ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID); 3677 if (!ndlp) 3678 return -ENODEV; 3679 lpfc_enqueue_node(vport, ndlp); 3680 } 3681 3682 /* RDF ELS is not required on an NPIV VN_Port. */ 3683 if (vport->port_type == LPFC_NPIV_PORT) 3684 return -EACCES; 3685 3686 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3687 ndlp->nlp_DID, ELS_CMD_RDF); 3688 if (!elsiocb) 3689 return -ENOMEM; 3690 3691 /* Configure the payload for the supported FPIN events. */ 3692 prdf = (struct lpfc_els_rdf_req *) 3693 (((struct lpfc_dmabuf *)elsiocb->context2)->virt); 3694 memset(prdf, 0, cmdsize); 3695 prdf->rdf.fpin_cmd = ELS_RDF; 3696 prdf->rdf.desc_len = cpu_to_be32(sizeof(struct lpfc_els_rdf_req) - 3697 sizeof(struct fc_els_rdf)); 3698 prdf->reg_d1.reg_desc.desc_tag = cpu_to_be32(ELS_DTAG_FPIN_REGISTER); 3699 prdf->reg_d1.reg_desc.desc_len = cpu_to_be32( 3700 FC_TLV_DESC_LENGTH_FROM_SZ(prdf->reg_d1)); 3701 prdf->reg_d1.reg_desc.count = cpu_to_be32(ELS_RDF_REG_TAG_CNT); 3702 prdf->reg_d1.desc_tags[0] = cpu_to_be32(ELS_DTAG_LNK_INTEGRITY); 3703 prdf->reg_d1.desc_tags[1] = cpu_to_be32(ELS_DTAG_DELIVERY); 3704 prdf->reg_d1.desc_tags[2] = cpu_to_be32(ELS_DTAG_PEER_CONGEST); 3705 prdf->reg_d1.desc_tags[3] = cpu_to_be32(ELS_DTAG_CONGESTION); 3706 3707 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3708 "6444 Xmit RDF to remote NPORT x%x Reg: %x %x\n", 3709 ndlp->nlp_DID, phba->cgn_reg_signal, 3710 phba->cgn_reg_fpin); 3711 3712 phba->cgn_fpin_frequency = LPFC_FPIN_INIT_FREQ; 3713 elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd; 3714 elsiocb->context1 = lpfc_nlp_get(ndlp); 3715 if (!elsiocb->context1) { 3716 lpfc_els_free_iocb(phba, elsiocb); 3717 return -EIO; 3718 } 3719 3720 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3721 "Issue RDF: did:x%x refcnt %d", 3722 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3723 3724 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3725 if (rc == IOCB_ERROR) { 3726 lpfc_els_free_iocb(phba, elsiocb); 3727 lpfc_nlp_put(ndlp); 3728 return -EIO; 3729 } 3730 return 0; 3731 } 3732 3733 /** 3734 * lpfc_els_rcv_rdf - Receive RDF ELS request from the fabric. 3735 * @vport: pointer to a host virtual N_Port data structure. 3736 * @cmdiocb: pointer to lpfc command iocb data structure. 3737 * @ndlp: pointer to a node-list data structure. 3738 * 3739 * A received RDF implies a possible change to fabric supported diagnostic 3740 * functions. This routine sends LS_ACC and then has the Nx_Port issue a new 3741 * RDF request to reregister for supported diagnostic functions. 
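*
* A minimal sketch of the resulting flow (illustrative only, mirroring the
* body of this routine): the driver first ACCs the received RDF and then
* re-registers its own FPIN descriptors with the fabric:
*
*	lpfc_els_rsp_acc(vport, ELS_CMD_RDF, cmdiocb, ndlp, NULL);
*	lpfc_issue_els_rdf(vport, 0);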
3742 * 3743 * Return code 3744 * 0 - Success 3745 * -EIO - Failed to process received RDF 3746 **/ 3747 static int 3748 lpfc_els_rcv_rdf(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 3749 struct lpfc_nodelist *ndlp) 3750 { 3751 /* Send LS_ACC */ 3752 if (lpfc_els_rsp_acc(vport, ELS_CMD_RDF, cmdiocb, ndlp, NULL)) { 3753 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3754 "1623 Failed to RDF_ACC from x%x for x%x\n", 3755 ndlp->nlp_DID, vport->fc_myDID); 3756 return -EIO; 3757 } 3758 3759 /* Issue new RDF for reregistering */ 3760 if (lpfc_issue_els_rdf(vport, 0)) { 3761 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3762 "2623 Failed to re-register RDF for x%x\n", 3763 vport->fc_myDID); 3764 return -EIO; 3765 } 3766 3767 return 0; 3768 } 3769 3770 /** 3771 * lpfc_least_capable_settings - helper function for EDC rsp processing 3772 * @phba: pointer to lpfc hba data structure. 3773 * @pcgd: pointer to congestion detection descriptor in EDC rsp. 3774 * 3775 * This helper routine determines the least capable settings for 3776 * congestion signals and signal frequency, including scale, from the 3777 * congestion detection descriptor in the EDC rsp. The routine 3778 * sets @phba values in preparation for a set_features mailbox. 3779 **/ 3780 static void 3781 lpfc_least_capable_settings(struct lpfc_hba *phba, 3782 struct fc_diag_cg_sig_desc *pcgd) 3783 { 3784 u32 rsp_sig_cap = 0, drv_sig_cap = 0; 3785 u32 rsp_sig_freq_cyc = 0, rsp_sig_freq_scale = 0; 3786 struct lpfc_cgn_info *cp; 3787 u32 crc; 3788 u16 sig_freq; 3789 3790 /* Get rsp signal and frequency capabilities. */ 3791 rsp_sig_cap = be32_to_cpu(pcgd->xmt_signal_capability); 3792 rsp_sig_freq_cyc = be16_to_cpu(pcgd->xmt_signal_frequency.count); 3793 rsp_sig_freq_scale = be16_to_cpu(pcgd->xmt_signal_frequency.units); 3794 3795 /* If the Fport does not support signals, set FPIN only */ 3796 if (rsp_sig_cap == EDC_CG_SIG_NOTSUPPORTED) 3797 goto out_no_support; 3798 3799 /* Apply the xmt scale to the xmt cycle to get the correct frequency. 3800 * Adapter default is 100 milliseconds. Convert all xmt cycle values 3801 * to milliseconds. 3802 */ 3803 switch (rsp_sig_freq_scale) { 3804 case EDC_CG_SIGFREQ_SEC: 3805 rsp_sig_freq_cyc *= MSEC_PER_SEC; 3806 break; 3807 case EDC_CG_SIGFREQ_MSEC: 3808 rsp_sig_freq_cyc = 1; 3809 break; 3810 default: 3811 goto out_no_support; 3812 } 3813 3814 /* Convenient shorthand. */ 3815 drv_sig_cap = phba->cgn_reg_signal; 3816 3817 /* Choose the least capable frequency. */ 3818 if (rsp_sig_freq_cyc > phba->cgn_sig_freq) 3819 phba->cgn_sig_freq = rsp_sig_freq_cyc; 3820 3821 /* There should be some common signal support. Settle on the least capable 3822 * signal and adjust FPIN values. Initialize defaults to ease the 3823 * decision.
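*
* Worked example (illustrative values): the FPort reports
* EDC_CG_SIG_WARN_ONLY while the driver registered EDC_CG_SIG_WARN_ALARM.
* The code below then settles on
*
*	phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY;
*	phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM;
*
* i.e. warnings arrive as hardware signals and alarms remain FPIN based.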
3824 */ 3825 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM; 3826 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 3827 if (rsp_sig_cap == EDC_CG_SIG_WARN_ONLY && 3828 (drv_sig_cap == EDC_CG_SIG_WARN_ONLY || 3829 drv_sig_cap == EDC_CG_SIG_WARN_ALARM)) { 3830 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY; 3831 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN; 3832 } 3833 if (rsp_sig_cap == EDC_CG_SIG_WARN_ALARM) { 3834 if (drv_sig_cap == EDC_CG_SIG_WARN_ALARM) { 3835 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ALARM; 3836 phba->cgn_reg_fpin = LPFC_CGN_FPIN_NONE; 3837 } 3838 if (drv_sig_cap == EDC_CG_SIG_WARN_ONLY) { 3839 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY; 3840 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN; 3841 } 3842 } 3843 3844 if (!phba->cgn_i) 3845 return; 3846 3847 /* Update signal frequency in congestion info buffer */ 3848 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; 3849 3850 /* Frequency (in ms) Signal Warning/Signal Congestion Notifications 3851 * are received by the HBA 3852 */ 3853 sig_freq = phba->cgn_sig_freq; 3854 3855 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY) 3856 cp->cgn_warn_freq = cpu_to_le16(sig_freq); 3857 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) { 3858 cp->cgn_alarm_freq = cpu_to_le16(sig_freq); 3859 cp->cgn_warn_freq = cpu_to_le16(sig_freq); 3860 } 3861 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED); 3862 cp->cgn_info_crc = cpu_to_le32(crc); 3863 return; 3864 3865 out_no_support: 3866 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 3867 phba->cgn_sig_freq = 0; 3868 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; 3869 } 3870 3871 DECLARE_ENUM2STR_LOOKUP(lpfc_get_tlv_dtag_nm, fc_ls_tlv_dtag, 3872 FC_LS_TLV_DTAG_INIT); 3873 3874 /** 3875 * lpfc_cmpl_els_edc - Completion callback function for EDC 3876 * @phba: pointer to lpfc hba data structure. 3877 * @cmdiocb: pointer to lpfc command iocb data structure. 3878 * @rspiocb: pointer to lpfc response iocb data structure. 3879 * 3880 * This routine is the completion callback function for issuing the Exchange 3881 * Diagnostic Capabilities (EDC) command. The driver issues an EDC to 3882 * notify the FPort of its Congestion and Link Fault capabilities. This 3883 * routine parses the FPort's response and decides on the least common 3884 * values applicable to both FPort and NPort for Warnings and Alarms that 3885 * are communicated via hardware signals. 
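*
* A short sketch of the fallback path (mirroring the out: handling below):
* if no Congestion Signaling Capability descriptor is received, the driver
* reverts to FPIN-only operation before programming the firmware:
*
*	phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN;
*	phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
*	phba->cgn_sig_freq = 0;
*	lpfc_config_cgn_signal(phba);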
3886 **/ 3887 static void 3888 lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3889 struct lpfc_iocbq *rspiocb) 3890 { 3891 IOCB_t *irsp; 3892 struct fc_els_edc_resp *edc_rsp; 3893 struct fc_tlv_desc *tlv; 3894 struct fc_diag_cg_sig_desc *pcgd; 3895 struct fc_diag_lnkflt_desc *plnkflt; 3896 struct lpfc_dmabuf *pcmd, *prsp; 3897 const char *dtag_nm; 3898 u32 *pdata, dtag; 3899 int desc_cnt = 0, bytes_remain; 3900 bool rcv_cap_desc = false; 3901 struct lpfc_nodelist *ndlp; 3902 3903 irsp = &rspiocb->iocb; 3904 ndlp = cmdiocb->context1; 3905 3906 lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD, 3907 "EDC cmpl: status:x%x/x%x did:x%x", 3908 irsp->ulpStatus, irsp->un.ulpWord[4], 3909 irsp->un.elsreq64.remoteID); 3910 3911 /* ELS cmd tag <ulpIoTag> completes */ 3912 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3913 "4201 EDC cmd tag x%x completes Data: x%x x%x x%x\n", 3914 irsp->ulpIoTag, irsp->ulpStatus, 3915 irsp->un.ulpWord[4], irsp->ulpTimeout); 3916 3917 pcmd = (struct lpfc_dmabuf *)cmdiocb->context2; 3918 if (!pcmd) 3919 goto out; 3920 3921 pdata = (u32 *)pcmd->virt; 3922 if (!pdata) 3923 goto out; 3924 3925 /* Need to clear signal values, send features MB and RDF with FPIN. */ 3926 if (irsp->ulpStatus) 3927 goto out; 3928 3929 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 3930 if (!prsp) 3931 goto out; 3932 3933 edc_rsp = prsp->virt; 3934 if (!edc_rsp) 3935 goto out; 3936 3937 /* ELS cmd tag <ulpIoTag> completes */ 3938 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3939 "4676 Fabric EDC Rsp: " 3940 "0x%02x, 0x%08x\n", 3941 edc_rsp->acc_hdr.la_cmd, 3942 be32_to_cpu(edc_rsp->desc_list_len)); 3943 3944 /* 3945 * Payload length in bytes is the response descriptor list 3946 * length minus the 12 bytes of Link Service Request 3947 * Information descriptor in the reply. 
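*
* A minimal sketch of that accounting (the LSRI descriptor is tag + length
* + request word 0, i.e. 12 bytes):
*
*	bytes_remain = be32_to_cpu(edc_rsp->desc_list_len)
*			- sizeof(struct fc_els_lsri_desc);
*
* Only the diagnostic capability descriptors then remain for the TLV walk.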
3948 */ 3949 bytes_remain = be32_to_cpu(edc_rsp->desc_list_len) - 3950 sizeof(struct fc_els_lsri_desc); 3951 if (bytes_remain <= 0) 3952 goto out; 3953 3954 tlv = edc_rsp->desc; 3955 3956 /* 3957 * cycle through EDC diagnostic descriptors to find the 3958 * congestion signaling capability descriptor 3959 */ 3960 while (bytes_remain) { 3961 if (bytes_remain < FC_TLV_DESC_HDR_SZ) { 3962 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 3963 "6461 Truncated TLV hdr on " 3964 "Diagnostic descriptor[%d]\n", 3965 desc_cnt); 3966 goto out; 3967 } 3968 3969 dtag = be32_to_cpu(tlv->desc_tag); 3970 switch (dtag) { 3971 case ELS_DTAG_LNK_FAULT_CAP: 3972 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 3973 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 3974 sizeof(struct fc_diag_lnkflt_desc)) { 3975 lpfc_printf_log( 3976 phba, KERN_WARNING, LOG_CGN_MGMT, 3977 "6462 Truncated Link Fault Diagnostic " 3978 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 3979 desc_cnt, bytes_remain, 3980 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 3981 sizeof(struct fc_diag_cg_sig_desc)); 3982 goto out; 3983 } 3984 plnkflt = (struct fc_diag_lnkflt_desc *)tlv; 3985 lpfc_printf_log( 3986 phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3987 "4617 Link Fault Desc Data: 0x%08x 0x%08x " 3988 "0x%08x 0x%08x 0x%08x\n", 3989 be32_to_cpu(plnkflt->desc_tag), 3990 be32_to_cpu(plnkflt->desc_len), 3991 be32_to_cpu( 3992 plnkflt->degrade_activate_threshold), 3993 be32_to_cpu( 3994 plnkflt->degrade_deactivate_threshold), 3995 be32_to_cpu(plnkflt->fec_degrade_interval)); 3996 break; 3997 case ELS_DTAG_CG_SIGNAL_CAP: 3998 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 3999 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 4000 sizeof(struct fc_diag_cg_sig_desc)) { 4001 lpfc_printf_log( 4002 phba, KERN_WARNING, LOG_CGN_MGMT, 4003 "6463 Truncated Cgn Signal Diagnostic " 4004 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 4005 desc_cnt, bytes_remain, 4006 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 4007 sizeof(struct fc_diag_cg_sig_desc)); 4008 goto out; 4009 } 4010 4011 pcgd = (struct fc_diag_cg_sig_desc *)tlv; 4012 lpfc_printf_log( 4013 phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 4014 "4616 CGN Desc Data: 0x%08x 0x%08x " 4015 "0x%08x 0x%04x 0x%04x 0x%08x 0x%04x 0x%04x\n", 4016 be32_to_cpu(pcgd->desc_tag), 4017 be32_to_cpu(pcgd->desc_len), 4018 be32_to_cpu(pcgd->xmt_signal_capability), 4019 be16_to_cpu(pcgd->xmt_signal_frequency.count), 4020 be16_to_cpu(pcgd->xmt_signal_frequency.units), 4021 be32_to_cpu(pcgd->rcv_signal_capability), 4022 be16_to_cpu(pcgd->rcv_signal_frequency.count), 4023 be16_to_cpu(pcgd->rcv_signal_frequency.units)); 4024 4025 /* Compare driver and Fport capabilities and choose 4026 * least common. 
4027 */ 4028 lpfc_least_capable_settings(phba, pcgd); 4029 rcv_cap_desc = true; 4030 break; 4031 default: 4032 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 4033 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 4034 "4919 unknown Diagnostic " 4035 "Descriptor[%d]: tag x%x (%s)\n", 4036 desc_cnt, dtag, dtag_nm); 4037 } 4038 4039 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); 4040 tlv = fc_tlv_next_desc(tlv); 4041 desc_cnt++; 4042 } 4043 4044 out: 4045 if (!rcv_cap_desc) { 4046 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; 4047 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 4048 phba->cgn_sig_freq = 0; 4049 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT, 4050 "4202 EDC rsp error - sending RDF " 4051 "for FPIN only.\n"); 4052 } 4053 4054 lpfc_config_cgn_signal(phba); 4055 4056 /* Check to see if link went down during discovery */ 4057 lpfc_els_chk_latt(phba->pport); 4058 lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD, 4059 "EDC Cmpl: did:x%x refcnt %d", 4060 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 4061 lpfc_els_free_iocb(phba, cmdiocb); 4062 lpfc_nlp_put(ndlp); 4063 } 4064 4065 static void 4066 lpfc_format_edc_cgn_desc(struct lpfc_hba *phba, struct fc_diag_cg_sig_desc *cgd) 4067 { 4068 /* We are assuming cgd was zero'ed before calling this routine */ 4069 4070 /* Configure the congestion detection capability */ 4071 cgd->desc_tag = cpu_to_be32(ELS_DTAG_CG_SIGNAL_CAP); 4072 4073 /* Descriptor len doesn't include the tag or len fields. */ 4074 cgd->desc_len = cpu_to_be32( 4075 FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_diag_cg_sig_desc)); 4076 4077 /* xmt_signal_capability already set to EDC_CG_SIG_NOTSUPPORTED. 4078 * xmt_signal_frequency.count already set to 0. 4079 * xmt_signal_frequency.units already set to 0. 4080 */ 4081 4082 if (phba->cmf_active_mode == LPFC_CFG_OFF) { 4083 /* rcv_signal_capability already set to EDC_CG_SIG_NOTSUPPORTED. 4084 * rcv_signal_frequency.count already set to 0. 4085 * rcv_signal_frequency.units already set to 0. 4086 */ 4087 phba->cgn_sig_freq = 0; 4088 return; 4089 } 4090 switch (phba->cgn_reg_signal) { 4091 case EDC_CG_SIG_WARN_ONLY: 4092 cgd->rcv_signal_capability = cpu_to_be32(EDC_CG_SIG_WARN_ONLY); 4093 break; 4094 case EDC_CG_SIG_WARN_ALARM: 4095 cgd->rcv_signal_capability = cpu_to_be32(EDC_CG_SIG_WARN_ALARM); 4096 break; 4097 default: 4098 /* rcv_signal_capability left 0 thus no support */ 4099 break; 4100 } 4101 4102 /* We start negotiation with lpfc_fabric_cgn_frequency, after 4103 * the completion we settle on the higher frequency. 4104 */ 4105 cgd->rcv_signal_frequency.count = 4106 cpu_to_be16(lpfc_fabric_cgn_frequency); 4107 cgd->rcv_signal_frequency.units = 4108 cpu_to_be16(EDC_CG_SIGFREQ_MSEC); 4109 } 4110 4111 /** 4112 * lpfc_issue_els_edc - Exchange Diagnostic Capabilities with the fabric. 4113 * @vport: pointer to a host virtual N_Port data structure. 4114 * @retry: retry counter for the command iocb. 4115 * 4116 * This routine issues an ELS EDC to the F-Port Controller to communicate 4117 * this N_Port's support of hardware signals in its Congestion 4118 * Capabilities Descriptor. 4119 * 4120 * Note: This routine does not check if one or more signals are 4121 * set in the cgn_reg_signal parameter. The caller makes the 4122 * decision to enforce cgn_reg_signal as nonzero or zero depending 4123 * on the conditions. During Fabric requests, the driver 4124 * requires cgn_reg_signals to be nonzero. 
But a dynamic request 4125 * to set the congestion mode to OFF from Monitor or Manage 4126 * would correctly issue an EDC with no signals enabled to 4127 * turn off switch functionality and then update the FW. 4128 * 4129 * Return code 4130 * 0 - Successfully issued edc command 4131 * 1 - Failed to issue edc command 4132 **/ 4133 int 4134 lpfc_issue_els_edc(struct lpfc_vport *vport, uint8_t retry) 4135 { 4136 struct lpfc_hba *phba = vport->phba; 4137 struct lpfc_iocbq *elsiocb; 4138 struct lpfc_els_edc_req *edc_req; 4139 struct fc_diag_cg_sig_desc *cgn_desc; 4140 u16 cmdsize; 4141 struct lpfc_nodelist *ndlp; 4142 u8 *pcmd = NULL; 4143 u32 edc_req_size, cgn_desc_size; 4144 int rc; 4145 4146 if (vport->port_type == LPFC_NPIV_PORT) 4147 return -EACCES; 4148 4149 ndlp = lpfc_findnode_did(vport, Fabric_DID); 4150 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 4151 return -ENODEV; 4152 4153 /* If HBA doesn't support signals, drop into RDF */ 4154 if (!phba->cgn_init_reg_signal) 4155 goto try_rdf; 4156 4157 edc_req_size = sizeof(struct fc_els_edc); 4158 cgn_desc_size = sizeof(struct fc_diag_cg_sig_desc); 4159 cmdsize = edc_req_size + cgn_desc_size; 4160 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 4161 ndlp->nlp_DID, ELS_CMD_EDC); 4162 if (!elsiocb) 4163 goto try_rdf; 4164 4165 /* Configure the payload for the supported Diagnostics capabilities. */ 4166 pcmd = (u8 *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt); 4167 memset(pcmd, 0, cmdsize); 4168 edc_req = (struct lpfc_els_edc_req *)pcmd; 4169 edc_req->edc.desc_len = cpu_to_be32(cgn_desc_size); 4170 edc_req->edc.edc_cmd = ELS_EDC; 4171 4172 cgn_desc = &edc_req->cgn_desc; 4173 4174 lpfc_format_edc_cgn_desc(phba, cgn_desc); 4175 4176 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency; 4177 4178 lpfc_printf_vlog(vport, KERN_INFO, LOG_CGN_MGMT, 4179 "4623 Xmit EDC to remote " 4180 "NPORT x%x reg_sig x%x reg_fpin:x%x\n", 4181 ndlp->nlp_DID, phba->cgn_reg_signal, 4182 phba->cgn_reg_fpin); 4183 4184 elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd; 4185 elsiocb->context1 = lpfc_nlp_get(ndlp); 4186 if (!elsiocb->context1) { 4187 lpfc_els_free_iocb(phba, elsiocb); 4188 return -EIO; 4189 } 4190 4191 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 4192 "Issue EDC: did:x%x refcnt %d", 4193 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 4194 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 4195 if (rc == IOCB_ERROR) { 4196 /* The additional lpfc_nlp_put will cause the following 4197 * lpfc_els_free_iocb routine to trigger the rlease of 4198 * the node. 4199 */ 4200 lpfc_els_free_iocb(phba, elsiocb); 4201 lpfc_nlp_put(ndlp); 4202 goto try_rdf; 4203 } 4204 return 0; 4205 try_rdf: 4206 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM; 4207 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 4208 rc = lpfc_issue_els_rdf(vport, 0); 4209 return rc; 4210 } 4211 4212 /** 4213 * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry 4214 * @vport: pointer to a host virtual N_Port data structure. 4215 * @nlp: pointer to a node-list data structure. 4216 * 4217 * This routine cancels the timer with a delayed IOCB-command retry for 4218 * a @vport's @ndlp. It stops the timer for the delayed function retrial and 4219 * removes the ELS retry event if it presents. In addition, if the 4220 * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB 4221 * commands are sent for the @vport's nodes that require issuing discovery 4222 * ADISC. 
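*
* Hypothetical caller sketch (not taken from a specific call site): a
* discovery path that is about to reuse the node would first cancel any
* pending delayed retry before issuing a new ELS command:
*
*	if (ndlp->nlp_flag & NLP_DELAY_TMO)
*		lpfc_cancel_retry_delay_tmo(vport, ndlp);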
4223 **/ 4224 void 4225 lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) 4226 { 4227 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4228 struct lpfc_work_evt *evtp; 4229 4230 if (!(nlp->nlp_flag & NLP_DELAY_TMO)) 4231 return; 4232 spin_lock_irq(&nlp->lock); 4233 nlp->nlp_flag &= ~NLP_DELAY_TMO; 4234 spin_unlock_irq(&nlp->lock); 4235 del_timer_sync(&nlp->nlp_delayfunc); 4236 nlp->nlp_last_elscmd = 0; 4237 if (!list_empty(&nlp->els_retry_evt.evt_listp)) { 4238 list_del_init(&nlp->els_retry_evt.evt_listp); 4239 /* Decrement nlp reference count held for the delayed retry */ 4240 evtp = &nlp->els_retry_evt; 4241 lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1); 4242 } 4243 if (nlp->nlp_flag & NLP_NPR_2B_DISC) { 4244 spin_lock_irq(&nlp->lock); 4245 nlp->nlp_flag &= ~NLP_NPR_2B_DISC; 4246 spin_unlock_irq(&nlp->lock); 4247 if (vport->num_disc_nodes) { 4248 if (vport->port_state < LPFC_VPORT_READY) { 4249 /* Check if there are more ADISCs to be sent */ 4250 lpfc_more_adisc(vport); 4251 } else { 4252 /* Check if there are more PLOGIs to be sent */ 4253 lpfc_more_plogi(vport); 4254 if (vport->num_disc_nodes == 0) { 4255 spin_lock_irq(shost->host_lock); 4256 vport->fc_flag &= ~FC_NDISC_ACTIVE; 4257 spin_unlock_irq(shost->host_lock); 4258 lpfc_can_disctmo(vport); 4259 lpfc_end_rscn(vport); 4260 } 4261 } 4262 } 4263 } 4264 return; 4265 } 4266 4267 /** 4268 * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer 4269 * @t: pointer to the timer function associated data (ndlp). 4270 * 4271 * This routine is invoked by the ndlp delayed-function timer to check 4272 * whether there is any pending ELS retry event(s) with the node. If not, it 4273 * simply returns. Otherwise, if there is at least one ELS delayed event, it 4274 * adds the delayed events to the HBA work list and invokes the 4275 * lpfc_worker_wake_up() routine to wake up worker thread to process the 4276 * event. Note that lpfc_nlp_get() is called before posting the event to 4277 * the work list to hold reference count of ndlp so that it guarantees the 4278 * reference to ndlp will still be available when the worker thread gets 4279 * to the event associated with the ndlp. 4280 **/ 4281 void 4282 lpfc_els_retry_delay(struct timer_list *t) 4283 { 4284 struct lpfc_nodelist *ndlp = from_timer(ndlp, t, nlp_delayfunc); 4285 struct lpfc_vport *vport = ndlp->vport; 4286 struct lpfc_hba *phba = vport->phba; 4287 unsigned long flags; 4288 struct lpfc_work_evt *evtp = &ndlp->els_retry_evt; 4289 4290 spin_lock_irqsave(&phba->hbalock, flags); 4291 if (!list_empty(&evtp->evt_listp)) { 4292 spin_unlock_irqrestore(&phba->hbalock, flags); 4293 return; 4294 } 4295 4296 /* We need to hold the node by incrementing the reference 4297 * count until the queued work is done 4298 */ 4299 evtp->evt_arg1 = lpfc_nlp_get(ndlp); 4300 if (evtp->evt_arg1) { 4301 evtp->evt = LPFC_EVT_ELS_RETRY; 4302 list_add_tail(&evtp->evt_listp, &phba->work_list); 4303 lpfc_worker_wake_up(phba); 4304 } 4305 spin_unlock_irqrestore(&phba->hbalock, flags); 4306 return; 4307 } 4308 4309 /** 4310 * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function 4311 * @ndlp: pointer to a node-list data structure. 4312 * 4313 * This routine is the worker-thread handler for processing the @ndlp delayed 4314 * event(s), posted by the lpfc_els_retry_delay() routine. 
It simply retrieves 4315 * the last ELS command from the associated ndlp and invokes the proper ELS 4316 * function according to the delayed ELS command to retry the command. 4317 **/ 4318 void 4319 lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) 4320 { 4321 struct lpfc_vport *vport = ndlp->vport; 4322 uint32_t cmd, retry; 4323 4324 spin_lock_irq(&ndlp->lock); 4325 cmd = ndlp->nlp_last_elscmd; 4326 ndlp->nlp_last_elscmd = 0; 4327 4328 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { 4329 spin_unlock_irq(&ndlp->lock); 4330 return; 4331 } 4332 4333 ndlp->nlp_flag &= ~NLP_DELAY_TMO; 4334 spin_unlock_irq(&ndlp->lock); 4335 /* 4336 * If a discovery event readded nlp_delayfunc after timer 4337 * firing and before processing the timer, cancel the 4338 * nlp_delayfunc. 4339 */ 4340 del_timer_sync(&ndlp->nlp_delayfunc); 4341 retry = ndlp->nlp_retry; 4342 ndlp->nlp_retry = 0; 4343 4344 switch (cmd) { 4345 case ELS_CMD_FLOGI: 4346 lpfc_issue_els_flogi(vport, ndlp, retry); 4347 break; 4348 case ELS_CMD_PLOGI: 4349 if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) { 4350 ndlp->nlp_prev_state = ndlp->nlp_state; 4351 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 4352 } 4353 break; 4354 case ELS_CMD_ADISC: 4355 if (!lpfc_issue_els_adisc(vport, ndlp, retry)) { 4356 ndlp->nlp_prev_state = ndlp->nlp_state; 4357 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 4358 } 4359 break; 4360 case ELS_CMD_PRLI: 4361 case ELS_CMD_NVMEPRLI: 4362 if (!lpfc_issue_els_prli(vport, ndlp, retry)) { 4363 ndlp->nlp_prev_state = ndlp->nlp_state; 4364 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 4365 } 4366 break; 4367 case ELS_CMD_LOGO: 4368 if (!lpfc_issue_els_logo(vport, ndlp, retry)) { 4369 ndlp->nlp_prev_state = ndlp->nlp_state; 4370 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 4371 } 4372 break; 4373 case ELS_CMD_FDISC: 4374 if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)) 4375 lpfc_issue_els_fdisc(vport, ndlp, retry); 4376 break; 4377 } 4378 return; 4379 } 4380 4381 /** 4382 * lpfc_link_reset - Issue link reset 4383 * @vport: pointer to a virtual N_Port data structure. 4384 * 4385 * This routine performs link reset by sending INIT_LINK mailbox command. 4386 * For SLI-3 adapter, link attention interrupt is enabled before issuing 4387 * INIT_LINK mailbox command. 
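*
* For example (mirroring the link_reset handling in lpfc_els_retry() below),
* when a PLOGI to the name server exhausts its retries with a sequence
* timeout, the retry logic falls back to
*
*	rc = lpfc_link_reset(vport);
*
* and only schedules another PLOGI retry if the reset could not be
* initiated.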
4388 * 4389 * Return code 4390 * 0 - Link reset initiated successfully 4391 * 1 - Failed to initiate link reset 4392 **/ 4393 int 4394 lpfc_link_reset(struct lpfc_vport *vport) 4395 { 4396 struct lpfc_hba *phba = vport->phba; 4397 LPFC_MBOXQ_t *mbox; 4398 uint32_t control; 4399 int rc; 4400 4401 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 4402 "2851 Attempt link reset\n"); 4403 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4404 if (!mbox) { 4405 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4406 "2852 Failed to allocate mbox memory"); 4407 return 1; 4408 } 4409 4410 /* Enable Link attention interrupts */ 4411 if (phba->sli_rev <= LPFC_SLI_REV3) { 4412 spin_lock_irq(&phba->hbalock); 4413 phba->sli.sli_flag |= LPFC_PROCESS_LA; 4414 control = readl(phba->HCregaddr); 4415 control |= HC_LAINT_ENA; 4416 writel(control, phba->HCregaddr); 4417 readl(phba->HCregaddr); /* flush */ 4418 spin_unlock_irq(&phba->hbalock); 4419 } 4420 4421 lpfc_init_link(phba, mbox, phba->cfg_topology, 4422 phba->cfg_link_speed); 4423 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4424 mbox->vport = vport; 4425 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 4426 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 4427 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4428 "2853 Failed to issue INIT_LINK " 4429 "mbox command, rc:x%x\n", rc); 4430 mempool_free(mbox, phba->mbox_mem_pool); 4431 return 1; 4432 } 4433 4434 return 0; 4435 } 4436 4437 /** 4438 * lpfc_els_retry - Make retry decision on an els command iocb 4439 * @phba: pointer to lpfc hba data structure. 4440 * @cmdiocb: pointer to lpfc command iocb data structure. 4441 * @rspiocb: pointer to lpfc response iocb data structure. 4442 * 4443 * This routine makes a retry decision on an ELS command IOCB, which has 4444 * failed. The following ELS IOCBs use this function for retrying the command 4445 * when a previously issued command responded with an error status: FLOGI, PLOGI, 4446 * PRLI, ADISC and FDISC. Based on the ELS command type and the 4447 * returned error status, it makes the decision whether a retry shall be 4448 * issued for the command, and whether a retry shall be made immediately or 4449 * delayed. In the former case, the corresponding ELS command issuing-function 4450 * is called to retry the command. In the latter case, the ELS command shall 4451 * be posted to the ndlp delayed event and delayed function timer set to the 4452 * ndlp for the delayed command issuing. 4453 * 4454 * Return code 4455 * 0 - No retry of els command is made 4456 * 1 - Immediate or delayed retry of els command is made 4457 **/ 4458 static int 4459 lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 4460 struct lpfc_iocbq *rspiocb) 4461 { 4462 struct lpfc_vport *vport = cmdiocb->vport; 4463 IOCB_t *irsp = &rspiocb->iocb; 4464 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 4465 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 4466 uint32_t *elscmd; 4467 struct ls_rjt stat; 4468 int retry = 0, maxretry = lpfc_max_els_tries, delay = 0; 4469 int logerr = 0; 4470 uint32_t cmd = 0; 4471 uint32_t did; 4472 int link_reset = 0, rc; 4473 4474 4475 /* Note: context2 may be 0 for internal driver abort 4476 * of a delayed ELS command.
4477 */ 4478 4479 if (pcmd && pcmd->virt) { 4480 elscmd = (uint32_t *) (pcmd->virt); 4481 cmd = *elscmd++; 4482 } 4483 4484 if (ndlp) 4485 did = ndlp->nlp_DID; 4486 else { 4487 /* We should only hit this case for retrying PLOGI */ 4488 did = irsp->un.elsreq64.remoteID; 4489 ndlp = lpfc_findnode_did(vport, did); 4490 if (!ndlp && (cmd != ELS_CMD_PLOGI)) 4491 return 0; 4492 } 4493 4494 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 4495 "Retry ELS: wd7:x%x wd4:x%x did:x%x", 4496 *(((uint32_t *)irsp) + 7), irsp->un.ulpWord[4], did); 4497 4498 switch (irsp->ulpStatus) { 4499 case IOSTAT_FCP_RSP_ERROR: 4500 break; 4501 case IOSTAT_REMOTE_STOP: 4502 if (phba->sli_rev == LPFC_SLI_REV4) { 4503 /* This IO was aborted by the target, we don't 4504 * know the rxid and because we did not send the 4505 * ABTS we cannot generate and RRQ. 4506 */ 4507 lpfc_set_rrq_active(phba, ndlp, 4508 cmdiocb->sli4_lxritag, 0, 0); 4509 } 4510 break; 4511 case IOSTAT_LOCAL_REJECT: 4512 switch ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK)) { 4513 case IOERR_LOOP_OPEN_FAILURE: 4514 if (cmd == ELS_CMD_FLOGI) { 4515 if (PCI_DEVICE_ID_HORNET == 4516 phba->pcidev->device) { 4517 phba->fc_topology = LPFC_TOPOLOGY_LOOP; 4518 phba->pport->fc_myDID = 0; 4519 phba->alpa_map[0] = 0; 4520 phba->alpa_map[1] = 0; 4521 } 4522 } 4523 if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0) 4524 delay = 1000; 4525 retry = 1; 4526 break; 4527 4528 case IOERR_ILLEGAL_COMMAND: 4529 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4530 "0124 Retry illegal cmd x%x " 4531 "retry:x%x delay:x%x\n", 4532 cmd, cmdiocb->retry, delay); 4533 retry = 1; 4534 /* All command's retry policy */ 4535 maxretry = 8; 4536 if (cmdiocb->retry > 2) 4537 delay = 1000; 4538 break; 4539 4540 case IOERR_NO_RESOURCES: 4541 logerr = 1; /* HBA out of resources */ 4542 retry = 1; 4543 if (cmdiocb->retry > 100) 4544 delay = 100; 4545 maxretry = 250; 4546 break; 4547 4548 case IOERR_ILLEGAL_FRAME: 4549 delay = 100; 4550 retry = 1; 4551 break; 4552 4553 case IOERR_INVALID_RPI: 4554 if (cmd == ELS_CMD_PLOGI && 4555 did == NameServer_DID) { 4556 /* Continue forever if plogi to */ 4557 /* the nameserver fails */ 4558 maxretry = 0; 4559 delay = 100; 4560 } 4561 retry = 1; 4562 break; 4563 4564 case IOERR_SEQUENCE_TIMEOUT: 4565 if (cmd == ELS_CMD_PLOGI && 4566 did == NameServer_DID && 4567 (cmdiocb->retry + 1) == maxretry) { 4568 /* Reset the Link */ 4569 link_reset = 1; 4570 break; 4571 } 4572 retry = 1; 4573 delay = 100; 4574 break; 4575 case IOERR_SLI_ABORTED: 4576 /* Retry ELS PLOGI command? 4577 * Possibly the rport just wasn't ready. 4578 */ 4579 if (cmd == ELS_CMD_PLOGI) { 4580 /* No retry if state change */ 4581 if (ndlp && 4582 ndlp->nlp_state != NLP_STE_PLOGI_ISSUE) 4583 goto out_retry; 4584 retry = 1; 4585 maxretry = 2; 4586 } 4587 break; 4588 } 4589 break; 4590 4591 case IOSTAT_NPORT_RJT: 4592 case IOSTAT_FABRIC_RJT: 4593 if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) { 4594 retry = 1; 4595 break; 4596 } 4597 break; 4598 4599 case IOSTAT_NPORT_BSY: 4600 case IOSTAT_FABRIC_BSY: 4601 logerr = 1; /* Fabric / Remote NPort out of resources */ 4602 retry = 1; 4603 break; 4604 4605 case IOSTAT_LS_RJT: 4606 stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]); 4607 /* Added for Vendor specifc support 4608 * Just keep retrying for these Rsn / Exp codes 4609 */ 4610 switch (stat.un.b.lsRjtRsnCode) { 4611 case LSRJT_UNABLE_TPC: 4612 /* The driver has a VALID PLOGI but the rport has 4613 * rejected the PRLI - can't do it now. Delay 4614 * for 1 second and try again. 
4615 * 4616 * However, if explanation is REQ_UNSUPPORTED there's 4617 * no point to retry PRLI. 4618 */ 4619 if ((cmd == ELS_CMD_PRLI || cmd == ELS_CMD_NVMEPRLI) && 4620 stat.un.b.lsRjtRsnCodeExp != 4621 LSEXP_REQ_UNSUPPORTED) { 4622 delay = 1000; 4623 maxretry = lpfc_max_els_tries + 1; 4624 retry = 1; 4625 break; 4626 } 4627 4628 /* Legacy bug fix code for targets with PLOGI delays. */ 4629 if (stat.un.b.lsRjtRsnCodeExp == 4630 LSEXP_CMD_IN_PROGRESS) { 4631 if (cmd == ELS_CMD_PLOGI) { 4632 delay = 1000; 4633 maxretry = 48; 4634 } 4635 retry = 1; 4636 break; 4637 } 4638 if (stat.un.b.lsRjtRsnCodeExp == 4639 LSEXP_CANT_GIVE_DATA) { 4640 if (cmd == ELS_CMD_PLOGI) { 4641 delay = 1000; 4642 maxretry = 48; 4643 } 4644 retry = 1; 4645 break; 4646 } 4647 if (cmd == ELS_CMD_PLOGI) { 4648 delay = 1000; 4649 maxretry = lpfc_max_els_tries + 1; 4650 retry = 1; 4651 break; 4652 } 4653 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 4654 (cmd == ELS_CMD_FDISC) && 4655 (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){ 4656 lpfc_printf_vlog(vport, KERN_ERR, 4657 LOG_TRACE_EVENT, 4658 "0125 FDISC Failed (x%x). " 4659 "Fabric out of resources\n", 4660 stat.un.lsRjtError); 4661 lpfc_vport_set_state(vport, 4662 FC_VPORT_NO_FABRIC_RSCS); 4663 } 4664 break; 4665 4666 case LSRJT_LOGICAL_BSY: 4667 if ((cmd == ELS_CMD_PLOGI) || 4668 (cmd == ELS_CMD_PRLI) || 4669 (cmd == ELS_CMD_NVMEPRLI)) { 4670 delay = 1000; 4671 maxretry = 48; 4672 } else if (cmd == ELS_CMD_FDISC) { 4673 /* FDISC retry policy */ 4674 maxretry = 48; 4675 if (cmdiocb->retry >= 32) 4676 delay = 1000; 4677 } 4678 retry = 1; 4679 break; 4680 4681 case LSRJT_LOGICAL_ERR: 4682 /* There are some cases where switches return this 4683 * error when they are not ready and should be returning 4684 * Logical Busy. We should delay every time. 4685 */ 4686 if (cmd == ELS_CMD_FDISC && 4687 stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) { 4688 maxretry = 3; 4689 delay = 1000; 4690 retry = 1; 4691 } else if (cmd == ELS_CMD_FLOGI && 4692 stat.un.b.lsRjtRsnCodeExp == 4693 LSEXP_NOTHING_MORE) { 4694 vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf; 4695 retry = 1; 4696 lpfc_printf_vlog(vport, KERN_ERR, 4697 LOG_TRACE_EVENT, 4698 "0820 FLOGI Failed (x%x). " 4699 "BBCredit Not Supported\n", 4700 stat.un.lsRjtError); 4701 } 4702 break; 4703 4704 case LSRJT_PROTOCOL_ERR: 4705 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 4706 (cmd == ELS_CMD_FDISC) && 4707 ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) || 4708 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID)) 4709 ) { 4710 lpfc_printf_vlog(vport, KERN_ERR, 4711 LOG_TRACE_EVENT, 4712 "0122 FDISC Failed (x%x). " 4713 "Fabric Detected Bad WWN\n", 4714 stat.un.lsRjtError); 4715 lpfc_vport_set_state(vport, 4716 FC_VPORT_FABRIC_REJ_WWN); 4717 } 4718 break; 4719 case LSRJT_VENDOR_UNIQUE: 4720 if ((stat.un.b.vendorUnique == 0x45) && 4721 (cmd == ELS_CMD_FLOGI)) { 4722 goto out_retry; 4723 } 4724 break; 4725 case LSRJT_CMD_UNSUPPORTED: 4726 /* lpfc nvmet returns this type of LS_RJT when it 4727 * receives an FCP PRLI because lpfc nvmet only 4728 * support NVME. ELS request is terminated for FCP4 4729 * on this rport. 
4730 */ 4731 if (stat.un.b.lsRjtRsnCodeExp == 4732 LSEXP_REQ_UNSUPPORTED && cmd == ELS_CMD_PRLI) { 4733 spin_lock_irq(&ndlp->lock); 4734 ndlp->nlp_flag |= NLP_FCP_PRLI_RJT; 4735 spin_unlock_irq(&ndlp->lock); 4736 retry = 0; 4737 goto out_retry; 4738 } 4739 break; 4740 } 4741 break; 4742 4743 case IOSTAT_INTERMED_RSP: 4744 case IOSTAT_BA_RJT: 4745 break; 4746 4747 default: 4748 break; 4749 } 4750 4751 if (link_reset) { 4752 rc = lpfc_link_reset(vport); 4753 if (rc) { 4754 /* Do not give up. Retry PLOGI one more time and attempt 4755 * link reset if PLOGI fails again. 4756 */ 4757 retry = 1; 4758 delay = 100; 4759 goto out_retry; 4760 } 4761 return 1; 4762 } 4763 4764 if (did == FDMI_DID) 4765 retry = 1; 4766 4767 if ((cmd == ELS_CMD_FLOGI) && 4768 (phba->fc_topology != LPFC_TOPOLOGY_LOOP) && 4769 !lpfc_error_lost_link(irsp)) { 4770 /* FLOGI retry policy */ 4771 retry = 1; 4772 /* retry FLOGI forever */ 4773 if (phba->link_flag != LS_LOOPBACK_MODE) 4774 maxretry = 0; 4775 else 4776 maxretry = 2; 4777 4778 if (cmdiocb->retry >= 100) 4779 delay = 5000; 4780 else if (cmdiocb->retry >= 32) 4781 delay = 1000; 4782 } else if ((cmd == ELS_CMD_FDISC) && !lpfc_error_lost_link(irsp)) { 4783 /* retry FDISCs every second up to devloss */ 4784 retry = 1; 4785 maxretry = vport->cfg_devloss_tmo; 4786 delay = 1000; 4787 } 4788 4789 cmdiocb->retry++; 4790 if (maxretry && (cmdiocb->retry >= maxretry)) { 4791 phba->fc_stat.elsRetryExceeded++; 4792 retry = 0; 4793 } 4794 4795 if ((vport->load_flag & FC_UNLOADING) != 0) 4796 retry = 0; 4797 4798 out_retry: 4799 if (retry) { 4800 if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) { 4801 /* Stop retrying PLOGI and FDISC if in FCF discovery */ 4802 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 4803 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4804 "2849 Stop retry ELS command " 4805 "x%x to remote NPORT x%x, " 4806 "Data: x%x x%x\n", cmd, did, 4807 cmdiocb->retry, delay); 4808 return 0; 4809 } 4810 } 4811 4812 /* Retry ELS command <elsCmd> to remote NPORT <did> */ 4813 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4814 "0107 Retry ELS command x%x to remote " 4815 "NPORT x%x Data: x%x x%x\n", 4816 cmd, did, cmdiocb->retry, delay); 4817 4818 if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) && 4819 ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) || 4820 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) != 4821 IOERR_NO_RESOURCES))) { 4822 /* Don't reset timer for no resources */ 4823 4824 /* If discovery / RSCN timer is running, reset it */ 4825 if (timer_pending(&vport->fc_disctmo) || 4826 (vport->fc_flag & FC_RSCN_MODE)) 4827 lpfc_set_disctmo(vport); 4828 } 4829 4830 phba->fc_stat.elsXmitRetry++; 4831 if (ndlp && delay) { 4832 phba->fc_stat.elsDelayRetry++; 4833 ndlp->nlp_retry = cmdiocb->retry; 4834 4835 /* delay is specified in milliseconds */ 4836 mod_timer(&ndlp->nlp_delayfunc, 4837 jiffies + msecs_to_jiffies(delay)); 4838 spin_lock_irq(&ndlp->lock); 4839 ndlp->nlp_flag |= NLP_DELAY_TMO; 4840 spin_unlock_irq(&ndlp->lock); 4841 4842 ndlp->nlp_prev_state = ndlp->nlp_state; 4843 if ((cmd == ELS_CMD_PRLI) || 4844 (cmd == ELS_CMD_NVMEPRLI)) 4845 lpfc_nlp_set_state(vport, ndlp, 4846 NLP_STE_PRLI_ISSUE); 4847 else if (cmd != ELS_CMD_ADISC) 4848 lpfc_nlp_set_state(vport, ndlp, 4849 NLP_STE_NPR_NODE); 4850 ndlp->nlp_last_elscmd = cmd; 4851 4852 return 1; 4853 } 4854 switch (cmd) { 4855 case ELS_CMD_FLOGI: 4856 lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry); 4857 return 1; 4858 case ELS_CMD_FDISC: 4859 lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry); 4860 return 1; 4861 case 
ELS_CMD_PLOGI: 4862 if (ndlp) { 4863 ndlp->nlp_prev_state = ndlp->nlp_state; 4864 lpfc_nlp_set_state(vport, ndlp, 4865 NLP_STE_PLOGI_ISSUE); 4866 } 4867 lpfc_issue_els_plogi(vport, did, cmdiocb->retry); 4868 return 1; 4869 case ELS_CMD_ADISC: 4870 ndlp->nlp_prev_state = ndlp->nlp_state; 4871 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 4872 lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry); 4873 return 1; 4874 case ELS_CMD_PRLI: 4875 case ELS_CMD_NVMEPRLI: 4876 ndlp->nlp_prev_state = ndlp->nlp_state; 4877 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 4878 lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry); 4879 return 1; 4880 case ELS_CMD_LOGO: 4881 ndlp->nlp_prev_state = ndlp->nlp_state; 4882 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 4883 lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry); 4884 return 1; 4885 } 4886 } 4887 /* No retry ELS command <elsCmd> to remote NPORT <did> */ 4888 if (logerr) { 4889 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4890 "0137 No retry ELS command x%x to remote " 4891 "NPORT x%x: Out of Resources: Error:x%x/%x\n", 4892 cmd, did, irsp->ulpStatus, 4893 irsp->un.ulpWord[4]); 4894 } 4895 else { 4896 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4897 "0108 No retry ELS command x%x to remote " 4898 "NPORT x%x Retried:%d Error:x%x/%x\n", 4899 cmd, did, cmdiocb->retry, irsp->ulpStatus, 4900 irsp->un.ulpWord[4]); 4901 } 4902 return 0; 4903 } 4904 4905 /** 4906 * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb 4907 * @phba: pointer to lpfc hba data structure. 4908 * @buf_ptr1: pointer to the lpfc DMA buffer data structure. 4909 * 4910 * This routine releases the lpfc DMA (Direct Memory Access) buffer(s) 4911 * associated with a command IOCB back to the lpfc DMA buffer pool. It first 4912 * checks to see whether there is a lpfc DMA buffer associated with the 4913 * response of the command IOCB. If so, it will be released before releasing 4914 * the lpfc DMA buffer associated with the IOCB itself. 4915 * 4916 * Return code 4917 * 0 - Successfully released lpfc DMA buffer (currently, always return 0) 4918 **/ 4919 static int 4920 lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1) 4921 { 4922 struct lpfc_dmabuf *buf_ptr; 4923 4924 /* Free the response before processing the command. */ 4925 if (!list_empty(&buf_ptr1->list)) { 4926 list_remove_head(&buf_ptr1->list, buf_ptr, 4927 struct lpfc_dmabuf, 4928 list); 4929 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 4930 kfree(buf_ptr); 4931 } 4932 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys); 4933 kfree(buf_ptr1); 4934 return 0; 4935 } 4936 4937 /** 4938 * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl 4939 * @phba: pointer to lpfc hba data structure. 4940 * @buf_ptr: pointer to the lpfc dma buffer data structure. 4941 * 4942 * This routine releases the lpfc Direct Memory Access (DMA) buffer 4943 * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer 4944 * pool. 4945 * 4946 * Return code 4947 * 0 - Successfully released lpfc DMA buffer (currently, always return 0) 4948 **/ 4949 static int 4950 lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr) 4951 { 4952 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 4953 kfree(buf_ptr); 4954 return 0; 4955 } 4956 4957 /** 4958 * lpfc_els_free_iocb - Free a command iocb and its associated resources 4959 * @phba: pointer to lpfc hba data structure. 4960 * @elsiocb: pointer to lpfc els command iocb data structure. 
4961 * 4962 * This routine frees a command IOCB and its associated resources. The 4963 * command IOCB data structure contains the reference to various associated 4964 * resources, these fields must be set to NULL if the associated reference 4965 * not present: 4966 * context1 - reference to ndlp 4967 * context2 - reference to cmd 4968 * context2->next - reference to rsp 4969 * context3 - reference to bpl 4970 * 4971 * It first properly decrements the reference count held on ndlp for the 4972 * IOCB completion callback function. If LPFC_DELAY_MEM_FREE flag is not 4973 * set, it invokes the lpfc_els_free_data() routine to release the Direct 4974 * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it 4975 * adds the DMA buffer the @phba data structure for the delayed release. 4976 * If reference to the Buffer Pointer List (BPL) is present, the 4977 * lpfc_els_free_bpl() routine is invoked to release the DMA memory 4978 * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is 4979 * invoked to release the IOCB data structure back to @phba IOCBQ list. 4980 * 4981 * Return code 4982 * 0 - Success (currently, always return 0) 4983 **/ 4984 int 4985 lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb) 4986 { 4987 struct lpfc_dmabuf *buf_ptr, *buf_ptr1; 4988 4989 /* The I/O iocb is complete. Clear the context1 data. */ 4990 elsiocb->context1 = NULL; 4991 4992 /* context2 = cmd, context2->next = rsp, context3 = bpl */ 4993 if (elsiocb->context2) { 4994 if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) { 4995 /* Firmware could still be in progress of DMAing 4996 * payload, so don't free data buffer till after 4997 * a hbeat. 4998 */ 4999 elsiocb->iocb_flag &= ~LPFC_DELAY_MEM_FREE; 5000 buf_ptr = elsiocb->context2; 5001 elsiocb->context2 = NULL; 5002 if (buf_ptr) { 5003 buf_ptr1 = NULL; 5004 spin_lock_irq(&phba->hbalock); 5005 if (!list_empty(&buf_ptr->list)) { 5006 list_remove_head(&buf_ptr->list, 5007 buf_ptr1, struct lpfc_dmabuf, 5008 list); 5009 INIT_LIST_HEAD(&buf_ptr1->list); 5010 list_add_tail(&buf_ptr1->list, 5011 &phba->elsbuf); 5012 phba->elsbuf_cnt++; 5013 } 5014 INIT_LIST_HEAD(&buf_ptr->list); 5015 list_add_tail(&buf_ptr->list, &phba->elsbuf); 5016 phba->elsbuf_cnt++; 5017 spin_unlock_irq(&phba->hbalock); 5018 } 5019 } else { 5020 buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2; 5021 lpfc_els_free_data(phba, buf_ptr1); 5022 elsiocb->context2 = NULL; 5023 } 5024 } 5025 5026 if (elsiocb->context3) { 5027 buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3; 5028 lpfc_els_free_bpl(phba, buf_ptr); 5029 elsiocb->context3 = NULL; 5030 } 5031 lpfc_sli_release_iocbq(phba, elsiocb); 5032 return 0; 5033 } 5034 5035 /** 5036 * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response 5037 * @phba: pointer to lpfc hba data structure. 5038 * @cmdiocb: pointer to lpfc command iocb data structure. 5039 * @rspiocb: pointer to lpfc response iocb data structure. 5040 * 5041 * This routine is the completion callback function to the Logout (LOGO) 5042 * Accept (ACC) Response ELS command. This routine is invoked to indicate 5043 * the completion of the LOGO process. It invokes the lpfc_nlp_not_used() to 5044 * release the ndlp if it has the last reference remaining (reference count 5045 * is 1). If succeeded (meaning ndlp released), it sets the IOCB context1 5046 * field to NULL to inform the following lpfc_els_free_iocb() routine no 5047 * ndlp reference count needs to be decremented. 
Otherwise, the ndlp 5048 * reference use-count shall be decremented by the lpfc_els_free_iocb() 5049 * routine. Finally, the lpfc_els_free_iocb() is invoked to release the 5050 * IOCB data structure. 5051 **/ 5052 static void 5053 lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 5054 struct lpfc_iocbq *rspiocb) 5055 { 5056 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 5057 struct lpfc_vport *vport = cmdiocb->vport; 5058 IOCB_t *irsp; 5059 5060 irsp = &rspiocb->iocb; 5061 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5062 "ACC LOGO cmpl: status:x%x/x%x did:x%x", 5063 irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID); 5064 /* ACC to LOGO completes to NPort <nlp_DID> */ 5065 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5066 "0109 ACC to LOGO completes to NPort x%x refcnt %d " 5067 "Data: x%x x%x x%x\n", 5068 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag, 5069 ndlp->nlp_state, ndlp->nlp_rpi); 5070 5071 /* This clause allows the LOGO ACC to complete and free resources 5072 * for the Fabric Domain Controller. It does deliberately skip 5073 * the unreg_rpi and release rpi because some fabrics send RDP 5074 * requests after logging out from the initiator. 5075 */ 5076 if (ndlp->nlp_type & NLP_FABRIC && 5077 ((ndlp->nlp_DID & WELL_KNOWN_DID_MASK) != WELL_KNOWN_DID_MASK)) 5078 goto out; 5079 5080 if (ndlp->nlp_state == NLP_STE_NPR_NODE) { 5081 5082 /* If PLOGI is being retried, PLOGI completion will cleanup the 5083 * node. The NLP_NPR_2B_DISC flag needs to be retained to make 5084 * progress on nodes discovered from last RSCN. 5085 */ 5086 if ((ndlp->nlp_flag & NLP_DELAY_TMO) && 5087 (ndlp->nlp_last_elscmd == ELS_CMD_PLOGI)) 5088 goto out; 5089 5090 /* NPort Recovery mode or node is just allocated */ 5091 if (!lpfc_nlp_not_used(ndlp)) { 5092 /* A LOGO is completing and the node is in NPR state. 5093 * Just unregister the RPI because the node is still 5094 * required. 5095 */ 5096 lpfc_unreg_rpi(vport, ndlp); 5097 } else { 5098 /* Indicate the node has already released, should 5099 * not reference to it from within lpfc_els_free_iocb. 5100 */ 5101 cmdiocb->context1 = NULL; 5102 } 5103 } 5104 out: 5105 /* 5106 * The driver received a LOGO from the rport and has ACK'd it. 5107 * At this point, the driver is done so release the IOCB 5108 */ 5109 lpfc_els_free_iocb(phba, cmdiocb); 5110 lpfc_nlp_put(ndlp); 5111 } 5112 5113 /** 5114 * lpfc_mbx_cmpl_dflt_rpi - Completion callbk func for unreg dflt rpi mbox cmd 5115 * @phba: pointer to lpfc hba data structure. 5116 * @pmb: pointer to the driver internal queue element for mailbox command. 5117 * 5118 * This routine is the completion callback function for unregister default 5119 * RPI (Remote Port Index) mailbox command to the @phba. It simply releases 5120 * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and 5121 * decrements the ndlp reference count held for this completion callback 5122 * function. After that, it invokes the lpfc_nlp_not_used() to check 5123 * whether there is only one reference left on the ndlp. If so, it will 5124 * perform one more decrement and trigger the release of the ndlp. 
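*
* Sketch of how this completion is wired up (see lpfc_cmpl_els_rsp() below):
* when the default RPI must be removed immediately, the mailbox is queued
* with
*
*	mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
*	mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
*	lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);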
5125 **/ 5126 void 5127 lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 5128 { 5129 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf); 5130 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; 5131 u32 mbx_flag = pmb->mbox_flag; 5132 u32 mbx_cmd = pmb->u.mb.mbxCommand; 5133 5134 pmb->ctx_buf = NULL; 5135 pmb->ctx_ndlp = NULL; 5136 5137 if (ndlp) { 5138 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, 5139 "0006 rpi x%x DID:%x flg:%x %d x%px " 5140 "mbx_cmd x%x mbx_flag x%x x%px\n", 5141 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, 5142 kref_read(&ndlp->kref), ndlp, mbx_cmd, 5143 mbx_flag, pmb); 5144 5145 /* This ends the default/temporary RPI cleanup logic for this 5146 * ndlp, and the node and rpi need to be released. Free the rpi 5147 * first on an UNREG_LOGIN and then release the final 5148 * references. 5149 */ 5150 spin_lock_irq(&ndlp->lock); 5151 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 5152 if (mbx_cmd == MBX_UNREG_LOGIN) 5153 ndlp->nlp_flag &= ~NLP_UNREG_INP; 5154 spin_unlock_irq(&ndlp->lock); 5155 lpfc_nlp_put(ndlp); 5156 lpfc_drop_node(ndlp->vport, ndlp); 5157 } 5158 5159 lpfc_mbuf_free(phba, mp->virt, mp->phys); 5160 kfree(mp); 5161 mempool_free(pmb, phba->mbox_mem_pool); 5162 return; 5163 } 5164 5165 /** 5166 * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd 5167 * @phba: pointer to lpfc hba data structure. 5168 * @cmdiocb: pointer to lpfc command iocb data structure. 5169 * @rspiocb: pointer to lpfc response iocb data structure. 5170 * 5171 * This routine is the completion callback function for ELS Response IOCB 5172 * command. In the normal case, this callback function just properly sets the 5173 * nlp_flag bitmap in the ndlp data structure. If the mbox command reference 5174 * field in the command IOCB is not NULL, the referred mailbox command will 5175 * be sent out. It then invokes the lpfc_els_free_iocb() routine to release 5176 * the IOCB. 5177 **/ 5178 static void 5179 lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 5180 struct lpfc_iocbq *rspiocb) 5181 { 5182 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 5183 struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL; 5184 struct Scsi_Host *shost = vport ?
lpfc_shost_from_vport(vport) : NULL; 5185 IOCB_t *irsp; 5186 LPFC_MBOXQ_t *mbox = NULL; 5187 struct lpfc_dmabuf *mp = NULL; 5188 5189 irsp = &rspiocb->iocb; 5190 5191 if (!vport) { 5192 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5193 "3177 ELS response failed\n"); 5194 goto out; 5195 } 5196 if (cmdiocb->context_un.mbox) 5197 mbox = cmdiocb->context_un.mbox; 5198 5199 /* Check to see if link went down during discovery */ 5200 if (!ndlp || lpfc_els_chk_latt(vport)) { 5201 if (mbox) { 5202 mp = (struct lpfc_dmabuf *)mbox->ctx_buf; 5203 if (mp) { 5204 lpfc_mbuf_free(phba, mp->virt, mp->phys); 5205 kfree(mp); 5206 } 5207 mempool_free(mbox, phba->mbox_mem_pool); 5208 } 5209 goto out; 5210 } 5211 5212 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5213 "ELS rsp cmpl: status:x%x/x%x did:x%x", 5214 irsp->ulpStatus, irsp->un.ulpWord[4], 5215 cmdiocb->iocb.un.elsreq64.remoteID); 5216 /* ELS response tag <ulpIoTag> completes */ 5217 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5218 "0110 ELS response tag x%x completes " 5219 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%px\n", 5220 cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus, 5221 rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout, 5222 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5223 ndlp->nlp_rpi, kref_read(&ndlp->kref), mbox); 5224 if (mbox) { 5225 if ((rspiocb->iocb.ulpStatus == 0) && 5226 (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { 5227 if (!lpfc_unreg_rpi(vport, ndlp) && 5228 (!(vport->fc_flag & FC_PT2PT))) { 5229 if (ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) { 5230 lpfc_printf_vlog(vport, KERN_INFO, 5231 LOG_DISCOVERY, 5232 "0314 PLOGI recov " 5233 "DID x%x " 5234 "Data: x%x x%x x%x\n", 5235 ndlp->nlp_DID, 5236 ndlp->nlp_state, 5237 ndlp->nlp_rpi, 5238 ndlp->nlp_flag); 5239 mp = mbox->ctx_buf; 5240 if (mp) { 5241 lpfc_mbuf_free(phba, mp->virt, 5242 mp->phys); 5243 kfree(mp); 5244 } 5245 mempool_free(mbox, phba->mbox_mem_pool); 5246 goto out; 5247 } 5248 } 5249 5250 /* Increment reference count to ndlp to hold the 5251 * reference to ndlp for the callback function. 5252 */ 5253 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 5254 if (!mbox->ctx_ndlp) 5255 goto out; 5256 5257 mbox->vport = vport; 5258 if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) { 5259 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG; 5260 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 5261 } 5262 else { 5263 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; 5264 ndlp->nlp_prev_state = ndlp->nlp_state; 5265 lpfc_nlp_set_state(vport, ndlp, 5266 NLP_STE_REG_LOGIN_ISSUE); 5267 } 5268 5269 ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; 5270 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 5271 != MBX_NOT_FINISHED) 5272 goto out; 5273 5274 /* Decrement the ndlp reference count we 5275 * set for this failed mailbox command. 
5276 */ 5277 lpfc_nlp_put(ndlp); 5278 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 5279 5280 /* ELS rsp: Cannot issue reg_login for <NPortid> */ 5281 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5282 "0138 ELS rsp: Cannot issue reg_login for x%x " 5283 "Data: x%x x%x x%x\n", 5284 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5285 ndlp->nlp_rpi); 5286 } 5287 mp = (struct lpfc_dmabuf *)mbox->ctx_buf; 5288 if (mp) { 5289 lpfc_mbuf_free(phba, mp->virt, mp->phys); 5290 kfree(mp); 5291 } 5292 mempool_free(mbox, phba->mbox_mem_pool); 5293 } 5294 out: 5295 if (ndlp && shost) { 5296 spin_lock_irq(&ndlp->lock); 5297 if (mbox) 5298 ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN; 5299 ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI; 5300 spin_unlock_irq(&ndlp->lock); 5301 } 5302 5303 /* An SLI4 NPIV instance wants to drop the node at this point under 5304 * these conditions and release the RPI. 5305 */ 5306 if (phba->sli_rev == LPFC_SLI_REV4 && 5307 (vport && vport->port_type == LPFC_NPIV_PORT) && 5308 !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD) && 5309 ndlp->nlp_flag & NLP_RELEASE_RPI) { 5310 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); 5311 spin_lock_irq(&ndlp->lock); 5312 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; 5313 ndlp->nlp_flag &= ~NLP_RELEASE_RPI; 5314 spin_unlock_irq(&ndlp->lock); 5315 lpfc_drop_node(vport, ndlp); 5316 } 5317 5318 /* Release the originating I/O reference. */ 5319 lpfc_els_free_iocb(phba, cmdiocb); 5320 lpfc_nlp_put(ndlp); 5321 return; 5322 } 5323 5324 /** 5325 * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command 5326 * @vport: pointer to a host virtual N_Port data structure. 5327 * @flag: the els command code to be accepted. 5328 * @oldiocb: pointer to the original lpfc command iocb data structure. 5329 * @ndlp: pointer to a node-list data structure. 5330 * @mbox: pointer to the driver internal queue element for mailbox command. 5331 * 5332 * This routine prepares and issues an Accept (ACC) response IOCB 5333 * command. It uses the @flag to properly set up the IOCB field for the 5334 * specific ACC response command to be issued and invokes the 5335 * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a 5336 * @mbox pointer is passed in, it will be put into the context_un.mbox 5337 * field of the IOCB for the completion callback function to issue the 5338 * mailbox command to the HBA later when callback is invoked. 5339 * 5340 * Note that the ndlp reference count will be incremented by 1 for holding the 5341 * ndlp and the reference to ndlp will be stored into the context1 field of 5342 * the IOCB for the completion callback function to the corresponding 5343 * response ELS IOCB command. 
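 *
 * When the @ndlp has the NLP_LOGO_ACC flag set, lpfc_cmpl_els_logo_acc() is
 * installed as the completion routine for the ACC; otherwise the completion
 * is handled by lpfc_cmpl_els_rsp().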
5344 * 5345 * Return code 5346 * 0 - Successfully issued acc response 5347 * 1 - Failed to issue acc response 5348 **/ 5349 int 5350 lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, 5351 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 5352 LPFC_MBOXQ_t *mbox) 5353 { 5354 struct lpfc_hba *phba = vport->phba; 5355 IOCB_t *icmd; 5356 IOCB_t *oldcmd; 5357 struct lpfc_iocbq *elsiocb; 5358 uint8_t *pcmd; 5359 struct serv_parm *sp; 5360 uint16_t cmdsize; 5361 int rc; 5362 ELS_PKT *els_pkt_ptr; 5363 struct fc_els_rdf_resp *rdf_resp; 5364 5365 oldcmd = &oldiocb->iocb; 5366 5367 switch (flag) { 5368 case ELS_CMD_ACC: 5369 cmdsize = sizeof(uint32_t); 5370 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5371 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5372 if (!elsiocb) { 5373 spin_lock_irq(&ndlp->lock); 5374 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 5375 spin_unlock_irq(&ndlp->lock); 5376 return 1; 5377 } 5378 5379 icmd = &elsiocb->iocb; 5380 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5381 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 5382 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5383 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5384 pcmd += sizeof(uint32_t); 5385 5386 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5387 "Issue ACC: did:x%x flg:x%x", 5388 ndlp->nlp_DID, ndlp->nlp_flag, 0); 5389 break; 5390 case ELS_CMD_FLOGI: 5391 case ELS_CMD_PLOGI: 5392 cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t)); 5393 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5394 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5395 if (!elsiocb) 5396 return 1; 5397 5398 icmd = &elsiocb->iocb; 5399 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5400 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 5401 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5402 5403 if (mbox) 5404 elsiocb->context_un.mbox = mbox; 5405 5406 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5407 pcmd += sizeof(uint32_t); 5408 sp = (struct serv_parm *)pcmd; 5409 5410 if (flag == ELS_CMD_FLOGI) { 5411 /* Copy the received service parameters back */ 5412 memcpy(sp, &phba->fc_fabparam, 5413 sizeof(struct serv_parm)); 5414 5415 /* Clear the F_Port bit */ 5416 sp->cmn.fPort = 0; 5417 5418 /* Mark all class service parameters as invalid */ 5419 sp->cls1.classValid = 0; 5420 sp->cls2.classValid = 0; 5421 sp->cls3.classValid = 0; 5422 sp->cls4.classValid = 0; 5423 5424 /* Copy our worldwide names */ 5425 memcpy(&sp->portName, &vport->fc_sparam.portName, 5426 sizeof(struct lpfc_name)); 5427 memcpy(&sp->nodeName, &vport->fc_sparam.nodeName, 5428 sizeof(struct lpfc_name)); 5429 } else { 5430 memcpy(pcmd, &vport->fc_sparam, 5431 sizeof(struct serv_parm)); 5432 5433 sp->cmn.valid_vendor_ver_level = 0; 5434 memset(sp->un.vendorVersion, 0, 5435 sizeof(sp->un.vendorVersion)); 5436 sp->cmn.bbRcvSizeMsb &= 0xF; 5437 5438 /* If our firmware supports this feature, convey that 5439 * info to the target using the vendor specific field. 
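 * The vendor specific descriptor carries the Emulex vendor ID
 * (LPFC_VV_EMLX_ID) and the LPFC_VV_SUPPRESS_RSP flag, as set up just below.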
5440 */ 5441 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) { 5442 sp->cmn.valid_vendor_ver_level = 1; 5443 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID); 5444 sp->un.vv.flags = 5445 cpu_to_be32(LPFC_VV_SUPPRESS_RSP); 5446 } 5447 } 5448 5449 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5450 "Issue ACC FLOGI/PLOGI: did:x%x flg:x%x", 5451 ndlp->nlp_DID, ndlp->nlp_flag, 0); 5452 break; 5453 case ELS_CMD_PRLO: 5454 cmdsize = sizeof(uint32_t) + sizeof(PRLO); 5455 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5456 ndlp, ndlp->nlp_DID, ELS_CMD_PRLO); 5457 if (!elsiocb) 5458 return 1; 5459 5460 icmd = &elsiocb->iocb; 5461 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5462 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 5463 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5464 5465 memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt, 5466 sizeof(uint32_t) + sizeof(PRLO)); 5467 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC; 5468 els_pkt_ptr = (ELS_PKT *) pcmd; 5469 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED; 5470 5471 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5472 "Issue ACC PRLO: did:x%x flg:x%x", 5473 ndlp->nlp_DID, ndlp->nlp_flag, 0); 5474 break; 5475 case ELS_CMD_RDF: 5476 cmdsize = sizeof(*rdf_resp); 5477 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5478 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5479 if (!elsiocb) 5480 return 1; 5481 5482 icmd = &elsiocb->iocb; 5483 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5484 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 5485 pcmd = (((struct lpfc_dmabuf *)elsiocb->context2)->virt); 5486 rdf_resp = (struct fc_els_rdf_resp *)pcmd; 5487 memset(rdf_resp, 0, sizeof(*rdf_resp)); 5488 rdf_resp->acc_hdr.la_cmd = ELS_LS_ACC; 5489 5490 /* FC-LS-5 specifies desc_list_len shall be set to 12 */ 5491 rdf_resp->desc_list_len = cpu_to_be32(12); 5492 5493 /* FC-LS-5 specifies LS REQ Information descriptor */ 5494 rdf_resp->lsri.desc_tag = cpu_to_be32(1); 5495 rdf_resp->lsri.desc_len = cpu_to_be32(sizeof(u32)); 5496 rdf_resp->lsri.rqst_w0.cmd = ELS_RDF; 5497 break; 5498 default: 5499 return 1; 5500 } 5501 if (ndlp->nlp_flag & NLP_LOGO_ACC) { 5502 spin_lock_irq(&ndlp->lock); 5503 if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED || 5504 ndlp->nlp_flag & NLP_REG_LOGIN_SEND)) 5505 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 5506 spin_unlock_irq(&ndlp->lock); 5507 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc; 5508 } else { 5509 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5510 } 5511 5512 phba->fc_stat.elsXmitACC++; 5513 elsiocb->context1 = lpfc_nlp_get(ndlp); 5514 if (!elsiocb->context1) { 5515 lpfc_els_free_iocb(phba, elsiocb); 5516 return 1; 5517 } 5518 5519 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5520 if (rc == IOCB_ERROR) { 5521 lpfc_els_free_iocb(phba, elsiocb); 5522 lpfc_nlp_put(ndlp); 5523 return 1; 5524 } 5525 5526 /* Xmit ELS ACC response tag <ulpIoTag> */ 5527 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5528 "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, " 5529 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " 5530 "RPI: x%x, fc_flag x%x refcnt %d\n", 5531 rc, elsiocb->iotag, elsiocb->sli4_xritag, 5532 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5533 ndlp->nlp_rpi, vport->fc_flag, kref_read(&ndlp->kref)); 5534 return 0; 5535 } 5536 5537 /** 5538 * lpfc_els_rsp_reject - Prepare and issue a rjt response iocb command 5539 * @vport: pointer to a virtual N_Port data structure. 
5540 * @rejectError: reject response to issue 5541 * @oldiocb: pointer to the original lpfc command iocb data structure. 5542 * @ndlp: pointer to a node-list data structure. 5543 * @mbox: pointer to the driver internal queue element for mailbox command. 5544 * 5545 * This routine prepares and issues a Reject (RJT) response IOCB 5546 * command. If a @mbox pointer is passed in, it will be put into the 5547 * context_un.mbox field of the IOCB for the completion callback function 5548 * to issue to the HBA later. 5549 * 5550 * Note that the ndlp reference count will be incremented by 1 for holding the 5551 * ndlp and the reference to ndlp will be stored into the context1 field of 5552 * the IOCB for the completion callback function to the reject response 5553 * ELS IOCB command. 5554 * 5555 * Return code 5556 * 0 - Successfully issued reject response 5557 * 1 - Failed to issue reject response 5558 **/ 5559 int 5560 lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, 5561 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 5562 LPFC_MBOXQ_t *mbox) 5563 { 5564 int rc; 5565 struct lpfc_hba *phba = vport->phba; 5566 IOCB_t *icmd; 5567 IOCB_t *oldcmd; 5568 struct lpfc_iocbq *elsiocb; 5569 uint8_t *pcmd; 5570 uint16_t cmdsize; 5571 5572 cmdsize = 2 * sizeof(uint32_t); 5573 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 5574 ndlp->nlp_DID, ELS_CMD_LS_RJT); 5575 if (!elsiocb) 5576 return 1; 5577 5578 icmd = &elsiocb->iocb; 5579 oldcmd = &oldiocb->iocb; 5580 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5581 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 5582 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5583 5584 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; 5585 pcmd += sizeof(uint32_t); 5586 *((uint32_t *) (pcmd)) = rejectError; 5587 5588 if (mbox) 5589 elsiocb->context_un.mbox = mbox; 5590 5591 /* Xmit ELS RJT <err> response tag <ulpIoTag> */ 5592 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5593 "0129 Xmit ELS RJT x%x response tag x%x " 5594 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " 5595 "rpi x%x\n", 5596 rejectError, elsiocb->iotag, 5597 elsiocb->iocb.ulpContext, ndlp->nlp_DID, 5598 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); 5599 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5600 "Issue LS_RJT: did:x%x flg:x%x err:x%x", 5601 ndlp->nlp_DID, ndlp->nlp_flag, rejectError); 5602 5603 phba->fc_stat.elsXmitLSRJT++; 5604 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5605 elsiocb->context1 = lpfc_nlp_get(ndlp); 5606 if (!elsiocb->context1) { 5607 lpfc_els_free_iocb(phba, elsiocb); 5608 return 1; 5609 } 5610 5611 /* The NPIV instance is rejecting this unsolicited ELS. Make sure the 5612 * node's assigned RPI gets released provided this node is not already 5613 * registered with the transport. 5614 */ 5615 if (phba->sli_rev == LPFC_SLI_REV4 && 5616 vport->port_type == LPFC_NPIV_PORT && 5617 !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) { 5618 spin_lock_irq(&ndlp->lock); 5619 ndlp->nlp_flag |= NLP_RELEASE_RPI; 5620 spin_unlock_irq(&ndlp->lock); 5621 } 5622 5623 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5624 if (rc == IOCB_ERROR) { 5625 lpfc_els_free_iocb(phba, elsiocb); 5626 lpfc_nlp_put(ndlp); 5627 return 1; 5628 } 5629 5630 return 0; 5631 } 5632 5633 /** 5634 * lpfc_issue_els_edc_rsp - Exchange Diagnostic Capabilities with the fabric. 5635 * @vport: pointer to a host virtual N_Port data structure. 5636 * @cmdiocb: pointer to the original lpfc command iocb data structure.
5637 * @ndlp: NPort to where rsp is directed 5638 * 5639 * This routine issues an EDC ACC RSP to the F-Port Controller to communicate 5640 * this N_Port's support of hardware signals in its Congestion 5641 * Capabilities Descriptor. 5642 * 5643 * Return code 5644 * 0 - Successfully issued edc rsp command 5645 * 1 - Failed to issue edc rsp command 5646 **/ 5647 static int 5648 lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5649 struct lpfc_nodelist *ndlp) 5650 { 5651 struct lpfc_hba *phba = vport->phba; 5652 struct lpfc_els_edc_rsp *edc_rsp; 5653 struct lpfc_iocbq *elsiocb; 5654 IOCB_t *icmd, *cmd; 5655 uint8_t *pcmd; 5656 int cmdsize, rc; 5657 5658 cmdsize = sizeof(struct lpfc_els_edc_rsp); 5659 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, cmdiocb->retry, 5660 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5661 if (!elsiocb) 5662 return 1; 5663 5664 icmd = &elsiocb->iocb; 5665 cmd = &cmdiocb->iocb; 5666 icmd->ulpContext = cmd->ulpContext; /* Xri / rx_id */ 5667 icmd->unsli3.rcvsli3.ox_id = cmd->unsli3.rcvsli3.ox_id; 5668 pcmd = (((struct lpfc_dmabuf *)elsiocb->context2)->virt); 5669 memset(pcmd, 0, cmdsize); 5670 5671 edc_rsp = (struct lpfc_els_edc_rsp *)pcmd; 5672 edc_rsp->edc_rsp.acc_hdr.la_cmd = ELS_LS_ACC; 5673 edc_rsp->edc_rsp.desc_list_len = cpu_to_be32( 5674 FC_TLV_DESC_LENGTH_FROM_SZ(struct lpfc_els_edc_rsp)); 5675 edc_rsp->edc_rsp.lsri.desc_tag = cpu_to_be32(ELS_DTAG_LS_REQ_INFO); 5676 edc_rsp->edc_rsp.lsri.desc_len = cpu_to_be32( 5677 FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_els_lsri_desc)); 5678 edc_rsp->edc_rsp.lsri.rqst_w0.cmd = ELS_EDC; 5679 lpfc_format_edc_cgn_desc(phba, &edc_rsp->cgn_desc); 5680 5681 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5682 "Issue EDC ACC: did:x%x flg:x%x refcnt %d", 5683 ndlp->nlp_DID, ndlp->nlp_flag, 5684 kref_read(&ndlp->kref)); 5685 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5686 5687 phba->fc_stat.elsXmitACC++; 5688 elsiocb->context1 = lpfc_nlp_get(ndlp); 5689 if (!elsiocb->context1) { 5690 lpfc_els_free_iocb(phba, elsiocb); 5691 return 1; 5692 } 5693 5694 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5695 if (rc == IOCB_ERROR) { 5696 lpfc_els_free_iocb(phba, elsiocb); 5697 lpfc_nlp_put(ndlp); 5698 return 1; 5699 } 5700 5701 /* Xmit ELS ACC response tag <ulpIoTag> */ 5702 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5703 "0152 Xmit EDC ACC response Status: x%x, IoTag: x%x, " 5704 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " 5705 "RPI: x%x, fc_flag x%x\n", 5706 rc, elsiocb->iotag, elsiocb->sli4_xritag, 5707 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5708 ndlp->nlp_rpi, vport->fc_flag); 5709 5710 return 0; 5711 } 5712 5713 /** 5714 * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd 5715 * @vport: pointer to a virtual N_Port data structure. 5716 * @oldiocb: pointer to the original lpfc command iocb data structure. 5717 * @ndlp: pointer to a node-list data structure. 5718 * 5719 * This routine prepares and issues an Accept (ACC) response to Address 5720 * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB 5721 * and invokes the lpfc_sli_issue_iocb() routine to send out the command. 5722 * 5723 * Note that the ndlp reference count will be incremented by 1 for holding the 5724 * ndlp and the reference to ndlp will be stored into the context1 field of 5725 * the IOCB for the completion callback function to the ADISC Accept response 5726 * ELS IOCB command. 
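 *
 * The ACC payload returns this vport's port name, node name, hard address
 * (AL_PA) and N_Port ID to the requester.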
5727 * 5728 * Return code 5729 * 0 - Successfully issued acc adisc response 5730 * 1 - Failed to issue adisc acc response 5731 **/ 5732 int 5733 lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 5734 struct lpfc_nodelist *ndlp) 5735 { 5736 struct lpfc_hba *phba = vport->phba; 5737 ADISC *ap; 5738 IOCB_t *icmd, *oldcmd; 5739 struct lpfc_iocbq *elsiocb; 5740 uint8_t *pcmd; 5741 uint16_t cmdsize; 5742 int rc; 5743 5744 cmdsize = sizeof(uint32_t) + sizeof(ADISC); 5745 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 5746 ndlp->nlp_DID, ELS_CMD_ACC); 5747 if (!elsiocb) 5748 return 1; 5749 5750 icmd = &elsiocb->iocb; 5751 oldcmd = &oldiocb->iocb; 5752 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5753 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 5754 5755 /* Xmit ADISC ACC response tag <ulpIoTag> */ 5756 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5757 "0130 Xmit ADISC ACC response iotag x%x xri: " 5758 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n", 5759 elsiocb->iotag, elsiocb->iocb.ulpContext, 5760 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5761 ndlp->nlp_rpi); 5762 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5763 5764 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5765 pcmd += sizeof(uint32_t); 5766 5767 ap = (ADISC *) (pcmd); 5768 ap->hardAL_PA = phba->fc_pref_ALPA; 5769 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 5770 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 5771 ap->DID = be32_to_cpu(vport->fc_myDID); 5772 5773 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5774 "Issue ACC ADISC: did:x%x flg:x%x refcnt %d", 5775 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 5776 5777 phba->fc_stat.elsXmitACC++; 5778 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5779 elsiocb->context1 = lpfc_nlp_get(ndlp); 5780 if (!elsiocb->context1) { 5781 lpfc_els_free_iocb(phba, elsiocb); 5782 return 1; 5783 } 5784 5785 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5786 if (rc == IOCB_ERROR) { 5787 lpfc_els_free_iocb(phba, elsiocb); 5788 lpfc_nlp_put(ndlp); 5789 return 1; 5790 } 5791 5792 /* Xmit ELS ACC response tag <ulpIoTag> */ 5793 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5794 "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, " 5795 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " 5796 "RPI: x%x, fc_flag x%x\n", 5797 rc, elsiocb->iotag, elsiocb->sli4_xritag, 5798 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5799 ndlp->nlp_rpi, vport->fc_flag); 5800 return 0; 5801 } 5802 5803 /** 5804 * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd 5805 * @vport: pointer to a virtual N_Port data structure. 5806 * @oldiocb: pointer to the original lpfc command iocb data structure. 5807 * @ndlp: pointer to a node-list data structure. 5808 * 5809 * This routine prepares and issues an Accept (ACC) response to Process 5810 * Login (PRLI) ELS command. It simply prepares the payload of the IOCB 5811 * and invokes the lpfc_sli_issue_iocb() routine to send out the command. 5812 * 5813 * Note that the ndlp reference count will be incremented by 1 for holding the 5814 * ndlp and the reference to ndlp will be stored into the context1 field of 5815 * the IOCB for the completion callback function to the PRLI Accept response 5816 * ELS IOCB command. 
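 *
 * The FC4 type of the ACC (FCP or NVME) is taken from word 1 of the incoming
 * PRLI payload so that the response format matches the request.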
5817 * 5818 * Return code 5819 * 0 - Successfully issued acc prli response 5820 * 1 - Failed to issue acc prli response 5821 **/ 5822 int 5823 lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 5824 struct lpfc_nodelist *ndlp) 5825 { 5826 struct lpfc_hba *phba = vport->phba; 5827 PRLI *npr; 5828 struct lpfc_nvme_prli *npr_nvme; 5829 lpfc_vpd_t *vpd; 5830 IOCB_t *icmd; 5831 IOCB_t *oldcmd; 5832 struct lpfc_iocbq *elsiocb; 5833 uint8_t *pcmd; 5834 uint16_t cmdsize; 5835 uint32_t prli_fc4_req, *req_payload; 5836 struct lpfc_dmabuf *req_buf; 5837 int rc; 5838 u32 elsrspcmd; 5839 5840 /* Need the incoming PRLI payload to determine if the ACC is for an 5841 * FC4 or NVME PRLI type. The PRLI type is at word 1. 5842 */ 5843 req_buf = (struct lpfc_dmabuf *)oldiocb->context2; 5844 req_payload = (((uint32_t *)req_buf->virt) + 1); 5845 5846 /* PRLI type payload is at byte 3 for FCP or NVME. */ 5847 prli_fc4_req = be32_to_cpu(*req_payload); 5848 prli_fc4_req = (prli_fc4_req >> 24) & 0xff; 5849 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5850 "6127 PRLI_ACC: Req Type x%x, Word1 x%08x\n", 5851 prli_fc4_req, *((uint32_t *)req_payload)); 5852 5853 if (prli_fc4_req == PRLI_FCP_TYPE) { 5854 cmdsize = sizeof(uint32_t) + sizeof(PRLI); 5855 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)); 5856 } else if (prli_fc4_req & PRLI_NVME_TYPE) { 5857 cmdsize = sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli); 5858 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_NVMEPRLI & ~ELS_RSP_MASK)); 5859 } else { 5860 return 1; 5861 } 5862 5863 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 5864 ndlp->nlp_DID, elsrspcmd); 5865 if (!elsiocb) 5866 return 1; 5867 5868 icmd = &elsiocb->iocb; 5869 oldcmd = &oldiocb->iocb; 5870 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5871 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 5872 5873 /* Xmit PRLI ACC response tag <ulpIoTag> */ 5874 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5875 "0131 Xmit PRLI ACC response tag x%x xri x%x, " 5876 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 5877 elsiocb->iotag, elsiocb->iocb.ulpContext, 5878 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5879 ndlp->nlp_rpi); 5880 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5881 memset(pcmd, 0, cmdsize); 5882 5883 *((uint32_t *)(pcmd)) = elsrspcmd; 5884 pcmd += sizeof(uint32_t); 5885 5886 /* For PRLI, remainder of payload is PRLI parameter page */ 5887 vpd = &phba->vpd; 5888 5889 if (prli_fc4_req == PRLI_FCP_TYPE) { 5890 /* 5891 * If the remote port is a target and our firmware version 5892 * is 3.20 or later, set the following bits for FC-TAPE 5893 * support. 
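 * (ConfmComplAllowed, Retry and TaskRetryIdReq in the FCP PRLI
 * service parameter page below).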
5894 */ 5895 npr = (PRLI *) pcmd; 5896 if ((ndlp->nlp_type & NLP_FCP_TARGET) && 5897 (vpd->rev.feaLevelHigh >= 0x02)) { 5898 npr->ConfmComplAllowed = 1; 5899 npr->Retry = 1; 5900 npr->TaskRetryIdReq = 1; 5901 } 5902 npr->acceptRspCode = PRLI_REQ_EXECUTED; 5903 npr->estabImagePair = 1; 5904 npr->readXferRdyDis = 1; 5905 npr->ConfmComplAllowed = 1; 5906 npr->prliType = PRLI_FCP_TYPE; 5907 npr->initiatorFunc = 1; 5908 } else if (prli_fc4_req & PRLI_NVME_TYPE) { 5909 /* Respond with an NVME PRLI Type */ 5910 npr_nvme = (struct lpfc_nvme_prli *) pcmd; 5911 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); 5912 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ 5913 bf_set(prli_acc_rsp_code, npr_nvme, PRLI_REQ_EXECUTED); 5914 if (phba->nvmet_support) { 5915 bf_set(prli_tgt, npr_nvme, 1); 5916 bf_set(prli_disc, npr_nvme, 1); 5917 if (phba->cfg_nvme_enable_fb) { 5918 bf_set(prli_fba, npr_nvme, 1); 5919 5920 /* TBD. Target mode needs to post buffers 5921 * that support the configured first burst 5922 * byte size. 5923 */ 5924 bf_set(prli_fb_sz, npr_nvme, 5925 phba->cfg_nvmet_fb_size); 5926 } 5927 } else { 5928 bf_set(prli_init, npr_nvme, 1); 5929 } 5930 5931 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 5932 "6015 NVME issue PRLI ACC word1 x%08x " 5933 "word4 x%08x word5 x%08x flag x%x, " 5934 "fcp_info x%x nlp_type x%x\n", 5935 npr_nvme->word1, npr_nvme->word4, 5936 npr_nvme->word5, ndlp->nlp_flag, 5937 ndlp->nlp_fcp_info, ndlp->nlp_type); 5938 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); 5939 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); 5940 npr_nvme->word5 = cpu_to_be32(npr_nvme->word5); 5941 } else 5942 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 5943 "6128 Unknown FC_TYPE x%x x%x ndlp x%06x\n", 5944 prli_fc4_req, ndlp->nlp_fc4_type, 5945 ndlp->nlp_DID); 5946 5947 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5948 "Issue ACC PRLI: did:x%x flg:x%x", 5949 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 5950 5951 phba->fc_stat.elsXmitACC++; 5952 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5953 elsiocb->context1 = lpfc_nlp_get(ndlp); 5954 if (!elsiocb->context1) { 5955 lpfc_els_free_iocb(phba, elsiocb); 5956 return 1; 5957 } 5958 5959 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5960 if (rc == IOCB_ERROR) { 5961 lpfc_els_free_iocb(phba, elsiocb); 5962 lpfc_nlp_put(ndlp); 5963 return 1; 5964 } 5965 5966 return 0; 5967 } 5968 5969 /** 5970 * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command 5971 * @vport: pointer to a virtual N_Port data structure. 5972 * @format: rnid command format. 5973 * @oldiocb: pointer to the original lpfc command iocb data structure. 5974 * @ndlp: pointer to a node-list data structure. 5975 * 5976 * This routine issues a Request Node Identification Data (RNID) Accept 5977 * (ACC) response. It constructs the RNID ACC response command according to 5978 * the proper @format and then calls the lpfc_sli_issue_iocb() routine to 5979 * issue the response. 5980 * 5981 * Note that the ndlp reference count will be incremented by 1 for holding the 5982 * ndlp and the reference to ndlp will be stored into the context1 field of 5983 * the IOCB for the completion callback function. 
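 *
 * When @format is RNID_TOPOLOGY_DISC, a topology discovery block (port name,
 * unit type, physical port and attached-node count) is appended after the
 * common node/port name data; unknown formats are answered with zero-length
 * common and specific data.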
5984 * 5985 * Return code 5986 * 0 - Successfully issued acc rnid response 5987 * 1 - Failed to issue acc rnid response 5988 **/ 5989 static int 5990 lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, 5991 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 5992 { 5993 struct lpfc_hba *phba = vport->phba; 5994 RNID *rn; 5995 IOCB_t *icmd, *oldcmd; 5996 struct lpfc_iocbq *elsiocb; 5997 uint8_t *pcmd; 5998 uint16_t cmdsize; 5999 int rc; 6000 6001 cmdsize = sizeof(uint32_t) + sizeof(uint32_t) 6002 + (2 * sizeof(struct lpfc_name)); 6003 if (format) 6004 cmdsize += sizeof(RNID_TOP_DISC); 6005 6006 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 6007 ndlp->nlp_DID, ELS_CMD_ACC); 6008 if (!elsiocb) 6009 return 1; 6010 6011 icmd = &elsiocb->iocb; 6012 oldcmd = &oldiocb->iocb; 6013 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 6014 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 6015 6016 /* Xmit RNID ACC response tag <ulpIoTag> */ 6017 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6018 "0132 Xmit RNID ACC response tag x%x xri x%x\n", 6019 elsiocb->iotag, elsiocb->iocb.ulpContext); 6020 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 6021 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 6022 pcmd += sizeof(uint32_t); 6023 6024 memset(pcmd, 0, sizeof(RNID)); 6025 rn = (RNID *) (pcmd); 6026 rn->Format = format; 6027 rn->CommonLen = (2 * sizeof(struct lpfc_name)); 6028 memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 6029 memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 6030 switch (format) { 6031 case 0: 6032 rn->SpecificLen = 0; 6033 break; 6034 case RNID_TOPOLOGY_DISC: 6035 rn->SpecificLen = sizeof(RNID_TOP_DISC); 6036 memcpy(&rn->un.topologyDisc.portName, 6037 &vport->fc_portname, sizeof(struct lpfc_name)); 6038 rn->un.topologyDisc.unitType = RNID_HBA; 6039 rn->un.topologyDisc.physPort = 0; 6040 rn->un.topologyDisc.attachedNodes = 0; 6041 break; 6042 default: 6043 rn->CommonLen = 0; 6044 rn->SpecificLen = 0; 6045 break; 6046 } 6047 6048 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6049 "Issue ACC RNID: did:x%x flg:x%x refcnt %d", 6050 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 6051 6052 phba->fc_stat.elsXmitACC++; 6053 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 6054 elsiocb->context1 = lpfc_nlp_get(ndlp); 6055 if (!elsiocb->context1) { 6056 lpfc_els_free_iocb(phba, elsiocb); 6057 return 1; 6058 } 6059 6060 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6061 if (rc == IOCB_ERROR) { 6062 lpfc_els_free_iocb(phba, elsiocb); 6063 lpfc_nlp_put(ndlp); 6064 return 1; 6065 } 6066 6067 return 0; 6068 } 6069 6070 /** 6071 * lpfc_els_clear_rrq - Clear the rq that this rrq describes. 6072 * @vport: pointer to a virtual N_Port data structure. 6073 * @iocb: pointer to the lpfc command iocb data structure. 6074 * @ndlp: pointer to a node-list data structure. 
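 *
 * This routine parses the RRQ payload of @iocb, selects the OX_ID when this
 * port originated the exchange (or the RX_ID otherwise), looks up the
 * matching active RRQ for @ndlp and clears it via lpfc_clr_rrq_active() so
 * the exchange resources can be reused.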
6075 * 6076 * Return 6077 **/ 6078 static void 6079 lpfc_els_clear_rrq(struct lpfc_vport *vport, 6080 struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp) 6081 { 6082 struct lpfc_hba *phba = vport->phba; 6083 uint8_t *pcmd; 6084 struct RRQ *rrq; 6085 uint16_t rxid; 6086 uint16_t xri; 6087 struct lpfc_node_rrq *prrq; 6088 6089 6090 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) iocb->context2)->virt); 6091 pcmd += sizeof(uint32_t); 6092 rrq = (struct RRQ *)pcmd; 6093 rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg); 6094 rxid = bf_get(rrq_rxid, rrq); 6095 6096 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6097 "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x" 6098 " x%x x%x\n", 6099 be32_to_cpu(bf_get(rrq_did, rrq)), 6100 bf_get(rrq_oxid, rrq), 6101 rxid, 6102 iocb->iotag, iocb->iocb.ulpContext); 6103 6104 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6105 "Clear RRQ: did:x%x flg:x%x exchg:x%.08x", 6106 ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg); 6107 if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq))) 6108 xri = bf_get(rrq_oxid, rrq); 6109 else 6110 xri = rxid; 6111 prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID); 6112 if (prrq) 6113 lpfc_clr_rrq_active(phba, xri, prrq); 6114 return; 6115 } 6116 6117 /** 6118 * lpfc_els_rsp_echo_acc - Issue echo acc response 6119 * @vport: pointer to a virtual N_Port data structure. 6120 * @data: pointer to echo data to return in the accept. 6121 * @oldiocb: pointer to the original lpfc command iocb data structure. 6122 * @ndlp: pointer to a node-list data structure. 6123 * 6124 * Return code 6125 * 0 - Successfully issued acc echo response 6126 * 1 - Failed to issue acc echo response 6127 **/ 6128 static int 6129 lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data, 6130 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 6131 { 6132 struct lpfc_hba *phba = vport->phba; 6133 struct lpfc_iocbq *elsiocb; 6134 uint8_t *pcmd; 6135 uint16_t cmdsize; 6136 int rc; 6137 6138 cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len; 6139 6140 /* The accumulated length can exceed the BPL_SIZE. 
For 6141 * now, use this as the limit 6142 */ 6143 if (cmdsize > LPFC_BPL_SIZE) 6144 cmdsize = LPFC_BPL_SIZE; 6145 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 6146 ndlp->nlp_DID, ELS_CMD_ACC); 6147 if (!elsiocb) 6148 return 1; 6149 6150 elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext; /* Xri / rx_id */ 6151 elsiocb->iocb.unsli3.rcvsli3.ox_id = oldiocb->iocb.unsli3.rcvsli3.ox_id; 6152 6153 /* Xmit ECHO ACC response tag <ulpIoTag> */ 6154 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6155 "2876 Xmit ECHO ACC response tag x%x xri x%x\n", 6156 elsiocb->iotag, elsiocb->iocb.ulpContext); 6157 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 6158 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 6159 pcmd += sizeof(uint32_t); 6160 memcpy(pcmd, data, cmdsize - sizeof(uint32_t)); 6161 6162 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6163 "Issue ACC ECHO: did:x%x flg:x%x refcnt %d", 6164 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 6165 6166 phba->fc_stat.elsXmitACC++; 6167 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 6168 elsiocb->context1 = lpfc_nlp_get(ndlp); 6169 if (!elsiocb->context1) { 6170 lpfc_els_free_iocb(phba, elsiocb); 6171 return 1; 6172 } 6173 6174 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6175 if (rc == IOCB_ERROR) { 6176 lpfc_els_free_iocb(phba, elsiocb); 6177 lpfc_nlp_put(ndlp); 6178 return 1; 6179 } 6180 6181 return 0; 6182 } 6183 6184 /** 6185 * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport 6186 * @vport: pointer to a host virtual N_Port data structure. 6187 * 6188 * This routine issues Address Discover (ADISC) ELS commands to those 6189 * N_Ports which are in node port recovery state and ADISC has not been issued 6190 * for the @vport. Each time an ELS ADISC IOCB is issued by invoking the 6191 * lpfc_issue_els_adisc() routine, the per @vport number of discover count 6192 * (num_disc_nodes) shall be incremented. If the num_disc_nodes reaches a 6193 * pre-configured threshold (cfg_discovery_threads), the @vport fc_flag will 6194 * be marked with FC_NLP_MORE bit and the process of issuing remaining ADISC 6195 * IOCBs quit for later pick up. On the other hand, after walking through 6196 * all the ndlps with the @vport and there is none ADISC IOCB issued, the 6197 * @vport fc_flag shall be cleared with FC_NLP_MORE bit indicating there is 6198 * no more ADISC need to be sent. 6199 * 6200 * Return code 6201 * The number of N_Ports with adisc issued. 6202 **/ 6203 int 6204 lpfc_els_disc_adisc(struct lpfc_vport *vport) 6205 { 6206 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6207 struct lpfc_nodelist *ndlp, *next_ndlp; 6208 int sentadisc = 0; 6209 6210 /* go thru NPR nodes and issue any remaining ELS ADISCs */ 6211 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 6212 6213 if (ndlp->nlp_state != NLP_STE_NPR_NODE || 6214 !(ndlp->nlp_flag & NLP_NPR_ADISC)) 6215 continue; 6216 6217 spin_lock_irq(&ndlp->lock); 6218 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 6219 spin_unlock_irq(&ndlp->lock); 6220 6221 if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) { 6222 /* This node was marked for ADISC but was not picked 6223 * for discovery. This is possible if the node was 6224 * missing in gidft response. 
6225 * 6226 * At the time of marking the node for ADISC, we skipped the unreg 6227 * from the backend. 6228 */ 6229 lpfc_nlp_unreg_node(vport, ndlp); 6230 lpfc_unreg_rpi(vport, ndlp); 6231 continue; 6232 } 6233 6234 ndlp->nlp_prev_state = ndlp->nlp_state; 6235 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 6236 lpfc_issue_els_adisc(vport, ndlp, 0); 6237 sentadisc++; 6238 vport->num_disc_nodes++; 6239 if (vport->num_disc_nodes >= 6240 vport->cfg_discovery_threads) { 6241 spin_lock_irq(shost->host_lock); 6242 vport->fc_flag |= FC_NLP_MORE; 6243 spin_unlock_irq(shost->host_lock); 6244 break; 6245 } 6246 6247 } 6248 if (sentadisc == 0) { 6249 spin_lock_irq(shost->host_lock); 6250 vport->fc_flag &= ~FC_NLP_MORE; 6251 spin_unlock_irq(shost->host_lock); 6252 } 6253 return sentadisc; 6254 } 6255 6256 /** 6257 * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc 6258 * @vport: pointer to a host virtual N_Port data structure. 6259 * 6260 * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports 6261 * which are in node port recovery state on a @vport. Each time an ELS 6262 * PLOGI IOCB is issued by invoking the lpfc_issue_els_plogi() routine, 6263 * the per @vport number of discover count (num_disc_nodes) shall be 6264 * incremented. If the num_disc_nodes reaches a pre-configured threshold 6265 * (cfg_discovery_threads), the @vport fc_flag will be marked with the 6266 * FC_NLP_MORE bit and the process of issuing the remaining PLOGI IOCBs 6267 * quits, to be picked up later. On the other hand, after walking through 6268 * all the ndlps with the @vport and no PLOGI IOCB was issued, the FC_NLP_MORE 6269 * bit shall be cleared from the @vport fc_flag, indicating there are no more 6270 * PLOGIs that need to be sent. 6271 * 6272 * Return code 6273 * The number of N_Ports with plogi issued.
6274 **/ 6275 int 6276 lpfc_els_disc_plogi(struct lpfc_vport *vport) 6277 { 6278 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6279 struct lpfc_nodelist *ndlp, *next_ndlp; 6280 int sentplogi = 0; 6281 6282 /* go thru NPR nodes and issue any remaining ELS PLOGIs */ 6283 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 6284 if (ndlp->nlp_state == NLP_STE_NPR_NODE && 6285 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && 6286 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 && 6287 (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) { 6288 ndlp->nlp_prev_state = ndlp->nlp_state; 6289 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 6290 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 6291 sentplogi++; 6292 vport->num_disc_nodes++; 6293 if (vport->num_disc_nodes >= 6294 vport->cfg_discovery_threads) { 6295 spin_lock_irq(shost->host_lock); 6296 vport->fc_flag |= FC_NLP_MORE; 6297 spin_unlock_irq(shost->host_lock); 6298 break; 6299 } 6300 } 6301 } 6302 6303 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6304 "6452 Discover PLOGI %d flag x%x\n", 6305 sentplogi, vport->fc_flag); 6306 6307 if (sentplogi) { 6308 lpfc_set_disctmo(vport); 6309 } 6310 else { 6311 spin_lock_irq(shost->host_lock); 6312 vport->fc_flag &= ~FC_NLP_MORE; 6313 spin_unlock_irq(shost->host_lock); 6314 } 6315 return sentplogi; 6316 } 6317 6318 static uint32_t 6319 lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc, 6320 uint32_t word0) 6321 { 6322 6323 desc->tag = cpu_to_be32(RDP_LINK_SERVICE_DESC_TAG); 6324 desc->payload.els_req = word0; 6325 desc->length = cpu_to_be32(sizeof(desc->payload)); 6326 6327 return sizeof(struct fc_rdp_link_service_desc); 6328 } 6329 6330 static uint32_t 6331 lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc, 6332 uint8_t *page_a0, uint8_t *page_a2) 6333 { 6334 uint16_t wavelength; 6335 uint16_t temperature; 6336 uint16_t rx_power; 6337 uint16_t tx_bias; 6338 uint16_t tx_power; 6339 uint16_t vcc; 6340 uint16_t flag = 0; 6341 struct sff_trasnceiver_codes_byte4 *trasn_code_byte4; 6342 struct sff_trasnceiver_codes_byte5 *trasn_code_byte5; 6343 6344 desc->tag = cpu_to_be32(RDP_SFP_DESC_TAG); 6345 6346 trasn_code_byte4 = (struct sff_trasnceiver_codes_byte4 *) 6347 &page_a0[SSF_TRANSCEIVER_CODE_B4]; 6348 trasn_code_byte5 = (struct sff_trasnceiver_codes_byte5 *) 6349 &page_a0[SSF_TRANSCEIVER_CODE_B5]; 6350 6351 if ((trasn_code_byte4->fc_sw_laser) || 6352 (trasn_code_byte5->fc_sw_laser_sl) || 6353 (trasn_code_byte5->fc_sw_laser_sn)) { /* check if its short WL */ 6354 flag |= (SFP_FLAG_PT_SWLASER << SFP_FLAG_PT_SHIFT); 6355 } else if (trasn_code_byte4->fc_lw_laser) { 6356 wavelength = (page_a0[SSF_WAVELENGTH_B1] << 8) | 6357 page_a0[SSF_WAVELENGTH_B0]; 6358 if (wavelength == SFP_WAVELENGTH_LC1310) 6359 flag |= SFP_FLAG_PT_LWLASER_LC1310 << SFP_FLAG_PT_SHIFT; 6360 if (wavelength == SFP_WAVELENGTH_LL1550) 6361 flag |= SFP_FLAG_PT_LWLASER_LL1550 << SFP_FLAG_PT_SHIFT; 6362 } 6363 /* check if its SFP+ */ 6364 flag |= ((page_a0[SSF_IDENTIFIER] == SFF_PG0_IDENT_SFP) ? 6365 SFP_FLAG_CT_SFP_PLUS : SFP_FLAG_CT_UNKNOWN) 6366 << SFP_FLAG_CT_SHIFT; 6367 6368 /* check if its OPTICAL */ 6369 flag |= ((page_a0[SSF_CONNECTOR] == SFF_PG0_CONNECTOR_LC) ? 
6370 SFP_FLAG_IS_OPTICAL_PORT : 0) 6371 << SFP_FLAG_IS_OPTICAL_SHIFT; 6372 6373 temperature = (page_a2[SFF_TEMPERATURE_B1] << 8 | 6374 page_a2[SFF_TEMPERATURE_B0]); 6375 vcc = (page_a2[SFF_VCC_B1] << 8 | 6376 page_a2[SFF_VCC_B0]); 6377 tx_power = (page_a2[SFF_TXPOWER_B1] << 8 | 6378 page_a2[SFF_TXPOWER_B0]); 6379 tx_bias = (page_a2[SFF_TX_BIAS_CURRENT_B1] << 8 | 6380 page_a2[SFF_TX_BIAS_CURRENT_B0]); 6381 rx_power = (page_a2[SFF_RXPOWER_B1] << 8 | 6382 page_a2[SFF_RXPOWER_B0]); 6383 desc->sfp_info.temperature = cpu_to_be16(temperature); 6384 desc->sfp_info.rx_power = cpu_to_be16(rx_power); 6385 desc->sfp_info.tx_bias = cpu_to_be16(tx_bias); 6386 desc->sfp_info.tx_power = cpu_to_be16(tx_power); 6387 desc->sfp_info.vcc = cpu_to_be16(vcc); 6388 6389 desc->sfp_info.flags = cpu_to_be16(flag); 6390 desc->length = cpu_to_be32(sizeof(desc->sfp_info)); 6391 6392 return sizeof(struct fc_rdp_sfp_desc); 6393 } 6394 6395 static uint32_t 6396 lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc, 6397 READ_LNK_VAR *stat) 6398 { 6399 uint32_t type; 6400 6401 desc->tag = cpu_to_be32(RDP_LINK_ERROR_STATUS_DESC_TAG); 6402 6403 type = VN_PT_PHY_PF_PORT << VN_PT_PHY_SHIFT; 6404 6405 desc->info.port_type = cpu_to_be32(type); 6406 6407 desc->info.link_status.link_failure_cnt = 6408 cpu_to_be32(stat->linkFailureCnt); 6409 desc->info.link_status.loss_of_synch_cnt = 6410 cpu_to_be32(stat->lossSyncCnt); 6411 desc->info.link_status.loss_of_signal_cnt = 6412 cpu_to_be32(stat->lossSignalCnt); 6413 desc->info.link_status.primitive_seq_proto_err = 6414 cpu_to_be32(stat->primSeqErrCnt); 6415 desc->info.link_status.invalid_trans_word = 6416 cpu_to_be32(stat->invalidXmitWord); 6417 desc->info.link_status.invalid_crc_cnt = cpu_to_be32(stat->crcCnt); 6418 6419 desc->length = cpu_to_be32(sizeof(desc->info)); 6420 6421 return sizeof(struct fc_rdp_link_error_status_desc); 6422 } 6423 6424 static uint32_t 6425 lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat, 6426 struct lpfc_vport *vport) 6427 { 6428 uint32_t bbCredit; 6429 6430 desc->tag = cpu_to_be32(RDP_BBC_DESC_TAG); 6431 6432 bbCredit = vport->fc_sparam.cmn.bbCreditLsb | 6433 (vport->fc_sparam.cmn.bbCreditMsb << 8); 6434 desc->bbc_info.port_bbc = cpu_to_be32(bbCredit); 6435 if (vport->phba->fc_topology != LPFC_TOPOLOGY_LOOP) { 6436 bbCredit = vport->phba->fc_fabparam.cmn.bbCreditLsb | 6437 (vport->phba->fc_fabparam.cmn.bbCreditMsb << 8); 6438 desc->bbc_info.attached_port_bbc = cpu_to_be32(bbCredit); 6439 } else { 6440 desc->bbc_info.attached_port_bbc = 0; 6441 } 6442 6443 desc->bbc_info.rtt = 0; 6444 desc->length = cpu_to_be32(sizeof(desc->bbc_info)); 6445 6446 return sizeof(struct fc_rdp_bbc_desc); 6447 } 6448 6449 static uint32_t 6450 lpfc_rdp_res_oed_temp_desc(struct lpfc_hba *phba, 6451 struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2) 6452 { 6453 uint32_t flags = 0; 6454 6455 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6456 6457 desc->oed_info.hi_alarm = page_a2[SSF_TEMP_HIGH_ALARM]; 6458 desc->oed_info.lo_alarm = page_a2[SSF_TEMP_LOW_ALARM]; 6459 desc->oed_info.hi_warning = page_a2[SSF_TEMP_HIGH_WARNING]; 6460 desc->oed_info.lo_warning = page_a2[SSF_TEMP_LOW_WARNING]; 6461 6462 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TEMPERATURE) 6463 flags |= RDP_OET_HIGH_ALARM; 6464 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TEMPERATURE) 6465 flags |= RDP_OET_LOW_ALARM; 6466 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TEMPERATURE) 6467 flags |= RDP_OET_HIGH_WARNING; 6468 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TEMPERATURE) 6469 
flags |= RDP_OET_LOW_WARNING; 6470 6471 flags |= ((0xf & RDP_OED_TEMPERATURE) << RDP_OED_TYPE_SHIFT); 6472 desc->oed_info.function_flags = cpu_to_be32(flags); 6473 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6474 return sizeof(struct fc_rdp_oed_sfp_desc); 6475 } 6476 6477 static uint32_t 6478 lpfc_rdp_res_oed_voltage_desc(struct lpfc_hba *phba, 6479 struct fc_rdp_oed_sfp_desc *desc, 6480 uint8_t *page_a2) 6481 { 6482 uint32_t flags = 0; 6483 6484 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6485 6486 desc->oed_info.hi_alarm = page_a2[SSF_VOLTAGE_HIGH_ALARM]; 6487 desc->oed_info.lo_alarm = page_a2[SSF_VOLTAGE_LOW_ALARM]; 6488 desc->oed_info.hi_warning = page_a2[SSF_VOLTAGE_HIGH_WARNING]; 6489 desc->oed_info.lo_warning = page_a2[SSF_VOLTAGE_LOW_WARNING]; 6490 6491 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_VOLTAGE) 6492 flags |= RDP_OET_HIGH_ALARM; 6493 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_VOLTAGE) 6494 flags |= RDP_OET_LOW_ALARM; 6495 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_VOLTAGE) 6496 flags |= RDP_OET_HIGH_WARNING; 6497 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_VOLTAGE) 6498 flags |= RDP_OET_LOW_WARNING; 6499 6500 flags |= ((0xf & RDP_OED_VOLTAGE) << RDP_OED_TYPE_SHIFT); 6501 desc->oed_info.function_flags = cpu_to_be32(flags); 6502 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6503 return sizeof(struct fc_rdp_oed_sfp_desc); 6504 } 6505 6506 static uint32_t 6507 lpfc_rdp_res_oed_txbias_desc(struct lpfc_hba *phba, 6508 struct fc_rdp_oed_sfp_desc *desc, 6509 uint8_t *page_a2) 6510 { 6511 uint32_t flags = 0; 6512 6513 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6514 6515 desc->oed_info.hi_alarm = page_a2[SSF_BIAS_HIGH_ALARM]; 6516 desc->oed_info.lo_alarm = page_a2[SSF_BIAS_LOW_ALARM]; 6517 desc->oed_info.hi_warning = page_a2[SSF_BIAS_HIGH_WARNING]; 6518 desc->oed_info.lo_warning = page_a2[SSF_BIAS_LOW_WARNING]; 6519 6520 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXBIAS) 6521 flags |= RDP_OET_HIGH_ALARM; 6522 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXBIAS) 6523 flags |= RDP_OET_LOW_ALARM; 6524 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXBIAS) 6525 flags |= RDP_OET_HIGH_WARNING; 6526 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXBIAS) 6527 flags |= RDP_OET_LOW_WARNING; 6528 6529 flags |= ((0xf & RDP_OED_TXBIAS) << RDP_OED_TYPE_SHIFT); 6530 desc->oed_info.function_flags = cpu_to_be32(flags); 6531 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6532 return sizeof(struct fc_rdp_oed_sfp_desc); 6533 } 6534 6535 static uint32_t 6536 lpfc_rdp_res_oed_txpower_desc(struct lpfc_hba *phba, 6537 struct fc_rdp_oed_sfp_desc *desc, 6538 uint8_t *page_a2) 6539 { 6540 uint32_t flags = 0; 6541 6542 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6543 6544 desc->oed_info.hi_alarm = page_a2[SSF_TXPOWER_HIGH_ALARM]; 6545 desc->oed_info.lo_alarm = page_a2[SSF_TXPOWER_LOW_ALARM]; 6546 desc->oed_info.hi_warning = page_a2[SSF_TXPOWER_HIGH_WARNING]; 6547 desc->oed_info.lo_warning = page_a2[SSF_TXPOWER_LOW_WARNING]; 6548 6549 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXPOWER) 6550 flags |= RDP_OET_HIGH_ALARM; 6551 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXPOWER) 6552 flags |= RDP_OET_LOW_ALARM; 6553 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXPOWER) 6554 flags |= RDP_OET_HIGH_WARNING; 6555 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXPOWER) 6556 flags |= RDP_OET_LOW_WARNING; 6557 6558 flags |= ((0xf & RDP_OED_TXPOWER) << RDP_OED_TYPE_SHIFT); 6559 desc->oed_info.function_flags = cpu_to_be32(flags); 6560 desc->length = 
cpu_to_be32(sizeof(desc->oed_info)); 6561 return sizeof(struct fc_rdp_oed_sfp_desc); 6562 } 6563 6564 6565 static uint32_t 6566 lpfc_rdp_res_oed_rxpower_desc(struct lpfc_hba *phba, 6567 struct fc_rdp_oed_sfp_desc *desc, 6568 uint8_t *page_a2) 6569 { 6570 uint32_t flags = 0; 6571 6572 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6573 6574 desc->oed_info.hi_alarm = page_a2[SSF_RXPOWER_HIGH_ALARM]; 6575 desc->oed_info.lo_alarm = page_a2[SSF_RXPOWER_LOW_ALARM]; 6576 desc->oed_info.hi_warning = page_a2[SSF_RXPOWER_HIGH_WARNING]; 6577 desc->oed_info.lo_warning = page_a2[SSF_RXPOWER_LOW_WARNING]; 6578 6579 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_RXPOWER) 6580 flags |= RDP_OET_HIGH_ALARM; 6581 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_RXPOWER) 6582 flags |= RDP_OET_LOW_ALARM; 6583 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_RXPOWER) 6584 flags |= RDP_OET_HIGH_WARNING; 6585 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_RXPOWER) 6586 flags |= RDP_OET_LOW_WARNING; 6587 6588 flags |= ((0xf & RDP_OED_RXPOWER) << RDP_OED_TYPE_SHIFT); 6589 desc->oed_info.function_flags = cpu_to_be32(flags); 6590 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6591 return sizeof(struct fc_rdp_oed_sfp_desc); 6592 } 6593 6594 static uint32_t 6595 lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc, 6596 uint8_t *page_a0, struct lpfc_vport *vport) 6597 { 6598 desc->tag = cpu_to_be32(RDP_OPD_DESC_TAG); 6599 memcpy(desc->opd_info.vendor_name, &page_a0[SSF_VENDOR_NAME], 16); 6600 memcpy(desc->opd_info.model_number, &page_a0[SSF_VENDOR_PN], 16); 6601 memcpy(desc->opd_info.serial_number, &page_a0[SSF_VENDOR_SN], 16); 6602 memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 4); 6603 memcpy(desc->opd_info.date, &page_a0[SSF_DATE_CODE], 8); 6604 desc->length = cpu_to_be32(sizeof(desc->opd_info)); 6605 return sizeof(struct fc_rdp_opd_sfp_desc); 6606 } 6607 6608 static uint32_t 6609 lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat) 6610 { 6611 if (bf_get(lpfc_read_link_stat_gec2, stat) == 0) 6612 return 0; 6613 desc->tag = cpu_to_be32(RDP_FEC_DESC_TAG); 6614 6615 desc->info.CorrectedBlocks = 6616 cpu_to_be32(stat->fecCorrBlkCount); 6617 desc->info.UncorrectableBlocks = 6618 cpu_to_be32(stat->fecUncorrBlkCount); 6619 6620 desc->length = cpu_to_be32(sizeof(desc->info)); 6621 6622 return sizeof(struct fc_fec_rdp_desc); 6623 } 6624 6625 static uint32_t 6626 lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba) 6627 { 6628 uint16_t rdp_cap = 0; 6629 uint16_t rdp_speed; 6630 6631 desc->tag = cpu_to_be32(RDP_PORT_SPEED_DESC_TAG); 6632 6633 switch (phba->fc_linkspeed) { 6634 case LPFC_LINK_SPEED_1GHZ: 6635 rdp_speed = RDP_PS_1GB; 6636 break; 6637 case LPFC_LINK_SPEED_2GHZ: 6638 rdp_speed = RDP_PS_2GB; 6639 break; 6640 case LPFC_LINK_SPEED_4GHZ: 6641 rdp_speed = RDP_PS_4GB; 6642 break; 6643 case LPFC_LINK_SPEED_8GHZ: 6644 rdp_speed = RDP_PS_8GB; 6645 break; 6646 case LPFC_LINK_SPEED_10GHZ: 6647 rdp_speed = RDP_PS_10GB; 6648 break; 6649 case LPFC_LINK_SPEED_16GHZ: 6650 rdp_speed = RDP_PS_16GB; 6651 break; 6652 case LPFC_LINK_SPEED_32GHZ: 6653 rdp_speed = RDP_PS_32GB; 6654 break; 6655 case LPFC_LINK_SPEED_64GHZ: 6656 rdp_speed = RDP_PS_64GB; 6657 break; 6658 case LPFC_LINK_SPEED_128GHZ: 6659 rdp_speed = RDP_PS_128GB; 6660 break; 6661 case LPFC_LINK_SPEED_256GHZ: 6662 rdp_speed = RDP_PS_256GB; 6663 break; 6664 default: 6665 rdp_speed = RDP_PS_UNKNOWN; 6666 break; 6667 } 6668 6669 desc->info.port_speed.speed = cpu_to_be16(rdp_speed); 6670 6671 if (phba->lmt & LMT_256Gb) 6672 
rdp_cap |= RDP_PS_256GB; 6673 if (phba->lmt & LMT_128Gb) 6674 rdp_cap |= RDP_PS_128GB; 6675 if (phba->lmt & LMT_64Gb) 6676 rdp_cap |= RDP_PS_64GB; 6677 if (phba->lmt & LMT_32Gb) 6678 rdp_cap |= RDP_PS_32GB; 6679 if (phba->lmt & LMT_16Gb) 6680 rdp_cap |= RDP_PS_16GB; 6681 if (phba->lmt & LMT_10Gb) 6682 rdp_cap |= RDP_PS_10GB; 6683 if (phba->lmt & LMT_8Gb) 6684 rdp_cap |= RDP_PS_8GB; 6685 if (phba->lmt & LMT_4Gb) 6686 rdp_cap |= RDP_PS_4GB; 6687 if (phba->lmt & LMT_2Gb) 6688 rdp_cap |= RDP_PS_2GB; 6689 if (phba->lmt & LMT_1Gb) 6690 rdp_cap |= RDP_PS_1GB; 6691 6692 if (rdp_cap == 0) 6693 rdp_cap = RDP_CAP_UNKNOWN; 6694 if (phba->cfg_link_speed != LPFC_USER_LINK_SPEED_AUTO) 6695 rdp_cap |= RDP_CAP_USER_CONFIGURED; 6696 6697 desc->info.port_speed.capabilities = cpu_to_be16(rdp_cap); 6698 desc->length = cpu_to_be32(sizeof(desc->info)); 6699 return sizeof(struct fc_rdp_port_speed_desc); 6700 } 6701 6702 static uint32_t 6703 lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc, 6704 struct lpfc_vport *vport) 6705 { 6706 6707 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG); 6708 6709 memcpy(desc->port_names.wwnn, &vport->fc_nodename, 6710 sizeof(desc->port_names.wwnn)); 6711 6712 memcpy(desc->port_names.wwpn, &vport->fc_portname, 6713 sizeof(desc->port_names.wwpn)); 6714 6715 desc->length = cpu_to_be32(sizeof(desc->port_names)); 6716 return sizeof(struct fc_rdp_port_name_desc); 6717 } 6718 6719 static uint32_t 6720 lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc, 6721 struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 6722 { 6723 6724 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG); 6725 if (vport->fc_flag & FC_FABRIC) { 6726 memcpy(desc->port_names.wwnn, &vport->fabric_nodename, 6727 sizeof(desc->port_names.wwnn)); 6728 6729 memcpy(desc->port_names.wwpn, &vport->fabric_portname, 6730 sizeof(desc->port_names.wwpn)); 6731 } else { /* Point to Point */ 6732 memcpy(desc->port_names.wwnn, &ndlp->nlp_nodename, 6733 sizeof(desc->port_names.wwnn)); 6734 6735 memcpy(desc->port_names.wwpn, &ndlp->nlp_portname, 6736 sizeof(desc->port_names.wwpn)); 6737 } 6738 6739 desc->length = cpu_to_be32(sizeof(desc->port_names)); 6740 return sizeof(struct fc_rdp_port_name_desc); 6741 } 6742 6743 static void 6744 lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context, 6745 int status) 6746 { 6747 struct lpfc_nodelist *ndlp = rdp_context->ndlp; 6748 struct lpfc_vport *vport = ndlp->vport; 6749 struct lpfc_iocbq *elsiocb; 6750 struct ulp_bde64 *bpl; 6751 IOCB_t *icmd; 6752 uint8_t *pcmd; 6753 struct ls_rjt *stat; 6754 struct fc_rdp_res_frame *rdp_res; 6755 uint32_t cmdsize, len; 6756 uint16_t *flag_ptr; 6757 int rc; 6758 6759 if (status != SUCCESS) 6760 goto error; 6761 6762 /* This will change once we know the true size of the RDP payload */ 6763 cmdsize = sizeof(struct fc_rdp_res_frame); 6764 6765 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, 6766 lpfc_max_els_tries, rdp_context->ndlp, 6767 rdp_context->ndlp->nlp_DID, ELS_CMD_ACC); 6768 if (!elsiocb) 6769 goto free_rdp_context; 6770 6771 icmd = &elsiocb->iocb; 6772 icmd->ulpContext = rdp_context->rx_id; 6773 icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id; 6774 6775 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6776 "2171 Xmit RDP response tag x%x xri x%x, " 6777 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x", 6778 elsiocb->iotag, elsiocb->iocb.ulpContext, 6779 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 6780 ndlp->nlp_rpi); 6781 rdp_res = (struct fc_rdp_res_frame *) 6782 (((struct lpfc_dmabuf *) 
elsiocb->context2)->virt); 6783 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 6784 memset(pcmd, 0, sizeof(struct fc_rdp_res_frame)); 6785 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 6786 6787 /* Update Alarm and Warning */ 6788 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_ALARM_FLAGS); 6789 phba->sfp_alarm |= *flag_ptr; 6790 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_WARNING_FLAGS); 6791 phba->sfp_warning |= *flag_ptr; 6792 6793 /* For RDP payload */ 6794 len = 8; 6795 len += lpfc_rdp_res_link_service((struct fc_rdp_link_service_desc *) 6796 (len + pcmd), ELS_CMD_RDP); 6797 6798 len += lpfc_rdp_res_sfp_desc((struct fc_rdp_sfp_desc *)(len + pcmd), 6799 rdp_context->page_a0, rdp_context->page_a2); 6800 len += lpfc_rdp_res_speed((struct fc_rdp_port_speed_desc *)(len + pcmd), 6801 phba); 6802 len += lpfc_rdp_res_link_error((struct fc_rdp_link_error_status_desc *) 6803 (len + pcmd), &rdp_context->link_stat); 6804 len += lpfc_rdp_res_diag_port_names((struct fc_rdp_port_name_desc *) 6805 (len + pcmd), vport); 6806 len += lpfc_rdp_res_attach_port_names((struct fc_rdp_port_name_desc *) 6807 (len + pcmd), vport, ndlp); 6808 len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd), 6809 &rdp_context->link_stat); 6810 len += lpfc_rdp_res_bbc_desc((struct fc_rdp_bbc_desc *)(len + pcmd), 6811 &rdp_context->link_stat, vport); 6812 len += lpfc_rdp_res_oed_temp_desc(phba, 6813 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 6814 rdp_context->page_a2); 6815 len += lpfc_rdp_res_oed_voltage_desc(phba, 6816 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 6817 rdp_context->page_a2); 6818 len += lpfc_rdp_res_oed_txbias_desc(phba, 6819 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 6820 rdp_context->page_a2); 6821 len += lpfc_rdp_res_oed_txpower_desc(phba, 6822 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 6823 rdp_context->page_a2); 6824 len += lpfc_rdp_res_oed_rxpower_desc(phba, 6825 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 6826 rdp_context->page_a2); 6827 len += lpfc_rdp_res_opd_desc((struct fc_rdp_opd_sfp_desc *)(len + pcmd), 6828 rdp_context->page_a0, vport); 6829 6830 rdp_res->length = cpu_to_be32(len - 8); 6831 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 6832 6833 /* Now that we know the true size of the payload, update the BPL */ 6834 bpl = (struct ulp_bde64 *) 6835 (((struct lpfc_dmabuf *)(elsiocb->context3))->virt); 6836 bpl->tus.f.bdeSize = len; 6837 bpl->tus.f.bdeFlags = 0; 6838 bpl->tus.w = le32_to_cpu(bpl->tus.w); 6839 6840 phba->fc_stat.elsXmitACC++; 6841 elsiocb->context1 = lpfc_nlp_get(ndlp); 6842 if (!elsiocb->context1) { 6843 lpfc_els_free_iocb(phba, elsiocb); 6844 goto free_rdp_context; 6845 } 6846 6847 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6848 if (rc == IOCB_ERROR) { 6849 lpfc_els_free_iocb(phba, elsiocb); 6850 lpfc_nlp_put(ndlp); 6851 } 6852 6853 goto free_rdp_context; 6854 6855 error: 6856 cmdsize = 2 * sizeof(uint32_t); 6857 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, lpfc_max_els_tries, 6858 ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT); 6859 if (!elsiocb) 6860 goto free_rdp_context; 6861 6862 icmd = &elsiocb->iocb; 6863 icmd->ulpContext = rdp_context->rx_id; 6864 icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id; 6865 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 6866 6867 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; 6868 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t)); 6869 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 6870 6871 phba->fc_stat.elsXmitLSRJT++; 6872 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 6873 
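 /* Hold a node reference for the LS_RJT completion; lpfc_cmpl_els_rsp()
  * releases it when the response completes.
  */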
elsiocb->context1 = lpfc_nlp_get(ndlp); 6874 if (!elsiocb->context1) { 6875 lpfc_els_free_iocb(phba, elsiocb); 6876 goto free_rdp_context; 6877 } 6878 6879 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6880 if (rc == IOCB_ERROR) { 6881 lpfc_els_free_iocb(phba, elsiocb); 6882 lpfc_nlp_put(ndlp); 6883 } 6884 6885 free_rdp_context: 6886 /* This reference put is for the original unsolicited RDP. If the 6887 * iocb prep failed, there is no reference to remove. 6888 */ 6889 lpfc_nlp_put(ndlp); 6890 kfree(rdp_context); 6891 } 6892 6893 static int 6894 lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context) 6895 { 6896 LPFC_MBOXQ_t *mbox = NULL; 6897 struct lpfc_dmabuf *mp; 6898 int rc; 6899 6900 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6901 if (!mbox) { 6902 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS, 6903 "7105 failed to allocate mailbox memory"); 6904 return 1; 6905 } 6906 6907 if (lpfc_sli4_dump_page_a0(phba, mbox)) 6908 goto prep_mbox_fail; 6909 mbox->vport = rdp_context->ndlp->vport; 6910 mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0; 6911 mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context; 6912 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 6913 if (rc == MBX_NOT_FINISHED) { 6914 mp = (struct lpfc_dmabuf *)mbox->ctx_buf; 6915 lpfc_mbuf_free(phba, mp->virt, mp->phys); 6916 goto issue_mbox_fail; 6917 } 6918 6919 return 0; 6920 6921 prep_mbox_fail: 6922 issue_mbox_fail: 6923 mempool_free(mbox, phba->mbox_mem_pool); 6924 return 1; 6925 } 6926 6927 /* 6928 * lpfc_els_rcv_rdp - Process an unsolicited RDP ELS. 6929 * @vport: pointer to a host virtual N_Port data structure. 6930 * @cmdiocb: pointer to lpfc command iocb data structure. 6931 * @ndlp: pointer to a node-list data structure. 6932 * 6933 * This routine processes an unsolicited RDP(Read Diagnostic Parameters) 6934 * IOCB. First, the payload of the unsolicited RDP is checked. 6935 * Then it will (1) send MBX_DUMP_MEMORY, Embedded DMP_LMSD sub command TYPE-3 6936 * for Page A0, (2) send MBX_DUMP_MEMORY, DMP_LMSD for Page A2, 6937 * (3) send MBX_READ_LNK_STAT to get link stat, (4) Call lpfc_els_rdp_cmpl 6938 * gather all data and send RDP response. 6939 * 6940 * Return code 6941 * 0 - Sent the acc response 6942 * 1 - Sent the reject response. 
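 * Note that the ACC (or the LS_RJT on failure) is actually transmitted
 * later, from lpfc_els_rdp_cmpl(), once the mailbox completions have
 * collected the SFP pages and the link statistics.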
6943 */ 6944 static int 6945 lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 6946 struct lpfc_nodelist *ndlp) 6947 { 6948 struct lpfc_hba *phba = vport->phba; 6949 struct lpfc_dmabuf *pcmd; 6950 uint8_t rjt_err, rjt_expl = LSEXP_NOTHING_MORE; 6951 struct fc_rdp_req_frame *rdp_req; 6952 struct lpfc_rdp_context *rdp_context; 6953 IOCB_t *cmd = NULL; 6954 struct ls_rjt stat; 6955 6956 if (phba->sli_rev < LPFC_SLI_REV4 || 6957 bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 6958 LPFC_SLI_INTF_IF_TYPE_2) { 6959 rjt_err = LSRJT_UNABLE_TPC; 6960 rjt_expl = LSEXP_REQ_UNSUPPORTED; 6961 goto error; 6962 } 6963 6964 if (phba->sli_rev < LPFC_SLI_REV4 || (phba->hba_flag & HBA_FCOE_MODE)) { 6965 rjt_err = LSRJT_UNABLE_TPC; 6966 rjt_expl = LSEXP_REQ_UNSUPPORTED; 6967 goto error; 6968 } 6969 6970 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 6971 rdp_req = (struct fc_rdp_req_frame *) pcmd->virt; 6972 6973 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6974 "2422 ELS RDP Request " 6975 "dec len %d tag x%x port_id %d len %d\n", 6976 be32_to_cpu(rdp_req->rdp_des_length), 6977 be32_to_cpu(rdp_req->nport_id_desc.tag), 6978 be32_to_cpu(rdp_req->nport_id_desc.nport_id), 6979 be32_to_cpu(rdp_req->nport_id_desc.length)); 6980 6981 if (sizeof(struct fc_rdp_nport_desc) != 6982 be32_to_cpu(rdp_req->rdp_des_length)) 6983 goto rjt_logerr; 6984 if (RDP_N_PORT_DESC_TAG != be32_to_cpu(rdp_req->nport_id_desc.tag)) 6985 goto rjt_logerr; 6986 if (RDP_NPORT_ID_SIZE != 6987 be32_to_cpu(rdp_req->nport_id_desc.length)) 6988 goto rjt_logerr; 6989 rdp_context = kzalloc(sizeof(struct lpfc_rdp_context), GFP_KERNEL); 6990 if (!rdp_context) { 6991 rjt_err = LSRJT_UNABLE_TPC; 6992 goto error; 6993 } 6994 6995 cmd = &cmdiocb->iocb; 6996 rdp_context->ndlp = lpfc_nlp_get(ndlp); 6997 if (!rdp_context->ndlp) { 6998 kfree(rdp_context); 6999 rjt_err = LSRJT_UNABLE_TPC; 7000 goto error; 7001 } 7002 rdp_context->ox_id = cmd->unsli3.rcvsli3.ox_id; 7003 rdp_context->rx_id = cmd->ulpContext; 7004 rdp_context->cmpl = lpfc_els_rdp_cmpl; 7005 if (lpfc_get_rdp_info(phba, rdp_context)) { 7006 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_ELS, 7007 "2423 Unable to send mailbox"); 7008 kfree(rdp_context); 7009 rjt_err = LSRJT_UNABLE_TPC; 7010 lpfc_nlp_put(ndlp); 7011 goto error; 7012 } 7013 7014 return 0; 7015 7016 rjt_logerr: 7017 rjt_err = LSRJT_LOGICAL_ERR; 7018 7019 error: 7020 memset(&stat, 0, sizeof(stat)); 7021 stat.un.b.lsRjtRsnCode = rjt_err; 7022 stat.un.b.lsRjtRsnCodeExp = rjt_expl; 7023 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 7024 return 1; 7025 } 7026 7027 7028 static void 7029 lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 7030 { 7031 MAILBOX_t *mb; 7032 IOCB_t *icmd; 7033 uint8_t *pcmd; 7034 struct lpfc_iocbq *elsiocb; 7035 struct lpfc_nodelist *ndlp; 7036 struct ls_rjt *stat; 7037 union lpfc_sli4_cfg_shdr *shdr; 7038 struct lpfc_lcb_context *lcb_context; 7039 struct fc_lcb_res_frame *lcb_res; 7040 uint32_t cmdsize, shdr_status, shdr_add_status; 7041 int rc; 7042 7043 mb = &pmb->u.mb; 7044 lcb_context = (struct lpfc_lcb_context *)pmb->ctx_ndlp; 7045 ndlp = lcb_context->ndlp; 7046 pmb->ctx_ndlp = NULL; 7047 pmb->ctx_buf = NULL; 7048 7049 shdr = (union lpfc_sli4_cfg_shdr *) 7050 &pmb->u.mqe.un.beacon_config.header.cfg_shdr; 7051 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 7052 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 7053 7054 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX, 7055 "0194 SET_BEACON_CONFIG mailbox " 7056 "completed 
with status x%x add_status x%x," 7057 " mbx status x%x\n", 7058 shdr_status, shdr_add_status, mb->mbxStatus); 7059 7060 if ((mb->mbxStatus != MBX_SUCCESS) || shdr_status || 7061 (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) || 7062 (shdr_add_status == ADD_STATUS_INVALID_REQUEST)) { 7063 mempool_free(pmb, phba->mbox_mem_pool); 7064 goto error; 7065 } 7066 7067 mempool_free(pmb, phba->mbox_mem_pool); 7068 cmdsize = sizeof(struct fc_lcb_res_frame); 7069 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 7070 lpfc_max_els_tries, ndlp, 7071 ndlp->nlp_DID, ELS_CMD_ACC); 7072 7073 /* Decrement the ndlp reference count from previous mbox command */ 7074 lpfc_nlp_put(ndlp); 7075 7076 if (!elsiocb) 7077 goto free_lcb_context; 7078 7079 lcb_res = (struct fc_lcb_res_frame *) 7080 (((struct lpfc_dmabuf *)elsiocb->context2)->virt); 7081 7082 memset(lcb_res, 0, sizeof(struct fc_lcb_res_frame)); 7083 icmd = &elsiocb->iocb; 7084 icmd->ulpContext = lcb_context->rx_id; 7085 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; 7086 7087 pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt); 7088 *((uint32_t *)(pcmd)) = ELS_CMD_ACC; 7089 lcb_res->lcb_sub_command = lcb_context->sub_command; 7090 lcb_res->lcb_type = lcb_context->type; 7091 lcb_res->capability = lcb_context->capability; 7092 lcb_res->lcb_frequency = lcb_context->frequency; 7093 lcb_res->lcb_duration = lcb_context->duration; 7094 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 7095 phba->fc_stat.elsXmitACC++; 7096 7097 elsiocb->context1 = lpfc_nlp_get(ndlp); 7098 if (!elsiocb->context1) { 7099 lpfc_els_free_iocb(phba, elsiocb); 7100 goto out; 7101 } 7102 7103 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7104 if (rc == IOCB_ERROR) { 7105 lpfc_els_free_iocb(phba, elsiocb); 7106 lpfc_nlp_put(ndlp); 7107 } 7108 out: 7109 kfree(lcb_context); 7110 return; 7111 7112 error: 7113 cmdsize = sizeof(struct fc_lcb_res_frame); 7114 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 7115 lpfc_max_els_tries, ndlp, 7116 ndlp->nlp_DID, ELS_CMD_LS_RJT); 7117 lpfc_nlp_put(ndlp); 7118 if (!elsiocb) 7119 goto free_lcb_context; 7120 7121 icmd = &elsiocb->iocb; 7122 icmd->ulpContext = lcb_context->rx_id; 7123 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; 7124 pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt); 7125 7126 *((uint32_t *)(pcmd)) = ELS_CMD_LS_RJT; 7127 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t)); 7128 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 7129 7130 if (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) 7131 stat->un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS; 7132 7133 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 7134 phba->fc_stat.elsXmitLSRJT++; 7135 elsiocb->context1 = lpfc_nlp_get(ndlp); 7136 if (!elsiocb->context1) { 7137 lpfc_els_free_iocb(phba, elsiocb); 7138 goto free_lcb_context; 7139 } 7140 7141 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7142 if (rc == IOCB_ERROR) { 7143 lpfc_els_free_iocb(phba, elsiocb); 7144 lpfc_nlp_put(ndlp); 7145 } 7146 free_lcb_context: 7147 kfree(lcb_context); 7148 } 7149 7150 static int 7151 lpfc_sli4_set_beacon(struct lpfc_vport *vport, 7152 struct lpfc_lcb_context *lcb_context, 7153 uint32_t beacon_state) 7154 { 7155 struct lpfc_hba *phba = vport->phba; 7156 union lpfc_sli4_cfg_shdr *cfg_shdr; 7157 LPFC_MBOXQ_t *mbox = NULL; 7158 uint32_t len; 7159 int rc; 7160 7161 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7162 if (!mbox) 7163 return 1; 7164 7165 cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr; 7166 len = sizeof(struct 
lpfc_mbx_set_beacon_config) - 7167 sizeof(struct lpfc_sli4_cfg_mhdr); 7168 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 7169 LPFC_MBOX_OPCODE_SET_BEACON_CONFIG, len, 7170 LPFC_SLI4_MBX_EMBED); 7171 mbox->ctx_ndlp = (void *)lcb_context; 7172 mbox->vport = phba->pport; 7173 mbox->mbox_cmpl = lpfc_els_lcb_rsp; 7174 bf_set(lpfc_mbx_set_beacon_port_num, &mbox->u.mqe.un.beacon_config, 7175 phba->sli4_hba.physical_port); 7176 bf_set(lpfc_mbx_set_beacon_state, &mbox->u.mqe.un.beacon_config, 7177 beacon_state); 7178 mbox->u.mqe.un.beacon_config.word5 = 0; /* Reserved */ 7179 7180 /* 7181 * Check bv1s bit before issuing the mailbox 7182 * if bv1s == 1, LCB V1 supported 7183 * else, LCB V0 supported 7184 */ 7185 7186 if (phba->sli4_hba.pc_sli4_params.bv1s) { 7187 /* COMMON_SET_BEACON_CONFIG_V1 */ 7188 cfg_shdr->request.word9 = BEACON_VERSION_V1; 7189 lcb_context->capability |= LCB_CAPABILITY_DURATION; 7190 bf_set(lpfc_mbx_set_beacon_port_type, 7191 &mbox->u.mqe.un.beacon_config, 0); 7192 bf_set(lpfc_mbx_set_beacon_duration_v1, 7193 &mbox->u.mqe.un.beacon_config, 7194 be16_to_cpu(lcb_context->duration)); 7195 } else { 7196 /* COMMON_SET_BEACON_CONFIG_V0 */ 7197 if (be16_to_cpu(lcb_context->duration) != 0) { 7198 mempool_free(mbox, phba->mbox_mem_pool); 7199 return 1; 7200 } 7201 cfg_shdr->request.word9 = BEACON_VERSION_V0; 7202 lcb_context->capability &= ~(LCB_CAPABILITY_DURATION); 7203 bf_set(lpfc_mbx_set_beacon_state, 7204 &mbox->u.mqe.un.beacon_config, beacon_state); 7205 bf_set(lpfc_mbx_set_beacon_port_type, 7206 &mbox->u.mqe.un.beacon_config, 1); 7207 bf_set(lpfc_mbx_set_beacon_duration, 7208 &mbox->u.mqe.un.beacon_config, 7209 be16_to_cpu(lcb_context->duration)); 7210 } 7211 7212 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 7213 if (rc == MBX_NOT_FINISHED) { 7214 mempool_free(mbox, phba->mbox_mem_pool); 7215 return 1; 7216 } 7217 7218 return 0; 7219 } 7220 7221 7222 /** 7223 * lpfc_els_rcv_lcb - Process an unsolicited LCB 7224 * @vport: pointer to a host virtual N_Port data structure. 7225 * @cmdiocb: pointer to lpfc command iocb data structure. 7226 * @ndlp: pointer to a node-list data structure. 7227 * 7228 * This routine processes an unsolicited LCB(LINK CABLE BEACON) IOCB. 7229 * First, the payload of the unsolicited LCB is checked. 7230 * Then based on Subcommand beacon will either turn on or off. 7231 * 7232 * Return code 7233 * 0 - Sent the acc response 7234 * 1 - Sent the reject response. 
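 * When the SET_BEACON_CONFIG mailbox is issued successfully, the ACC or
 * LS_RJT is transmitted from its completion handler, lpfc_els_lcb_rsp().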
7235 **/ 7236 static int 7237 lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7238 struct lpfc_nodelist *ndlp) 7239 { 7240 struct lpfc_hba *phba = vport->phba; 7241 struct lpfc_dmabuf *pcmd; 7242 uint8_t *lp; 7243 struct fc_lcb_request_frame *beacon; 7244 struct lpfc_lcb_context *lcb_context; 7245 u8 state, rjt_err = 0; 7246 struct ls_rjt stat; 7247 7248 pcmd = (struct lpfc_dmabuf *)cmdiocb->context2; 7249 lp = (uint8_t *)pcmd->virt; 7250 beacon = (struct fc_lcb_request_frame *)pcmd->virt; 7251 7252 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7253 "0192 ELS LCB Data x%x x%x x%x x%x sub x%x " 7254 "type x%x frequency %x duration x%x\n", 7255 lp[0], lp[1], lp[2], 7256 beacon->lcb_command, 7257 beacon->lcb_sub_command, 7258 beacon->lcb_type, 7259 beacon->lcb_frequency, 7260 be16_to_cpu(beacon->lcb_duration)); 7261 7262 if (beacon->lcb_sub_command != LPFC_LCB_ON && 7263 beacon->lcb_sub_command != LPFC_LCB_OFF) { 7264 rjt_err = LSRJT_CMD_UNSUPPORTED; 7265 goto rjt; 7266 } 7267 7268 if (phba->sli_rev < LPFC_SLI_REV4 || 7269 phba->hba_flag & HBA_FCOE_MODE || 7270 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 7271 LPFC_SLI_INTF_IF_TYPE_2)) { 7272 rjt_err = LSRJT_CMD_UNSUPPORTED; 7273 goto rjt; 7274 } 7275 7276 lcb_context = kmalloc(sizeof(*lcb_context), GFP_KERNEL); 7277 if (!lcb_context) { 7278 rjt_err = LSRJT_UNABLE_TPC; 7279 goto rjt; 7280 } 7281 7282 state = (beacon->lcb_sub_command == LPFC_LCB_ON) ? 1 : 0; 7283 lcb_context->sub_command = beacon->lcb_sub_command; 7284 lcb_context->capability = 0; 7285 lcb_context->type = beacon->lcb_type; 7286 lcb_context->frequency = beacon->lcb_frequency; 7287 lcb_context->duration = beacon->lcb_duration; 7288 lcb_context->ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id; 7289 lcb_context->rx_id = cmdiocb->iocb.ulpContext; 7290 lcb_context->ndlp = lpfc_nlp_get(ndlp); 7291 if (!lcb_context->ndlp) { 7292 rjt_err = LSRJT_UNABLE_TPC; 7293 goto rjt_free; 7294 } 7295 7296 if (lpfc_sli4_set_beacon(vport, lcb_context, state)) { 7297 lpfc_printf_vlog(ndlp->vport, KERN_ERR, LOG_TRACE_EVENT, 7298 "0193 failed to send mail box"); 7299 lpfc_nlp_put(ndlp); 7300 rjt_err = LSRJT_UNABLE_TPC; 7301 goto rjt_free; 7302 } 7303 return 0; 7304 7305 rjt_free: 7306 kfree(lcb_context); 7307 rjt: 7308 memset(&stat, 0, sizeof(stat)); 7309 stat.un.b.lsRjtRsnCode = rjt_err; 7310 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 7311 return 1; 7312 } 7313 7314 7315 /** 7316 * lpfc_els_flush_rscn - Clean up any rscn activities with a vport 7317 * @vport: pointer to a host virtual N_Port data structure. 7318 * 7319 * This routine cleans up any Registration State Change Notification 7320 * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the 7321 * @vport together with the host_lock is used to prevent multiple thread 7322 * trying to access the RSCN array on a same @vport at the same time. 
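 * Each saved RSCN payload buffer on the fc_rscn_id_list is returned with
 * lpfc_in_buf_free() and the FC_RSCN_MODE and FC_RSCN_DISCOVERY flags are
 * cleared.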
7323 **/ 7324 void 7325 lpfc_els_flush_rscn(struct lpfc_vport *vport) 7326 { 7327 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7328 struct lpfc_hba *phba = vport->phba; 7329 int i; 7330 7331 spin_lock_irq(shost->host_lock); 7332 if (vport->fc_rscn_flush) { 7333 /* Another thread is walking fc_rscn_id_list on this vport */ 7334 spin_unlock_irq(shost->host_lock); 7335 return; 7336 } 7337 /* Indicate we are walking lpfc_els_flush_rscn on this vport */ 7338 vport->fc_rscn_flush = 1; 7339 spin_unlock_irq(shost->host_lock); 7340 7341 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 7342 lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]); 7343 vport->fc_rscn_id_list[i] = NULL; 7344 } 7345 spin_lock_irq(shost->host_lock); 7346 vport->fc_rscn_id_cnt = 0; 7347 vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY); 7348 spin_unlock_irq(shost->host_lock); 7349 lpfc_can_disctmo(vport); 7350 /* Indicate we are done walking this fc_rscn_id_list */ 7351 vport->fc_rscn_flush = 0; 7352 } 7353 7354 /** 7355 * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did 7356 * @vport: pointer to a host virtual N_Port data structure. 7357 * @did: remote destination port identifier. 7358 * 7359 * This routine checks whether there is any pending Registration State 7360 * Configuration Notification (RSCN) to a @did on @vport. 7361 * 7362 * Return code 7363 * None zero - The @did matched with a pending rscn 7364 * 0 - not able to match @did with a pending rscn 7365 **/ 7366 int 7367 lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did) 7368 { 7369 D_ID ns_did; 7370 D_ID rscn_did; 7371 uint32_t *lp; 7372 uint32_t payload_len, i; 7373 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7374 7375 ns_did.un.word = did; 7376 7377 /* Never match fabric nodes for RSCNs */ 7378 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 7379 return 0; 7380 7381 /* If we are doing a FULL RSCN rediscovery, match everything */ 7382 if (vport->fc_flag & FC_RSCN_DISCOVERY) 7383 return did; 7384 7385 spin_lock_irq(shost->host_lock); 7386 if (vport->fc_rscn_flush) { 7387 /* Another thread is walking fc_rscn_id_list on this vport */ 7388 spin_unlock_irq(shost->host_lock); 7389 return 0; 7390 } 7391 /* Indicate we are walking fc_rscn_id_list on this vport */ 7392 vport->fc_rscn_flush = 1; 7393 spin_unlock_irq(shost->host_lock); 7394 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 7395 lp = vport->fc_rscn_id_list[i]->virt; 7396 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); 7397 payload_len -= sizeof(uint32_t); /* take off word 0 */ 7398 while (payload_len) { 7399 rscn_did.un.word = be32_to_cpu(*lp++); 7400 payload_len -= sizeof(uint32_t); 7401 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) { 7402 case RSCN_ADDRESS_FORMAT_PORT: 7403 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 7404 && (ns_did.un.b.area == rscn_did.un.b.area) 7405 && (ns_did.un.b.id == rscn_did.un.b.id)) 7406 goto return_did_out; 7407 break; 7408 case RSCN_ADDRESS_FORMAT_AREA: 7409 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 7410 && (ns_did.un.b.area == rscn_did.un.b.area)) 7411 goto return_did_out; 7412 break; 7413 case RSCN_ADDRESS_FORMAT_DOMAIN: 7414 if (ns_did.un.b.domain == rscn_did.un.b.domain) 7415 goto return_did_out; 7416 break; 7417 case RSCN_ADDRESS_FORMAT_FABRIC: 7418 goto return_did_out; 7419 } 7420 } 7421 } 7422 /* Indicate we are done with walking fc_rscn_id_list on this vport */ 7423 vport->fc_rscn_flush = 0; 7424 return 0; 7425 return_did_out: 7426 /* Indicate we are done with walking fc_rscn_id_list on this 
vport */ 7427 vport->fc_rscn_flush = 0; 7428 return did; 7429 } 7430 7431 /** 7432 * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn 7433 * @vport: pointer to a host virtual N_Port data structure. 7434 * 7435 * This routine sends recovery (NLP_EVT_DEVICE_RECOVERY) event to the 7436 * state machine for a @vport's nodes that are with pending RSCN (Registration 7437 * State Change Notification). 7438 * 7439 * Return code 7440 * 0 - Successful (currently alway return 0) 7441 **/ 7442 static int 7443 lpfc_rscn_recovery_check(struct lpfc_vport *vport) 7444 { 7445 struct lpfc_nodelist *ndlp = NULL; 7446 7447 /* Move all affected nodes by pending RSCNs to NPR state. */ 7448 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 7449 if ((ndlp->nlp_state == NLP_STE_UNUSED_NODE) || 7450 !lpfc_rscn_payload_check(vport, ndlp->nlp_DID)) 7451 continue; 7452 7453 /* NVME Target mode does not do RSCN Recovery. */ 7454 if (vport->phba->nvmet_support) 7455 continue; 7456 7457 /* If we are in the process of doing discovery on this 7458 * NPort, let it continue on its own. 7459 */ 7460 switch (ndlp->nlp_state) { 7461 case NLP_STE_PLOGI_ISSUE: 7462 case NLP_STE_ADISC_ISSUE: 7463 case NLP_STE_REG_LOGIN_ISSUE: 7464 case NLP_STE_PRLI_ISSUE: 7465 case NLP_STE_LOGO_ISSUE: 7466 continue; 7467 } 7468 7469 lpfc_disc_state_machine(vport, ndlp, NULL, 7470 NLP_EVT_DEVICE_RECOVERY); 7471 lpfc_cancel_retry_delay_tmo(vport, ndlp); 7472 } 7473 return 0; 7474 } 7475 7476 /** 7477 * lpfc_send_rscn_event - Send an RSCN event to management application 7478 * @vport: pointer to a host virtual N_Port data structure. 7479 * @cmdiocb: pointer to lpfc command iocb data structure. 7480 * 7481 * lpfc_send_rscn_event sends an RSCN netlink event to management 7482 * applications. 7483 */ 7484 static void 7485 lpfc_send_rscn_event(struct lpfc_vport *vport, 7486 struct lpfc_iocbq *cmdiocb) 7487 { 7488 struct lpfc_dmabuf *pcmd; 7489 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7490 uint32_t *payload_ptr; 7491 uint32_t payload_len; 7492 struct lpfc_rscn_event_header *rscn_event_data; 7493 7494 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 7495 payload_ptr = (uint32_t *) pcmd->virt; 7496 payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK); 7497 7498 rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) + 7499 payload_len, GFP_KERNEL); 7500 if (!rscn_event_data) { 7501 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 7502 "0147 Failed to allocate memory for RSCN event\n"); 7503 return; 7504 } 7505 rscn_event_data->event_type = FC_REG_RSCN_EVENT; 7506 rscn_event_data->payload_length = payload_len; 7507 memcpy(rscn_event_data->rscn_payload, payload_ptr, 7508 payload_len); 7509 7510 fc_host_post_vendor_event(shost, 7511 fc_get_event_number(), 7512 sizeof(struct lpfc_rscn_event_header) + payload_len, 7513 (char *)rscn_event_data, 7514 LPFC_NL_VENDOR_ID); 7515 7516 kfree(rscn_event_data); 7517 } 7518 7519 /** 7520 * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb 7521 * @vport: pointer to a host virtual N_Port data structure. 7522 * @cmdiocb: pointer to lpfc command iocb data structure. 7523 * @ndlp: pointer to a node-list data structure. 7524 * 7525 * This routine processes an unsolicited RSCN (Registration State Change 7526 * Notification) IOCB. First, the payload of the unsolicited RSCN is walked 7527 * to invoke fc_host_post_event() routine to the FC transport layer. 
If the 7528 * discover state machine is about to begin discovery, it just accepts the 7529 * RSCN and the discovery process will satisfy the RSCN. If this RSCN only 7530 * contains N_Port IDs for other vports on this HBA, it just accepts the 7531 * RSCN and ignore processing it. If the state machine is in the recovery 7532 * state, the fc_rscn_id_list of this @vport is walked and the 7533 * lpfc_rscn_recovery_check() routine is invoked to send recovery event for 7534 * all nodes that match RSCN payload. Otherwise, the lpfc_els_handle_rscn() 7535 * routine is invoked to handle the RSCN event. 7536 * 7537 * Return code 7538 * 0 - Just sent the acc response 7539 * 1 - Sent the acc response and waited for name server completion 7540 **/ 7541 static int 7542 lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7543 struct lpfc_nodelist *ndlp) 7544 { 7545 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7546 struct lpfc_hba *phba = vport->phba; 7547 struct lpfc_dmabuf *pcmd; 7548 uint32_t *lp, *datap; 7549 uint32_t payload_len, length, nportid, *cmd; 7550 int rscn_cnt; 7551 int rscn_id = 0, hba_id = 0; 7552 int i, tmo; 7553 7554 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 7555 lp = (uint32_t *) pcmd->virt; 7556 7557 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); 7558 payload_len -= sizeof(uint32_t); /* take off word 0 */ 7559 /* RSCN received */ 7560 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 7561 "0214 RSCN received Data: x%x x%x x%x x%x\n", 7562 vport->fc_flag, payload_len, *lp, 7563 vport->fc_rscn_id_cnt); 7564 7565 /* Send an RSCN event to the management application */ 7566 lpfc_send_rscn_event(vport, cmdiocb); 7567 7568 for (i = 0; i < payload_len/sizeof(uint32_t); i++) 7569 fc_host_post_event(shost, fc_get_event_number(), 7570 FCH_EVT_RSCN, lp[i]); 7571 7572 /* Check if RSCN is coming from a direct-connected remote NPort */ 7573 if (vport->fc_flag & FC_PT2PT) { 7574 /* If so, just ACC it, no other action needed for now */ 7575 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7576 "2024 pt2pt RSCN %08x Data: x%x x%x\n", 7577 *lp, vport->fc_flag, payload_len); 7578 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 7579 7580 /* Check to see if we need to NVME rescan this target 7581 * remoteport. 7582 */ 7583 if (ndlp->nlp_fc4_type & NLP_FC4_NVME && 7584 ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY)) 7585 lpfc_nvme_rescan_port(vport, ndlp); 7586 return 0; 7587 } 7588 7589 /* If we are about to begin discovery, just ACC the RSCN. 7590 * Discovery processing will satisfy it. 7591 */ 7592 if (vport->port_state <= LPFC_NS_QRY) { 7593 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 7594 "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x", 7595 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 7596 7597 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 7598 return 0; 7599 } 7600 7601 /* If this RSCN just contains NPortIDs for other vports on this HBA, 7602 * just ACC and ignore it. 
7603 */ 7604 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 7605 !(vport->cfg_peer_port_login)) { 7606 i = payload_len; 7607 datap = lp; 7608 while (i > 0) { 7609 nportid = *datap++; 7610 nportid = ((be32_to_cpu(nportid)) & Mask_DID); 7611 i -= sizeof(uint32_t); 7612 rscn_id++; 7613 if (lpfc_find_vport_by_did(phba, nportid)) 7614 hba_id++; 7615 } 7616 if (rscn_id == hba_id) { 7617 /* ALL NPortIDs in RSCN are on HBA */ 7618 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 7619 "0219 Ignore RSCN " 7620 "Data: x%x x%x x%x x%x\n", 7621 vport->fc_flag, payload_len, 7622 *lp, vport->fc_rscn_id_cnt); 7623 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 7624 "RCV RSCN vport: did:x%x/ste:x%x flg:x%x", 7625 ndlp->nlp_DID, vport->port_state, 7626 ndlp->nlp_flag); 7627 7628 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, 7629 ndlp, NULL); 7630 return 0; 7631 } 7632 } 7633 7634 spin_lock_irq(shost->host_lock); 7635 if (vport->fc_rscn_flush) { 7636 /* Another thread is walking fc_rscn_id_list on this vport */ 7637 vport->fc_flag |= FC_RSCN_DISCOVERY; 7638 spin_unlock_irq(shost->host_lock); 7639 /* Send back ACC */ 7640 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 7641 return 0; 7642 } 7643 /* Indicate we are walking fc_rscn_id_list on this vport */ 7644 vport->fc_rscn_flush = 1; 7645 spin_unlock_irq(shost->host_lock); 7646 /* Get the array count after successfully have the token */ 7647 rscn_cnt = vport->fc_rscn_id_cnt; 7648 /* If we are already processing an RSCN, save the received 7649 * RSCN payload buffer, cmdiocb->context2 to process later. 7650 */ 7651 if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) { 7652 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 7653 "RCV RSCN defer: did:x%x/ste:x%x flg:x%x", 7654 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 7655 7656 spin_lock_irq(shost->host_lock); 7657 vport->fc_flag |= FC_RSCN_DEFERRED; 7658 7659 /* Restart disctmo if its already running */ 7660 if (vport->fc_flag & FC_DISC_TMO) { 7661 tmo = ((phba->fc_ratov * 3) + 3); 7662 mod_timer(&vport->fc_disctmo, 7663 jiffies + msecs_to_jiffies(1000 * tmo)); 7664 } 7665 if ((rscn_cnt < FC_MAX_HOLD_RSCN) && 7666 !(vport->fc_flag & FC_RSCN_DISCOVERY)) { 7667 vport->fc_flag |= FC_RSCN_MODE; 7668 spin_unlock_irq(shost->host_lock); 7669 if (rscn_cnt) { 7670 cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt; 7671 length = be32_to_cpu(*cmd & ~ELS_CMD_MASK); 7672 } 7673 if ((rscn_cnt) && 7674 (payload_len + length <= LPFC_BPL_SIZE)) { 7675 *cmd &= ELS_CMD_MASK; 7676 *cmd |= cpu_to_be32(payload_len + length); 7677 memcpy(((uint8_t *)cmd) + length, lp, 7678 payload_len); 7679 } else { 7680 vport->fc_rscn_id_list[rscn_cnt] = pcmd; 7681 vport->fc_rscn_id_cnt++; 7682 /* If we zero, cmdiocb->context2, the calling 7683 * routine will not try to free it. 
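				 * The saved buffer is then owned by the
				 * fc_rscn_id_list and is freed later by
				 * lpfc_els_flush_rscn().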
7684 */ 7685 cmdiocb->context2 = NULL; 7686 } 7687 /* Deferred RSCN */ 7688 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 7689 "0235 Deferred RSCN " 7690 "Data: x%x x%x x%x\n", 7691 vport->fc_rscn_id_cnt, vport->fc_flag, 7692 vport->port_state); 7693 } else { 7694 vport->fc_flag |= FC_RSCN_DISCOVERY; 7695 spin_unlock_irq(shost->host_lock); 7696 /* ReDiscovery RSCN */ 7697 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 7698 "0234 ReDiscovery RSCN " 7699 "Data: x%x x%x x%x\n", 7700 vport->fc_rscn_id_cnt, vport->fc_flag, 7701 vport->port_state); 7702 } 7703 /* Indicate we are done walking fc_rscn_id_list on this vport */ 7704 vport->fc_rscn_flush = 0; 7705 /* Send back ACC */ 7706 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 7707 /* send RECOVERY event for ALL nodes that match RSCN payload */ 7708 lpfc_rscn_recovery_check(vport); 7709 return 0; 7710 } 7711 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 7712 "RCV RSCN: did:x%x/ste:x%x flg:x%x", 7713 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 7714 7715 spin_lock_irq(shost->host_lock); 7716 vport->fc_flag |= FC_RSCN_MODE; 7717 spin_unlock_irq(shost->host_lock); 7718 vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd; 7719 /* Indicate we are done walking fc_rscn_id_list on this vport */ 7720 vport->fc_rscn_flush = 0; 7721 /* 7722 * If we zero, cmdiocb->context2, the calling routine will 7723 * not try to free it. 7724 */ 7725 cmdiocb->context2 = NULL; 7726 lpfc_set_disctmo(vport); 7727 /* Send back ACC */ 7728 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 7729 /* send RECOVERY event for ALL nodes that match RSCN payload */ 7730 lpfc_rscn_recovery_check(vport); 7731 return lpfc_els_handle_rscn(vport); 7732 } 7733 7734 /** 7735 * lpfc_els_handle_rscn - Handle rscn for a vport 7736 * @vport: pointer to a host virtual N_Port data structure. 7737 * 7738 * This routine handles the Registration State Configuration Notification 7739 * (RSCN) for a @vport. If login to NameServer does not exist, a new ndlp shall 7740 * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise, 7741 * if the ndlp to NameServer exists, a Common Transport (CT) command to the 7742 * NameServer shall be issued. If CT command to the NameServer fails to be 7743 * issued, the lpfc_els_flush_rscn() routine shall be invoked to clean up any 7744 * RSCN activities with the @vport. 7745 * 7746 * Return code 7747 * 0 - Cleaned up rscn on the @vport 7748 * 1 - Wait for plogi to name server before proceed 7749 **/ 7750 int 7751 lpfc_els_handle_rscn(struct lpfc_vport *vport) 7752 { 7753 struct lpfc_nodelist *ndlp; 7754 struct lpfc_hba *phba = vport->phba; 7755 7756 /* Ignore RSCN if the port is being torn down. */ 7757 if (vport->load_flag & FC_UNLOADING) { 7758 lpfc_els_flush_rscn(vport); 7759 return 0; 7760 } 7761 7762 /* Start timer for RSCN processing */ 7763 lpfc_set_disctmo(vport); 7764 7765 /* RSCN processed */ 7766 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 7767 "0215 RSCN processed Data: x%x x%x x%x x%x x%x x%x\n", 7768 vport->fc_flag, 0, vport->fc_rscn_id_cnt, 7769 vport->port_state, vport->num_disc_nodes, 7770 vport->gidft_inp); 7771 7772 /* To process RSCN, first compare RSCN data with NameServer */ 7773 vport->fc_ns_retry = 0; 7774 vport->num_disc_nodes = 0; 7775 7776 ndlp = lpfc_findnode_did(vport, NameServer_DID); 7777 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { 7778 /* Good ndlp, issue CT Request to NameServer. Need to 7779 * know how many gidfts were issued. 
If none, then just
7780 		 * flush the RSCN.  Otherwise, the outstanding requests
7781 		 * need to complete.
7782 		 */
7783 		if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_FT) {
7784 			if (lpfc_issue_gidft(vport) > 0)
7785 				return 1;
7786 		} else if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_PT) {
7787 			if (lpfc_issue_gidpt(vport) > 0)
7788 				return 1;
7789 		} else {
7790 			return 1;
7791 		}
7792 	} else {
7793 		/* Nameserver login in question.  Revalidate. */
7794 		if (ndlp) {
7795 			ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
7796 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
7797 		} else {
7798 			ndlp = lpfc_nlp_init(vport, NameServer_DID);
7799 			if (!ndlp) {
7800 				lpfc_els_flush_rscn(vport);
7801 				return 0;
7802 			}
7803 			ndlp->nlp_prev_state = ndlp->nlp_state;
7804 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
7805 		}
7806 		ndlp->nlp_type |= NLP_FABRIC;
7807 		lpfc_issue_els_plogi(vport, NameServer_DID, 0);
7808 		/* Wait for NameServer login cmpl before we can
7809 		 * continue
7810 		 */
7811 		return 1;
7812 	}
7813 
7814 	lpfc_els_flush_rscn(vport);
7815 	return 0;
7816 }
7817 
7818 /**
7819  * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb
7820  * @vport: pointer to a host virtual N_Port data structure.
7821  * @cmdiocb: pointer to lpfc command iocb data structure.
7822  * @ndlp: pointer to a node-list data structure.
7823  *
7824  * This routine processes a Fabric Login (FLOGI) IOCB received as an ELS
7825  * unsolicited event. An unsolicited FLOGI can be received in a point-to-
7826  * point topology. As an unsolicited FLOGI should not be received in loop
7827  * mode, any unsolicited FLOGI received in loop mode shall be ignored. The
7828  * lpfc_check_sparm() routine is invoked to check the parameters in the
7829  * unsolicited FLOGI. If parameter validation fails, the
7830  * lpfc_els_rsp_reject() routine shall be called with reject reason code set
7831  * to LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the
7832  * FLOGI shall be compared with the Port WWN of the @vport to determine who
7833  * will initiate PLOGI. The party with the higher lexicographical value shall
7834  * have higher priority (as the winning port) and will initiate PLOGI and
7835  * communicate Port_IDs (Addresses) for both nodes in PLOGI. The result
7836  * of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI
7837  * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI.
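 * If this port has not yet issued its own FLOGI (HBA_FLOGI_ISSUED is not
 * set), the ACC is deferred (defer_flogi_acc_flag) until after the driver
 * sends its FLOGI.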
7838 * 7839 * Return code 7840 * 0 - Successfully processed the unsolicited flogi 7841 * 1 - Failed to process the unsolicited flogi 7842 **/ 7843 static int 7844 lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7845 struct lpfc_nodelist *ndlp) 7846 { 7847 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7848 struct lpfc_hba *phba = vport->phba; 7849 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 7850 uint32_t *lp = (uint32_t *) pcmd->virt; 7851 IOCB_t *icmd = &cmdiocb->iocb; 7852 struct serv_parm *sp; 7853 LPFC_MBOXQ_t *mbox; 7854 uint32_t cmd, did; 7855 int rc; 7856 uint32_t fc_flag = 0; 7857 uint32_t port_state = 0; 7858 7859 cmd = *lp++; 7860 sp = (struct serv_parm *) lp; 7861 7862 /* FLOGI received */ 7863 7864 lpfc_set_disctmo(vport); 7865 7866 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 7867 /* We should never receive a FLOGI in loop mode, ignore it */ 7868 did = icmd->un.elsreq64.remoteID; 7869 7870 /* An FLOGI ELS command <elsCmd> was received from DID <did> in 7871 Loop Mode */ 7872 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 7873 "0113 An FLOGI ELS command x%x was " 7874 "received from DID x%x in Loop Mode\n", 7875 cmd, did); 7876 return 1; 7877 } 7878 7879 (void) lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1); 7880 7881 /* 7882 * If our portname is greater than the remote portname, 7883 * then we initiate Nport login. 7884 */ 7885 7886 rc = memcmp(&vport->fc_portname, &sp->portName, 7887 sizeof(struct lpfc_name)); 7888 7889 if (!rc) { 7890 if (phba->sli_rev < LPFC_SLI_REV4) { 7891 mbox = mempool_alloc(phba->mbox_mem_pool, 7892 GFP_KERNEL); 7893 if (!mbox) 7894 return 1; 7895 lpfc_linkdown(phba); 7896 lpfc_init_link(phba, mbox, 7897 phba->cfg_topology, 7898 phba->cfg_link_speed); 7899 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; 7900 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 7901 mbox->vport = vport; 7902 rc = lpfc_sli_issue_mbox(phba, mbox, 7903 MBX_NOWAIT); 7904 lpfc_set_loopback_flag(phba); 7905 if (rc == MBX_NOT_FINISHED) 7906 mempool_free(mbox, phba->mbox_mem_pool); 7907 return 1; 7908 } 7909 7910 /* abort the flogi coming back to ourselves 7911 * due to external loopback on the port. 7912 */ 7913 lpfc_els_abort_flogi(phba); 7914 return 0; 7915 7916 } else if (rc > 0) { /* greater than */ 7917 spin_lock_irq(shost->host_lock); 7918 vport->fc_flag |= FC_PT2PT_PLOGI; 7919 spin_unlock_irq(shost->host_lock); 7920 7921 /* If we have the high WWPN we can assign our own 7922 * myDID; otherwise, we have to WAIT for a PLOGI 7923 * from the remote NPort to find out what it 7924 * will be. 7925 */ 7926 vport->fc_myDID = PT2PT_LocalID; 7927 } else { 7928 vport->fc_myDID = PT2PT_RemoteID; 7929 } 7930 7931 /* 7932 * The vport state should go to LPFC_FLOGI only 7933 * AFTER we issue a FLOGI, not receive one. 7934 */ 7935 spin_lock_irq(shost->host_lock); 7936 fc_flag = vport->fc_flag; 7937 port_state = vport->port_state; 7938 vport->fc_flag |= FC_PT2PT; 7939 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 7940 7941 /* Acking an unsol FLOGI. Count 1 for link bounce 7942 * work-around. 7943 */ 7944 vport->rcv_flogi_cnt++; 7945 spin_unlock_irq(shost->host_lock); 7946 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7947 "3311 Rcv Flogi PS x%x new PS x%x " 7948 "fc_flag x%x new fc_flag x%x\n", 7949 port_state, vport->port_state, 7950 fc_flag, vport->fc_flag); 7951 7952 /* 7953 * We temporarily set fc_myDID to make it look like we are 7954 * a Fabric. This is done just so we end up with the right 7955 * did / sid on the FLOGI ACC rsp. 
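	 * fc_myDID is restored to its original value once the ACC has been
	 * sent or deferred.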
7956 */ 7957 did = vport->fc_myDID; 7958 vport->fc_myDID = Fabric_DID; 7959 7960 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); 7961 7962 /* Defer ACC response until AFTER we issue a FLOGI */ 7963 if (!(phba->hba_flag & HBA_FLOGI_ISSUED)) { 7964 phba->defer_flogi_acc_rx_id = cmdiocb->iocb.ulpContext; 7965 phba->defer_flogi_acc_ox_id = 7966 cmdiocb->iocb.unsli3.rcvsli3.ox_id; 7967 7968 vport->fc_myDID = did; 7969 7970 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7971 "3344 Deferring FLOGI ACC: rx_id: x%x," 7972 " ox_id: x%x, hba_flag x%x\n", 7973 phba->defer_flogi_acc_rx_id, 7974 phba->defer_flogi_acc_ox_id, phba->hba_flag); 7975 7976 phba->defer_flogi_acc_flag = true; 7977 7978 return 0; 7979 } 7980 7981 /* Send back ACC */ 7982 lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, cmdiocb, ndlp, NULL); 7983 7984 /* Now lets put fc_myDID back to what its supposed to be */ 7985 vport->fc_myDID = did; 7986 7987 return 0; 7988 } 7989 7990 /** 7991 * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb 7992 * @vport: pointer to a host virtual N_Port data structure. 7993 * @cmdiocb: pointer to lpfc command iocb data structure. 7994 * @ndlp: pointer to a node-list data structure. 7995 * 7996 * This routine processes Request Node Identification Data (RNID) IOCB 7997 * received as an ELS unsolicited event. Only when the RNID specified format 7998 * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data) 7999 * present, this routine will invoke the lpfc_els_rsp_rnid_acc() routine to 8000 * Accept (ACC) the RNID ELS command. All the other RNID formats are 8001 * rejected by invoking the lpfc_els_rsp_reject() routine. 8002 * 8003 * Return code 8004 * 0 - Successfully processed rnid iocb (currently always return 0) 8005 **/ 8006 static int 8007 lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8008 struct lpfc_nodelist *ndlp) 8009 { 8010 struct lpfc_dmabuf *pcmd; 8011 uint32_t *lp; 8012 RNID *rn; 8013 struct ls_rjt stat; 8014 8015 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 8016 lp = (uint32_t *) pcmd->virt; 8017 8018 lp++; 8019 rn = (RNID *) lp; 8020 8021 /* RNID received */ 8022 8023 switch (rn->Format) { 8024 case 0: 8025 case RNID_TOPOLOGY_DISC: 8026 /* Send back ACC */ 8027 lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp); 8028 break; 8029 default: 8030 /* Reject this request because format not supported */ 8031 stat.un.b.lsRjtRsvd0 = 0; 8032 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8033 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8034 stat.un.b.vendorUnique = 0; 8035 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 8036 NULL); 8037 } 8038 return 0; 8039 } 8040 8041 /** 8042 * lpfc_els_rcv_echo - Process an unsolicited echo iocb 8043 * @vport: pointer to a host virtual N_Port data structure. 8044 * @cmdiocb: pointer to lpfc command iocb data structure. 8045 * @ndlp: pointer to a node-list data structure. 
8046 * 8047 * Return code 8048 * 0 - Successfully processed echo iocb (currently always return 0) 8049 **/ 8050 static int 8051 lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8052 struct lpfc_nodelist *ndlp) 8053 { 8054 uint8_t *pcmd; 8055 8056 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt); 8057 8058 /* skip over first word of echo command to find echo data */ 8059 pcmd += sizeof(uint32_t); 8060 8061 lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp); 8062 return 0; 8063 } 8064 8065 /** 8066 * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb 8067 * @vport: pointer to a host virtual N_Port data structure. 8068 * @cmdiocb: pointer to lpfc command iocb data structure. 8069 * @ndlp: pointer to a node-list data structure. 8070 * 8071 * This routine processes a Link Incident Report Registration(LIRR) IOCB 8072 * received as an ELS unsolicited event. Currently, this function just invokes 8073 * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally. 8074 * 8075 * Return code 8076 * 0 - Successfully processed lirr iocb (currently always return 0) 8077 **/ 8078 static int 8079 lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8080 struct lpfc_nodelist *ndlp) 8081 { 8082 struct ls_rjt stat; 8083 8084 /* For now, unconditionally reject this command */ 8085 stat.un.b.lsRjtRsvd0 = 0; 8086 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8087 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8088 stat.un.b.vendorUnique = 0; 8089 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 8090 return 0; 8091 } 8092 8093 /** 8094 * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb 8095 * @vport: pointer to a host virtual N_Port data structure. 8096 * @cmdiocb: pointer to lpfc command iocb data structure. 8097 * @ndlp: pointer to a node-list data structure. 8098 * 8099 * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB 8100 * received as an ELS unsolicited event. A request to RRQ shall only 8101 * be accepted if the Originator Nx_Port N_Port_ID or the Responder 8102 * Nx_Port N_Port_ID of the target Exchange is the same as the 8103 * N_Port_ID of the Nx_Port that makes the request. If the RRQ is 8104 * not accepted, an LS_RJT with reason code "Unable to perform 8105 * command request" and reason code explanation "Invalid Originator 8106 * S_ID" shall be returned. For now, we just unconditionally accept 8107 * RRQ from the target. 8108 **/ 8109 static void 8110 lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8111 struct lpfc_nodelist *ndlp) 8112 { 8113 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8114 if (vport->phba->sli_rev == LPFC_SLI_REV4) 8115 lpfc_els_clear_rrq(vport, cmdiocb, ndlp); 8116 } 8117 8118 /** 8119 * lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd 8120 * @phba: pointer to lpfc hba data structure. 8121 * @pmb: pointer to the driver internal queue element for mailbox command. 8122 * 8123 * This routine is the completion callback function for the MBX_READ_LNK_STAT 8124 * mailbox command. This callback function is to actually send the Accept 8125 * (ACC) response to a Read Link Status (RLS) unsolicited IOCB event. It 8126 * collects the link statistics from the completion of the MBX_READ_LNK_STAT 8127 * mailbox command, constructs the RLS response with the link statistics 8128 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC 8129 * response to the RLS. 
8130 * 8131 * Note that the ndlp reference count will be incremented by 1 for holding the 8132 * ndlp and the reference to ndlp will be stored into the context1 field of 8133 * the IOCB for the completion callback function to the RLS Accept Response 8134 * ELS IOCB command. 8135 * 8136 **/ 8137 static void 8138 lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 8139 { 8140 int rc = 0; 8141 MAILBOX_t *mb; 8142 IOCB_t *icmd; 8143 struct RLS_RSP *rls_rsp; 8144 uint8_t *pcmd; 8145 struct lpfc_iocbq *elsiocb; 8146 struct lpfc_nodelist *ndlp; 8147 uint16_t oxid; 8148 uint16_t rxid; 8149 uint32_t cmdsize; 8150 8151 mb = &pmb->u.mb; 8152 8153 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; 8154 rxid = (uint16_t)((unsigned long)(pmb->ctx_buf) & 0xffff); 8155 oxid = (uint16_t)(((unsigned long)(pmb->ctx_buf) >> 16) & 0xffff); 8156 pmb->ctx_buf = NULL; 8157 pmb->ctx_ndlp = NULL; 8158 8159 if (mb->mbxStatus) { 8160 mempool_free(pmb, phba->mbox_mem_pool); 8161 return; 8162 } 8163 8164 cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t); 8165 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 8166 lpfc_max_els_tries, ndlp, 8167 ndlp->nlp_DID, ELS_CMD_ACC); 8168 8169 /* Decrement the ndlp reference count from previous mbox command */ 8170 lpfc_nlp_put(ndlp); 8171 8172 if (!elsiocb) { 8173 mempool_free(pmb, phba->mbox_mem_pool); 8174 return; 8175 } 8176 8177 icmd = &elsiocb->iocb; 8178 icmd->ulpContext = rxid; 8179 icmd->unsli3.rcvsli3.ox_id = oxid; 8180 8181 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 8182 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 8183 pcmd += sizeof(uint32_t); /* Skip past command */ 8184 rls_rsp = (struct RLS_RSP *)pcmd; 8185 8186 rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt); 8187 rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt); 8188 rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt); 8189 rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt); 8190 rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord); 8191 rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt); 8192 mempool_free(pmb, phba->mbox_mem_pool); 8193 /* Xmit ELS RLS ACC response tag <ulpIoTag> */ 8194 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 8195 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, " 8196 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 8197 elsiocb->iotag, elsiocb->iocb.ulpContext, 8198 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 8199 ndlp->nlp_rpi); 8200 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 8201 phba->fc_stat.elsXmitACC++; 8202 elsiocb->context1 = lpfc_nlp_get(ndlp); 8203 if (!elsiocb->context1) { 8204 lpfc_els_free_iocb(phba, elsiocb); 8205 return; 8206 } 8207 8208 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 8209 if (rc == IOCB_ERROR) { 8210 lpfc_els_free_iocb(phba, elsiocb); 8211 lpfc_nlp_put(ndlp); 8212 } 8213 return; 8214 } 8215 8216 /** 8217 * lpfc_els_rcv_rls - Process an unsolicited rls iocb 8218 * @vport: pointer to a host virtual N_Port data structure. 8219 * @cmdiocb: pointer to lpfc command iocb data structure. 8220 * @ndlp: pointer to a node-list data structure. 8221 * 8222 * This routine processes Read Link Status (RLS) IOCB received as an 8223 * ELS unsolicited event. It first checks the remote port state. If the 8224 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE 8225 * state, it invokes the lpfc_els_rsl_reject() routine to send the reject 8226 * response. 
Otherwise, it issues the MBX_READ_LNK_STAT mailbox command
8227  * for reading the HBA link statistics. The callback function set on the
8228  * MBX_READ_LNK_STAT mailbox command, lpfc_els_rsp_rls_acc(), then actually
8229  * sends out the RLS Accept (ACC) response.
8230  *
8231  * Return codes
8232  *  0 - Successfully processed rls iocb (currently always returns 0)
8233  **/
8234 static int
8235 lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
8236 		 struct lpfc_nodelist *ndlp)
8237 {
8238 	struct lpfc_hba *phba = vport->phba;
8239 	LPFC_MBOXQ_t *mbox;
8240 	struct ls_rjt stat;
8241 
8242 	if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
8243 	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
8244 		/* reject the unsolicited RLS request and done with it */
8245 		goto reject_out;
8246 
8247 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
8248 	if (mbox) {
8249 		lpfc_read_lnk_stat(phba, mbox);
8250 		mbox->ctx_buf = (void *)((unsigned long)
8251 			((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
8252 			cmdiocb->iocb.ulpContext)); /* rx_id */
8253 		mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
8254 		if (!mbox->ctx_ndlp)
8255 			goto node_err;
8256 		mbox->vport = vport;
8257 		mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
8258 		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
8259 			!= MBX_NOT_FINISHED)
8260 			/* Mbox completion will send ELS Response */
8261 			return 0;
8262 		/* Decrement reference count used for the failed mbox
8263 		 * command.
8264 		 */
8265 		lpfc_nlp_put(ndlp);
8266 node_err:
8267 		mempool_free(mbox, phba->mbox_mem_pool);
8268 	}
8269 reject_out:
8270 	/* issue rejection response */
8271 	stat.un.b.lsRjtRsvd0 = 0;
8272 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
8273 	stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
8274 	stat.un.b.vendorUnique = 0;
8275 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
8276 	return 0;
8277 }
8278 
8279 /**
8280  * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb
8281  * @vport: pointer to a host virtual N_Port data structure.
8282  * @cmdiocb: pointer to lpfc command iocb data structure.
8283  * @ndlp: pointer to a node-list data structure.
8284  *
8285  * This routine processes a Read Timeout Value (RTV) IOCB received as an
8286  * ELS unsolicited event. It first checks the remote port state. If the
8287  * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
8288  * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
8289  * response. Otherwise, it sends the Accept (ACC) response to the Read Timeout
8290  * Value (RTV) unsolicited IOCB event.
8291  *
8292  * Note that the ndlp reference count will be incremented by 1 for holding the
8293  * ndlp and the reference to ndlp will be stored into the context1 field of
8294  * the IOCB for the completion callback function to the RTV Accept Response
8295  * ELS IOCB command.
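 * The R_A_TOV value in the response is reported in milliseconds
 * (phba->fc_ratov * 1000) and the E_D_TOV resolution setting
 * (fc_edtovResol) is reflected in the response as well.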
8296  *
8297  * Return codes
8298  *  0 - Successfully processed rtv iocb (currently always returns 0)
8299  **/
8300 static int
8301 lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
8302 		 struct lpfc_nodelist *ndlp)
8303 {
8304 	int rc = 0;
8305 	struct lpfc_hba *phba = vport->phba;
8306 	struct ls_rjt stat;
8307 	struct RTV_RSP *rtv_rsp;
8308 	uint8_t *pcmd;
8309 	struct lpfc_iocbq *elsiocb;
8310 	uint32_t cmdsize;
8311 
8312 
8313 	if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
8314 	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
8315 		/* reject the unsolicited RTV request and done with it */
8316 		goto reject_out;
8317 
8318 	cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t);
8319 	elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
8320 				     lpfc_max_els_tries, ndlp,
8321 				     ndlp->nlp_DID, ELS_CMD_ACC);
8322 
8323 	if (!elsiocb)
8324 		return 1;
8325 
8326 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
8327 	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
8328 	pcmd += sizeof(uint32_t); /* Skip past command */
8329 
8330 	/* use the command's xri in the response */
8331 	elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext;  /* Xri / rx_id */
8332 	elsiocb->iocb.unsli3.rcvsli3.ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id;
8333 
8334 	rtv_rsp = (struct RTV_RSP *)pcmd;
8335 
8336 	/* populate RTV payload */
8337 	rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */
8338 	rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov);
8339 	bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0);
8340 	bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */
8341 	rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov);
8342 
8343 	/* Xmit ELS RTV ACC response tag <ulpIoTag> */
8344 	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
8345 			 "2875 Xmit ELS RTV ACC response tag x%x xri x%x, "
8346 			 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, "
8347 			 "Data: x%x x%x x%x\n",
8348 			 elsiocb->iotag, elsiocb->iocb.ulpContext,
8349 			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
8350 			 ndlp->nlp_rpi,
8351 			 rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov);
8352 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
8353 	phba->fc_stat.elsXmitACC++;
8354 	elsiocb->context1 = lpfc_nlp_get(ndlp);
8355 	if (!elsiocb->context1) {
8356 		lpfc_els_free_iocb(phba, elsiocb);
8357 		return 0;
8358 	}
8359 
8360 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
8361 	if (rc == IOCB_ERROR) {
8362 		lpfc_els_free_iocb(phba, elsiocb);
8363 		lpfc_nlp_put(ndlp);
8364 	}
8365 	return 0;
8366 
8367 reject_out:
8368 	/* issue rejection response */
8369 	stat.un.b.lsRjtRsvd0 = 0;
8370 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
8371 	stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
8372 	stat.un.b.vendorUnique = 0;
8373 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
8374 	return 0;
8375 }
8376 
8377 /* lpfc_issue_els_rrq - Issue an ELS RRQ command
8378  * @vport: pointer to a host virtual N_Port data structure.
8379  * @ndlp: pointer to a node-list data structure.
8380  * @did: DID of the target.
8381  * @rrq: Pointer to the rrq struct.
8382  *
8383  * Build an ELS RRQ command and send it to the target. If the issue_iocb is
8384  * successful, the completion handler will clear the RRQ.
8385  *
8386  * Return codes
8387  *  0 - Successfully sent rrq els iocb.
8388  *  1 - Failed to send rrq els iocb.
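 * The rrq pointer is attached to the iocb (context_un.rrq) so that the
 * completion handler, lpfc_cmpl_els_rrq, can clear the active RRQ.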
8389 **/ 8390 static int 8391 lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 8392 uint32_t did, struct lpfc_node_rrq *rrq) 8393 { 8394 struct lpfc_hba *phba = vport->phba; 8395 struct RRQ *els_rrq; 8396 struct lpfc_iocbq *elsiocb; 8397 uint8_t *pcmd; 8398 uint16_t cmdsize; 8399 int ret; 8400 8401 if (!ndlp) 8402 return 1; 8403 8404 /* If ndlp is not NULL, we will bump the reference count on it */ 8405 cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ)); 8406 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did, 8407 ELS_CMD_RRQ); 8408 if (!elsiocb) 8409 return 1; 8410 8411 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 8412 8413 /* For RRQ request, remainder of payload is Exchange IDs */ 8414 *((uint32_t *) (pcmd)) = ELS_CMD_RRQ; 8415 pcmd += sizeof(uint32_t); 8416 els_rrq = (struct RRQ *) pcmd; 8417 8418 bf_set(rrq_oxid, els_rrq, phba->sli4_hba.xri_ids[rrq->xritag]); 8419 bf_set(rrq_rxid, els_rrq, rrq->rxid); 8420 bf_set(rrq_did, els_rrq, vport->fc_myDID); 8421 els_rrq->rrq = cpu_to_be32(els_rrq->rrq); 8422 els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg); 8423 8424 8425 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 8426 "Issue RRQ: did:x%x", 8427 did, rrq->xritag, rrq->rxid); 8428 elsiocb->context_un.rrq = rrq; 8429 elsiocb->iocb_cmpl = lpfc_cmpl_els_rrq; 8430 8431 lpfc_nlp_get(ndlp); 8432 elsiocb->context1 = ndlp; 8433 8434 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 8435 if (ret == IOCB_ERROR) 8436 goto io_err; 8437 return 0; 8438 8439 io_err: 8440 lpfc_els_free_iocb(phba, elsiocb); 8441 lpfc_nlp_put(ndlp); 8442 return 1; 8443 } 8444 8445 /** 8446 * lpfc_send_rrq - Sends ELS RRQ if needed. 8447 * @phba: pointer to lpfc hba data structure. 8448 * @rrq: pointer to the active rrq. 8449 * 8450 * This routine will call the lpfc_issue_els_rrq if the rrq is 8451 * still active for the xri. If this function returns a failure then 8452 * the caller needs to clean up the RRQ by calling lpfc_clr_active_rrq. 8453 * 8454 * Returns 0 Success. 8455 * 1 Failure. 8456 **/ 8457 int 8458 lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq) 8459 { 8460 struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport, 8461 rrq->nlp_DID); 8462 if (!ndlp) 8463 return 1; 8464 8465 if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag)) 8466 return lpfc_issue_els_rrq(rrq->vport, ndlp, 8467 rrq->nlp_DID, rrq); 8468 else 8469 return 1; 8470 } 8471 8472 /** 8473 * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command 8474 * @vport: pointer to a host virtual N_Port data structure. 8475 * @cmdsize: size of the ELS command. 8476 * @oldiocb: pointer to the original lpfc command iocb data structure. 8477 * @ndlp: pointer to a node-list data structure. 8478 * 8479 * This routine issuees an Accept (ACC) Read Port List (RPL) ELS command. 8480 * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL. 8481 * 8482 * Note that the ndlp reference count will be incremented by 1 for holding the 8483 * ndlp and the reference to ndlp will be stored into the context1 field of 8484 * the IOCB for the completion callback function to the RPL Accept Response 8485 * ELS command. 
8486 * 8487 * Return code 8488 * 0 - Successfully issued ACC RPL ELS command 8489 * 1 - Failed to issue ACC RPL ELS command 8490 **/ 8491 static int 8492 lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize, 8493 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 8494 { 8495 int rc = 0; 8496 struct lpfc_hba *phba = vport->phba; 8497 IOCB_t *icmd, *oldcmd; 8498 RPL_RSP rpl_rsp; 8499 struct lpfc_iocbq *elsiocb; 8500 uint8_t *pcmd; 8501 8502 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 8503 ndlp->nlp_DID, ELS_CMD_ACC); 8504 8505 if (!elsiocb) 8506 return 1; 8507 8508 icmd = &elsiocb->iocb; 8509 oldcmd = &oldiocb->iocb; 8510 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 8511 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 8512 8513 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 8514 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 8515 pcmd += sizeof(uint16_t); 8516 *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize); 8517 pcmd += sizeof(uint16_t); 8518 8519 /* Setup the RPL ACC payload */ 8520 rpl_rsp.listLen = be32_to_cpu(1); 8521 rpl_rsp.index = 0; 8522 rpl_rsp.port_num_blk.portNum = 0; 8523 rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID); 8524 memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname, 8525 sizeof(struct lpfc_name)); 8526 memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t)); 8527 /* Xmit ELS RPL ACC response tag <ulpIoTag> */ 8528 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8529 "0120 Xmit ELS RPL ACC response tag x%x " 8530 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " 8531 "rpi x%x\n", 8532 elsiocb->iotag, elsiocb->iocb.ulpContext, 8533 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 8534 ndlp->nlp_rpi); 8535 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 8536 phba->fc_stat.elsXmitACC++; 8537 elsiocb->context1 = lpfc_nlp_get(ndlp); 8538 if (!elsiocb->context1) { 8539 lpfc_els_free_iocb(phba, elsiocb); 8540 return 1; 8541 } 8542 8543 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 8544 if (rc == IOCB_ERROR) { 8545 lpfc_els_free_iocb(phba, elsiocb); 8546 lpfc_nlp_put(ndlp); 8547 return 1; 8548 } 8549 8550 return 0; 8551 } 8552 8553 /** 8554 * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb 8555 * @vport: pointer to a host virtual N_Port data structure. 8556 * @cmdiocb: pointer to lpfc command iocb data structure. 8557 * @ndlp: pointer to a node-list data structure. 8558 * 8559 * This routine processes Read Port List (RPL) IOCB received as an ELS 8560 * unsolicited event. It first checks the remote port state. If the remote 8561 * port is not in NLP_STE_UNMAPPED_NODE and NLP_STE_MAPPED_NODE states, it 8562 * invokes the lpfc_els_rsp_reject() routine to send reject response. 8563 * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine 8564 * to accept the RPL. 
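 * The RPL ACC built by lpfc_els_rsp_rpl_acc() always describes a single
 * entry: this vport's own N_Port_ID and Port Name.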
8565 * 8566 * Return code 8567 * 0 - Successfully processed rpl iocb (currently always return 0) 8568 **/ 8569 static int 8570 lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8571 struct lpfc_nodelist *ndlp) 8572 { 8573 struct lpfc_dmabuf *pcmd; 8574 uint32_t *lp; 8575 uint32_t maxsize; 8576 uint16_t cmdsize; 8577 RPL *rpl; 8578 struct ls_rjt stat; 8579 8580 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 8581 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { 8582 /* issue rejection response */ 8583 stat.un.b.lsRjtRsvd0 = 0; 8584 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8585 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8586 stat.un.b.vendorUnique = 0; 8587 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 8588 NULL); 8589 /* rejected the unsolicited RPL request and done with it */ 8590 return 0; 8591 } 8592 8593 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 8594 lp = (uint32_t *) pcmd->virt; 8595 rpl = (RPL *) (lp + 1); 8596 maxsize = be32_to_cpu(rpl->maxsize); 8597 8598 /* We support only one port */ 8599 if ((rpl->index == 0) && 8600 ((maxsize == 0) || 8601 ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) { 8602 cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP); 8603 } else { 8604 cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t); 8605 } 8606 lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp); 8607 8608 return 0; 8609 } 8610 8611 /** 8612 * lpfc_els_rcv_farp - Process an unsolicited farp request els command 8613 * @vport: pointer to a virtual N_Port data structure. 8614 * @cmdiocb: pointer to lpfc command iocb data structure. 8615 * @ndlp: pointer to a node-list data structure. 8616 * 8617 * This routine processes Fibre Channel Address Resolution Protocol 8618 * (FARP) Request IOCB received as an ELS unsolicited event. Currently, 8619 * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such, 8620 * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the 8621 * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the 8622 * remote PortName is compared against the FC PortName stored in the @vport 8623 * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is 8624 * compared against the FC NodeName stored in the @vport data structure. 8625 * If any of these matches and the FARP_REQUEST_FARPR flag is set in the 8626 * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is 8627 * invoked to send out FARP Response to the remote node. Before sending the 8628 * FARP Response, however, the FARP_REQUEST_PLOGI flag is checked in the FARP 8629 * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi() 8630 * routine is invoked to log into the remote port first.
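 *
 * A request that asks for any other match mode is quietly ignored: no
 * FARP Response and no reject is generated, and the routine simply
 * returns 0.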
8631 * 8632 * Return code 8633 * 0 - Either the FARP Match Mode not supported or successfully processed 8634 **/ 8635 static int 8636 lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8637 struct lpfc_nodelist *ndlp) 8638 { 8639 struct lpfc_dmabuf *pcmd; 8640 uint32_t *lp; 8641 IOCB_t *icmd; 8642 FARP *fp; 8643 uint32_t cnt, did; 8644 8645 icmd = &cmdiocb->iocb; 8646 did = icmd->un.elsreq64.remoteID; 8647 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 8648 lp = (uint32_t *) pcmd->virt; 8649 8650 lp++; 8651 fp = (FARP *) lp; 8652 /* FARP-REQ received from DID <did> */ 8653 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8654 "0601 FARP-REQ received from DID x%x\n", did); 8655 /* We will only support match on WWPN or WWNN */ 8656 if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) { 8657 return 0; 8658 } 8659 8660 cnt = 0; 8661 /* If this FARP command is searching for my portname */ 8662 if (fp->Mflags & FARP_MATCH_PORT) { 8663 if (memcmp(&fp->RportName, &vport->fc_portname, 8664 sizeof(struct lpfc_name)) == 0) 8665 cnt = 1; 8666 } 8667 8668 /* If this FARP command is searching for my nodename */ 8669 if (fp->Mflags & FARP_MATCH_NODE) { 8670 if (memcmp(&fp->RnodeName, &vport->fc_nodename, 8671 sizeof(struct lpfc_name)) == 0) 8672 cnt = 1; 8673 } 8674 8675 if (cnt) { 8676 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || 8677 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) { 8678 /* Log back into the node before sending the FARP. */ 8679 if (fp->Rflags & FARP_REQUEST_PLOGI) { 8680 ndlp->nlp_prev_state = ndlp->nlp_state; 8681 lpfc_nlp_set_state(vport, ndlp, 8682 NLP_STE_PLOGI_ISSUE); 8683 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 8684 } 8685 8686 /* Send a FARP response to that node */ 8687 if (fp->Rflags & FARP_REQUEST_FARPR) 8688 lpfc_issue_els_farpr(vport, did, 0); 8689 } 8690 } 8691 return 0; 8692 } 8693 8694 /** 8695 * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb 8696 * @vport: pointer to a host virtual N_Port data structure. 8697 * @cmdiocb: pointer to lpfc command iocb data structure. 8698 * @ndlp: pointer to a node-list data structure. 8699 * 8700 * This routine processes Fibre Channel Address Resolution Protocol 8701 * Response (FARPR) IOCB received as an ELS unsolicited event. It simply 8702 * invokes the lpfc_els_rsp_acc() routine to the remote node to accept 8703 * the FARP response request. 8704 * 8705 * Return code 8706 * 0 - Successfully processed FARPR IOCB (currently always return 0) 8707 **/ 8708 static int 8709 lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8710 struct lpfc_nodelist *ndlp) 8711 { 8712 struct lpfc_dmabuf *pcmd; 8713 uint32_t *lp; 8714 IOCB_t *icmd; 8715 uint32_t did; 8716 8717 icmd = &cmdiocb->iocb; 8718 did = icmd->un.elsreq64.remoteID; 8719 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 8720 lp = (uint32_t *) pcmd->virt; 8721 8722 lp++; 8723 /* FARP-RSP received from DID <did> */ 8724 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8725 "0600 FARP-RSP received from DID x%x\n", did); 8726 /* ACCEPT the Farp resp request */ 8727 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8728 8729 return 0; 8730 } 8731 8732 /** 8733 * lpfc_els_rcv_fan - Process an unsolicited fan iocb command 8734 * @vport: pointer to a host virtual N_Port data structure. 8735 * @cmdiocb: pointer to lpfc command iocb data structure. 8736 * @fan_ndlp: pointer to a node-list data structure. 
8737 * 8738 * This routine processes a Fabric Address Notification (FAN) IOCB 8739 * command received as an ELS unsolicited event. The FAN ELS command will 8740 * only be processed on a physical port (i.e., the @vport represents the 8741 * physical port). The fabric NodeName and PortName from the FAN IOCB are 8742 * compared against those in the phba data structure. If either of those is 8743 * different, the lpfc_issue_init_vfi() routine is invoked to redo the 8744 * Fabric Login (FLOGI) to the fabric and start the discovery over. Otherwise, 8745 * if both of those are identical, the lpfc_issue_fabric_reglogin() routine 8746 * (or lpfc_issue_reg_vfi() on SLI4) is invoked to register login to the fabric. 8747 * 8748 * Return code 8749 * 0 - Successfully processed fan iocb (currently always return 0). 8750 **/ 8751 static int 8752 lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8753 struct lpfc_nodelist *fan_ndlp) 8754 { 8755 struct lpfc_hba *phba = vport->phba; 8756 uint32_t *lp; 8757 FAN *fp; 8758 8759 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n"); 8760 lp = (uint32_t *)((struct lpfc_dmabuf *)cmdiocb->context2)->virt; 8761 fp = (FAN *) ++lp; 8762 /* FAN received; Fan does not have a reply sequence */ 8763 if ((vport == phba->pport) && 8764 (vport->port_state == LPFC_LOCAL_CFG_LINK)) { 8765 if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName, 8766 sizeof(struct lpfc_name))) || 8767 (memcmp(&phba->fc_fabparam.portName, &fp->FportName, 8768 sizeof(struct lpfc_name)))) { 8769 /* This port has switched fabrics. FLOGI is required */ 8770 lpfc_issue_init_vfi(vport); 8771 } else { 8772 /* FAN verified - skip FLOGI */ 8773 vport->fc_myDID = vport->fc_prevDID; 8774 if (phba->sli_rev < LPFC_SLI_REV4) 8775 lpfc_issue_fabric_reglogin(vport); 8776 else { 8777 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8778 "3138 Need register VFI: (x%x/%x)\n", 8779 vport->fc_prevDID, vport->fc_myDID); 8780 lpfc_issue_reg_vfi(vport); 8781 } 8782 } 8783 } 8784 return 0; 8785 } 8786 8787 /** 8788 * lpfc_els_rcv_edc - Process an unsolicited EDC iocb 8789 * @vport: pointer to a host virtual N_Port data structure. 8790 * @cmdiocb: pointer to lpfc command iocb data structure. 8791 * @ndlp: pointer to a node-list data structure.
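 *
 * This routine processes an Exchange Diagnostic Capabilities (EDC) ELS
 * request received as an unsolicited event. It walks the request's
 * diagnostic descriptor TLVs looking for a congestion signaling capability
 * descriptor, adopts the least capable common settings when one is found,
 * and always answers the request with an EDC accept via
 * lpfc_issue_els_edc_rsp().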
8792 * 8793 * Return code 8794 * 0 - Successfully processed echo iocb (currently always return 0) 8795 **/ 8796 static int 8797 lpfc_els_rcv_edc(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8798 struct lpfc_nodelist *ndlp) 8799 { 8800 struct lpfc_hba *phba = vport->phba; 8801 struct fc_els_edc *edc_req; 8802 struct fc_tlv_desc *tlv; 8803 uint8_t *payload; 8804 uint32_t *ptr, dtag; 8805 const char *dtag_nm; 8806 int desc_cnt = 0, bytes_remain; 8807 bool rcv_cap_desc = false; 8808 8809 payload = ((struct lpfc_dmabuf *)cmdiocb->context2)->virt; 8810 8811 edc_req = (struct fc_els_edc *)payload; 8812 bytes_remain = be32_to_cpu(edc_req->desc_len); 8813 8814 ptr = (uint32_t *)payload; 8815 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 8816 "3319 Rcv EDC payload len %d: x%x x%x x%x\n", 8817 bytes_remain, be32_to_cpu(*ptr), 8818 be32_to_cpu(*(ptr + 1)), be32_to_cpu(*(ptr + 2))); 8819 8820 /* No signal support unless there is a congestion descriptor */ 8821 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 8822 phba->cgn_sig_freq = 0; 8823 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; 8824 8825 if (bytes_remain <= 0) 8826 goto out; 8827 8828 tlv = edc_req->desc; 8829 8830 /* 8831 * cycle through EDC diagnostic descriptors to find the 8832 * congestion signaling capability descriptor 8833 */ 8834 while (bytes_remain && !rcv_cap_desc) { 8835 if (bytes_remain < FC_TLV_DESC_HDR_SZ) { 8836 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 8837 "6464 Truncated TLV hdr on " 8838 "Diagnostic descriptor[%d]\n", 8839 desc_cnt); 8840 goto out; 8841 } 8842 8843 dtag = be32_to_cpu(tlv->desc_tag); 8844 switch (dtag) { 8845 case ELS_DTAG_LNK_FAULT_CAP: 8846 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 8847 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 8848 sizeof(struct fc_diag_lnkflt_desc)) { 8849 lpfc_printf_log( 8850 phba, KERN_WARNING, LOG_CGN_MGMT, 8851 "6465 Truncated Link Fault Diagnostic " 8852 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 8853 desc_cnt, bytes_remain, 8854 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 8855 sizeof(struct fc_diag_cg_sig_desc)); 8856 goto out; 8857 } 8858 /* No action for Link Fault descriptor for now */ 8859 break; 8860 case ELS_DTAG_CG_SIGNAL_CAP: 8861 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 8862 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 8863 sizeof(struct fc_diag_cg_sig_desc)) { 8864 lpfc_printf_log( 8865 phba, KERN_WARNING, LOG_CGN_MGMT, 8866 "6466 Truncated cgn signal Diagnostic " 8867 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 8868 desc_cnt, bytes_remain, 8869 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 8870 sizeof(struct fc_diag_cg_sig_desc)); 8871 goto out; 8872 } 8873 8874 phba->cgn_reg_fpin = phba->cgn_init_reg_fpin; 8875 phba->cgn_reg_signal = phba->cgn_init_reg_signal; 8876 8877 /* We start negotiation with lpfc_fabric_cgn_frequency. 8878 * When we process the EDC, we will settle on the 8879 * higher frequency. 
8880 */ 8881 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency; 8882 8883 lpfc_least_capable_settings( 8884 phba, (struct fc_diag_cg_sig_desc *)tlv); 8885 rcv_cap_desc = true; 8886 break; 8887 default: 8888 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 8889 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 8890 "6467 unknown Diagnostic " 8891 "Descriptor[%d]: tag x%x (%s)\n", 8892 desc_cnt, dtag, dtag_nm); 8893 } 8894 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); 8895 tlv = fc_tlv_next_desc(tlv); 8896 desc_cnt++; 8897 } 8898 out: 8899 /* Need to send back an ACC */ 8900 lpfc_issue_els_edc_rsp(vport, cmdiocb, ndlp); 8901 8902 lpfc_config_cgn_signal(phba); 8903 return 0; 8904 } 8905 8906 /** 8907 * lpfc_els_timeout - Handler function for the els timer 8908 * @t: timer context used to obtain the vport. 8909 * 8910 * This routine is invoked by the ELS timer after timeout. It posts the ELS 8911 * timer timeout event by setting the WORKER_ELS_TMO bit to the work port 8912 * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake 8913 * up the worker thread. It is for the worker thread to invoke the routine 8914 * lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO. 8915 **/ 8916 void 8917 lpfc_els_timeout(struct timer_list *t) 8918 { 8919 struct lpfc_vport *vport = from_timer(vport, t, els_tmofunc); 8920 struct lpfc_hba *phba = vport->phba; 8921 uint32_t tmo_posted; 8922 unsigned long iflag; 8923 8924 spin_lock_irqsave(&vport->work_port_lock, iflag); 8925 tmo_posted = vport->work_port_events & WORKER_ELS_TMO; 8926 if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING))) 8927 vport->work_port_events |= WORKER_ELS_TMO; 8928 spin_unlock_irqrestore(&vport->work_port_lock, iflag); 8929 8930 if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING))) 8931 lpfc_worker_wake_up(phba); 8932 return; 8933 } 8934 8935 8936 /** 8937 * lpfc_els_timeout_handler - Process an els timeout event 8938 * @vport: pointer to a virtual N_Port data structure. 8939 * 8940 * This routine is the actual handler function that processes an ELS timeout 8941 * event. It walks the ELS ring to get and abort all the IOCBs (except the 8942 * ABORT/CLOSE/FARP/FARPR/FDISC), which are associated with the @vport by 8943 * invoking the lpfc_sli_issue_abort_iotag() routine.
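 *
 * The sweep interval is twice the R_A_TOV value (phba->fc_ratov << 1);
 * IOCBs whose drvrTimeout has not yet expired are decremented by that
 * amount and revisited on a later sweep rather than aborted immediately.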
8944 **/ 8945 void 8946 lpfc_els_timeout_handler(struct lpfc_vport *vport) 8947 { 8948 struct lpfc_hba *phba = vport->phba; 8949 struct lpfc_sli_ring *pring; 8950 struct lpfc_iocbq *tmp_iocb, *piocb; 8951 IOCB_t *cmd = NULL; 8952 struct lpfc_dmabuf *pcmd; 8953 uint32_t els_command = 0; 8954 uint32_t timeout; 8955 uint32_t remote_ID = 0xffffffff; 8956 LIST_HEAD(abort_list); 8957 8958 8959 timeout = (uint32_t)(phba->fc_ratov << 1); 8960 8961 pring = lpfc_phba_elsring(phba); 8962 if (unlikely(!pring)) 8963 return; 8964 8965 if (phba->pport->load_flag & FC_UNLOADING) 8966 return; 8967 8968 spin_lock_irq(&phba->hbalock); 8969 if (phba->sli_rev == LPFC_SLI_REV4) 8970 spin_lock(&pring->ring_lock); 8971 8972 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 8973 cmd = &piocb->iocb; 8974 8975 if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 || 8976 piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || 8977 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) 8978 continue; 8979 8980 if (piocb->vport != vport) 8981 continue; 8982 8983 pcmd = (struct lpfc_dmabuf *) piocb->context2; 8984 if (pcmd) 8985 els_command = *(uint32_t *) (pcmd->virt); 8986 8987 if (els_command == ELS_CMD_FARP || 8988 els_command == ELS_CMD_FARPR || 8989 els_command == ELS_CMD_FDISC) 8990 continue; 8991 8992 if (piocb->drvrTimeout > 0) { 8993 if (piocb->drvrTimeout >= timeout) 8994 piocb->drvrTimeout -= timeout; 8995 else 8996 piocb->drvrTimeout = 0; 8997 continue; 8998 } 8999 9000 remote_ID = 0xffffffff; 9001 if (cmd->ulpCommand != CMD_GEN_REQUEST64_CR) 9002 remote_ID = cmd->un.elsreq64.remoteID; 9003 else { 9004 struct lpfc_nodelist *ndlp; 9005 ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext); 9006 if (ndlp) 9007 remote_ID = ndlp->nlp_DID; 9008 } 9009 list_add_tail(&piocb->dlist, &abort_list); 9010 } 9011 if (phba->sli_rev == LPFC_SLI_REV4) 9012 spin_unlock(&pring->ring_lock); 9013 spin_unlock_irq(&phba->hbalock); 9014 9015 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { 9016 cmd = &piocb->iocb; 9017 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9018 "0127 ELS timeout Data: x%x x%x x%x " 9019 "x%x\n", els_command, 9020 remote_ID, cmd->ulpCommand, cmd->ulpIoTag); 9021 spin_lock_irq(&phba->hbalock); 9022 list_del_init(&piocb->dlist); 9023 lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL); 9024 spin_unlock_irq(&phba->hbalock); 9025 } 9026 9027 /* Make sure HBA is alive */ 9028 lpfc_issue_hb_tmo(phba); 9029 9030 if (!list_empty(&pring->txcmplq)) 9031 if (!(phba->pport->load_flag & FC_UNLOADING)) 9032 mod_timer(&vport->els_tmofunc, 9033 jiffies + msecs_to_jiffies(1000 * timeout)); 9034 } 9035 9036 /** 9037 * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport 9038 * @vport: pointer to a host virtual N_Port data structure. 9039 * 9040 * This routine is used to clean up all the outstanding ELS commands on a 9041 * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport() 9042 * routine. After that, it walks the ELS transmit queue to remove all the 9043 * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. For 9044 * the IOCBs with a non-NULL completion callback function, the callback 9045 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and 9046 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion 9047 * callback function, the IOCB will simply be released. 
Finally, it walks 9048 * the ELS transmit completion queue to issue an abort IOCB to any transmit 9049 * completion queue IOCB that is associated with the @vport and is not 9050 * an IOCB from libdfc (i.e., the management plane IOCBs that are not 9051 * part of the discovery state machine) out to the HBA by invoking the 9052 * lpfc_sli_issue_abort_iotag() routine. Note that while this function issues the 9053 * abort IOCB to any transmit completion queued IOCB, it does not guarantee 9054 * the IOCBs are aborted when this function returns. 9055 **/ 9056 void 9057 lpfc_els_flush_cmd(struct lpfc_vport *vport) 9058 { 9059 LIST_HEAD(abort_list); 9060 struct lpfc_hba *phba = vport->phba; 9061 struct lpfc_sli_ring *pring; 9062 struct lpfc_iocbq *tmp_iocb, *piocb; 9063 IOCB_t *cmd = NULL; 9064 unsigned long iflags = 0; 9065 9066 lpfc_fabric_abort_vport(vport); 9067 9068 /* 9069 * For SLI3, only the hbalock is required. But SLI4 needs to coordinate 9070 * with the ring insert operation. Because lpfc_sli_issue_abort_iotag 9071 * ultimately grabs the ring_lock, the driver must splice the list into 9072 * a working list and release the locks before calling the abort. 9073 */ 9074 spin_lock_irqsave(&phba->hbalock, iflags); 9075 pring = lpfc_phba_elsring(phba); 9076 9077 /* Bail out if we've no ELS wq, like in PCI error recovery case. */ 9078 if (unlikely(!pring)) { 9079 spin_unlock_irqrestore(&phba->hbalock, iflags); 9080 return; 9081 } 9082 9083 if (phba->sli_rev == LPFC_SLI_REV4) 9084 spin_lock(&pring->ring_lock); 9085 9086 /* First we need to issue aborts to outstanding cmds on txcmpl */ 9087 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 9088 if (piocb->iocb_flag & LPFC_IO_LIBDFC) 9089 continue; 9090 9091 if (piocb->vport != vport) 9092 continue; 9093 9094 if (piocb->iocb_flag & LPFC_DRIVER_ABORTED) 9095 continue; 9096 9097 /* On the ELS ring we can have ELS_REQUESTs or 9098 * GEN_REQUESTs waiting for a response. 9099 */ 9100 cmd = &piocb->iocb; 9101 if (cmd->ulpCommand == CMD_ELS_REQUEST64_CR) { 9102 list_add_tail(&piocb->dlist, &abort_list); 9103 9104 /* If the link is down when flushing ELS commands 9105 * the firmware will not complete them till after 9106 * the link comes back up. This may confuse 9107 * discovery for the new link up, so we need to 9108 * change the compl routine to just clean up the iocb 9109 * and avoid any retry logic. 9110 */ 9111 if (phba->link_state == LPFC_LINK_DOWN) 9112 piocb->iocb_cmpl = lpfc_cmpl_els_link_down; 9113 } 9114 if (cmd->ulpCommand == CMD_GEN_REQUEST64_CR) 9115 list_add_tail(&piocb->dlist, &abort_list); 9116 } 9117 9118 if (phba->sli_rev == LPFC_SLI_REV4) 9119 spin_unlock(&pring->ring_lock); 9120 spin_unlock_irqrestore(&phba->hbalock, iflags); 9121 9122 /* Abort each txcmpl iocb on aborted list and remove the dlist links.
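 * The dlist member is used as a temporary link here so each iocb can sit
 * on the local abort_list while it remains on the ring's txcmplq.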
*/ 9123 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { 9124 spin_lock_irqsave(&phba->hbalock, iflags); 9125 list_del_init(&piocb->dlist); 9126 lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL); 9127 spin_unlock_irqrestore(&phba->hbalock, iflags); 9128 } 9129 /* Make sure HBA is alive */ 9130 lpfc_issue_hb_tmo(phba); 9131 9132 if (!list_empty(&abort_list)) 9133 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9134 "3387 abort list for txq not empty\n"); 9135 INIT_LIST_HEAD(&abort_list); 9136 9137 spin_lock_irqsave(&phba->hbalock, iflags); 9138 if (phba->sli_rev == LPFC_SLI_REV4) 9139 spin_lock(&pring->ring_lock); 9140 9141 /* No need to abort the txq list, 9142 * just queue them up for lpfc_sli_cancel_iocbs 9143 */ 9144 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) { 9145 cmd = &piocb->iocb; 9146 9147 if (piocb->iocb_flag & LPFC_IO_LIBDFC) { 9148 continue; 9149 } 9150 9151 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */ 9152 if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN || 9153 cmd->ulpCommand == CMD_QUE_RING_BUF64_CN || 9154 cmd->ulpCommand == CMD_CLOSE_XRI_CN || 9155 cmd->ulpCommand == CMD_ABORT_XRI_CN) 9156 continue; 9157 9158 if (piocb->vport != vport) 9159 continue; 9160 9161 list_del_init(&piocb->list); 9162 list_add_tail(&piocb->list, &abort_list); 9163 } 9164 9165 /* The same holds true for any FLOGI/FDISC on the fabric_iocb_list */ 9166 if (vport == phba->pport) { 9167 list_for_each_entry_safe(piocb, tmp_iocb, 9168 &phba->fabric_iocb_list, list) { 9169 cmd = &piocb->iocb; 9170 list_del_init(&piocb->list); 9171 list_add_tail(&piocb->list, &abort_list); 9172 } 9173 } 9174 9175 if (phba->sli_rev == LPFC_SLI_REV4) 9176 spin_unlock(&pring->ring_lock); 9177 spin_unlock_irqrestore(&phba->hbalock, iflags); 9178 9179 /* Cancel all the IOCBs from the completions list */ 9180 lpfc_sli_cancel_iocbs(phba, &abort_list, 9181 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); 9182 9183 return; 9184 } 9185 9186 /** 9187 * lpfc_els_flush_all_cmd - Clean up all the outstanding els commands to a HBA 9188 * @phba: pointer to lpfc hba data structure. 9189 * 9190 * This routine is used to clean up all the outstanding ELS commands on a 9191 * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba() 9192 * routine. After that, it walks the ELS transmit queue to remove all the 9193 * IOCBs to the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For 9194 * the IOCBs with the completion callback function associated, the callback 9195 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and 9196 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without the completion 9197 * callback function associated, the IOCB will simply be released. Finally, 9198 * it walks the ELS transmit completion queue to issue an abort IOCB to any 9199 * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the 9200 * management plane IOCBs that are not part of the discovery state machine) 9201 * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine. 9202 **/ 9203 void 9204 lpfc_els_flush_all_cmd(struct lpfc_hba *phba) 9205 { 9206 struct lpfc_vport *vport; 9207 9208 spin_lock_irq(&phba->port_list_lock); 9209 list_for_each_entry(vport, &phba->port_list, listentry) 9210 lpfc_els_flush_cmd(vport); 9211 spin_unlock_irq(&phba->port_list_lock); 9212 9213 return; 9214 } 9215 9216 /** 9217 * lpfc_send_els_failure_event - Posts an ELS command failure event 9218 * @phba: Pointer to hba context object. 
9219 * @cmdiocbp: Pointer to command iocb which reported error. 9220 * @rspiocbp: Pointer to response iocb which reported error. 9221 * 9222 * This function sends an event when there is an ELS command 9223 * failure. 9224 **/ 9225 void 9226 lpfc_send_els_failure_event(struct lpfc_hba *phba, 9227 struct lpfc_iocbq *cmdiocbp, 9228 struct lpfc_iocbq *rspiocbp) 9229 { 9230 struct lpfc_vport *vport = cmdiocbp->vport; 9231 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 9232 struct lpfc_lsrjt_event lsrjt_event; 9233 struct lpfc_fabric_event_header fabric_event; 9234 struct ls_rjt stat; 9235 struct lpfc_nodelist *ndlp; 9236 uint32_t *pcmd; 9237 9238 ndlp = cmdiocbp->context1; 9239 if (!ndlp) 9240 return; 9241 9242 if (rspiocbp->iocb.ulpStatus == IOSTAT_LS_RJT) { 9243 lsrjt_event.header.event_type = FC_REG_ELS_EVENT; 9244 lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV; 9245 memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname, 9246 sizeof(struct lpfc_name)); 9247 memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename, 9248 sizeof(struct lpfc_name)); 9249 pcmd = (uint32_t *) (((struct lpfc_dmabuf *) 9250 cmdiocbp->context2)->virt); 9251 lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0; 9252 stat.un.lsRjtError = be32_to_cpu(rspiocbp->iocb.un.ulpWord[4]); 9253 lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode; 9254 lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp; 9255 fc_host_post_vendor_event(shost, 9256 fc_get_event_number(), 9257 sizeof(lsrjt_event), 9258 (char *)&lsrjt_event, 9259 LPFC_NL_VENDOR_ID); 9260 return; 9261 } 9262 if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) || 9263 (rspiocbp->iocb.ulpStatus == IOSTAT_FABRIC_BSY)) { 9264 fabric_event.event_type = FC_REG_FABRIC_EVENT; 9265 if (rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) 9266 fabric_event.subcategory = LPFC_EVENT_PORT_BUSY; 9267 else 9268 fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY; 9269 memcpy(fabric_event.wwpn, &ndlp->nlp_portname, 9270 sizeof(struct lpfc_name)); 9271 memcpy(fabric_event.wwnn, &ndlp->nlp_nodename, 9272 sizeof(struct lpfc_name)); 9273 fc_host_post_vendor_event(shost, 9274 fc_get_event_number(), 9275 sizeof(fabric_event), 9276 (char *)&fabric_event, 9277 LPFC_NL_VENDOR_ID); 9278 return; 9279 } 9280 9281 } 9282 9283 /** 9284 * lpfc_send_els_event - Posts unsolicited els event 9285 * @vport: Pointer to vport object. 9286 * @ndlp: Pointer FC node object. 9287 * @payload: ELS command code type. 9288 * 9289 * This function posts an event when there is an incoming 9290 * unsolicited ELS command. 
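 *
 * LOGO events are posted with the larger lpfc_logo_event structure so the
 * WWPN carried in the LOGO payload can be reported; the other supported
 * commands post the generic lpfc_els_event_header. Commands that are not
 * of interest are dropped without posting an event.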
9291 **/ 9292 static void 9293 lpfc_send_els_event(struct lpfc_vport *vport, 9294 struct lpfc_nodelist *ndlp, 9295 uint32_t *payload) 9296 { 9297 struct lpfc_els_event_header *els_data = NULL; 9298 struct lpfc_logo_event *logo_data = NULL; 9299 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 9300 9301 if (*payload == ELS_CMD_LOGO) { 9302 logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL); 9303 if (!logo_data) { 9304 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9305 "0148 Failed to allocate memory " 9306 "for LOGO event\n"); 9307 return; 9308 } 9309 els_data = &logo_data->header; 9310 } else { 9311 els_data = kmalloc(sizeof(struct lpfc_els_event_header), 9312 GFP_KERNEL); 9313 if (!els_data) { 9314 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9315 "0149 Failed to allocate memory " 9316 "for ELS event\n"); 9317 return; 9318 } 9319 } 9320 els_data->event_type = FC_REG_ELS_EVENT; 9321 switch (*payload) { 9322 case ELS_CMD_PLOGI: 9323 els_data->subcategory = LPFC_EVENT_PLOGI_RCV; 9324 break; 9325 case ELS_CMD_PRLO: 9326 els_data->subcategory = LPFC_EVENT_PRLO_RCV; 9327 break; 9328 case ELS_CMD_ADISC: 9329 els_data->subcategory = LPFC_EVENT_ADISC_RCV; 9330 break; 9331 case ELS_CMD_LOGO: 9332 els_data->subcategory = LPFC_EVENT_LOGO_RCV; 9333 /* Copy the WWPN in the LOGO payload */ 9334 memcpy(logo_data->logo_wwpn, &payload[2], 9335 sizeof(struct lpfc_name)); 9336 break; 9337 default: 9338 kfree(els_data); 9339 return; 9340 } 9341 memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name)); 9342 memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name)); 9343 if (*payload == ELS_CMD_LOGO) { 9344 fc_host_post_vendor_event(shost, 9345 fc_get_event_number(), 9346 sizeof(struct lpfc_logo_event), 9347 (char *)logo_data, 9348 LPFC_NL_VENDOR_ID); 9349 kfree(logo_data); 9350 } else { 9351 fc_host_post_vendor_event(shost, 9352 fc_get_event_number(), 9353 sizeof(struct lpfc_els_event_header), 9354 (char *)els_data, 9355 LPFC_NL_VENDOR_ID); 9356 kfree(els_data); 9357 } 9358 9359 return; 9360 } 9361 9362 9363 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_li_event_nm, fc_fpin_li_event_types, 9364 FC_FPIN_LI_EVT_TYPES_INIT); 9365 9366 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_deli_event_nm, fc_fpin_deli_event_types, 9367 FC_FPIN_DELI_EVT_TYPES_INIT); 9368 9369 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_event_nm, fc_fpin_congn_event_types, 9370 FC_FPIN_CONGN_EVT_TYPES_INIT); 9371 9372 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_severity_nm, 9373 fc_fpin_congn_severity_types, 9374 FC_FPIN_CONGN_SEVERITY_INIT); 9375 9376 9377 /** 9378 * lpfc_display_fpin_wwpn - Display WWPNs accessible by the attached port 9379 * @phba: Pointer to phba object. 9380 * @wwnlist: Pointer to list of WWPNs in FPIN payload 9381 * @cnt: count of WWPNs in FPIN payload 9382 * 9383 * This routine is called by LI and PC descriptors. 
9384 * Limit the number of WWPNs displayed to 6 log messages, 6 per log message 9385 */ 9386 static void 9387 lpfc_display_fpin_wwpn(struct lpfc_hba *phba, __be64 *wwnlist, u32 cnt) 9388 { 9389 char buf[LPFC_FPIN_WWPN_LINE_SZ]; 9390 __be64 wwn; 9391 u64 wwpn; 9392 int i, len; 9393 int line = 0; 9394 int wcnt = 0; 9395 bool endit = false; 9396 9397 len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ, "Accessible WWPNs:"); 9398 for (i = 0; i < cnt; i++) { 9399 /* Are we on the last WWPN */ 9400 if (i == (cnt - 1)) 9401 endit = true; 9402 9403 /* Extract the next WWPN from the payload */ 9404 wwn = *wwnlist++; 9405 wwpn = be64_to_cpu(wwn); 9406 len += scnprintf(buf + len, LPFC_FPIN_WWPN_LINE_SZ - len, 9407 " %016llx", wwpn); 9408 9409 /* Log a message if we are on the last WWPN 9410 * or if we hit the max allowed per message. 9411 */ 9412 wcnt++; 9413 if (wcnt == LPFC_FPIN_WWPN_LINE_CNT || endit) { 9414 buf[len] = 0; 9415 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9416 "4686 %s\n", buf); 9417 9418 /* Check if we reached the last WWPN */ 9419 if (endit) 9420 return; 9421 9422 /* Limit the number of log message displayed per FPIN */ 9423 line++; 9424 if (line == LPFC_FPIN_WWPN_NUM_LINE) { 9425 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9426 "4687 %d WWPNs Truncated\n", 9427 cnt - i - 1); 9428 return; 9429 } 9430 9431 /* Start over with next log message */ 9432 wcnt = 0; 9433 len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ, 9434 "Additional WWPNs:"); 9435 } 9436 } 9437 } 9438 9439 /** 9440 * lpfc_els_rcv_fpin_li - Process an FPIN Link Integrity Event. 9441 * @phba: Pointer to phba object. 9442 * @tlv: Pointer to the Link Integrity Notification Descriptor. 9443 * 9444 * This function processes a Link Integrity FPIN event by logging a message. 9445 **/ 9446 static void 9447 lpfc_els_rcv_fpin_li(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 9448 { 9449 struct fc_fn_li_desc *li = (struct fc_fn_li_desc *)tlv; 9450 const char *li_evt_str; 9451 u32 li_evt, cnt; 9452 9453 li_evt = be16_to_cpu(li->event_type); 9454 li_evt_str = lpfc_get_fpin_li_event_nm(li_evt); 9455 cnt = be32_to_cpu(li->pname_count); 9456 9457 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9458 "4680 FPIN Link Integrity %s (x%x) " 9459 "Detecting PN x%016llx Attached PN x%016llx " 9460 "Duration %d mSecs Count %d Port Cnt %d\n", 9461 li_evt_str, li_evt, 9462 be64_to_cpu(li->detecting_wwpn), 9463 be64_to_cpu(li->attached_wwpn), 9464 be32_to_cpu(li->event_threshold), 9465 be32_to_cpu(li->event_count), cnt); 9466 9467 lpfc_display_fpin_wwpn(phba, (__be64 *)&li->pname_list, cnt); 9468 } 9469 9470 /** 9471 * lpfc_els_rcv_fpin_del - Process an FPIN Delivery Event. 9472 * @phba: Pointer to hba object. 9473 * @tlv: Pointer to the Delivery Notification Descriptor TLV 9474 * 9475 * This function processes a Delivery FPIN event by logging a message. 
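 *
 * The first six words following the descriptor (the header of the frame
 * the notification refers to) are echoed in the log message as
 * DiscHdr0 through DiscHdr5.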
9476 **/ 9477 static void 9478 lpfc_els_rcv_fpin_del(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 9479 { 9480 struct fc_fn_deli_desc *del = (struct fc_fn_deli_desc *)tlv; 9481 const char *del_rsn_str; 9482 u32 del_rsn; 9483 __be32 *frame; 9484 9485 del_rsn = be16_to_cpu(del->deli_reason_code); 9486 del_rsn_str = lpfc_get_fpin_deli_event_nm(del_rsn); 9487 9488 /* Skip over desc_tag/desc_len header to payload */ 9489 frame = (__be32 *)(del + 1); 9490 9491 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9492 "4681 FPIN Delivery %s (x%x) " 9493 "Detecting PN x%016llx Attached PN x%016llx " 9494 "DiscHdr0 x%08x " 9495 "DiscHdr1 x%08x DiscHdr2 x%08x DiscHdr3 x%08x " 9496 "DiscHdr4 x%08x DiscHdr5 x%08x\n", 9497 del_rsn_str, del_rsn, 9498 be64_to_cpu(del->detecting_wwpn), 9499 be64_to_cpu(del->attached_wwpn), 9500 be32_to_cpu(frame[0]), 9501 be32_to_cpu(frame[1]), 9502 be32_to_cpu(frame[2]), 9503 be32_to_cpu(frame[3]), 9504 be32_to_cpu(frame[4]), 9505 be32_to_cpu(frame[5])); 9506 } 9507 9508 /** 9509 * lpfc_els_rcv_fpin_peer_cgn - Process a FPIN Peer Congestion Event. 9510 * @phba: Pointer to hba object. 9511 * @tlv: Pointer to the Peer Congestion Notification Descriptor TLV 9512 * 9513 * This function processes a Peer Congestion FPIN event by logging a message. 9514 **/ 9515 static void 9516 lpfc_els_rcv_fpin_peer_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 9517 { 9518 struct fc_fn_peer_congn_desc *pc = (struct fc_fn_peer_congn_desc *)tlv; 9519 const char *pc_evt_str; 9520 u32 pc_evt, cnt; 9521 9522 pc_evt = be16_to_cpu(pc->event_type); 9523 pc_evt_str = lpfc_get_fpin_congn_event_nm(pc_evt); 9524 cnt = be32_to_cpu(pc->pname_count); 9525 9526 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_ELS, 9527 "4684 FPIN Peer Congestion %s (x%x) " 9528 "Duration %d mSecs " 9529 "Detecting PN x%016llx Attached PN x%016llx " 9530 "Impacted Port Cnt %d\n", 9531 pc_evt_str, pc_evt, 9532 be32_to_cpu(pc->event_period), 9533 be64_to_cpu(pc->detecting_wwpn), 9534 be64_to_cpu(pc->attached_wwpn), 9535 cnt); 9536 9537 lpfc_display_fpin_wwpn(phba, (__be64 *)&pc->pname_list, cnt); 9538 } 9539 9540 /** 9541 * lpfc_els_rcv_fpin_cgn - Process an FPIN Congestion notification 9542 * @phba: Pointer to hba object. 9543 * @tlv: Pointer to the Congestion Notification Descriptor TLV 9544 * 9545 * This function processes an FPIN Congestion Notifiction. The notification 9546 * could be an Alarm or Warning. This routine feeds that data into driver's 9547 * running congestion algorithm. It also processes the FPIN by 9548 * logging a message. It returns 1 to indicate deliver this message 9549 * to the upper layer or 0 to indicate don't deliver it. 9550 **/ 9551 static int 9552 lpfc_els_rcv_fpin_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 9553 { 9554 struct lpfc_cgn_info *cp; 9555 struct fc_fn_congn_desc *cgn = (struct fc_fn_congn_desc *)tlv; 9556 const char *cgn_evt_str; 9557 u32 cgn_evt; 9558 const char *cgn_sev_str; 9559 u32 cgn_sev; 9560 uint16_t value; 9561 u32 crc; 9562 bool nm_log = false; 9563 int rc = 1; 9564 9565 cgn_evt = be16_to_cpu(cgn->event_type); 9566 cgn_evt_str = lpfc_get_fpin_congn_event_nm(cgn_evt); 9567 cgn_sev = cgn->severity; 9568 cgn_sev_str = lpfc_get_fpin_congn_severity_nm(cgn_sev); 9569 9570 /* The driver only takes action on a Credit Stall or Oversubscription 9571 * event type to engage the IO algorithm. The driver prints an 9572 * unmaskable message only for Lost Credit and Credit Stall. 
9573 * TODO: Still need to have definition of host action on clear, 9574 * lost credit and device specific event types. 9575 */ 9576 switch (cgn_evt) { 9577 case FPIN_CONGN_LOST_CREDIT: 9578 nm_log = true; 9579 break; 9580 case FPIN_CONGN_CREDIT_STALL: 9581 nm_log = true; 9582 fallthrough; 9583 case FPIN_CONGN_OVERSUBSCRIPTION: 9584 if (cgn_evt == FPIN_CONGN_OVERSUBSCRIPTION) 9585 nm_log = false; 9586 switch (cgn_sev) { 9587 case FPIN_CONGN_SEVERITY_ERROR: 9588 /* Take action here for an Alarm event */ 9589 if (phba->cmf_active_mode != LPFC_CFG_OFF) { 9590 if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) { 9591 /* Track of alarm cnt for cgn_info */ 9592 atomic_inc(&phba->cgn_fabric_alarm_cnt); 9593 /* Track of alarm cnt for SYNC_WQE */ 9594 atomic_inc(&phba->cgn_sync_alarm_cnt); 9595 } 9596 goto cleanup; 9597 } 9598 break; 9599 case FPIN_CONGN_SEVERITY_WARNING: 9600 /* Take action here for a Warning event */ 9601 if (phba->cmf_active_mode != LPFC_CFG_OFF) { 9602 if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) { 9603 /* Track of warning cnt for cgn_info */ 9604 atomic_inc(&phba->cgn_fabric_warn_cnt); 9605 /* Track of warning cnt for SYNC_WQE */ 9606 atomic_inc(&phba->cgn_sync_warn_cnt); 9607 } 9608 cleanup: 9609 /* Save frequency in ms */ 9610 phba->cgn_fpin_frequency = 9611 be32_to_cpu(cgn->event_period); 9612 value = phba->cgn_fpin_frequency; 9613 if (phba->cgn_i) { 9614 cp = (struct lpfc_cgn_info *) 9615 phba->cgn_i->virt; 9616 if (phba->cgn_reg_fpin & 9617 LPFC_CGN_FPIN_ALARM) 9618 cp->cgn_alarm_freq = 9619 cpu_to_le16(value); 9620 if (phba->cgn_reg_fpin & 9621 LPFC_CGN_FPIN_WARN) 9622 cp->cgn_warn_freq = 9623 cpu_to_le16(value); 9624 crc = lpfc_cgn_calc_crc32 9625 (cp, 9626 LPFC_CGN_INFO_SZ, 9627 LPFC_CGN_CRC32_SEED); 9628 cp->cgn_info_crc = cpu_to_le32(crc); 9629 } 9630 9631 /* Don't deliver to upper layer since 9632 * driver took action on this tlv. 9633 */ 9634 rc = 0; 9635 } 9636 break; 9637 } 9638 break; 9639 } 9640 9641 /* Change the log level to unmaskable for the following event types. */ 9642 lpfc_printf_log(phba, (nm_log ? KERN_WARNING : KERN_INFO), 9643 LOG_CGN_MGMT | LOG_ELS, 9644 "4683 FPIN CONGESTION %s type %s (x%x) Event " 9645 "Duration %d mSecs\n", 9646 cgn_sev_str, cgn_evt_str, cgn_evt, 9647 be32_to_cpu(cgn->event_period)); 9648 return rc; 9649 } 9650 9651 void 9652 lpfc_els_rcv_fpin(struct lpfc_vport *vport, void *p, u32 fpin_length) 9653 { 9654 struct lpfc_hba *phba = vport->phba; 9655 struct fc_els_fpin *fpin = (struct fc_els_fpin *)p; 9656 struct fc_tlv_desc *tlv, *first_tlv, *current_tlv; 9657 const char *dtag_nm; 9658 int desc_cnt = 0, bytes_remain, cnt; 9659 u32 dtag, deliver = 0; 9660 int len; 9661 9662 /* FPINs handled only if we are in the right discovery state */ 9663 if (vport->port_state < LPFC_DISC_AUTH) 9664 return; 9665 9666 /* make sure there is the full fpin header */ 9667 if (fpin_length < sizeof(struct fc_els_fpin)) 9668 return; 9669 9670 /* Sanity check descriptor length. The desc_len value does not 9671 * include space for the ELS command and the desc_len fields. 
9672 */ 9673 len = be32_to_cpu(fpin->desc_len); 9674 if (fpin_length < len + sizeof(struct fc_els_fpin)) { 9675 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 9676 "4671 Bad ELS FPIN length %d: %d\n", 9677 len, fpin_length); 9678 return; 9679 } 9680 9681 tlv = (struct fc_tlv_desc *)&fpin->fpin_desc[0]; 9682 first_tlv = tlv; 9683 bytes_remain = fpin_length - offsetof(struct fc_els_fpin, fpin_desc); 9684 bytes_remain = min_t(u32, bytes_remain, be32_to_cpu(fpin->desc_len)); 9685 9686 /* process each descriptor separately */ 9687 while (bytes_remain >= FC_TLV_DESC_HDR_SZ && 9688 bytes_remain >= FC_TLV_DESC_SZ_FROM_LENGTH(tlv)) { 9689 dtag = be32_to_cpu(tlv->desc_tag); 9690 switch (dtag) { 9691 case ELS_DTAG_LNK_INTEGRITY: 9692 lpfc_els_rcv_fpin_li(phba, tlv); 9693 deliver = 1; 9694 break; 9695 case ELS_DTAG_DELIVERY: 9696 lpfc_els_rcv_fpin_del(phba, tlv); 9697 deliver = 1; 9698 break; 9699 case ELS_DTAG_PEER_CONGEST: 9700 lpfc_els_rcv_fpin_peer_cgn(phba, tlv); 9701 deliver = 1; 9702 break; 9703 case ELS_DTAG_CONGESTION: 9704 deliver = lpfc_els_rcv_fpin_cgn(phba, tlv); 9705 break; 9706 default: 9707 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 9708 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 9709 "4678 unknown FPIN descriptor[%d]: " 9710 "tag x%x (%s)\n", 9711 desc_cnt, dtag, dtag_nm); 9712 9713 /* If descriptor is bad, drop the rest of the data */ 9714 return; 9715 } 9716 lpfc_cgn_update_stat(phba, dtag); 9717 cnt = be32_to_cpu(tlv->desc_len); 9718 9719 /* Sanity check descriptor length. The desc_len value does not 9720 * include space for the desc_tag and the desc_len fields. 9721 */ 9722 len -= (cnt + sizeof(struct fc_tlv_desc)); 9723 if (len < 0) { 9724 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 9725 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 9726 "4672 Bad FPIN descriptor TLV length " 9727 "%d: %d %d %s\n", 9728 cnt, len, fpin_length, dtag_nm); 9729 return; 9730 } 9731 9732 current_tlv = tlv; 9733 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); 9734 tlv = fc_tlv_next_desc(tlv); 9735 9736 /* Format payload such that the FPIN delivered to the 9737 * upper layer is a single descriptor FPIN. 9738 */ 9739 if (desc_cnt) 9740 memcpy(first_tlv, current_tlv, 9741 (cnt + sizeof(struct fc_els_fpin))); 9742 9743 /* Adjust the length so that it only reflects a 9744 * single descriptor FPIN. 9745 */ 9746 fpin_length = cnt + sizeof(struct fc_els_fpin); 9747 fpin->desc_len = cpu_to_be32(fpin_length); 9748 fpin_length += sizeof(struct fc_els_fpin); /* the entire FPIN */ 9749 9750 /* Send every descriptor individually to the upper layer */ 9751 if (deliver) 9752 fc_host_fpin_rcv(lpfc_shost_from_vport(vport), 9753 fpin_length, (char *)fpin); 9754 desc_cnt++; 9755 } 9756 } 9757 9758 /** 9759 * lpfc_els_unsol_buffer - Process an unsolicited event data buffer 9760 * @phba: pointer to lpfc hba data structure. 9761 * @pring: pointer to a SLI ring. 9762 * @vport: pointer to a host virtual N_Port data structure. 9763 * @elsiocb: pointer to lpfc els command iocb data structure. 9764 * 9765 * This routine is used for processing the IOCB associated with a unsolicited 9766 * event. It first determines whether there is an existing ndlp that matches 9767 * the DID from the unsolicited IOCB. If not, it will create a new one with 9768 * the DID from the unsolicited IOCB. The ELS command from the unsolicited 9769 * IOCB is then used to invoke the proper routine and to set up proper state 9770 * of the discovery state machine. 
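 *
 * Commands that arrive before fabric configuration completes (port_state
 * below LPFC_FABRIC_CFG_LINK) are rejected with LS_RJT reason LOGICAL_BSY,
 * except for FLOGI and, in point-to-point mode, PLOGI; unrecognized
 * commands are rejected with reason CMD_UNSUPPORTED.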
9771 **/ 9772 static void 9773 lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 9774 struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb) 9775 { 9776 struct lpfc_nodelist *ndlp; 9777 struct ls_rjt stat; 9778 uint32_t *payload, payload_len; 9779 uint32_t cmd, did, newnode; 9780 uint8_t rjt_exp, rjt_err = 0, init_link = 0; 9781 IOCB_t *icmd = &elsiocb->iocb; 9782 LPFC_MBOXQ_t *mbox; 9783 9784 if (!vport || !(elsiocb->context2)) 9785 goto dropit; 9786 9787 newnode = 0; 9788 payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt; 9789 payload_len = elsiocb->iocb.unsli3.rcvsli3.acc_len; 9790 cmd = *payload; 9791 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0) 9792 lpfc_post_buffer(phba, pring, 1); 9793 9794 did = icmd->un.rcvels.remoteID; 9795 if (icmd->ulpStatus) { 9796 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 9797 "RCV Unsol ELS: status:x%x/x%x did:x%x", 9798 icmd->ulpStatus, icmd->un.ulpWord[4], did); 9799 goto dropit; 9800 } 9801 9802 /* Check to see if link went down during discovery */ 9803 if (lpfc_els_chk_latt(vport)) 9804 goto dropit; 9805 9806 /* Ignore traffic received during vport shutdown. */ 9807 if (vport->load_flag & FC_UNLOADING) 9808 goto dropit; 9809 9810 /* If NPort discovery is delayed drop incoming ELS */ 9811 if ((vport->fc_flag & FC_DISC_DELAYED) && 9812 (cmd != ELS_CMD_PLOGI)) 9813 goto dropit; 9814 9815 ndlp = lpfc_findnode_did(vport, did); 9816 if (!ndlp) { 9817 /* Cannot find existing Fabric ndlp, so allocate a new one */ 9818 ndlp = lpfc_nlp_init(vport, did); 9819 if (!ndlp) 9820 goto dropit; 9821 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 9822 newnode = 1; 9823 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 9824 ndlp->nlp_type |= NLP_FABRIC; 9825 } else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 9826 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 9827 newnode = 1; 9828 } 9829 9830 phba->fc_stat.elsRcvFrame++; 9831 9832 /* 9833 * Do not process any unsolicited ELS commands 9834 * if the ndlp is in DEV_LOSS 9835 */ 9836 spin_lock_irq(&ndlp->lock); 9837 if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) { 9838 spin_unlock_irq(&ndlp->lock); 9839 if (newnode) 9840 lpfc_nlp_put(ndlp); 9841 goto dropit; 9842 } 9843 spin_unlock_irq(&ndlp->lock); 9844 9845 elsiocb->context1 = lpfc_nlp_get(ndlp); 9846 if (!elsiocb->context1) 9847 goto dropit; 9848 elsiocb->vport = vport; 9849 9850 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) { 9851 cmd &= ELS_CMD_MASK; 9852 } 9853 /* ELS command <elsCmd> received from NPORT <did> */ 9854 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 9855 "0112 ELS command x%x received from NPORT x%x " 9856 "refcnt %d Data: x%x x%x x%x x%x\n", 9857 cmd, did, kref_read(&ndlp->kref), vport->port_state, 9858 vport->fc_flag, vport->fc_myDID, vport->fc_prevDID); 9859 9860 /* reject till our FLOGI completes or PLOGI assigned DID via PT2PT */ 9861 if ((vport->port_state < LPFC_FABRIC_CFG_LINK) && 9862 (cmd != ELS_CMD_FLOGI) && 9863 !((cmd == ELS_CMD_PLOGI) && (vport->fc_flag & FC_PT2PT))) { 9864 rjt_err = LSRJT_LOGICAL_BSY; 9865 rjt_exp = LSEXP_NOTHING_MORE; 9866 goto lsrjt; 9867 } 9868 9869 switch (cmd) { 9870 case ELS_CMD_PLOGI: 9871 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 9872 "RCV PLOGI: did:x%x/ste:x%x flg:x%x", 9873 did, vport->port_state, ndlp->nlp_flag); 9874 9875 phba->fc_stat.elsRcvPLOGI++; 9876 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); 9877 if (phba->sli_rev == LPFC_SLI_REV4 && 9878 (phba->pport->fc_flag & FC_PT2PT)) { 9879 vport->fc_prevDID = vport->fc_myDID; 9880 /* Our DID needs 
to be updated before registering 9881 * the vfi. This is done in lpfc_rcv_plogi but 9882 * that is called after the reg_vfi. 9883 */ 9884 vport->fc_myDID = elsiocb->iocb.un.rcvels.parmRo; 9885 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 9886 "3312 Remote port assigned DID x%x " 9887 "%x\n", vport->fc_myDID, 9888 vport->fc_prevDID); 9889 } 9890 9891 lpfc_send_els_event(vport, ndlp, payload); 9892 9893 /* If Nport discovery is delayed, reject PLOGIs */ 9894 if (vport->fc_flag & FC_DISC_DELAYED) { 9895 rjt_err = LSRJT_UNABLE_TPC; 9896 rjt_exp = LSEXP_NOTHING_MORE; 9897 break; 9898 } 9899 9900 if (vport->port_state < LPFC_DISC_AUTH) { 9901 if (!(phba->pport->fc_flag & FC_PT2PT) || 9902 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { 9903 rjt_err = LSRJT_UNABLE_TPC; 9904 rjt_exp = LSEXP_NOTHING_MORE; 9905 break; 9906 } 9907 } 9908 9909 spin_lock_irq(&ndlp->lock); 9910 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE; 9911 spin_unlock_irq(&ndlp->lock); 9912 9913 lpfc_disc_state_machine(vport, ndlp, elsiocb, 9914 NLP_EVT_RCV_PLOGI); 9915 9916 break; 9917 case ELS_CMD_FLOGI: 9918 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 9919 "RCV FLOGI: did:x%x/ste:x%x flg:x%x", 9920 did, vport->port_state, ndlp->nlp_flag); 9921 9922 phba->fc_stat.elsRcvFLOGI++; 9923 9924 /* If the driver believes fabric discovery is done and is ready, 9925 * bounce the link. There is some descrepancy. 9926 */ 9927 if (vport->port_state >= LPFC_LOCAL_CFG_LINK && 9928 vport->fc_flag & FC_PT2PT && 9929 vport->rcv_flogi_cnt >= 1) { 9930 rjt_err = LSRJT_LOGICAL_BSY; 9931 rjt_exp = LSEXP_NOTHING_MORE; 9932 init_link++; 9933 goto lsrjt; 9934 } 9935 9936 lpfc_els_rcv_flogi(vport, elsiocb, ndlp); 9937 if (newnode) 9938 lpfc_disc_state_machine(vport, ndlp, NULL, 9939 NLP_EVT_DEVICE_RM); 9940 break; 9941 case ELS_CMD_LOGO: 9942 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 9943 "RCV LOGO: did:x%x/ste:x%x flg:x%x", 9944 did, vport->port_state, ndlp->nlp_flag); 9945 9946 phba->fc_stat.elsRcvLOGO++; 9947 lpfc_send_els_event(vport, ndlp, payload); 9948 if (vport->port_state < LPFC_DISC_AUTH) { 9949 rjt_err = LSRJT_UNABLE_TPC; 9950 rjt_exp = LSEXP_NOTHING_MORE; 9951 break; 9952 } 9953 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO); 9954 if (newnode) 9955 lpfc_disc_state_machine(vport, ndlp, NULL, 9956 NLP_EVT_DEVICE_RM); 9957 break; 9958 case ELS_CMD_PRLO: 9959 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 9960 "RCV PRLO: did:x%x/ste:x%x flg:x%x", 9961 did, vport->port_state, ndlp->nlp_flag); 9962 9963 phba->fc_stat.elsRcvPRLO++; 9964 lpfc_send_els_event(vport, ndlp, payload); 9965 if (vport->port_state < LPFC_DISC_AUTH) { 9966 rjt_err = LSRJT_UNABLE_TPC; 9967 rjt_exp = LSEXP_NOTHING_MORE; 9968 break; 9969 } 9970 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO); 9971 break; 9972 case ELS_CMD_LCB: 9973 phba->fc_stat.elsRcvLCB++; 9974 lpfc_els_rcv_lcb(vport, elsiocb, ndlp); 9975 break; 9976 case ELS_CMD_RDP: 9977 phba->fc_stat.elsRcvRDP++; 9978 lpfc_els_rcv_rdp(vport, elsiocb, ndlp); 9979 break; 9980 case ELS_CMD_RSCN: 9981 phba->fc_stat.elsRcvRSCN++; 9982 lpfc_els_rcv_rscn(vport, elsiocb, ndlp); 9983 if (newnode) 9984 lpfc_disc_state_machine(vport, ndlp, NULL, 9985 NLP_EVT_DEVICE_RM); 9986 break; 9987 case ELS_CMD_ADISC: 9988 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 9989 "RCV ADISC: did:x%x/ste:x%x flg:x%x", 9990 did, vport->port_state, ndlp->nlp_flag); 9991 9992 lpfc_send_els_event(vport, ndlp, payload); 9993 phba->fc_stat.elsRcvADISC++; 9994 if (vport->port_state < LPFC_DISC_AUTH) { 9995 
rjt_err = LSRJT_UNABLE_TPC; 9996 rjt_exp = LSEXP_NOTHING_MORE; 9997 break; 9998 } 9999 lpfc_disc_state_machine(vport, ndlp, elsiocb, 10000 NLP_EVT_RCV_ADISC); 10001 break; 10002 case ELS_CMD_PDISC: 10003 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10004 "RCV PDISC: did:x%x/ste:x%x flg:x%x", 10005 did, vport->port_state, ndlp->nlp_flag); 10006 10007 phba->fc_stat.elsRcvPDISC++; 10008 if (vport->port_state < LPFC_DISC_AUTH) { 10009 rjt_err = LSRJT_UNABLE_TPC; 10010 rjt_exp = LSEXP_NOTHING_MORE; 10011 break; 10012 } 10013 lpfc_disc_state_machine(vport, ndlp, elsiocb, 10014 NLP_EVT_RCV_PDISC); 10015 break; 10016 case ELS_CMD_FARPR: 10017 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10018 "RCV FARPR: did:x%x/ste:x%x flg:x%x", 10019 did, vport->port_state, ndlp->nlp_flag); 10020 10021 phba->fc_stat.elsRcvFARPR++; 10022 lpfc_els_rcv_farpr(vport, elsiocb, ndlp); 10023 break; 10024 case ELS_CMD_FARP: 10025 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10026 "RCV FARP: did:x%x/ste:x%x flg:x%x", 10027 did, vport->port_state, ndlp->nlp_flag); 10028 10029 phba->fc_stat.elsRcvFARP++; 10030 lpfc_els_rcv_farp(vport, elsiocb, ndlp); 10031 break; 10032 case ELS_CMD_FAN: 10033 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10034 "RCV FAN: did:x%x/ste:x%x flg:x%x", 10035 did, vport->port_state, ndlp->nlp_flag); 10036 10037 phba->fc_stat.elsRcvFAN++; 10038 lpfc_els_rcv_fan(vport, elsiocb, ndlp); 10039 break; 10040 case ELS_CMD_PRLI: 10041 case ELS_CMD_NVMEPRLI: 10042 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10043 "RCV PRLI: did:x%x/ste:x%x flg:x%x", 10044 did, vport->port_state, ndlp->nlp_flag); 10045 10046 phba->fc_stat.elsRcvPRLI++; 10047 if ((vport->port_state < LPFC_DISC_AUTH) && 10048 (vport->fc_flag & FC_FABRIC)) { 10049 rjt_err = LSRJT_UNABLE_TPC; 10050 rjt_exp = LSEXP_NOTHING_MORE; 10051 break; 10052 } 10053 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI); 10054 break; 10055 case ELS_CMD_LIRR: 10056 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10057 "RCV LIRR: did:x%x/ste:x%x flg:x%x", 10058 did, vport->port_state, ndlp->nlp_flag); 10059 10060 phba->fc_stat.elsRcvLIRR++; 10061 lpfc_els_rcv_lirr(vport, elsiocb, ndlp); 10062 if (newnode) 10063 lpfc_disc_state_machine(vport, ndlp, NULL, 10064 NLP_EVT_DEVICE_RM); 10065 break; 10066 case ELS_CMD_RLS: 10067 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10068 "RCV RLS: did:x%x/ste:x%x flg:x%x", 10069 did, vport->port_state, ndlp->nlp_flag); 10070 10071 phba->fc_stat.elsRcvRLS++; 10072 lpfc_els_rcv_rls(vport, elsiocb, ndlp); 10073 if (newnode) 10074 lpfc_disc_state_machine(vport, ndlp, NULL, 10075 NLP_EVT_DEVICE_RM); 10076 break; 10077 case ELS_CMD_RPL: 10078 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10079 "RCV RPL: did:x%x/ste:x%x flg:x%x", 10080 did, vport->port_state, ndlp->nlp_flag); 10081 10082 phba->fc_stat.elsRcvRPL++; 10083 lpfc_els_rcv_rpl(vport, elsiocb, ndlp); 10084 if (newnode) 10085 lpfc_disc_state_machine(vport, ndlp, NULL, 10086 NLP_EVT_DEVICE_RM); 10087 break; 10088 case ELS_CMD_RNID: 10089 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10090 "RCV RNID: did:x%x/ste:x%x flg:x%x", 10091 did, vport->port_state, ndlp->nlp_flag); 10092 10093 phba->fc_stat.elsRcvRNID++; 10094 lpfc_els_rcv_rnid(vport, elsiocb, ndlp); 10095 if (newnode) 10096 lpfc_disc_state_machine(vport, ndlp, NULL, 10097 NLP_EVT_DEVICE_RM); 10098 break; 10099 case ELS_CMD_RTV: 10100 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10101 "RCV RTV: did:x%x/ste:x%x flg:x%x", 10102 
did, vport->port_state, ndlp->nlp_flag); 10103 phba->fc_stat.elsRcvRTV++; 10104 lpfc_els_rcv_rtv(vport, elsiocb, ndlp); 10105 if (newnode) 10106 lpfc_disc_state_machine(vport, ndlp, NULL, 10107 NLP_EVT_DEVICE_RM); 10108 break; 10109 case ELS_CMD_RRQ: 10110 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10111 "RCV RRQ: did:x%x/ste:x%x flg:x%x", 10112 did, vport->port_state, ndlp->nlp_flag); 10113 10114 phba->fc_stat.elsRcvRRQ++; 10115 lpfc_els_rcv_rrq(vport, elsiocb, ndlp); 10116 if (newnode) 10117 lpfc_disc_state_machine(vport, ndlp, NULL, 10118 NLP_EVT_DEVICE_RM); 10119 break; 10120 case ELS_CMD_ECHO: 10121 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10122 "RCV ECHO: did:x%x/ste:x%x flg:x%x", 10123 did, vport->port_state, ndlp->nlp_flag); 10124 10125 phba->fc_stat.elsRcvECHO++; 10126 lpfc_els_rcv_echo(vport, elsiocb, ndlp); 10127 if (newnode) 10128 lpfc_disc_state_machine(vport, ndlp, NULL, 10129 NLP_EVT_DEVICE_RM); 10130 break; 10131 case ELS_CMD_REC: 10132 /* receive this due to exchange closed */ 10133 rjt_err = LSRJT_UNABLE_TPC; 10134 rjt_exp = LSEXP_INVALID_OX_RX; 10135 break; 10136 case ELS_CMD_FPIN: 10137 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10138 "RCV FPIN: did:x%x/ste:x%x flg:x%x", 10139 did, vport->port_state, ndlp->nlp_flag); 10140 10141 lpfc_els_rcv_fpin(vport, (struct fc_els_fpin *)payload, 10142 payload_len); 10143 10144 /* There are no replies, so no rjt codes */ 10145 break; 10146 case ELS_CMD_EDC: 10147 lpfc_els_rcv_edc(vport, elsiocb, ndlp); 10148 break; 10149 case ELS_CMD_RDF: 10150 phba->fc_stat.elsRcvRDF++; 10151 /* Accept RDF only from fabric controller */ 10152 if (did != Fabric_Cntl_DID) { 10153 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 10154 "1115 Received RDF from invalid DID " 10155 "x%x\n", did); 10156 rjt_err = LSRJT_PROTOCOL_ERR; 10157 rjt_exp = LSEXP_NOTHING_MORE; 10158 goto lsrjt; 10159 } 10160 10161 lpfc_els_rcv_rdf(vport, elsiocb, ndlp); 10162 break; 10163 default: 10164 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10165 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x", 10166 cmd, did, vport->port_state); 10167 10168 /* Unsupported ELS command, reject */ 10169 rjt_err = LSRJT_CMD_UNSUPPORTED; 10170 rjt_exp = LSEXP_NOTHING_MORE; 10171 10172 /* Unknown ELS command <elsCmd> received from NPORT <did> */ 10173 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10174 "0115 Unknown ELS command x%x " 10175 "received from NPORT x%x\n", cmd, did); 10176 if (newnode) 10177 lpfc_disc_state_machine(vport, ndlp, NULL, 10178 NLP_EVT_DEVICE_RM); 10179 break; 10180 } 10181 10182 lsrjt: 10183 /* check if need to LS_RJT received ELS cmd */ 10184 if (rjt_err) { 10185 memset(&stat, 0, sizeof(stat)); 10186 stat.un.b.lsRjtRsnCode = rjt_err; 10187 stat.un.b.lsRjtRsnCodeExp = rjt_exp; 10188 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp, 10189 NULL); 10190 /* Remove the reference from above for new nodes. */ 10191 if (newnode) 10192 lpfc_disc_state_machine(vport, ndlp, NULL, 10193 NLP_EVT_DEVICE_RM); 10194 } 10195 10196 /* Release the reference on this elsiocb, not the ndlp. */ 10197 lpfc_nlp_put(elsiocb->context1); 10198 elsiocb->context1 = NULL; 10199 10200 /* Special case. Driver received an unsolicited command that 10201 * unsupportable given the driver's current state. Reset the 10202 * link and start over. 
10203 */ 10204 if (init_link) { 10205 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10206 if (!mbox) 10207 return; 10208 lpfc_linkdown(phba); 10209 lpfc_init_link(phba, mbox, 10210 phba->cfg_topology, 10211 phba->cfg_link_speed); 10212 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; 10213 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 10214 mbox->vport = vport; 10215 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == 10216 MBX_NOT_FINISHED) 10217 mempool_free(mbox, phba->mbox_mem_pool); 10218 } 10219 10220 return; 10221 10222 dropit: 10223 if (vport && !(vport->load_flag & FC_UNLOADING)) 10224 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10225 "0111 Dropping received ELS cmd " 10226 "Data: x%x x%x x%x\n", 10227 icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout); 10228 phba->fc_stat.elsRcvDrop++; 10229 } 10230 10231 /** 10232 * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring 10233 * @phba: pointer to lpfc hba data structure. 10234 * @pring: pointer to a SLI ring. 10235 * @elsiocb: pointer to lpfc els iocb data structure. 10236 * 10237 * This routine is used to process an unsolicited event received from a SLI 10238 * (Service Level Interface) ring. The actual processing of the data buffer 10239 * associated with the unsolicited event is done by invoking the routine 10240 * lpfc_els_unsol_buffer() after properly set up the iocb buffer from the 10241 * SLI ring on which the unsolicited event was received. 10242 **/ 10243 void 10244 lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10245 struct lpfc_iocbq *elsiocb) 10246 { 10247 struct lpfc_vport *vport = phba->pport; 10248 IOCB_t *icmd = &elsiocb->iocb; 10249 dma_addr_t paddr; 10250 struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2; 10251 struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3; 10252 10253 elsiocb->context1 = NULL; 10254 elsiocb->context2 = NULL; 10255 elsiocb->context3 = NULL; 10256 10257 if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) { 10258 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); 10259 } else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT && 10260 (icmd->un.ulpWord[4] & IOERR_PARAM_MASK) == 10261 IOERR_RCV_BUFFER_WAITING) { 10262 phba->fc_stat.NoRcvBuf++; 10263 /* Not enough posted buffers; Try posting more buffers */ 10264 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) 10265 lpfc_post_buffer(phba, pring, 0); 10266 return; 10267 } 10268 10269 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 10270 (icmd->ulpCommand == CMD_IOCB_RCV_ELS64_CX || 10271 icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 10272 if (icmd->unsli3.rcvsli3.vpi == 0xffff) 10273 vport = phba->pport; 10274 else 10275 vport = lpfc_find_vport_by_vpid(phba, 10276 icmd->unsli3.rcvsli3.vpi); 10277 } 10278 10279 /* If there are no BDEs associated 10280 * with this IOCB, there is nothing to do. 10281 */ 10282 if (icmd->ulpBdeCount == 0) 10283 return; 10284 10285 /* type of ELS cmd is first 32bit word 10286 * in packet 10287 */ 10288 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 10289 elsiocb->context2 = bdeBuf1; 10290 } else { 10291 paddr = getPaddr(icmd->un.cont64[0].addrHigh, 10292 icmd->un.cont64[0].addrLow); 10293 elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring, 10294 paddr); 10295 } 10296 10297 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb); 10298 /* 10299 * The different unsolicited event handlers would tell us 10300 * if they are done with "mp" by setting context2 to NULL. 
10301 	 */
10302 	if (elsiocb->context2) {
10303 		lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2);
10304 		elsiocb->context2 = NULL;
10305 	}
10306 
10307 	/* RCV_ELS64_CX provides for 2 BDEs - process 2nd if included */
10308 	if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) &&
10309 	    icmd->ulpBdeCount == 2) {
10310 		elsiocb->context2 = bdeBuf2;
10311 		lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
10312 		/* free mp if we are done with it */
10313 		if (elsiocb->context2) {
10314 			lpfc_in_buf_free(phba, elsiocb->context2);
10315 			elsiocb->context2 = NULL;
10316 		}
10317 	}
10318 }
10319 
10320 static void
10321 lpfc_start_fdmi(struct lpfc_vport *vport)
10322 {
10323 	struct lpfc_nodelist *ndlp;
10324 
10325 	/* If this is the first time, allocate an ndlp and initialize
10326 	 * it. Otherwise, make sure the node is enabled and then do the
10327 	 * login.
10328 	 */
10329 	ndlp = lpfc_findnode_did(vport, FDMI_DID);
10330 	if (!ndlp) {
10331 		ndlp = lpfc_nlp_init(vport, FDMI_DID);
10332 		if (ndlp) {
10333 			ndlp->nlp_type |= NLP_FABRIC;
10334 		} else {
10335 			return;
10336 		}
10337 	}
10338 
10339 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
10340 	lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
10341 }
10342 
10343 /**
10344  * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr
10345  * @phba: pointer to lpfc hba data structure.
10346  * @vport: pointer to a virtual N_Port data structure.
10347  *
10348  * This routine issues a Port Login (PLOGI) to the Name Server with
10349  * State Change Request (SCR) for a @vport. This routine will create an
10350  * ndlp for the Name Server associated with the @vport if such a node does
10351  * not already exist. The PLOGI to the Name Server is issued by invoking the
10352  * lpfc_issue_els_plogi() routine. If the Fabric-Device Management Interface
10353  * (FDMI) is configured for the @vport, an FDMI node will be created and
10354  * the PLOGI to FDMI is issued by invoking the lpfc_issue_els_plogi() routine.
10355  **/
10356 void
10357 lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
10358 {
10359 	struct lpfc_nodelist *ndlp;
10360 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
10361 
10362 	/*
10363 	 * If the lpfc_delay_discovery parameter is set, the clean address
10364 	 * bit is cleared, and the FC fabric parameters changed, delay FC NPort
10365 	 * discovery.
10366 */ 10367 spin_lock_irq(shost->host_lock); 10368 if (vport->fc_flag & FC_DISC_DELAYED) { 10369 spin_unlock_irq(shost->host_lock); 10370 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10371 "3334 Delay fc port discovery for %d secs\n", 10372 phba->fc_ratov); 10373 mod_timer(&vport->delayed_disc_tmo, 10374 jiffies + msecs_to_jiffies(1000 * phba->fc_ratov)); 10375 return; 10376 } 10377 spin_unlock_irq(shost->host_lock); 10378 10379 ndlp = lpfc_findnode_did(vport, NameServer_DID); 10380 if (!ndlp) { 10381 ndlp = lpfc_nlp_init(vport, NameServer_DID); 10382 if (!ndlp) { 10383 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 10384 lpfc_disc_start(vport); 10385 return; 10386 } 10387 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10388 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10389 "0251 NameServer login: no memory\n"); 10390 return; 10391 } 10392 } 10393 10394 ndlp->nlp_type |= NLP_FABRIC; 10395 10396 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 10397 10398 if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) { 10399 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10400 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10401 "0252 Cannot issue NameServer login\n"); 10402 return; 10403 } 10404 10405 if ((phba->cfg_enable_SmartSAN || 10406 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) && 10407 (vport->load_flag & FC_ALLOW_FDMI)) 10408 lpfc_start_fdmi(vport); 10409 } 10410 10411 /** 10412 * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport 10413 * @phba: pointer to lpfc hba data structure. 10414 * @pmb: pointer to the driver internal queue element for mailbox command. 10415 * 10416 * This routine is the completion callback function to register new vport 10417 * mailbox command. If the new vport mailbox command completes successfully, 10418 * the fabric registration login shall be performed on physical port (the 10419 * new vport created is actually a physical port, with VPI 0) or the port 10420 * login to Name Server for State Change Request (SCR) will be performed 10421 * on virtual port (real virtual port, with VPI greater than 0). 
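 *
 * On a REG_VPI failure the completion attempts recovery: an invalid VPI
 * status (0x20) triggers an INIT_VPI mailbox retry, unrecoverable statuses
 * mark the vport FC_VPORT_FAILED, and the remaining errors unregister the
 * RPIs and VPI and restart discovery with FLOGI, INIT_VFI or FDISC.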
10422 **/ 10423 static void 10424 lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 10425 { 10426 struct lpfc_vport *vport = pmb->vport; 10427 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 10428 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; 10429 MAILBOX_t *mb = &pmb->u.mb; 10430 int rc; 10431 10432 spin_lock_irq(shost->host_lock); 10433 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 10434 spin_unlock_irq(shost->host_lock); 10435 10436 if (mb->mbxStatus) { 10437 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10438 "0915 Register VPI failed : Status: x%x" 10439 " upd bit: x%x \n", mb->mbxStatus, 10440 mb->un.varRegVpi.upd); 10441 if (phba->sli_rev == LPFC_SLI_REV4 && 10442 mb->un.varRegVpi.upd) 10443 goto mbox_err_exit ; 10444 10445 switch (mb->mbxStatus) { 10446 case 0x11: /* unsupported feature */ 10447 case 0x9603: /* max_vpi exceeded */ 10448 case 0x9602: /* Link event since CLEAR_LA */ 10449 /* giving up on vport registration */ 10450 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10451 spin_lock_irq(shost->host_lock); 10452 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 10453 spin_unlock_irq(shost->host_lock); 10454 lpfc_can_disctmo(vport); 10455 break; 10456 /* If reg_vpi fail with invalid VPI status, re-init VPI */ 10457 case 0x20: 10458 spin_lock_irq(shost->host_lock); 10459 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 10460 spin_unlock_irq(shost->host_lock); 10461 lpfc_init_vpi(phba, pmb, vport->vpi); 10462 pmb->vport = vport; 10463 pmb->mbox_cmpl = lpfc_init_vpi_cmpl; 10464 rc = lpfc_sli_issue_mbox(phba, pmb, 10465 MBX_NOWAIT); 10466 if (rc == MBX_NOT_FINISHED) { 10467 lpfc_printf_vlog(vport, KERN_ERR, 10468 LOG_TRACE_EVENT, 10469 "2732 Failed to issue INIT_VPI" 10470 " mailbox command\n"); 10471 } else { 10472 lpfc_nlp_put(ndlp); 10473 return; 10474 } 10475 fallthrough; 10476 default: 10477 /* Try to recover from this error */ 10478 if (phba->sli_rev == LPFC_SLI_REV4) 10479 lpfc_sli4_unreg_all_rpis(vport); 10480 lpfc_mbx_unreg_vpi(vport); 10481 spin_lock_irq(shost->host_lock); 10482 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 10483 spin_unlock_irq(shost->host_lock); 10484 if (mb->mbxStatus == MBX_NOT_FINISHED) 10485 break; 10486 if ((vport->port_type == LPFC_PHYSICAL_PORT) && 10487 !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) { 10488 if (phba->sli_rev == LPFC_SLI_REV4) 10489 lpfc_issue_init_vfi(vport); 10490 else 10491 lpfc_initial_flogi(vport); 10492 } else { 10493 lpfc_initial_fdisc(vport); 10494 } 10495 break; 10496 } 10497 } else { 10498 spin_lock_irq(shost->host_lock); 10499 vport->vpi_state |= LPFC_VPI_REGISTERED; 10500 spin_unlock_irq(shost->host_lock); 10501 if (vport == phba->pport) { 10502 if (phba->sli_rev < LPFC_SLI_REV4) 10503 lpfc_issue_fabric_reglogin(vport); 10504 else { 10505 /* 10506 * If the physical port is instantiated using 10507 * FDISC, do not start vport discovery. 10508 */ 10509 if (vport->port_state != LPFC_FDISC) 10510 lpfc_start_fdiscs(phba); 10511 lpfc_do_scr_ns_plogi(phba, vport); 10512 } 10513 } else { 10514 lpfc_do_scr_ns_plogi(phba, vport); 10515 } 10516 } 10517 mbox_err_exit: 10518 /* Now, we decrement the ndlp reference count held for this 10519 * callback function 10520 */ 10521 lpfc_nlp_put(ndlp); 10522 10523 mempool_free(pmb, phba->mbox_mem_pool); 10524 return; 10525 } 10526 10527 /** 10528 * lpfc_register_new_vport - Register a new vport with a HBA 10529 * @phba: pointer to lpfc hba data structure. 10530 * @vport: pointer to a host virtual N_Port data structure. 
10531 * @ndlp: pointer to a node-list data structure. 10532 * 10533 * This routine registers the @vport as a new virtual port with a HBA. 10534 * It is done through a registering vpi mailbox command. 10535 **/ 10536 void 10537 lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport, 10538 struct lpfc_nodelist *ndlp) 10539 { 10540 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 10541 LPFC_MBOXQ_t *mbox; 10542 10543 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10544 if (mbox) { 10545 lpfc_reg_vpi(vport, mbox); 10546 mbox->vport = vport; 10547 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 10548 if (!mbox->ctx_ndlp) { 10549 mempool_free(mbox, phba->mbox_mem_pool); 10550 goto mbox_err_exit; 10551 } 10552 10553 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport; 10554 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 10555 == MBX_NOT_FINISHED) { 10556 /* mailbox command not success, decrement ndlp 10557 * reference count for this command 10558 */ 10559 lpfc_nlp_put(ndlp); 10560 mempool_free(mbox, phba->mbox_mem_pool); 10561 10562 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10563 "0253 Register VPI: Can't send mbox\n"); 10564 goto mbox_err_exit; 10565 } 10566 } else { 10567 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10568 "0254 Register VPI: no memory\n"); 10569 goto mbox_err_exit; 10570 } 10571 return; 10572 10573 mbox_err_exit: 10574 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10575 spin_lock_irq(shost->host_lock); 10576 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 10577 spin_unlock_irq(shost->host_lock); 10578 return; 10579 } 10580 10581 /** 10582 * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer 10583 * @phba: pointer to lpfc hba data structure. 10584 * 10585 * This routine cancels the retry delay timers to all the vports. 10586 **/ 10587 void 10588 lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba) 10589 { 10590 struct lpfc_vport **vports; 10591 struct lpfc_nodelist *ndlp; 10592 uint32_t link_state; 10593 int i; 10594 10595 /* Treat this failure as linkdown for all vports */ 10596 link_state = phba->link_state; 10597 lpfc_linkdown(phba); 10598 phba->link_state = link_state; 10599 10600 vports = lpfc_create_vport_work_array(phba); 10601 10602 if (vports) { 10603 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 10604 ndlp = lpfc_findnode_did(vports[i], Fabric_DID); 10605 if (ndlp) 10606 lpfc_cancel_retry_delay_tmo(vports[i], ndlp); 10607 lpfc_els_flush_cmd(vports[i]); 10608 } 10609 lpfc_destroy_vport_work_array(phba, vports); 10610 } 10611 } 10612 10613 /** 10614 * lpfc_retry_pport_discovery - Start timer to retry FLOGI. 10615 * @phba: pointer to lpfc hba data structure. 10616 * 10617 * This routine abort all pending discovery commands and 10618 * start a timer to retry FLOGI for the physical port 10619 * discovery. 
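 *
 * The retry is arranged by arming the fabric node's nlp_delayfunc timer
 * for one second, setting NLP_DELAY_TMO, recording ELS_CMD_FLOGI as the
 * deferred ELS command, and moving the physical port to the LPFC_FLOGI
 * state.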
10620  **/
10621 void
10622 lpfc_retry_pport_discovery(struct lpfc_hba *phba)
10623 {
10624 	struct lpfc_nodelist *ndlp;
10625 
10626 	/* Cancel the retry delay timers for all vports */
10627 	lpfc_cancel_all_vport_retry_delay_timer(phba);
10628 
10629 	/* If the fabric requires FLOGI, re-instantiate the physical login */
10630 	ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
10631 	if (!ndlp)
10632 		return;
10633 
10634 	mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
10635 	spin_lock_irq(&ndlp->lock);
10636 	ndlp->nlp_flag |= NLP_DELAY_TMO;
10637 	spin_unlock_irq(&ndlp->lock);
10638 	ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
10639 	phba->pport->port_state = LPFC_FLOGI;
10640 	return;
10641 }
10642 
10643 /**
10644  * lpfc_fabric_login_reqd - Check if FLOGI required.
10645  * @phba: pointer to lpfc hba data structure.
10646  * @cmdiocb: pointer to FDISC command iocb.
10647  * @rspiocb: pointer to FDISC response iocb.
10648  *
10649  * This routine checks if a FLOGI is required for FDISC
10650  * to succeed.
10651  **/
10652 static int
10653 lpfc_fabric_login_reqd(struct lpfc_hba *phba,
10654 		       struct lpfc_iocbq *cmdiocb,
10655 		       struct lpfc_iocbq *rspiocb)
10656 {
10657 
10658 	if ((rspiocb->iocb.ulpStatus != IOSTAT_FABRIC_RJT) ||
10659 	    (rspiocb->iocb.un.ulpWord[4] != RJT_LOGIN_REQUIRED))
10660 		return 0;
10661 	else
10662 		return 1;
10663 }
10664 
10665 /**
10666  * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command
10667  * @phba: pointer to lpfc hba data structure.
10668  * @cmdiocb: pointer to lpfc command iocb data structure.
10669  * @rspiocb: pointer to lpfc response iocb data structure.
10670  *
10671  * This routine is the completion callback function to a Fabric Discover
10672  * (FDISC) ELS command. Since all the FDISC ELS commands are issued
10673  * single threaded, each FDISC completion callback function will reset
10674  * the discovery timer for all vports so that the timers do not time out
10675  * unnecessarily. The function checks the FDISC IOCB status. If an error is
10676  * detected, the vport will be set to the FC_VPORT_FAILED state. Otherwise,
10677  * the vport will be set to the FC_VPORT_ACTIVE state. It then checks whether
10678  * the DID assigned to the vport has been changed with the completion of the
10679  * FDISC command. If so, both RPI (Remote Port Index) and VPI (Virtual Port
10680  * Index) are unregistered from the HBA, and then the lpfc_register_new_vport()
10681  * routine is invoked to register the new vport with the HBA. Otherwise, the
10682  * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name
10683  * Server for State Change Request (SCR).
10684  **/
10685 static void
10686 lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
10687 		    struct lpfc_iocbq *rspiocb)
10688 {
10689 	struct lpfc_vport *vport = cmdiocb->vport;
10690 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
10691 	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
10692 	struct lpfc_nodelist *np;
10693 	struct lpfc_nodelist *next_np;
10694 	IOCB_t *irsp = &rspiocb->iocb;
10695 	struct lpfc_iocbq *piocb;
10696 	struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
10697 	struct serv_parm *sp;
10698 	uint8_t fabric_param_changed;
10699 
10700 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
10701 			 "0123 FDISC completes. x%x/x%x prevDID: x%x\n",
10702 			 irsp->ulpStatus, irsp->un.ulpWord[4],
10703 			 vport->fc_prevDID);
10704 	/* Since all FDISCs are being single threaded, we
10705 	 * must reset the discovery timer for ALL vports
10706 	 * waiting to send FDISC when one completes.
10707 */ 10708 list_for_each_entry(piocb, &phba->fabric_iocb_list, list) { 10709 lpfc_set_disctmo(piocb->vport); 10710 } 10711 10712 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 10713 "FDISC cmpl: status:x%x/x%x prevdid:x%x", 10714 irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID); 10715 10716 if (irsp->ulpStatus) { 10717 10718 if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) { 10719 lpfc_retry_pport_discovery(phba); 10720 goto out; 10721 } 10722 10723 /* Check for retry */ 10724 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) 10725 goto out; 10726 /* FDISC failed */ 10727 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10728 "0126 FDISC failed. (x%x/x%x)\n", 10729 irsp->ulpStatus, irsp->un.ulpWord[4]); 10730 goto fdisc_failed; 10731 } 10732 10733 lpfc_check_nlp_post_devloss(vport, ndlp); 10734 10735 spin_lock_irq(shost->host_lock); 10736 vport->fc_flag &= ~FC_VPORT_CVL_RCVD; 10737 vport->fc_flag &= ~FC_VPORT_LOGO_RCVD; 10738 vport->fc_flag |= FC_FABRIC; 10739 if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP) 10740 vport->fc_flag |= FC_PUBLIC_LOOP; 10741 spin_unlock_irq(shost->host_lock); 10742 10743 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID; 10744 lpfc_vport_set_state(vport, FC_VPORT_ACTIVE); 10745 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 10746 if (!prsp) 10747 goto out; 10748 sp = prsp->virt + sizeof(uint32_t); 10749 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp); 10750 memcpy(&vport->fabric_portname, &sp->portName, 10751 sizeof(struct lpfc_name)); 10752 memcpy(&vport->fabric_nodename, &sp->nodeName, 10753 sizeof(struct lpfc_name)); 10754 if (fabric_param_changed && 10755 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 10756 /* If our NportID changed, we need to ensure all 10757 * remaining NPORTs get unreg_login'ed so we can 10758 * issue unreg_vpi. 10759 */ 10760 list_for_each_entry_safe(np, next_np, 10761 &vport->fc_nodes, nlp_listp) { 10762 if ((np->nlp_state != NLP_STE_NPR_NODE) || 10763 !(np->nlp_flag & NLP_NPR_ADISC)) 10764 continue; 10765 spin_lock_irq(&ndlp->lock); 10766 np->nlp_flag &= ~NLP_NPR_ADISC; 10767 spin_unlock_irq(&ndlp->lock); 10768 lpfc_unreg_rpi(vport, np); 10769 } 10770 lpfc_cleanup_pending_mbox(vport); 10771 10772 if (phba->sli_rev == LPFC_SLI_REV4) 10773 lpfc_sli4_unreg_all_rpis(vport); 10774 10775 lpfc_mbx_unreg_vpi(vport); 10776 spin_lock_irq(shost->host_lock); 10777 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 10778 if (phba->sli_rev == LPFC_SLI_REV4) 10779 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 10780 else 10781 vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG; 10782 spin_unlock_irq(shost->host_lock); 10783 } else if ((phba->sli_rev == LPFC_SLI_REV4) && 10784 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 10785 /* 10786 * Driver needs to re-reg VPI in order for f/w 10787 * to update the MAC address. 10788 */ 10789 lpfc_register_new_vport(phba, vport, ndlp); 10790 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 10791 goto out; 10792 } 10793 10794 if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI) 10795 lpfc_issue_init_vpi(vport); 10796 else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) 10797 lpfc_register_new_vport(phba, vport, ndlp); 10798 else 10799 lpfc_do_scr_ns_plogi(phba, vport); 10800 10801 /* The FDISC completed successfully. Move the fabric ndlp to 10802 * UNMAPPED state and register with the transport. 
10803 */ 10804 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 10805 goto out; 10806 10807 fdisc_failed: 10808 if (vport->fc_vport && 10809 (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS)) 10810 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10811 /* Cancel discovery timer */ 10812 lpfc_can_disctmo(vport); 10813 out: 10814 lpfc_els_free_iocb(phba, cmdiocb); 10815 lpfc_nlp_put(ndlp); 10816 } 10817 10818 /** 10819 * lpfc_issue_els_fdisc - Issue a fdisc iocb command 10820 * @vport: pointer to a virtual N_Port data structure. 10821 * @ndlp: pointer to a node-list data structure. 10822 * @retry: number of retries to the command IOCB. 10823 * 10824 * This routine prepares and issues a Fabric Discover (FDISC) IOCB to 10825 * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb() 10826 * routine to issue the IOCB, which makes sure only one outstanding fabric 10827 * IOCB will be sent off HBA at any given time. 10828 * 10829 * Note that the ndlp reference count will be incremented by 1 for holding the 10830 * ndlp and the reference to ndlp will be stored into the context1 field of 10831 * the IOCB for the completion callback function to the FDISC ELS command. 10832 * 10833 * Return code 10834 * 0 - Successfully issued fdisc iocb command 10835 * 1 - Failed to issue fdisc iocb command 10836 **/ 10837 static int 10838 lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 10839 uint8_t retry) 10840 { 10841 struct lpfc_hba *phba = vport->phba; 10842 IOCB_t *icmd; 10843 struct lpfc_iocbq *elsiocb; 10844 struct serv_parm *sp; 10845 uint8_t *pcmd; 10846 uint16_t cmdsize; 10847 int did = ndlp->nlp_DID; 10848 int rc; 10849 10850 vport->port_state = LPFC_FDISC; 10851 vport->fc_myDID = 0; 10852 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 10853 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 10854 ELS_CMD_FDISC); 10855 if (!elsiocb) { 10856 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10857 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10858 "0255 Issue FDISC: no IOCB\n"); 10859 return 1; 10860 } 10861 10862 icmd = &elsiocb->iocb; 10863 icmd->un.elsreq64.myID = 0; 10864 icmd->un.elsreq64.fl = 1; 10865 10866 /* 10867 * SLI3 ports require a different context type value than SLI4. 10868 * Catch SLI3 ports here and override the prep. 
10869 */ 10870 if (phba->sli_rev == LPFC_SLI_REV3) { 10871 icmd->ulpCt_h = 1; 10872 icmd->ulpCt_l = 0; 10873 } 10874 10875 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 10876 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC; 10877 pcmd += sizeof(uint32_t); /* CSP Word 1 */ 10878 memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm)); 10879 sp = (struct serv_parm *) pcmd; 10880 /* Setup CSPs accordingly for Fabric */ 10881 sp->cmn.e_d_tov = 0; 10882 sp->cmn.w2.r_a_tov = 0; 10883 sp->cmn.virtual_fabric_support = 0; 10884 sp->cls1.classValid = 0; 10885 sp->cls2.seqDelivery = 1; 10886 sp->cls3.seqDelivery = 1; 10887 10888 pcmd += sizeof(uint32_t); /* CSP Word 2 */ 10889 pcmd += sizeof(uint32_t); /* CSP Word 3 */ 10890 pcmd += sizeof(uint32_t); /* CSP Word 4 */ 10891 pcmd += sizeof(uint32_t); /* Port Name */ 10892 memcpy(pcmd, &vport->fc_portname, 8); 10893 pcmd += sizeof(uint32_t); /* Node Name */ 10894 pcmd += sizeof(uint32_t); /* Node Name */ 10895 memcpy(pcmd, &vport->fc_nodename, 8); 10896 sp->cmn.valid_vendor_ver_level = 0; 10897 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); 10898 lpfc_set_disctmo(vport); 10899 10900 phba->fc_stat.elsXmitFDISC++; 10901 elsiocb->iocb_cmpl = lpfc_cmpl_els_fdisc; 10902 10903 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 10904 "Issue FDISC: did:x%x", 10905 did, 0, 0); 10906 10907 elsiocb->context1 = lpfc_nlp_get(ndlp); 10908 if (!elsiocb->context1) { 10909 lpfc_els_free_iocb(phba, elsiocb); 10910 goto err_out; 10911 } 10912 10913 rc = lpfc_issue_fabric_iocb(phba, elsiocb); 10914 if (rc == IOCB_ERROR) { 10915 lpfc_els_free_iocb(phba, elsiocb); 10916 lpfc_nlp_put(ndlp); 10917 goto err_out; 10918 } 10919 10920 lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING); 10921 return 0; 10922 10923 err_out: 10924 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10925 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10926 "0256 Issue FDISC: Cannot send IOCB\n"); 10927 return 1; 10928 } 10929 10930 /** 10931 * lpfc_cmpl_els_npiv_logo - Completion function with vport logo 10932 * @phba: pointer to lpfc hba data structure. 10933 * @cmdiocb: pointer to lpfc command iocb data structure. 10934 * @rspiocb: pointer to lpfc response iocb data structure. 10935 * 10936 * This routine is the completion callback function to the issuing of a LOGO 10937 * ELS command off a vport. It frees the command IOCB and then decrement the 10938 * reference count held on ndlp for this completion function, indicating that 10939 * the reference to the ndlp is no long needed. Note that the 10940 * lpfc_els_free_iocb() routine decrements the ndlp reference held for this 10941 * callback function and an additional explicit ndlp reference decrementation 10942 * will trigger the actual release of the ndlp. 
10943 **/ 10944 static void 10945 lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 10946 struct lpfc_iocbq *rspiocb) 10947 { 10948 struct lpfc_vport *vport = cmdiocb->vport; 10949 IOCB_t *irsp; 10950 struct lpfc_nodelist *ndlp; 10951 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 10952 10953 ndlp = (struct lpfc_nodelist *)cmdiocb->context1; 10954 irsp = &rspiocb->iocb; 10955 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 10956 "LOGO npiv cmpl: status:x%x/x%x did:x%x", 10957 irsp->ulpStatus, irsp->un.ulpWord[4], irsp->un.rcvels.remoteID); 10958 10959 /* NPIV LOGO completes to NPort <nlp_DID> */ 10960 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 10961 "2928 NPIV LOGO completes to NPort x%x " 10962 "Data: x%x x%x x%x x%x x%x x%x x%x\n", 10963 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 10964 irsp->ulpTimeout, vport->num_disc_nodes, 10965 kref_read(&ndlp->kref), ndlp->nlp_flag, 10966 ndlp->fc4_xpt_flags); 10967 10968 if (irsp->ulpStatus == IOSTAT_SUCCESS) { 10969 spin_lock_irq(shost->host_lock); 10970 vport->fc_flag &= ~FC_NDISC_ACTIVE; 10971 vport->fc_flag &= ~FC_FABRIC; 10972 spin_unlock_irq(shost->host_lock); 10973 lpfc_can_disctmo(vport); 10974 } 10975 10976 if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) { 10977 /* Wake up lpfc_vport_delete if waiting...*/ 10978 if (ndlp->logo_waitq) 10979 wake_up(ndlp->logo_waitq); 10980 spin_lock_irq(&ndlp->lock); 10981 ndlp->nlp_flag &= ~(NLP_ISSUE_LOGO | NLP_LOGO_SND); 10982 ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO; 10983 spin_unlock_irq(&ndlp->lock); 10984 } 10985 10986 /* Safe to release resources now. */ 10987 lpfc_els_free_iocb(phba, cmdiocb); 10988 lpfc_nlp_put(ndlp); 10989 } 10990 10991 /** 10992 * lpfc_issue_els_npiv_logo - Issue a logo off a vport 10993 * @vport: pointer to a virtual N_Port data structure. 10994 * @ndlp: pointer to a node-list data structure. 10995 * 10996 * This routine issues a LOGO ELS command to an @ndlp off a @vport. 10997 * 10998 * Note that the ndlp reference count will be incremented by 1 for holding the 10999 * ndlp and the reference to ndlp will be stored into the context1 field of 11000 * the IOCB for the completion callback function to the LOGO ELS command. 
11001 * 11002 * Return codes 11003 * 0 - Successfully issued logo off the @vport 11004 * 1 - Failed to issue logo off the @vport 11005 **/ 11006 int 11007 lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 11008 { 11009 int rc = 0; 11010 struct lpfc_hba *phba = vport->phba; 11011 struct lpfc_iocbq *elsiocb; 11012 uint8_t *pcmd; 11013 uint16_t cmdsize; 11014 11015 cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name); 11016 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID, 11017 ELS_CMD_LOGO); 11018 if (!elsiocb) 11019 return 1; 11020 11021 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 11022 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 11023 pcmd += sizeof(uint32_t); 11024 11025 /* Fill in LOGO payload */ 11026 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 11027 pcmd += sizeof(uint32_t); 11028 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 11029 11030 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 11031 "Issue LOGO npiv did:x%x flg:x%x", 11032 ndlp->nlp_DID, ndlp->nlp_flag, 0); 11033 11034 elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo; 11035 spin_lock_irq(&ndlp->lock); 11036 ndlp->nlp_flag |= NLP_LOGO_SND; 11037 spin_unlock_irq(&ndlp->lock); 11038 elsiocb->context1 = lpfc_nlp_get(ndlp); 11039 if (!elsiocb->context1) { 11040 lpfc_els_free_iocb(phba, elsiocb); 11041 goto err; 11042 } 11043 11044 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 11045 if (rc == IOCB_ERROR) { 11046 lpfc_els_free_iocb(phba, elsiocb); 11047 lpfc_nlp_put(ndlp); 11048 goto err; 11049 } 11050 return 0; 11051 11052 err: 11053 spin_lock_irq(&ndlp->lock); 11054 ndlp->nlp_flag &= ~NLP_LOGO_SND; 11055 spin_unlock_irq(&ndlp->lock); 11056 return 1; 11057 } 11058 11059 /** 11060 * lpfc_fabric_block_timeout - Handler function to the fabric block timer 11061 * @t: timer context used to obtain the lpfc hba. 11062 * 11063 * This routine is invoked by the fabric iocb block timer after 11064 * timeout. It posts the fabric iocb block timeout event by setting the 11065 * WORKER_FABRIC_BLOCK_TMO bit to work port event bitmap and then invokes 11066 * lpfc_worker_wake_up() routine to wake up the worker thread. It is for 11067 * the worker thread to invoke the lpfc_unblock_fabric_iocbs() on the 11068 * posted event WORKER_FABRIC_BLOCK_TMO. 11069 **/ 11070 void 11071 lpfc_fabric_block_timeout(struct timer_list *t) 11072 { 11073 struct lpfc_hba *phba = from_timer(phba, t, fabric_block_timer); 11074 unsigned long iflags; 11075 uint32_t tmo_posted; 11076 11077 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 11078 tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO; 11079 if (!tmo_posted) 11080 phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO; 11081 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); 11082 11083 if (!tmo_posted) 11084 lpfc_worker_wake_up(phba); 11085 return; 11086 } 11087 11088 /** 11089 * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list 11090 * @phba: pointer to lpfc hba data structure. 11091 * 11092 * This routine issues one fabric iocb from the driver internal list to 11093 * the HBA. It first checks whether it's ready to issue one fabric iocb to 11094 * the HBA (whether there is no outstanding fabric iocb). If so, it shall 11095 * remove one pending fabric iocb from the driver internal list and invokes 11096 * lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA. 
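 * If the issue attempt fails, the iocb's original completion routine is
 * invoked with IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED and the next queued
 * fabric iocb, if any, is tried.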
11097 **/ 11098 static void 11099 lpfc_resume_fabric_iocbs(struct lpfc_hba *phba) 11100 { 11101 struct lpfc_iocbq *iocb; 11102 unsigned long iflags; 11103 int ret; 11104 IOCB_t *cmd; 11105 11106 repeat: 11107 iocb = NULL; 11108 spin_lock_irqsave(&phba->hbalock, iflags); 11109 /* Post any pending iocb to the SLI layer */ 11110 if (atomic_read(&phba->fabric_iocb_count) == 0) { 11111 list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb), 11112 list); 11113 if (iocb) 11114 /* Increment fabric iocb count to hold the position */ 11115 atomic_inc(&phba->fabric_iocb_count); 11116 } 11117 spin_unlock_irqrestore(&phba->hbalock, iflags); 11118 if (iocb) { 11119 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl; 11120 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb; 11121 iocb->iocb_flag |= LPFC_IO_FABRIC; 11122 11123 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD, 11124 "Fabric sched1: ste:x%x", 11125 iocb->vport->port_state, 0, 0); 11126 11127 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); 11128 11129 if (ret == IOCB_ERROR) { 11130 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl; 11131 iocb->fabric_iocb_cmpl = NULL; 11132 iocb->iocb_flag &= ~LPFC_IO_FABRIC; 11133 cmd = &iocb->iocb; 11134 cmd->ulpStatus = IOSTAT_LOCAL_REJECT; 11135 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED; 11136 iocb->iocb_cmpl(phba, iocb, iocb); 11137 11138 atomic_dec(&phba->fabric_iocb_count); 11139 goto repeat; 11140 } 11141 } 11142 } 11143 11144 /** 11145 * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command 11146 * @phba: pointer to lpfc hba data structure. 11147 * 11148 * This routine unblocks the issuing fabric iocb command. The function 11149 * will clear the fabric iocb block bit and then invoke the routine 11150 * lpfc_resume_fabric_iocbs() to issue one of the pending fabric iocb 11151 * from the driver internal fabric iocb list. 11152 **/ 11153 void 11154 lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba) 11155 { 11156 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 11157 11158 lpfc_resume_fabric_iocbs(phba); 11159 return; 11160 } 11161 11162 /** 11163 * lpfc_block_fabric_iocbs - Block issuing fabric iocb command 11164 * @phba: pointer to lpfc hba data structure. 11165 * 11166 * This routine blocks the issuing fabric iocb for a specified amount of 11167 * time (currently 100 ms). This is done by set the fabric iocb block bit 11168 * and set up a timeout timer for 100ms. When the block bit is set, no more 11169 * fabric iocb will be issued out of the HBA. 11170 **/ 11171 static void 11172 lpfc_block_fabric_iocbs(struct lpfc_hba *phba) 11173 { 11174 int blocked; 11175 11176 blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 11177 /* Start a timer to unblock fabric iocbs after 100ms */ 11178 if (!blocked) 11179 mod_timer(&phba->fabric_block_timer, 11180 jiffies + msecs_to_jiffies(100)); 11181 11182 return; 11183 } 11184 11185 /** 11186 * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb 11187 * @phba: pointer to lpfc hba data structure. 11188 * @cmdiocb: pointer to lpfc command iocb data structure. 11189 * @rspiocb: pointer to lpfc response iocb data structure. 11190 * 11191 * This routine is the callback function that is put to the fabric iocb's 11192 * callback function pointer (iocb->iocb_cmpl). The original iocb's callback 11193 * function pointer has been stored in iocb->fabric_iocb_cmpl. 
This callback
11194  * function first restores and invokes the original iocb's callback function
11195  * and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next
11196  * fabric bound iocb from the driver internal fabric iocb list onto the wire.
11197  **/
11198 static void
11199 lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11200 	struct lpfc_iocbq *rspiocb)
11201 {
11202 	struct ls_rjt stat;
11203 
11204 	BUG_ON((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC);
11205 
11206 	switch (rspiocb->iocb.ulpStatus) {
11207 		case IOSTAT_NPORT_RJT:
11208 		case IOSTAT_FABRIC_RJT:
11209 			if (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
11210 				lpfc_block_fabric_iocbs(phba);
11211 			}
11212 			break;
11213 
11214 		case IOSTAT_NPORT_BSY:
11215 		case IOSTAT_FABRIC_BSY:
11216 			lpfc_block_fabric_iocbs(phba);
11217 			break;
11218 
11219 		case IOSTAT_LS_RJT:
11220 			stat.un.lsRjtError =
11221 				be32_to_cpu(rspiocb->iocb.un.ulpWord[4]);
11222 			if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
11223 			    (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
11224 				lpfc_block_fabric_iocbs(phba);
11225 			break;
11226 	}
11227 
11228 	BUG_ON(atomic_read(&phba->fabric_iocb_count) == 0);
11229 
11230 	cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl;
11231 	cmdiocb->fabric_iocb_cmpl = NULL;
11232 	cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
11233 	cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb);
11234 
11235 	atomic_dec(&phba->fabric_iocb_count);
11236 	if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
11237 		/* Post any pending iocbs to HBA */
11238 		lpfc_resume_fabric_iocbs(phba);
11239 	}
11240 }
11241 
11242 /**
11243  * lpfc_issue_fabric_iocb - Issue a fabric iocb command
11244  * @phba: pointer to lpfc hba data structure.
11245  * @iocb: pointer to lpfc command iocb data structure.
11246  *
11247  * This routine is used as the top-level API for issuing a fabric iocb command
11248  * such as FLOGI and FDISC. To accommodate certain switch fabrics, this driver
11249  * function makes sure that only one fabric bound iocb will be outstanding at
11250  * any given time. As such, this function will first check to see whether there
11251  * is already an outstanding fabric iocb on the wire. If so, it will put the
11252  * newly issued iocb onto the driver internal fabric iocb list, waiting to be
11253  * issued later. Otherwise, it will issue the iocb on the wire and update the
11254  * fabric iocb count to indicate that there is one fabric iocb on the wire.
11255  *
11256  * Note that this implementation can potentially send fabric IOCBs out of
11257  * order. The problem is that the construction of the "ready" boolean does
11258  * not include the condition that the internal fabric IOCB list is empty. As
11259  * such, it is possible that a fabric IOCB issued by this routine might jump
11260  * ahead of the fabric IOCBs in the internal list.
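 *
 * Typical call pattern, as used by the FDISC issue path above:
 *
 *	elsiocb->context1 = lpfc_nlp_get(ndlp);
 *	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
 *	if (rc == IOCB_ERROR) {
 *		lpfc_els_free_iocb(phba, elsiocb);
 *		lpfc_nlp_put(ndlp);
 *	}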
11261 * 11262 * Return code 11263 * IOCB_SUCCESS - either fabric iocb put on the list or issued successfully 11264 * IOCB_ERROR - failed to issue fabric iocb 11265 **/ 11266 static int 11267 lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb) 11268 { 11269 unsigned long iflags; 11270 int ready; 11271 int ret; 11272 11273 BUG_ON(atomic_read(&phba->fabric_iocb_count) > 1); 11274 11275 spin_lock_irqsave(&phba->hbalock, iflags); 11276 ready = atomic_read(&phba->fabric_iocb_count) == 0 && 11277 !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 11278 11279 if (ready) 11280 /* Increment fabric iocb count to hold the position */ 11281 atomic_inc(&phba->fabric_iocb_count); 11282 spin_unlock_irqrestore(&phba->hbalock, iflags); 11283 if (ready) { 11284 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl; 11285 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb; 11286 iocb->iocb_flag |= LPFC_IO_FABRIC; 11287 11288 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD, 11289 "Fabric sched2: ste:x%x", 11290 iocb->vport->port_state, 0, 0); 11291 11292 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); 11293 11294 if (ret == IOCB_ERROR) { 11295 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl; 11296 iocb->fabric_iocb_cmpl = NULL; 11297 iocb->iocb_flag &= ~LPFC_IO_FABRIC; 11298 atomic_dec(&phba->fabric_iocb_count); 11299 } 11300 } else { 11301 spin_lock_irqsave(&phba->hbalock, iflags); 11302 list_add_tail(&iocb->list, &phba->fabric_iocb_list); 11303 spin_unlock_irqrestore(&phba->hbalock, iflags); 11304 ret = IOCB_SUCCESS; 11305 } 11306 return ret; 11307 } 11308 11309 /** 11310 * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list 11311 * @vport: pointer to a virtual N_Port data structure. 11312 * 11313 * This routine aborts all the IOCBs associated with a @vport from the 11314 * driver internal fabric IOCB list. The list contains fabric IOCBs to be 11315 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB 11316 * list, removes each IOCB associated with the @vport off the list, set the 11317 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function 11318 * associated with the IOCB. 11319 **/ 11320 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport) 11321 { 11322 LIST_HEAD(completions); 11323 struct lpfc_hba *phba = vport->phba; 11324 struct lpfc_iocbq *tmp_iocb, *piocb; 11325 11326 spin_lock_irq(&phba->hbalock); 11327 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, 11328 list) { 11329 11330 if (piocb->vport != vport) 11331 continue; 11332 11333 list_move_tail(&piocb->list, &completions); 11334 } 11335 spin_unlock_irq(&phba->hbalock); 11336 11337 /* Cancel all the IOCBs from the completions list */ 11338 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 11339 IOERR_SLI_ABORTED); 11340 } 11341 11342 /** 11343 * lpfc_fabric_abort_nport - Abort a ndlp's iocbs from driver fabric iocb list 11344 * @ndlp: pointer to a node-list data structure. 11345 * 11346 * This routine aborts all the IOCBs associated with an @ndlp from the 11347 * driver internal fabric IOCB list. The list contains fabric IOCBs to be 11348 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB 11349 * list, removes each IOCB associated with the @ndlp off the list, set the 11350 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function 11351 * associated with the IOCB. 
11352 **/ 11353 void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp) 11354 { 11355 LIST_HEAD(completions); 11356 struct lpfc_hba *phba = ndlp->phba; 11357 struct lpfc_iocbq *tmp_iocb, *piocb; 11358 struct lpfc_sli_ring *pring; 11359 11360 pring = lpfc_phba_elsring(phba); 11361 11362 if (unlikely(!pring)) 11363 return; 11364 11365 spin_lock_irq(&phba->hbalock); 11366 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, 11367 list) { 11368 if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) { 11369 11370 list_move_tail(&piocb->list, &completions); 11371 } 11372 } 11373 spin_unlock_irq(&phba->hbalock); 11374 11375 /* Cancel all the IOCBs from the completions list */ 11376 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 11377 IOERR_SLI_ABORTED); 11378 } 11379 11380 /** 11381 * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list 11382 * @phba: pointer to lpfc hba data structure. 11383 * 11384 * This routine aborts all the IOCBs currently on the driver internal 11385 * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS 11386 * IOCB ring. This function takes the entire IOCB list off the fabric IOCB 11387 * list, removes IOCBs off the list, set the status field to 11388 * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with 11389 * the IOCB. 11390 **/ 11391 void lpfc_fabric_abort_hba(struct lpfc_hba *phba) 11392 { 11393 LIST_HEAD(completions); 11394 11395 spin_lock_irq(&phba->hbalock); 11396 list_splice_init(&phba->fabric_iocb_list, &completions); 11397 spin_unlock_irq(&phba->hbalock); 11398 11399 /* Cancel all the IOCBs from the completions list */ 11400 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 11401 IOERR_SLI_ABORTED); 11402 } 11403 11404 /** 11405 * lpfc_sli4_vport_delete_els_xri_aborted -Remove all ndlp references for vport 11406 * @vport: pointer to lpfc vport data structure. 11407 * 11408 * This routine is invoked by the vport cleanup for deletions and the cleanup 11409 * for an ndlp on removal. 11410 **/ 11411 void 11412 lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport) 11413 { 11414 struct lpfc_hba *phba = vport->phba; 11415 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 11416 struct lpfc_nodelist *ndlp = NULL; 11417 unsigned long iflag = 0; 11418 11419 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag); 11420 list_for_each_entry_safe(sglq_entry, sglq_next, 11421 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { 11422 if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport) { 11423 lpfc_nlp_put(sglq_entry->ndlp); 11424 ndlp = sglq_entry->ndlp; 11425 sglq_entry->ndlp = NULL; 11426 11427 /* If the xri on the abts_els_sgl list is for the Fport 11428 * node and the vport is unloading, the xri aborted wcqe 11429 * likely isn't coming back. Just release the sgl. 11430 */ 11431 if ((vport->load_flag & FC_UNLOADING) && 11432 ndlp->nlp_DID == Fabric_DID) { 11433 list_del(&sglq_entry->list); 11434 sglq_entry->state = SGL_FREED; 11435 list_add_tail(&sglq_entry->list, 11436 &phba->sli4_hba.lpfc_els_sgl_list); 11437 } 11438 } 11439 } 11440 spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag); 11441 return; 11442 } 11443 11444 /** 11445 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort 11446 * @phba: pointer to lpfc hba data structure. 11447 * @axri: pointer to the els xri abort wcqe structure. 11448 * 11449 * This routine is invoked by the worker thread to process a SLI4 slow-path 11450 * ELS aborted xri. 
11451 **/ 11452 void 11453 lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba, 11454 struct sli4_wcqe_xri_aborted *axri) 11455 { 11456 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); 11457 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); 11458 uint16_t lxri = 0; 11459 11460 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 11461 unsigned long iflag = 0; 11462 struct lpfc_nodelist *ndlp; 11463 struct lpfc_sli_ring *pring; 11464 11465 pring = lpfc_phba_elsring(phba); 11466 11467 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag); 11468 list_for_each_entry_safe(sglq_entry, sglq_next, 11469 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { 11470 if (sglq_entry->sli4_xritag == xri) { 11471 list_del(&sglq_entry->list); 11472 ndlp = sglq_entry->ndlp; 11473 sglq_entry->ndlp = NULL; 11474 list_add_tail(&sglq_entry->list, 11475 &phba->sli4_hba.lpfc_els_sgl_list); 11476 sglq_entry->state = SGL_FREED; 11477 spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, 11478 iflag); 11479 11480 if (ndlp) { 11481 lpfc_set_rrq_active(phba, ndlp, 11482 sglq_entry->sli4_lxritag, 11483 rxid, 1); 11484 lpfc_nlp_put(ndlp); 11485 } 11486 11487 /* Check if TXQ queue needs to be serviced */ 11488 if (pring && !list_empty(&pring->txq)) 11489 lpfc_worker_wake_up(phba); 11490 return; 11491 } 11492 } 11493 spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag); 11494 lxri = lpfc_sli4_xri_inrange(phba, xri); 11495 if (lxri == NO_XRI) 11496 return; 11497 11498 spin_lock_irqsave(&phba->hbalock, iflag); 11499 sglq_entry = __lpfc_get_active_sglq(phba, lxri); 11500 if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) { 11501 spin_unlock_irqrestore(&phba->hbalock, iflag); 11502 return; 11503 } 11504 sglq_entry->state = SGL_XRI_ABORTED; 11505 spin_unlock_irqrestore(&phba->hbalock, iflag); 11506 return; 11507 } 11508 11509 /* lpfc_sli_abts_recover_port - Recover a port that failed a BLS_ABORT req. 11510 * @vport: pointer to virtual port object. 11511 * @ndlp: nodelist pointer for the impacted node. 11512 * 11513 * The driver calls this routine in response to an SLI4 XRI ABORT CQE 11514 * or an SLI3 ASYNC_STATUS_CN event from the port. For either event, 11515 * the driver is required to send a LOGO to the remote node before it 11516 * attempts to recover its login to the remote node. 11517 */ 11518 void 11519 lpfc_sli_abts_recover_port(struct lpfc_vport *vport, 11520 struct lpfc_nodelist *ndlp) 11521 { 11522 struct Scsi_Host *shost; 11523 struct lpfc_hba *phba; 11524 unsigned long flags = 0; 11525 11526 shost = lpfc_shost_from_vport(vport); 11527 phba = vport->phba; 11528 if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) { 11529 lpfc_printf_log(phba, KERN_INFO, 11530 LOG_SLI, "3093 No rport recovery needed. " 11531 "rport in state 0x%x\n", ndlp->nlp_state); 11532 return; 11533 } 11534 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11535 "3094 Start rport recovery on shost id 0x%x " 11536 "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x " 11537 "flags 0x%x\n", 11538 shost->host_no, ndlp->nlp_DID, 11539 vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state, 11540 ndlp->nlp_flag); 11541 /* 11542 * The rport is not responding. Remove the FCP-2 flag to prevent 11543 * an ADISC in the follow-up recovery code. 
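	 * NLP_ISSUE_LOGO is set here so that a LOGO is sent to the remote
	 * node as part of the recovery described above, after the RPI is
	 * unregistered below.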
11544 */ 11545 spin_lock_irqsave(&ndlp->lock, flags); 11546 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 11547 ndlp->nlp_flag |= NLP_ISSUE_LOGO; 11548 spin_unlock_irqrestore(&ndlp->lock, flags); 11549 lpfc_unreg_rpi(vport, ndlp); 11550 } 11551 11552 static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport) 11553 { 11554 bitmap_zero(vport->vmid_priority_range, LPFC_VMID_MAX_PRIORITY_RANGE); 11555 } 11556 11557 static void 11558 lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max) 11559 { 11560 u32 i; 11561 11562 if ((min > max) || (max > LPFC_VMID_MAX_PRIORITY_RANGE)) 11563 return; 11564 11565 for (i = min; i <= max; i++) 11566 set_bit(i, vport->vmid_priority_range); 11567 } 11568 11569 static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid) 11570 { 11571 set_bit(ctcl_vmid, vport->vmid_priority_range); 11572 } 11573 11574 u32 lpfc_vmid_get_cs_ctl(struct lpfc_vport *vport) 11575 { 11576 u32 i; 11577 11578 i = find_first_bit(vport->vmid_priority_range, 11579 LPFC_VMID_MAX_PRIORITY_RANGE); 11580 11581 if (i == LPFC_VMID_MAX_PRIORITY_RANGE) 11582 return 0; 11583 11584 clear_bit(i, vport->vmid_priority_range); 11585 return i; 11586 } 11587 11588 #define MAX_PRIORITY_DESC 255 11589 11590 static void 11591 lpfc_cmpl_els_qfpa(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11592 struct lpfc_iocbq *rspiocb) 11593 { 11594 struct lpfc_vport *vport = cmdiocb->vport; 11595 struct priority_range_desc *desc; 11596 struct lpfc_dmabuf *prsp = NULL; 11597 struct lpfc_vmid_priority_range *vmid_range = NULL; 11598 u32 *data; 11599 struct lpfc_dmabuf *dmabuf = cmdiocb->context2; 11600 IOCB_t *irsp = &rspiocb->iocb; 11601 u8 *pcmd, max_desc; 11602 u32 len, i; 11603 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)cmdiocb->context1; 11604 11605 prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list); 11606 if (!prsp) 11607 goto out; 11608 11609 pcmd = prsp->virt; 11610 data = (u32 *)pcmd; 11611 if (data[0] == ELS_CMD_LS_RJT) { 11612 lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI, 11613 "3277 QFPA LS_RJT x%x x%x\n", 11614 data[0], data[1]); 11615 goto out; 11616 } 11617 if (irsp->ulpStatus) { 11618 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI, 11619 "6529 QFPA failed with status x%x x%x\n", 11620 irsp->ulpStatus, irsp->un.ulpWord[4]); 11621 goto out; 11622 } 11623 11624 if (!vport->qfpa_res) { 11625 max_desc = FCELSSIZE / sizeof(*vport->qfpa_res); 11626 vport->qfpa_res = kcalloc(max_desc, sizeof(*vport->qfpa_res), 11627 GFP_KERNEL); 11628 if (!vport->qfpa_res) 11629 goto out; 11630 } 11631 11632 len = *((u32 *)(pcmd + 4)); 11633 len = be32_to_cpu(len); 11634 memcpy(vport->qfpa_res, pcmd, len + 8); 11635 len = len / LPFC_PRIORITY_RANGE_DESC_SIZE; 11636 11637 desc = (struct priority_range_desc *)(pcmd + 8); 11638 vmid_range = vport->vmid_priority.vmid_range; 11639 if (!vmid_range) { 11640 vmid_range = kcalloc(MAX_PRIORITY_DESC, sizeof(*vmid_range), 11641 GFP_KERNEL); 11642 if (!vmid_range) { 11643 kfree(vport->qfpa_res); 11644 goto out; 11645 } 11646 vport->vmid_priority.vmid_range = vmid_range; 11647 } 11648 vport->vmid_priority.num_descriptors = len; 11649 11650 for (i = 0; i < len; i++, vmid_range++, desc++) { 11651 lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS, 11652 "6539 vmid values low=%d, high=%d, qos=%d, " 11653 "local ve id=%d\n", desc->lo_range, 11654 desc->hi_range, desc->qos_priority, 11655 desc->local_ve_id); 11656 11657 vmid_range->low = desc->lo_range << 1; 11658 if (desc->local_ve_id == QFPA_ODD_ONLY) 11659 vmid_range->low++; 11660 if (desc->qos_priority) 
11661 vport->vmid_flag |= LPFC_VMID_QOS_ENABLED; 11662 vmid_range->qos = desc->qos_priority; 11663 11664 vmid_range->high = desc->hi_range << 1; 11665 if ((desc->local_ve_id == QFPA_ODD_ONLY) || 11666 (desc->local_ve_id == QFPA_EVEN_ODD)) 11667 vmid_range->high++; 11668 } 11669 lpfc_init_cs_ctl_bitmap(vport); 11670 for (i = 0; i < vport->vmid_priority.num_descriptors; i++) { 11671 lpfc_vmid_set_cs_ctl_range(vport, 11672 vport->vmid_priority.vmid_range[i].low, 11673 vport->vmid_priority.vmid_range[i].high); 11674 } 11675 11676 vport->vmid_flag |= LPFC_VMID_QFPA_CMPL; 11677 out: 11678 lpfc_els_free_iocb(phba, cmdiocb); 11679 lpfc_nlp_put(ndlp); 11680 } 11681 11682 int lpfc_issue_els_qfpa(struct lpfc_vport *vport) 11683 { 11684 struct lpfc_hba *phba = vport->phba; 11685 struct lpfc_nodelist *ndlp; 11686 struct lpfc_iocbq *elsiocb; 11687 u8 *pcmd; 11688 int ret; 11689 11690 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); 11691 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 11692 return -ENXIO; 11693 11694 elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_QFPA_SIZE, 2, ndlp, 11695 ndlp->nlp_DID, ELS_CMD_QFPA); 11696 if (!elsiocb) 11697 return -ENOMEM; 11698 11699 pcmd = (u8 *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt); 11700 11701 *((u32 *)(pcmd)) = ELS_CMD_QFPA; 11702 pcmd += 4; 11703 11704 elsiocb->iocb_cmpl = lpfc_cmpl_els_qfpa; 11705 11706 elsiocb->context1 = lpfc_nlp_get(ndlp); 11707 if (!elsiocb->context1) { 11708 lpfc_els_free_iocb(vport->phba, elsiocb); 11709 return -ENXIO; 11710 } 11711 11712 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 2); 11713 if (ret != IOCB_SUCCESS) { 11714 lpfc_els_free_iocb(phba, elsiocb); 11715 lpfc_nlp_put(ndlp); 11716 return -EIO; 11717 } 11718 vport->vmid_flag &= ~LPFC_VMID_QOS_ENABLED; 11719 return 0; 11720 } 11721 11722 int 11723 lpfc_vmid_uvem(struct lpfc_vport *vport, 11724 struct lpfc_vmid *vmid, bool instantiated) 11725 { 11726 struct lpfc_vem_id_desc *vem_id_desc; 11727 struct lpfc_nodelist *ndlp; 11728 struct lpfc_iocbq *elsiocb; 11729 struct instantiated_ve_desc *inst_desc; 11730 struct lpfc_vmid_context *vmid_context; 11731 u8 *pcmd; 11732 u32 *len; 11733 int ret = 0; 11734 11735 ndlp = lpfc_findnode_did(vport, Fabric_DID); 11736 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 11737 return -ENXIO; 11738 11739 vmid_context = kmalloc(sizeof(*vmid_context), GFP_KERNEL); 11740 if (!vmid_context) 11741 return -ENOMEM; 11742 elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_UVEM_SIZE, 2, 11743 ndlp, Fabric_DID, ELS_CMD_UVEM); 11744 if (!elsiocb) 11745 goto out; 11746 11747 lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS, 11748 "3427 Host vmid %s %d\n", 11749 vmid->host_vmid, instantiated); 11750 vmid_context->vmp = vmid; 11751 vmid_context->nlp = ndlp; 11752 vmid_context->instantiated = instantiated; 11753 elsiocb->vmid_tag.vmid_context = vmid_context; 11754 pcmd = (u8 *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt); 11755 11756 if (uuid_is_null((uuid_t *)vport->lpfc_vmid_host_uuid)) 11757 memcpy(vport->lpfc_vmid_host_uuid, vmid->host_vmid, 11758 LPFC_COMPRESS_VMID_SIZE); 11759 11760 *((u32 *)(pcmd)) = ELS_CMD_UVEM; 11761 len = (u32 *)(pcmd + 4); 11762 *len = cpu_to_be32(LPFC_UVEM_SIZE - 8); 11763 11764 vem_id_desc = (struct lpfc_vem_id_desc *)(pcmd + 8); 11765 vem_id_desc->tag = be32_to_cpu(VEM_ID_DESC_TAG); 11766 vem_id_desc->length = be32_to_cpu(LPFC_UVEM_VEM_ID_DESC_SIZE); 11767 memcpy(vem_id_desc->vem_id, vport->lpfc_vmid_host_uuid, 11768 LPFC_COMPRESS_VMID_SIZE); 11769 11770 inst_desc = (struct instantiated_ve_desc *)(pcmd + 
32); 11771 inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG); 11772 inst_desc->length = be32_to_cpu(LPFC_UVEM_VE_MAP_DESC_SIZE); 11773 memcpy(inst_desc->global_vem_id, vmid->host_vmid, 11774 LPFC_COMPRESS_VMID_SIZE); 11775 11776 bf_set(lpfc_instantiated_nport_id, inst_desc, vport->fc_myDID); 11777 bf_set(lpfc_instantiated_local_id, inst_desc, 11778 vmid->un.cs_ctl_vmid); 11779 if (instantiated) { 11780 inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG); 11781 } else { 11782 inst_desc->tag = be32_to_cpu(DEINSTANTIATED_VE_DESC_TAG); 11783 lpfc_vmid_put_cs_ctl(vport, vmid->un.cs_ctl_vmid); 11784 } 11785 inst_desc->word6 = cpu_to_be32(inst_desc->word6); 11786 11787 elsiocb->iocb_cmpl = lpfc_cmpl_els_uvem; 11788 11789 elsiocb->context1 = lpfc_nlp_get(ndlp); 11790 if (!elsiocb->context1) { 11791 lpfc_els_free_iocb(vport->phba, elsiocb); 11792 goto out; 11793 } 11794 11795 ret = lpfc_sli_issue_iocb(vport->phba, LPFC_ELS_RING, elsiocb, 0); 11796 if (ret != IOCB_SUCCESS) { 11797 lpfc_els_free_iocb(vport->phba, elsiocb); 11798 lpfc_nlp_put(ndlp); 11799 goto out; 11800 } 11801 11802 return 0; 11803 out: 11804 kfree(vmid_context); 11805 return -EIO; 11806 } 11807 11808 static void 11809 lpfc_cmpl_els_uvem(struct lpfc_hba *phba, struct lpfc_iocbq *icmdiocb, 11810 struct lpfc_iocbq *rspiocb) 11811 { 11812 struct lpfc_vport *vport = icmdiocb->vport; 11813 struct lpfc_dmabuf *prsp = NULL; 11814 struct lpfc_vmid_context *vmid_context = 11815 icmdiocb->vmid_tag.vmid_context; 11816 struct lpfc_nodelist *ndlp = icmdiocb->context1; 11817 u8 *pcmd; 11818 u32 *data; 11819 IOCB_t *irsp = &rspiocb->iocb; 11820 struct lpfc_dmabuf *dmabuf = icmdiocb->context2; 11821 struct lpfc_vmid *vmid; 11822 11823 vmid = vmid_context->vmp; 11824 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 11825 ndlp = NULL; 11826 11827 prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list); 11828 if (!prsp) 11829 goto out; 11830 pcmd = prsp->virt; 11831 data = (u32 *)pcmd; 11832 if (data[0] == ELS_CMD_LS_RJT) { 11833 lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI, 11834 "4532 UVEM LS_RJT %x %x\n", data[0], data[1]); 11835 goto out; 11836 } 11837 if (irsp->ulpStatus) { 11838 lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI, 11839 "4533 UVEM error status %x: %x\n", 11840 irsp->ulpStatus, irsp->un.ulpWord[4]); 11841 goto out; 11842 } 11843 spin_lock(&phba->hbalock); 11844 /* Set IN USE flag */ 11845 vport->vmid_flag |= LPFC_VMID_IN_USE; 11846 phba->pport->vmid_flag |= LPFC_VMID_IN_USE; 11847 spin_unlock(&phba->hbalock); 11848 11849 if (vmid_context->instantiated) { 11850 write_lock(&vport->vmid_lock); 11851 vmid->flag |= LPFC_VMID_REGISTERED; 11852 vmid->flag &= ~LPFC_VMID_REQ_REGISTER; 11853 write_unlock(&vport->vmid_lock); 11854 } 11855 11856 out: 11857 kfree(vmid_context); 11858 lpfc_els_free_iocb(phba, icmdiocb); 11859 lpfc_nlp_put(ndlp); 11860 } 11861
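
/*
 * Usage sketch for the VMID ELS helpers above. This is illustrative only:
 * the calling sequence shown is an assumption and does not name an actual
 * caller in this file. The application-ID support code is expected to query
 * the fabric's priority ranges once with lpfc_issue_els_qfpa() and then
 * instantiate or deinstantiate individual VMIDs with lpfc_vmid_uvem():
 *
 *	if (!(vport->vmid_flag & LPFC_VMID_QFPA_CMPL))
 *		lpfc_issue_els_qfpa(vport);
 *	...
 *	vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport);
 *	lpfc_vmid_uvem(vport, vmid, true);
 */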