/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
 * "Broadcom" refers to Broadcom Limited and/or its subsidiaries.  *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
/* See Fibre Channel protocol T11 FC-LS for details */
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
			  struct lpfc_iocbq *);
static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
			struct lpfc_iocbq *);
static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
				struct lpfc_nodelist *ndlp, uint8_t retry);
static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
				  struct lpfc_iocbq *iocb);

static int lpfc_max_els_tries = 3;

/**
 * lpfc_els_chk_latt - Check host link attention event for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine checks whether there is an outstanding host link
 * attention event during the discovery process with the @vport. It is done
 * by reading the HBA's Host Attention (HA) register. If there are any host
 * link attention events during this @vport's discovery process, the @vport
 * shall be marked as FC_ABORT_DISCOVERY, a host link attention clear shall
 * be issued if the link state is not already in host link cleared state,
 * and a return code shall indicate whether the host link attention event
 * had happened.
 *
 * Note that, if either the host link is in state LPFC_LINK_DOWN or the
 * @vport state is in LPFC_VPORT_READY, the request for checking host link
 * attention event will be ignored and a return code shall indicate no host
 * link attention event had happened.
 *
 * Return codes
 *   0 - no host link attention event happened
 *   1 - host link attention event happened
 **/
int
lpfc_els_chk_latt(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	uint32_t ha_copy;

	if (vport->port_state >= LPFC_VPORT_READY ||
	    phba->link_state == LPFC_LINK_DOWN ||
	    phba->sli_rev > LPFC_SLI_REV3)
		return 0;

	/* Read the HBA Host Attention Register */
	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		return 1;

	if (!(ha_copy & HA_LATT))
		return 0;

	/* Pending Link Event during Discovery */
	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
			 "0237 Pending Link Event during "
			 "Discovery: State x%x\n",
			 phba->pport->port_state);

	/* CLEAR_LA should re-enable link attention events and
	 * we should then immediately take a LATT event. The
	 * LATT processing should call lpfc_linkdown() which
	 * will cleanup any left over in-progress discovery
	 * events.
	 */
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_ABORT_DISCOVERY;
	spin_unlock_irq(shost->host_lock);

	if (phba->link_state != LPFC_CLEAR_LA)
		lpfc_issue_clear_la(phba, vport);

	return 1;
}

/**
 * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
 * @vport: pointer to a host virtual N_Port data structure.
 * @expectRsp: flag indicating whether response is expected.
 * @cmdSize: size of the ELS command.
 * @retry: number of retries to the command IOCB when it fails.
 * @ndlp: pointer to a node-list data structure.
 * @did: destination identifier.
 * @elscmd: the ELS command code.
 *
 * This routine allocates a lpfc-IOCB data structure from the driver
 * lpfc-IOCB free-list and prepares the IOCB with the parameters passed
 * into the routine for the discovery state machine to issue an Extended
 * Link Service (ELS) command. It is a generic lpfc-IOCB allocation and
 * preparation routine used by all the discovery state machine routines;
 * the ELS command-specific fields are set up later by the individual
 * discovery state machine routines after calling this routine to allocate
 * and prepare a generic IOCB data structure. It fills in the Buffer
 * Descriptor Entries (BDEs) and allocates buffers for both the command
 * payload and the response payload (if expected). The reference count on
 * the ndlp is incremented by 1 and the reference to the ndlp is put into
 * context1 of the IOCB data structure for this IOCB to hold the ndlp
 * reference for the command's callback function to access later.
 *
 * Return code
 *   Pointer to the newly allocated/prepared els iocb data structure
 *   NULL - when els iocb data structure allocation/preparation failed
 **/
struct lpfc_iocbq *
lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
		   uint16_t cmdSize, uint8_t retry,
		   struct lpfc_nodelist *ndlp, uint32_t did,
		   uint32_t elscmd)
{
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
	struct ulp_bde64 *bpl;
	IOCB_t *icmd;


	if (!lpfc_is_link_up(phba))
		return NULL;

	/* Allocate buffer for command iocb */
	elsiocb = lpfc_sli_get_iocbq(phba);

	if (elsiocb == NULL)
		return NULL;

	/*
	 * If this command is for fabric controller and HBA running
	 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
	 */
	if ((did == Fabric_DID) &&
	    (phba->hba_flag & HBA_FIP_SUPPORT) &&
	    ((elscmd == ELS_CMD_FLOGI) ||
	     (elscmd == ELS_CMD_FDISC) ||
	     (elscmd == ELS_CMD_LOGO)))
		switch (elscmd) {
		case ELS_CMD_FLOGI:
			elsiocb->iocb_flag |=
				((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		case ELS_CMD_FDISC:
			elsiocb->iocb_flag |=
				((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		case ELS_CMD_LOGO:
			elsiocb->iocb_flag |=
				((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		}
	else
		elsiocb->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;

	icmd = &elsiocb->iocb;

	/* fill in BDEs for command */
	/* Allocate buffer for command payload */
	pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (pcmd)
		pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
	if (!pcmd || !pcmd->virt)
		goto els_iocb_free_pcmb_exit;

	INIT_LIST_HEAD(&pcmd->list);

	/* Allocate buffer for response payload */
	if (expectRsp) {
		prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (prsp)
			prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
						     &prsp->phys);
		if (!prsp || !prsp->virt)
			goto els_iocb_free_prsp_exit;
		INIT_LIST_HEAD(&prsp->list);
	} else
		prsp = NULL;

	/* Allocate buffer for Buffer ptr list */
	pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (pbuflist)
		pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
						 &pbuflist->phys);
	if (!pbuflist || !pbuflist->virt)
		goto els_iocb_free_pbuf_exit;

	INIT_LIST_HEAD(&pbuflist->list);

	if (expectRsp) {
		icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
		icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
		icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
		icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));

		icmd->un.elsreq64.remoteID = did;	/* DID */
		icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
		if (elscmd == ELS_CMD_FLOGI)
			icmd->ulpTimeout = FF_DEF_RATOV * 2;
		else
			icmd->ulpTimeout = phba->fc_ratov * 2;
	} else {
		icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
		icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
		icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
		icmd->un.xseq64.bdl.bdeSize = sizeof(struct ulp_bde64);
		icmd->un.xseq64.xmit_els_remoteID = did;	/* DID */
		icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
	}
	icmd->ulpBdeCount = 1;
	icmd->ulpLe = 1;
	icmd->ulpClass = CLASS3;

	/*
	 * If we have NPIV enabled, we want to send ELS traffic by VPI.
	 * For SLI4, since the driver controls VPIs we also want to include
	 * all ELS pt2pt protocol traffic as well.
	 */
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
	    ((phba->sli_rev == LPFC_SLI_REV4) &&
	     (vport->fc_flag & FC_PT2PT))) {

		if (expectRsp) {
			icmd->un.elsreq64.myID = vport->fc_myDID;

			/* For ELS_REQUEST64_CR, use the VPI by default */
			icmd->ulpContext = phba->vpi_ids[vport->vpi];
		}

		icmd->ulpCt_h = 0;
		/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
		if (elscmd == ELS_CMD_ECHO)
			icmd->ulpCt_l = 0; /* context = invalid RPI */
		else
			icmd->ulpCt_l = 1; /* context = VPI */
	}

	bpl = (struct ulp_bde64 *) pbuflist->virt;
	bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
	bpl->tus.f.bdeSize = cmdSize;
	bpl->tus.f.bdeFlags = 0;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

	if (expectRsp) {
		bpl++;
		bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
		bpl->tus.f.bdeSize = FCELSSIZE;
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
	}

	/* prevent preparing iocb with NULL ndlp reference */
	elsiocb->context1 = lpfc_nlp_get(ndlp);
	if (!elsiocb->context1)
		goto els_iocb_free_pbuf_exit;
	elsiocb->context2 = pcmd;
	elsiocb->context3 = pbuflist;
	elsiocb->retry = retry;
	elsiocb->vport = vport;
	elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;

	if (prsp) {
		list_add(&prsp->list, &pcmd->list);
	}
	if (expectRsp) {
		/* Xmit ELS command <elsCmd> to remote NPORT <did> */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "0116 Xmit ELS command x%x to remote "
				 "NPORT x%x I/O tag: x%x, port state:x%x"
				 " fc_flag:x%x\n",
				 elscmd, did, elsiocb->iotag,
				 vport->port_state,
				 vport->fc_flag);
	} else {
		/* Xmit ELS response <elsCmd> to remote NPORT <did> */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "0117 Xmit ELS response x%x to remote "
				 "NPORT x%x I/O tag: x%x, size: x%x "
				 "port_state x%x fc_flag x%x\n",
				 elscmd, ndlp->nlp_DID, elsiocb->iotag,
				 cmdSize, vport->port_state,
				 vport->fc_flag);
	}
	return elsiocb;

els_iocb_free_pbuf_exit:
	if (expectRsp)
		lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
	kfree(pbuflist);

els_iocb_free_prsp_exit:
	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
	kfree(prsp);

els_iocb_free_pcmb_exit:
	kfree(pcmd);
	lpfc_sli_release_iocbq(phba, elsiocb);
	return NULL;
}

/**
 * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a fabric registration login for a @vport. An
 * active ndlp node with Fabric_DID must already exist for this @vport.
 * The routine invokes two mailbox commands to carry out fabric registration
 * login through the HBA firmware: the first mailbox command requests the
 * HBA to perform link configuration for the @vport; and the second mailbox
 * command requests the HBA to perform the actual fabric registration login
 * with the @vport.
 *
 * Return code
 *   0 - successfully issued fabric registration login for @vport
 *   -ENXIO -- failed to issue fabric registration login for @vport
 **/
int
lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct serv_parm *sp;
	int rc;
	int err = 0;

	sp = &phba->fc_fabparam;
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		err = 1;
		goto fail;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		err = 2;
		goto fail;
	}

	vport->port_state = LPFC_FABRIC_CFG_LINK;
	lpfc_config_link(phba, mbox);
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->vport = vport;

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		err = 3;
		goto fail_free_mbox;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		err = 4;
		goto fail;
	}
	rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
			  ndlp->nlp_rpi);
	if (rc) {
		err = 5;
		goto fail_free_mbox;
	}

	mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
	mbox->vport = vport;
	/* increment the reference count on ndlp to hold reference
	 * for the callback routine.
	 */
	mbox->context2 = lpfc_nlp_get(ndlp);

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		err = 6;
		goto fail_issue_reg_login;
	}

	return 0;

fail_issue_reg_login:
	/* decrement the reference count on ndlp just incremented
	 * for the failed mbox command.
	 */
	lpfc_nlp_put(ndlp);
	mp = (struct lpfc_dmabuf *) mbox->context1;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
fail_free_mbox:
	mempool_free(mbox, phba->mbox_mem_pool);

fail:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
		"0249 Cannot issue Register Fabric login: Err %d\n", err);
	return -ENXIO;
}

/**
 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
 * the @vport. This mailbox command is necessary for SLI4 port only.
 *
 * Return code
 *   0 - successfully issued REG_VFI for @vport
 *   A failure code otherwise.
 **/
int
lpfc_issue_reg_vfi(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mboxq = NULL;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *dmabuf = NULL;
	int rc = 0;

	/* move forward in case of SLI4 FC port loopback test and pt2pt mode */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    !(phba->link_flag & LS_LOOPBACK_MODE) &&
	    !(vport->fc_flag & FC_PT2PT)) {
		ndlp = lpfc_findnode_did(vport, Fabric_DID);
		if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
			rc = -ENODEV;
			goto fail;
		}
	}

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		rc = -ENOMEM;
		goto fail;
	}

	/* Supply CSP's only if we are fabric connect or pt-to-pt connect */
	if ((vport->fc_flag & FC_FABRIC) || (vport->fc_flag & FC_PT2PT)) {
		dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (!dmabuf) {
			rc = -ENOMEM;
			goto fail;
		}
		dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
		if (!dmabuf->virt) {
			rc = -ENOMEM;
			goto fail;
		}
		memcpy(dmabuf->virt, &phba->fc_fabparam,
		       sizeof(struct serv_parm));
	}

	vport->port_state = LPFC_FABRIC_CFG_LINK;
	if (dmabuf)
		lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
	else
		lpfc_reg_vfi(mboxq, vport, 0);

	mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
	mboxq->vport = vport;
	mboxq->context1 = dmabuf;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = -ENXIO;
		goto fail;
	}
	return 0;

fail:
	if (mboxq)
		mempool_free(mboxq, phba->mbox_mem_pool);
	if (dmabuf) {
		if (dmabuf->virt)
			lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}

	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
		"0289 Issue Register VFI failed: Err %d\n", rc);
	return rc;
}

/**
 * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for
 * the @vport. This mailbox command is necessary for SLI4 port only.
 *
 * Return code
 *   0 - successfully issued UNREG_VFI for @vport
 *   A failure code otherwise.
 **/
int
lpfc_issue_unreg_vfi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct Scsi_Host *shost;
	LPFC_MBOXQ_t *mboxq;
	int rc;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2556 UNREG_VFI mbox allocation failed "
				"HBA state x%x\n", phba->pport->port_state);
		return -ENOMEM;
	}

	lpfc_unreg_vfi(mboxq, vport);
	mboxq->vport = vport;
	mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl;

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2557 UNREG_VFI issue mbox failed rc x%x "
				"HBA state x%x\n",
				rc, phba->pport->port_state);
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EIO;
	}

	shost = lpfc_shost_from_vport(vport);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VFI_REGISTERED;
	spin_unlock_irq(shost->host_lock);
	return 0;
}

/**
 * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
 * @vport: pointer to a host virtual N_Port data structure.
 * @sp: pointer to service parameter data structure.
 *
 * This routine is called from the FLOGI/FDISC completion handler functions.
 * It returns 1 when the FCID, Fabric portname, or Fabric nodename has
 * changed in the completion service parameters; otherwise it returns 0.
 * This function also sets a flag in the vport data structure to delay
 * N_Port discovery after the FLOGI/FDISC completion if the Clean Address
 * bit in the FLOGI/FDISC response is cleared and the FCID, Fabric portname,
 * or Fabric nodename has changed in the completion service parameters.
 *
 * Return code
 *   0 - FCID, Fabric Nodename, and Fabric portname are not changed.
 *   1 - FCID, Fabric Nodename, or Fabric portname is changed.
 *
 **/
static uint8_t
lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
			  struct serv_parm *sp)
{
	struct lpfc_hba *phba = vport->phba;
	uint8_t fabric_param_changed = 0;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if ((vport->fc_prevDID != vport->fc_myDID) ||
	    memcmp(&vport->fabric_portname, &sp->portName,
		   sizeof(struct lpfc_name)) ||
	    memcmp(&vport->fabric_nodename, &sp->nodeName,
		   sizeof(struct lpfc_name)) ||
	    (vport->vport_flag & FAWWPN_PARAM_CHG)) {
		fabric_param_changed = 1;
		vport->vport_flag &= ~FAWWPN_PARAM_CHG;
	}
	/*
	 * Word 1 Bit 31 in common service parameter is overloaded.
	 * Word 1 Bit 31 in FLOGI request is multiple NPort request
	 * Word 1 Bit 31 in FLOGI response is clean address bit
	 *
	 * If fabric parameter is changed and clean address bit is
	 * cleared delay nport discovery if
	 * - vport->fc_prevDID != 0 (not initial discovery) OR
	 * - lpfc_delay_discovery module parameter is set.
	 */
	if (fabric_param_changed && !sp->cmn.clean_address_bit &&
	    (vport->fc_prevDID || phba->cfg_delay_discovery)) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_DISC_DELAYED;
		spin_unlock_irq(shost->host_lock);
	}

	return fabric_param_changed;
}


/**
 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 * @irsp: pointer to the IOCB within the lpfc response IOCB.
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into a fabric
 * port in a fabric topology. It properly sets up the parameters to the @ndlp
 * from the IOCB response. It also checks the newly assigned N_Port ID to the
 * @vport against the previously assigned N_Port ID. If it is different from
 * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine
 * is invoked on all the remaining nodes with the @vport to unregister the
 * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin()
 * is invoked to register login to the fabric.
 *
 * Return code
 *   0 - Success (currently, always return 0)
 **/
static int
lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   struct serv_parm *sp, IOCB_t *irsp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_nodelist *np;
	struct lpfc_nodelist *next_np;
	uint8_t fabric_param_changed;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_FABRIC;
	spin_unlock_irq(shost->host_lock);

	phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
	if (sp->cmn.edtovResolution)	/* E_D_TOV ticks are in nanoseconds */
		phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;

	phba->fc_edtovResol = sp->cmn.edtovResolution;
	phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;

	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_PUBLIC_LOOP;
		spin_unlock_irq(shost->host_lock);
	}

	vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
	ndlp->nlp_class_sup = 0;
	if (sp->cls1.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS1;
	if (sp->cls2.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS2;
	if (sp->cls3.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS3;
	if (sp->cls4.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS4;
	ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
			     sp->cmn.bbRcvSizeLsb;

	fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
	if (fabric_param_changed) {
		/* Reset FDMI attribute masks based on config parameter */
		if (phba->cfg_enable_SmartSAN ||
		    (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
			/* Setup appropriate attribute masks */
			vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
			if (phba->cfg_enable_SmartSAN)
				vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
			else
				vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
		} else {
			vport->fdmi_hba_mask = 0;
			vport->fdmi_port_mask = 0;
		}

	}
	memcpy(&vport->fabric_portname, &sp->portName,
	       sizeof(struct lpfc_name));
	memcpy(&vport->fabric_nodename, &sp->nodeName,
	       sizeof(struct lpfc_name));
	memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));

	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
		if (sp->cmn.response_multiple_NPort) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_ELS | LOG_VPORT,
					 "1816 FLOGI NPIV supported, "
					 "response data 0x%x\n",
					 sp->cmn.response_multiple_NPort);
			spin_lock_irq(&phba->hbalock);
			phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
			spin_unlock_irq(&phba->hbalock);
		} else {
			/* Because we asked f/w for NPIV it still expects us
			 * to call reg_vnpid at least for the physical host
			 */
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_ELS | LOG_VPORT,
					 "1817 Fabric does not support NPIV "
					 "- configuring single port mode.\n");
			spin_lock_irq(&phba->hbalock);
			phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
			spin_unlock_irq(&phba->hbalock);
		}
	}

	/*
	 * For FC we need to do some special processing because of the SLI
	 * Port's default settings of the Common Service Parameters.
	 */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)) {
		/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
		if (fabric_param_changed)
			lpfc_unregister_fcf_prep(phba);

		/* This should just update the VFI CSPs*/
		if (vport->fc_flag & FC_VFI_REGISTERED)
			lpfc_issue_reg_vfi(vport);
	}

	if (fabric_param_changed &&
	    !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {

		/* If our NportID changed, we need to ensure all
		 * remaining NPORTs get unreg_login'ed.
		 */
		list_for_each_entry_safe(np, next_np,
					 &vport->fc_nodes, nlp_listp) {
			if (!NLP_CHK_NODE_ACT(np))
				continue;
			if ((np->nlp_state != NLP_STE_NPR_NODE) ||
			    !(np->nlp_flag & NLP_NPR_ADISC))
				continue;
			spin_lock_irq(shost->host_lock);
			np->nlp_flag &= ~NLP_NPR_ADISC;
			spin_unlock_irq(shost->host_lock);
			lpfc_unreg_rpi(vport, np);
		}
		lpfc_cleanup_pending_mbox(vport);

		if (phba->sli_rev == LPFC_SLI_REV4) {
			lpfc_sli4_unreg_all_rpis(vport);
			lpfc_mbx_unreg_vpi(vport);
			spin_lock_irq(shost->host_lock);
			vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
			spin_unlock_irq(shost->host_lock);
		}

		/*
		 * For SLI3 and SLI4, the VPI needs to be reregistered in
		 * response to this fabric parameter change event.
		 */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
		   !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
		/*
		 * Driver needs to re-reg VPI in order for f/w
		 * to update the MAC address.
		 */
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		lpfc_register_new_vport(phba, vport, ndlp);
		return 0;
	}

	if (phba->sli_rev < LPFC_SLI_REV4) {
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
		    vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
			lpfc_register_new_vport(phba, vport, ndlp);
		else
			lpfc_issue_fabric_reglogin(vport);
	} else {
		ndlp->nlp_type |= NLP_FABRIC;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
		    (vport->vpi_state & LPFC_VPI_REGISTERED)) {
			lpfc_start_fdiscs(phba);
			lpfc_do_scr_ns_plogi(phba, vport);
		} else if (vport->fc_flag & FC_VFI_REGISTERED)
			lpfc_issue_init_vpi(vport);
		else {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
					 "3135 Need register VFI: (x%x/%x)\n",
					 vport->fc_prevDID, vport->fc_myDID);
			lpfc_issue_reg_vfi(vport);
		}
	}
	return 0;
}

/**
 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into an N_Port
 * in a point-to-point topology. First, the @vport's N_Port Name is compared
 * with the received N_Port Name: if the @vport's N_Port Name is greater than
 * the received N_Port Name lexicographically, this node shall assign local
 * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and
 * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise,
 * this node shall just wait for the remote node to issue PLOGI and assign
 * N_Port IDs.
 *
 * Return code
 *   0 - Success
 *   -ENXIO - Fail
 **/
static int
lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  struct serv_parm *sp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
	vport->fc_flag |= FC_PT2PT;
	spin_unlock_irq(shost->host_lock);

	/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
	if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
		lpfc_unregister_fcf_prep(phba);

		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_VFI_REGISTERED;
		spin_unlock_irq(shost->host_lock);
		phba->fc_topology_changed = 0;
	}

	rc = memcmp(&vport->fc_portname, &sp->portName,
		    sizeof(vport->fc_portname));

	if (rc >= 0) {
		/* This side will initiate the PLOGI */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_PT2PT_PLOGI;
		spin_unlock_irq(shost->host_lock);

		/*
		 * N_Port ID cannot be 0, set our Id to LocalID
		 * the other side will be RemoteID.
		 */

		/* not equal */
		if (rc)
			vport->fc_myDID = PT2PT_LocalID;

		/* Decrement ndlp reference count indicating that ndlp can be
		 * safely released when other references to it are done.
		 */
		lpfc_nlp_put(ndlp);

		ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
		if (!ndlp) {
			/*
			 * Cannot find existing Fabric ndlp, so allocate a
			 * new one
			 */
			ndlp = lpfc_nlp_init(vport, PT2PT_RemoteID);
			if (!ndlp)
				goto fail;
		} else if (!NLP_CHK_NODE_ACT(ndlp)) {
			ndlp = lpfc_enable_node(vport, ndlp,
						NLP_STE_UNUSED_NODE);
			if (!ndlp)
				goto fail;
		}

		memcpy(&ndlp->nlp_portname, &sp->portName,
		       sizeof(struct lpfc_name));
		memcpy(&ndlp->nlp_nodename, &sp->nodeName,
		       sizeof(struct lpfc_name));
		/* Set state will put ndlp onto node list if not already done */
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
	} else
		/* This side will wait for the PLOGI, decrement ndlp reference
		 * count indicating that ndlp can be released when other
		 * references to it are done.
		 */
		lpfc_nlp_put(ndlp);

	/* If we are pt2pt with another NPort, force NPIV off! */
	phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		goto fail;

	lpfc_config_link(phba, mbox);

	mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
	mbox->vport = vport;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		mempool_free(mbox, phba->mbox_mem_pool);
		goto fail;
	}

	return 0;
fail:
	return -ENXIO;
}

/**
 * lpfc_cmpl_els_flogi - Completion callback function for flogi
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is the top-level completion callback function for issuing
 * a Fabric Login (FLOGI) command. If the response IOCB reported error,
 * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If
 * a retry has been made (either immediately or delayed, with lpfc_els_retry()
 * returning 1), the command IOCB will be released and the function returns.
 * If the retry attempt has been given up (possibly after reaching the
 * maximum number of retries), one additional decrement of the ndlp reference
 * shall be invoked before going out after releasing the command IOCB. This
 * will actually release the remote node (Note, lpfc_els_free_iocb() will
 * also invoke one decrement of the ndlp reference count). If no error is
 * reported in the IOCB status, the command Port ID field is used to
 * determine whether this is a point-to-point topology or a fabric topology:
 * if the Port ID field is assigned, it is a fabric topology; otherwise, it
 * is a point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or
 * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the
 * specific topology completion conditions.
 **/
static void
lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		    struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	IOCB_t *irsp = &rspiocb->iocb;
	struct lpfc_nodelist *ndlp = cmdiocb->context1;
	struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
	struct serv_parm *sp;
	uint16_t fcf_index;
	int rc;

	/* Check to see if link went down during discovery */
	if (lpfc_els_chk_latt(vport)) {
		/* One additional decrement on node reference count to
		 * trigger the release of the node
		 */
		lpfc_nlp_put(ndlp);
		goto out;
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"FLOGI cmpl: status:x%x/x%x state:x%x",
		irsp->ulpStatus, irsp->un.ulpWord[4],
		vport->port_state);

	if (irsp->ulpStatus) {
		/*
		 * In case of FIP mode, perform roundrobin FCF failover
		 * due to new FCF discovery
		 */
		if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
		    (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
			if (phba->link_state < LPFC_LINK_UP)
				goto stop_rr_fcf_flogi;
			if ((phba->fcoe_cvl_eventtag_attn ==
			     phba->fcoe_cvl_eventtag) &&
			    (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
			    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
			    IOERR_SLI_ABORTED))
				goto stop_rr_fcf_flogi;
			else
				phba->fcoe_cvl_eventtag_attn =
					phba->fcoe_cvl_eventtag;
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
					"2611 FLOGI failed on FCF (x%x), "
					"status:x%x/x%x, tmo:x%x, perform "
					"roundrobin FCF failover\n",
					phba->fcf.current_rec.fcf_indx,
					irsp->ulpStatus, irsp->un.ulpWord[4],
					irsp->ulpTimeout);
			lpfc_sli4_set_fcf_flogi_fail(phba,
					phba->fcf.current_rec.fcf_indx);
			fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
			rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
			if (rc)
				goto out;
		}

stop_rr_fcf_flogi:
		/* FLOGI failure */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "2858 FLOGI failure Status:x%x/x%x TMO:x%x "
				 "Data x%x x%x\n",
				 irsp->ulpStatus, irsp->un.ulpWord[4],
				 irsp->ulpTimeout, phba->hba_flag,
				 phba->fcf.fcf_flag);

		/* Check for retry */
		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
			goto out;

		/* FLOGI failure */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n",
				 irsp->ulpStatus, irsp->un.ulpWord[4],
				 irsp->ulpTimeout);


		/* If this is not a loop open failure, bail out */
		if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
		      ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
					IOERR_LOOP_OPEN_FAILURE)))
			goto flogifail;

		/* FLOGI failed, so there is no fabric */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
		spin_unlock_irq(shost->host_lock);

		/* If private loop, then allow max outstanding els to be
		 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
		 * alpa map would take too long otherwise.
		 */
		if (phba->alpa_map[0] == 0)
			vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
		if ((phba->sli_rev == LPFC_SLI_REV4) &&
		    (!(vport->fc_flag & FC_VFI_REGISTERED) ||
		     (vport->fc_prevDID != vport->fc_myDID) ||
		     phba->fc_topology_changed)) {
			if (vport->fc_flag & FC_VFI_REGISTERED) {
				if (phba->fc_topology_changed) {
					lpfc_unregister_fcf_prep(phba);
					spin_lock_irq(shost->host_lock);
					vport->fc_flag &= ~FC_VFI_REGISTERED;
					spin_unlock_irq(shost->host_lock);
					phba->fc_topology_changed = 0;
				} else {
					lpfc_sli4_unreg_all_rpis(vport);
				}
			}

			/* Do not register VFI if the driver aborted FLOGI */
			if (!lpfc_error_lost_link(irsp))
				lpfc_issue_reg_vfi(vport);
			lpfc_nlp_put(ndlp);
			goto out;
		}
		goto flogifail;
	}
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
	vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
	spin_unlock_irq(shost->host_lock);

	/*
	 * The FLogI succeeded.  Sync the data for the CPU before
	 * accessing it.
	 */
	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
	if (!prsp)
		goto out;
	sp = prsp->virt + sizeof(uint32_t);

	/* FLOGI completes successfully */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0101 FLOGI completes successfully, I/O tag:x%x, "
			 "Data: x%x x%x x%x x%x x%x x%x\n", cmdiocb->iotag,
			 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
			 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
			 vport->port_state, vport->fc_flag);

	if (vport->port_state == LPFC_FLOGI) {
		/*
		 * If Common Service Parameters indicate Nport
		 * we are point to point, if Fport we are Fabric.
		 */
		if (sp->cmn.fPort)
			rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
		else if (!(phba->hba_flag & HBA_FCOE_MODE))
			rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
		else {
			lpfc_printf_vlog(vport, KERN_ERR,
				LOG_FIP | LOG_ELS,
				"2831 FLOGI response with cleared Fabric "
				"bit fcf_index 0x%x "
				"Switch Name %02x%02x%02x%02x%02x%02x%02x%02x "
				"Fabric Name "
				"%02x%02x%02x%02x%02x%02x%02x%02x\n",
				phba->fcf.current_rec.fcf_indx,
				phba->fcf.current_rec.switch_name[0],
				phba->fcf.current_rec.switch_name[1],
				phba->fcf.current_rec.switch_name[2],
				phba->fcf.current_rec.switch_name[3],
				phba->fcf.current_rec.switch_name[4],
				phba->fcf.current_rec.switch_name[5],
				phba->fcf.current_rec.switch_name[6],
				phba->fcf.current_rec.switch_name[7],
				phba->fcf.current_rec.fabric_name[0],
				phba->fcf.current_rec.fabric_name[1],
				phba->fcf.current_rec.fabric_name[2],
				phba->fcf.current_rec.fabric_name[3],
				phba->fcf.current_rec.fabric_name[4],
				phba->fcf.current_rec.fabric_name[5],
				phba->fcf.current_rec.fabric_name[6],
				phba->fcf.current_rec.fabric_name[7]);
			lpfc_nlp_put(ndlp);
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
			spin_unlock_irq(&phba->hbalock);
			goto out;
		}
		if (!rc) {
			/* Mark the FCF discovery process done */
			if (phba->hba_flag & HBA_FIP_SUPPORT)
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
						 LOG_ELS,
						 "2769 FLOGI to FCF (x%x) "
						 "completed successfully\n",
						 phba->fcf.current_rec.fcf_indx);
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
			spin_unlock_irq(&phba->hbalock);
			goto out;
		}
	}

flogifail:
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
	spin_unlock_irq(&phba->hbalock);

	lpfc_nlp_put(ndlp);

	if (!lpfc_error_lost_link(irsp)) {
		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(vport);

		/* Start discovery */
		lpfc_disc_start(vport);
	} else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
			(((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
			 IOERR_SLI_ABORTED) &&
			((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
			 IOERR_SLI_DOWN))) &&
			(phba->link_state != LPFC_CLEAR_LA)) {
		/* If FLOGI failed enable link interrupt. */
		lpfc_issue_clear_la(phba, vport);
	}
out:
	lpfc_els_free_iocb(phba, cmdiocb);
}

/**
 * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @retry: number of retries to the command IOCB.
 *
 * This routine issues a Fabric Login (FLOGI) Request ELS command
 * for a @vport. The initiator service parameters are put into the payload
 * of the FLOGI Request IOCB and the top-level callback function pointer
 * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback
 * function field. The lpfc_issue_fabric_iocb routine is invoked to send
 * out FLOGI ELS command with one outstanding fabric IOCB at a time.
 *
 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 * will be stored into the context1 field of the IOCB for the completion
 * callback function to the FLOGI ELS command.
 *
 * Return code
 *   0 - successfully issued flogi iocb for @vport
 *   1 - failed to issue flogi iocb for @vport
 **/
static int
lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		     uint8_t retry)
{
	struct lpfc_hba  *phba = vport->phba;
	struct serv_parm *sp;
	IOCB_t *icmd;
	struct lpfc_iocbq *elsiocb;
	uint8_t *pcmd;
	uint16_t cmdsize;
	uint32_t tmo;
	int rc;

	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
				     ndlp->nlp_DID, ELS_CMD_FLOGI);

	if (!elsiocb)
		return 1;

	icmd = &elsiocb->iocb;
	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);

	/* For FLOGI request, remainder of payload is service parameters */
	*((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
	pcmd += sizeof(uint32_t);
	memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
	sp = (struct serv_parm *) pcmd;

	/* Setup CSPs accordingly for Fabric */
	sp->cmn.e_d_tov = 0;
	sp->cmn.w2.r_a_tov = 0;
	sp->cmn.virtual_fabric_support = 0;
	sp->cls1.classValid = 0;
	if (sp->cmn.fcphLow < FC_PH3)
		sp->cmn.fcphLow = FC_PH3;
	if (sp->cmn.fcphHigh < FC_PH3)
		sp->cmn.fcphHigh = FC_PH3;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
		    LPFC_SLI_INTF_IF_TYPE_0) {
			elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
			elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
			/* FLOGI needs to be 3 for WQE FCFI */
			/* Set the fcfi to the fcfi we registered with */
			elsiocb->iocb.ulpContext = phba->fcf.fcfi;
		}
		/* Can't do SLI4 class2 without support sequence coalescing */
		sp->cls2.classValid = 0;
		sp->cls2.seqDelivery = 0;
	} else {
		/* Historical, setting sequential-delivery bit for SLI3 */
		sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0;
		sp->cls3.seqDelivery = (sp->cls3.classValid) ? 1 : 0;
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
			sp->cmn.request_multiple_Nport = 1;
			/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
			icmd->ulpCt_h = 1;
			icmd->ulpCt_l = 0;
		} else
			sp->cmn.request_multiple_Nport = 0;
	}

	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
		icmd->un.elsreq64.myID = 0;
		icmd->un.elsreq64.fl = 1;
	}

	tmo = phba->fc_ratov;
	phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
	lpfc_set_disctmo(vport);
	phba->fc_ratov = tmo;

	phba->fc_stat.elsXmitFLOGI++;
	elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Issue FLOGI: opt:x%x",
		phba->sli3_options, 0, 0);

	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
	if (rc == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
	}
	return 0;
}

/**
 * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs
 * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq
 * list and issues an abort IOCB command on each outstanding IOCB that
 * contains an active Fabric_DID ndlp. Note that this function is to issue
 * the abort IOCB command on all the outstanding IOCBs, thus when this
 * function returns, it does not guarantee all the IOCBs are actually aborted.
 *
 * Return code
 *   0 - Successfully issued abort iocb on all outstanding flogis (Always 0)
 **/
int
lpfc_els_abort_flogi(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	struct lpfc_nodelist *ndlp;
	IOCB_t *icmd;

	/* Abort outstanding I/O on NPort <nlp_DID> */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"0201 Abort outstanding I/O on NPort x%x\n",
			Fabric_DID);

	pring = lpfc_phba_elsring(phba);

	/*
	 * Check the txcmplq for an iocb that matches the nport the driver is
	 * searching for.
	 */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		icmd = &iocb->iocb;
		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
			ndlp = (struct lpfc_nodelist *)(iocb->context1);
			if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
			    (ndlp->nlp_DID == Fabric_DID))
				lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		}
	}
	spin_unlock_irq(&phba->hbalock);

	return 0;
}

/**
 * lpfc_initial_flogi - Issue an initial fabric login for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues an initial Fabric Login (FLOGI) for the @vport
 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
 * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and
 * put it into the @vport's ndlp list. If an inactive ndlp found on the list,
 * it will just be enabled and made active. The lpfc_issue_els_flogi() routine
 * is then invoked with the @vport and the ndlp to perform the FLOGI for the
 * @vport.
 *
 * Return code
 *   0 - failed to issue initial flogi for @vport
 *   1 - successfully issued initial flogi for @vport
 **/
int
lpfc_initial_flogi(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	vport->port_state = LPFC_FLOGI;
	lpfc_set_disctmo(vport);

	/* First look for the Fabric ndlp */
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = lpfc_nlp_init(vport, Fabric_DID);
		if (!ndlp)
			return 0;
		/* Set the node type */
		ndlp->nlp_type |= NLP_FABRIC;
		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		/* re-setup ndlp without removing from node list */
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
		if (!ndlp)
			return 0;
	}

	if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
		/* This decrement of reference count to node shall kick off
		 * the release of the node.
		 */
		lpfc_nlp_put(ndlp);
		return 0;
	}
	return 1;
}

/**
 * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues an initial Fabric Discover (FDISC) for the @vport
 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
 * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and
 * put it into the @vport's ndlp list. If an inactive ndlp found on the list,
 * it will just be enabled and made active. The lpfc_issue_els_fdisc() routine
 * is then invoked with the @vport and the ndlp to perform the FDISC for the
 * @vport.
 *
 * Return code
 *   0 - failed to issue initial fdisc for @vport
 *   1 - successfully issued initial fdisc for @vport
 **/
int
lpfc_initial_fdisc(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	/* First look for the Fabric ndlp */
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = lpfc_nlp_init(vport, Fabric_DID);
		if (!ndlp)
			return 0;
		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		/* re-setup ndlp without removing from node list */
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
		if (!ndlp)
			return 0;
	}

	if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
		/* decrement node reference count to trigger the release of
		 * the node.
		 */
		lpfc_nlp_put(ndlp);
		return 0;
	}
	return 1;
}

/**
 * lpfc_more_plogi - Check and issue remaining plogis for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine checks whether there are more remaining Port Logins
 * (PLOGI) to be issued for the @vport. If so, it will invoke the routine
 * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes
 * to issue ELS PLOGIs up to the configured discover threads with the
 * @vport (@vport->cfg_discovery_threads). The function also decrements
 * the @vport's num_disc_nodes by 1 if it is not already 0.
 **/
void
lpfc_more_plogi(struct lpfc_vport *vport)
{
	if (vport->num_disc_nodes)
		vport->num_disc_nodes--;

	/* Continue discovery with <num_disc_nodes> PLOGIs to go */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0232 Continue discovery with %d PLOGIs to go "
			 "Data: x%x x%x x%x\n",
			 vport->num_disc_nodes, vport->fc_plogi_cnt,
			 vport->fc_flag, vport->port_state);
	/* Check to see if there are more PLOGIs to be sent */
	if (vport->fc_flag & FC_NLP_MORE)
		/* go thru NPR nodes and issue any remaining ELS PLOGIs */
		lpfc_els_disc_plogi(vport);

	return;
}

/**
 * lpfc_plogi_confirm_nport - Confirm plogi wwpn matches stored ndlp
 * @phba: pointer to lpfc hba data structure.
 * @prsp: pointer to response IOCB payload.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine checks and indicates whether the WWPN of an N_Port, retrieved
 * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_Port.
 * The following cases are considered N_Port confirmed:
 * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on vport list and matches
 * the WWPN of the N_Port logged into; 3) The @ndlp is not on vport list but
 * it does not have WWPN assigned either. If the WWPN is confirmed, the
 * pointer to the @ndlp will be returned. If the WWPN is not confirmed:
 * 1) if there is a node on vport list other than the @ndlp with the same
 * WWPN of the N_Port PLOGI logged into, the lpfc_unreg_rpi() will be invoked
 * on that node to release the RPI associated with the node; 2) if there is
 * no node found on vport list with the same WWPN of the N_Port PLOGI logged
 * into, a new node shall be allocated (or activated). In either case, the
 * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall
 * be released and the new_ndlp shall be put on to the vport node list and
 * its pointer returned as the confirmed node.
 *
 * Note that before the @ndlp got "released", the keepDID from the
 * not-matching or inactive "new_ndlp" on the vport node list is assigned
 * to the nlp_DID of the @ndlp. This is because the release of @ndlp is
 * actually to put it into an inactive state on the vport node list and the
 * vport node list management algorithm does not allow two nodes with the
 * same DID.
 *
 * Return code
 *   pointer to the PLOGI N_Port @ndlp
 **/
static struct lpfc_nodelist *
lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
			 struct lpfc_nodelist *ndlp)
{
	struct lpfc_vport *vport = ndlp->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *new_ndlp;
	struct lpfc_rport_data *rdata;
	struct fc_rport *rport;
	struct serv_parm *sp;
	uint8_t name[sizeof(struct lpfc_name)];
	uint32_t rc, keepDID = 0, keep_nlp_flag = 0;
	uint16_t keep_nlp_state;
	struct lpfc_nvme_rport *keep_nrport = NULL;
	int put_node;
	int put_rport;
	unsigned long *active_rrqs_xri_bitmap = NULL;

	/* Fabric nodes can have the same WWPN so we don't bother searching
	 * by WWPN.  Just return the ndlp that was given to us.
	 */
	if (ndlp->nlp_type & NLP_FABRIC)
		return ndlp;

	sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t));
	memset(name, 0, sizeof(struct lpfc_name));

	/* Now we find out if the NPort we are logging into, matches the WWPN
	 * we have for that ndlp. If not, we have some work to do.
	 */
	new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);

	if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp))
		return ndlp;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool,
						       GFP_KERNEL);
		if (active_rrqs_xri_bitmap)
			memset(active_rrqs_xri_bitmap, 0,
			       phba->cfg_rrq_xri_bitmap_sz);
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "3178 PLOGI confirm: ndlp %p x%x: new_ndlp %p\n",
			 ndlp, ndlp->nlp_DID, new_ndlp);

	if (!new_ndlp) {
		rc = memcmp(&ndlp->nlp_portname, name,
			    sizeof(struct lpfc_name));
		if (!rc) {
			if (active_rrqs_xri_bitmap)
				mempool_free(active_rrqs_xri_bitmap,
					     phba->active_rrq_pool);
			return ndlp;
		}
		new_ndlp = lpfc_nlp_init(vport, ndlp->nlp_DID);
		if (!new_ndlp) {
			if (active_rrqs_xri_bitmap)
				mempool_free(active_rrqs_xri_bitmap,
					     phba->active_rrq_pool);
			return ndlp;
		}
	} else if (!NLP_CHK_NODE_ACT(new_ndlp)) {
		rc = memcmp(&ndlp->nlp_portname, name,
			    sizeof(struct lpfc_name));
		if (!rc) {
			if (active_rrqs_xri_bitmap)
				mempool_free(active_rrqs_xri_bitmap,
					     phba->active_rrq_pool);
			return ndlp;
		}
		new_ndlp = lpfc_enable_node(vport, new_ndlp,
					    NLP_STE_UNUSED_NODE);
		if (!new_ndlp) {
			if (active_rrqs_xri_bitmap)
				mempool_free(active_rrqs_xri_bitmap,
					     phba->active_rrq_pool);
			return ndlp;
		}
		keepDID = new_ndlp->nlp_DID;
		if ((phba->sli_rev == LPFC_SLI_REV4) && active_rrqs_xri_bitmap)
			memcpy(active_rrqs_xri_bitmap,
			       new_ndlp->active_rrqs_xri_bitmap,
			       phba->cfg_rrq_xri_bitmap_sz);
	} else {
		keepDID = new_ndlp->nlp_DID;
		if (phba->sli_rev == LPFC_SLI_REV4 &&
		    active_rrqs_xri_bitmap)
			memcpy(active_rrqs_xri_bitmap,
			       new_ndlp->active_rrqs_xri_bitmap,
			       phba->cfg_rrq_xri_bitmap_sz);
	}

	lpfc_unreg_rpi(vport, new_ndlp);
	new_ndlp->nlp_DID = ndlp->nlp_DID;
	new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
	if (phba->sli_rev == LPFC_SLI_REV4)
		memcpy(new_ndlp->active_rrqs_xri_bitmap,
		       ndlp->active_rrqs_xri_bitmap,
		       phba->cfg_rrq_xri_bitmap_sz);

	spin_lock_irq(shost->host_lock);
	keep_nlp_flag = new_ndlp->nlp_flag;
	new_ndlp->nlp_flag = ndlp->nlp_flag;
	ndlp->nlp_flag = keep_nlp_flag;
	spin_unlock_irq(shost->host_lock);

	/* Set nlp_states accordingly */
	keep_nlp_state = new_ndlp->nlp_state;
	lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state);

	/* interchange the nvme remoteport structs */
	keep_nrport = new_ndlp->nrport;
	new_ndlp->nrport = ndlp->nrport;

	/* Move this back to NPR state */
	if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
		/* The new_ndlp is replacing ndlp totally, so we need
		 * to put ndlp on UNUSED list and try to free it.
1636 */ 1637 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1638 "3179 PLOGI confirm NEW: %x %x\n", 1639 new_ndlp->nlp_DID, keepDID); 1640 1641 /* Fix up the rport accordingly */ 1642 rport = ndlp->rport; 1643 if (rport) { 1644 rdata = rport->dd_data; 1645 if (rdata->pnode == ndlp) { 1646 /* break the link before dropping the ref */ 1647 ndlp->rport = NULL; 1648 lpfc_nlp_put(ndlp); 1649 rdata->pnode = lpfc_nlp_get(new_ndlp); 1650 new_ndlp->rport = rport; 1651 } 1652 new_ndlp->nlp_type = ndlp->nlp_type; 1653 } 1654 1655 /* Fix up the nvme rport */ 1656 if (ndlp->nrport) { 1657 ndlp->nrport = NULL; 1658 lpfc_nlp_put(ndlp); 1659 } 1660 1661 /* We shall actually free the ndlp with both nlp_DID and 1662 * nlp_portname fields equals 0 to avoid any ndlp on the 1663 * nodelist never to be used. 1664 */ 1665 if (ndlp->nlp_DID == 0) { 1666 spin_lock_irq(&phba->ndlp_lock); 1667 NLP_SET_FREE_REQ(ndlp); 1668 spin_unlock_irq(&phba->ndlp_lock); 1669 } 1670 1671 /* Two ndlps cannot have the same did on the nodelist */ 1672 ndlp->nlp_DID = keepDID; 1673 if (phba->sli_rev == LPFC_SLI_REV4 && 1674 active_rrqs_xri_bitmap) 1675 memcpy(ndlp->active_rrqs_xri_bitmap, 1676 active_rrqs_xri_bitmap, 1677 phba->cfg_rrq_xri_bitmap_sz); 1678 1679 if (!NLP_CHK_NODE_ACT(ndlp)) 1680 lpfc_drop_node(vport, ndlp); 1681 } 1682 else { 1683 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1684 "3180 PLOGI confirm SWAP: %x %x\n", 1685 new_ndlp->nlp_DID, keepDID); 1686 1687 lpfc_unreg_rpi(vport, ndlp); 1688 1689 /* Two ndlps cannot have the same did */ 1690 ndlp->nlp_DID = keepDID; 1691 if (phba->sli_rev == LPFC_SLI_REV4 && 1692 active_rrqs_xri_bitmap) 1693 memcpy(ndlp->active_rrqs_xri_bitmap, 1694 active_rrqs_xri_bitmap, 1695 phba->cfg_rrq_xri_bitmap_sz); 1696 1697 /* Since we are switching over to the new_ndlp, 1698 * reset the old ndlp state 1699 */ 1700 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || 1701 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) 1702 keep_nlp_state = NLP_STE_NPR_NODE; 1703 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state); 1704 1705 /* Previous ndlp no longer active with nvme host transport. 1706 * Remove reference from earlier registration unless the 1707 * nvme host took care of it. 1708 */ 1709 if (ndlp->nrport) 1710 lpfc_nlp_put(ndlp); 1711 ndlp->nrport = keep_nrport; 1712 1713 /* Fix up the rport accordingly */ 1714 rport = ndlp->rport; 1715 if (rport) { 1716 rdata = rport->dd_data; 1717 put_node = rdata->pnode != NULL; 1718 put_rport = ndlp->rport != NULL; 1719 rdata->pnode = NULL; 1720 ndlp->rport = NULL; 1721 if (put_node) 1722 lpfc_nlp_put(ndlp); 1723 if (put_rport) 1724 put_device(&rport->dev); 1725 } 1726 } 1727 if (phba->sli_rev == LPFC_SLI_REV4 && 1728 active_rrqs_xri_bitmap) 1729 mempool_free(active_rrqs_xri_bitmap, 1730 phba->active_rrq_pool); 1731 return new_ndlp; 1732 } 1733 1734 /** 1735 * lpfc_end_rscn - Check and handle more rscn for a vport 1736 * @vport: pointer to a host virtual N_Port data structure. 1737 * 1738 * This routine checks whether more Registration State Change 1739 * Notifications (RSCNs) came in while the discovery state machine was in 1740 * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be 1741 * invoked to handle the additional RSCNs for the @vport. Otherwise, the 1742 * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of 1743 * handling the RSCNs. 
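 *
 * Typical call site (an illustrative sketch only, mirroring the discovery
 * completion paths elsewhere in this file): the routine is invoked once the
 * count of outstanding discovery nodes drains to zero, e.g.
 *
 *	if (vport->num_disc_nodes == 0) {
 *		lpfc_can_disctmo(vport);
 *		lpfc_end_rscn(vport);
 *	}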
1744 **/ 1745 void 1746 lpfc_end_rscn(struct lpfc_vport *vport) 1747 { 1748 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1749 1750 if (vport->fc_flag & FC_RSCN_MODE) { 1751 /* 1752 * Check to see if more RSCNs came in while we were 1753 * processing this one. 1754 */ 1755 if (vport->fc_rscn_id_cnt || 1756 (vport->fc_flag & FC_RSCN_DISCOVERY) != 0) 1757 lpfc_els_handle_rscn(vport); 1758 else { 1759 spin_lock_irq(shost->host_lock); 1760 vport->fc_flag &= ~FC_RSCN_MODE; 1761 spin_unlock_irq(shost->host_lock); 1762 } 1763 } 1764 } 1765 1766 /** 1767 * lpfc_cmpl_els_rrq - Completion handled for els RRQs. 1768 * @phba: pointer to lpfc hba data structure. 1769 * @cmdiocb: pointer to lpfc command iocb data structure. 1770 * @rspiocb: pointer to lpfc response iocb data structure. 1771 * 1772 * This routine will call the clear rrq function to free the rrq and 1773 * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not 1774 * exist then the clear_rrq is still called because the rrq needs to 1775 * be freed. 1776 **/ 1777 1778 static void 1779 lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1780 struct lpfc_iocbq *rspiocb) 1781 { 1782 struct lpfc_vport *vport = cmdiocb->vport; 1783 IOCB_t *irsp; 1784 struct lpfc_nodelist *ndlp; 1785 struct lpfc_node_rrq *rrq; 1786 1787 /* we pass cmdiocb to state machine which needs rspiocb as well */ 1788 rrq = cmdiocb->context_un.rrq; 1789 cmdiocb->context_un.rsp_iocb = rspiocb; 1790 1791 irsp = &rspiocb->iocb; 1792 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1793 "RRQ cmpl: status:x%x/x%x did:x%x", 1794 irsp->ulpStatus, irsp->un.ulpWord[4], 1795 irsp->un.elsreq64.remoteID); 1796 1797 ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID); 1798 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || ndlp != rrq->ndlp) { 1799 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 1800 "2882 RRQ completes to NPort x%x " 1801 "with no ndlp. Data: x%x x%x x%x\n", 1802 irsp->un.elsreq64.remoteID, 1803 irsp->ulpStatus, irsp->un.ulpWord[4], 1804 irsp->ulpIoTag); 1805 goto out; 1806 } 1807 1808 /* rrq completes to NPort <nlp_DID> */ 1809 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1810 "2880 RRQ completes to NPort x%x " 1811 "Data: x%x x%x x%x x%x x%x\n", 1812 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 1813 irsp->ulpTimeout, rrq->xritag, rrq->rxid); 1814 1815 if (irsp->ulpStatus) { 1816 /* Check for retry */ 1817 /* RRQ failed Don't print the vport to vport rjts */ 1818 if (irsp->ulpStatus != IOSTAT_LS_RJT || 1819 (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) && 1820 ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) || 1821 (phba)->pport->cfg_log_verbose & LOG_ELS) 1822 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 1823 "2881 RRQ failure DID:%06X Status:x%x/x%x\n", 1824 ndlp->nlp_DID, irsp->ulpStatus, 1825 irsp->un.ulpWord[4]); 1826 } 1827 out: 1828 if (rrq) 1829 lpfc_clr_rrq_active(phba, rrq->xritag, rrq); 1830 lpfc_els_free_iocb(phba, cmdiocb); 1831 return; 1832 } 1833 /** 1834 * lpfc_cmpl_els_plogi - Completion callback function for plogi 1835 * @phba: pointer to lpfc hba data structure. 1836 * @cmdiocb: pointer to lpfc command iocb data structure. 1837 * @rspiocb: pointer to lpfc response iocb data structure. 1838 * 1839 * This routine is the completion callback function for issuing the Port 1840 * Login (PLOGI) command. For PLOGI completion, there must be an active 1841 * ndlp on the vport node list that matches the remote node ID from the 1842 * PLOGI response IOCB. 
If such an ndlp does not exist, the PLOGI is simply 1843 * ignored and the command IOCB is released. The PLOGI response IOCB status is 1844 * checked for error conditions. If an error status is reported, a PLOGI 1845 * retry shall be attempted by invoking the lpfc_els_retry() routine. 1846 * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on 1847 * the ndlp and the NLP_EVT_CMPL_PLOGI event is posted to the Discover State Machine 1848 * (DSM) for this PLOGI completion. Finally, it checks whether 1849 * there are additional N_Port nodes on the vport that need to perform 1850 * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue additional 1851 * PLOGIs. 1852 **/ 1853 static void 1854 lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1855 struct lpfc_iocbq *rspiocb) 1856 { 1857 struct lpfc_vport *vport = cmdiocb->vport; 1858 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1859 IOCB_t *irsp; 1860 struct lpfc_nodelist *ndlp; 1861 struct lpfc_dmabuf *prsp; 1862 int disc, rc; 1863 1864 /* we pass cmdiocb to state machine which needs rspiocb as well */ 1865 cmdiocb->context_un.rsp_iocb = rspiocb; 1866 1867 irsp = &rspiocb->iocb; 1868 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1869 "PLOGI cmpl: status:x%x/x%x did:x%x", 1870 irsp->ulpStatus, irsp->un.ulpWord[4], 1871 irsp->un.elsreq64.remoteID); 1872 1873 ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID); 1874 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { 1875 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 1876 "0136 PLOGI completes to NPort x%x " 1877 "with no ndlp. Data: x%x x%x x%x\n", 1878 irsp->un.elsreq64.remoteID, 1879 irsp->ulpStatus, irsp->un.ulpWord[4], 1880 irsp->ulpIoTag); 1881 goto out; 1882 } 1883 1884 /* Since ndlp can be freed in the disc state machine, note if this node 1885 * is being used during discovery.
1886 */ 1887 spin_lock_irq(shost->host_lock); 1888 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 1889 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 1890 spin_unlock_irq(shost->host_lock); 1891 rc = 0; 1892 1893 /* PLOGI completes to NPort <nlp_DID> */ 1894 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1895 "0102 PLOGI completes to NPort x%06x " 1896 "Data: x%x x%x x%x x%x x%x\n", 1897 ndlp->nlp_DID, ndlp->nlp_fc4_type, 1898 irsp->ulpStatus, irsp->un.ulpWord[4], 1899 disc, vport->num_disc_nodes); 1900 1901 /* Check to see if link went down during discovery */ 1902 if (lpfc_els_chk_latt(vport)) { 1903 spin_lock_irq(shost->host_lock); 1904 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 1905 spin_unlock_irq(shost->host_lock); 1906 goto out; 1907 } 1908 1909 if (irsp->ulpStatus) { 1910 /* Check for retry */ 1911 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 1912 /* ELS command is being retried */ 1913 if (disc) { 1914 spin_lock_irq(shost->host_lock); 1915 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 1916 spin_unlock_irq(shost->host_lock); 1917 } 1918 goto out; 1919 } 1920 /* PLOGI failed Don't print the vport to vport rjts */ 1921 if (irsp->ulpStatus != IOSTAT_LS_RJT || 1922 (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) && 1923 ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) || 1924 (phba)->pport->cfg_log_verbose & LOG_ELS) 1925 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 1926 "2753 PLOGI failure DID:%06X Status:x%x/x%x\n", 1927 ndlp->nlp_DID, irsp->ulpStatus, 1928 irsp->un.ulpWord[4]); 1929 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 1930 if (lpfc_error_lost_link(irsp)) 1931 rc = NLP_STE_FREED_NODE; 1932 else 1933 rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1934 NLP_EVT_CMPL_PLOGI); 1935 } else { 1936 /* Good status, call state machine */ 1937 prsp = list_entry(((struct lpfc_dmabuf *) 1938 cmdiocb->context2)->list.next, 1939 struct lpfc_dmabuf, list); 1940 ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp); 1941 rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1942 NLP_EVT_CMPL_PLOGI); 1943 } 1944 1945 if (disc && vport->num_disc_nodes) { 1946 /* Check to see if there are more PLOGIs to be sent */ 1947 lpfc_more_plogi(vport); 1948 1949 if (vport->num_disc_nodes == 0) { 1950 spin_lock_irq(shost->host_lock); 1951 vport->fc_flag &= ~FC_NDISC_ACTIVE; 1952 spin_unlock_irq(shost->host_lock); 1953 1954 lpfc_can_disctmo(vport); 1955 lpfc_end_rscn(vport); 1956 } 1957 } 1958 1959 out: 1960 lpfc_els_free_iocb(phba, cmdiocb); 1961 return; 1962 } 1963 1964 /** 1965 * lpfc_issue_els_plogi - Issue an plogi iocb command for a vport 1966 * @vport: pointer to a host virtual N_Port data structure. 1967 * @did: destination port identifier. 1968 * @retry: number of retries to the command IOCB. 1969 * 1970 * This routine issues a Port Login (PLOGI) command to a remote N_Port 1971 * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port, 1972 * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list. 1973 * This routine constructs the proper feilds of the PLOGI IOCB and invokes 1974 * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command. 1975 * 1976 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 1977 * will be incremented by 1 for holding the ndlp and the reference to ndlp 1978 * will be stored into the context1 field of the IOCB for the completion 1979 * callback function to the PLOGI ELS command. 
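 *
 * A minimal caller-side sketch (illustrative only; real callers include the
 * discovery state machine and the delayed-retry handler later in this file):
 *
 *	if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) {
 *		ndlp->nlp_prev_state = ndlp->nlp_state;
 *		lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
 *	}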
1980 * 1981 * Return code 1982 * 0 - Successfully issued a plogi for @vport 1983 * 1 - failed to issue a plogi for @vport 1984 **/ 1985 int 1986 lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) 1987 { 1988 struct lpfc_hba *phba = vport->phba; 1989 struct Scsi_Host *shost; 1990 struct serv_parm *sp; 1991 struct lpfc_nodelist *ndlp; 1992 struct lpfc_iocbq *elsiocb; 1993 uint8_t *pcmd; 1994 uint16_t cmdsize; 1995 int ret; 1996 1997 ndlp = lpfc_findnode_did(vport, did); 1998 if (ndlp && !NLP_CHK_NODE_ACT(ndlp)) 1999 ndlp = NULL; 2000 2001 /* If ndlp is not NULL, we will bump the reference count on it */ 2002 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 2003 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 2004 ELS_CMD_PLOGI); 2005 if (!elsiocb) 2006 return 1; 2007 2008 shost = lpfc_shost_from_vport(vport); 2009 spin_lock_irq(shost->host_lock); 2010 ndlp->nlp_flag &= ~NLP_FCP_PRLI_RJT; 2011 spin_unlock_irq(shost->host_lock); 2012 2013 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2014 2015 /* For PLOGI request, remainder of payload is service parameters */ 2016 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI; 2017 pcmd += sizeof(uint32_t); 2018 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); 2019 sp = (struct serv_parm *) pcmd; 2020 2021 /* 2022 * If we are a N-port connected to a Fabric, fix-up paramm's so logins 2023 * to device on remote loops work. 2024 */ 2025 if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP)) 2026 sp->cmn.altBbCredit = 1; 2027 2028 if (sp->cmn.fcphLow < FC_PH_4_3) 2029 sp->cmn.fcphLow = FC_PH_4_3; 2030 2031 if (sp->cmn.fcphHigh < FC_PH3) 2032 sp->cmn.fcphHigh = FC_PH3; 2033 2034 sp->cmn.valid_vendor_ver_level = 0; 2035 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); 2036 sp->cmn.bbRcvSizeMsb &= 0xF; 2037 2038 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2039 "Issue PLOGI: did:x%x", 2040 did, 0, 0); 2041 2042 /* If our firmware supports this feature, convey that 2043 * information to the target using the vendor specific field. 2044 */ 2045 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) { 2046 sp->cmn.valid_vendor_ver_level = 1; 2047 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID); 2048 sp->un.vv.flags = cpu_to_be32(LPFC_VV_SUPPRESS_RSP); 2049 } 2050 2051 phba->fc_stat.elsXmitPLOGI++; 2052 elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi; 2053 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2054 2055 if (ret == IOCB_ERROR) { 2056 lpfc_els_free_iocb(phba, elsiocb); 2057 return 1; 2058 } 2059 return 0; 2060 } 2061 2062 /** 2063 * lpfc_cmpl_els_prli - Completion callback function for prli 2064 * @phba: pointer to lpfc hba data structure. 2065 * @cmdiocb: pointer to lpfc command iocb data structure. 2066 * @rspiocb: pointer to lpfc response iocb data structure. 2067 * 2068 * This routine is the completion callback function for a Process Login 2069 * (PRLI) ELS command. The PRLI response IOCB status is checked for error 2070 * status. If there is error status reported, PRLI retry shall be attempted 2071 * by invoking the lpfc_els_retry() routine. Otherwise, the state 2072 * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this 2073 * ndlp to mark the PRLI completion. 
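 *
 * This callback is never invoked directly; it is attached to the PRLI IOCB
 * by lpfc_issue_els_prli() before the command is posted, roughly:
 *
 *	elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
 *	lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);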
2074 **/ 2075 static void 2076 lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2077 struct lpfc_iocbq *rspiocb) 2078 { 2079 struct lpfc_vport *vport = cmdiocb->vport; 2080 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2081 IOCB_t *irsp; 2082 struct lpfc_nodelist *ndlp; 2083 2084 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2085 cmdiocb->context_un.rsp_iocb = rspiocb; 2086 2087 irsp = &(rspiocb->iocb); 2088 ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 2089 spin_lock_irq(shost->host_lock); 2090 ndlp->nlp_flag &= ~NLP_PRLI_SND; 2091 spin_unlock_irq(shost->host_lock); 2092 2093 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2094 "PRLI cmpl: status:x%x/x%x did:x%x", 2095 irsp->ulpStatus, irsp->un.ulpWord[4], 2096 ndlp->nlp_DID); 2097 2098 /* Ddriver supports multiple FC4 types. Counters matter. */ 2099 vport->fc_prli_sent--; 2100 2101 /* PRLI completes to NPort <nlp_DID> */ 2102 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2103 "0103 PRLI completes to NPort x%06x " 2104 "Data: x%x x%x x%x x%x\n", 2105 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 2106 vport->num_disc_nodes, ndlp->fc4_prli_sent); 2107 2108 /* Check to see if link went down during discovery */ 2109 if (lpfc_els_chk_latt(vport)) 2110 goto out; 2111 2112 if (irsp->ulpStatus) { 2113 /* Check for retry */ 2114 ndlp->fc4_prli_sent--; 2115 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2116 /* ELS command is being retried */ 2117 goto out; 2118 } 2119 2120 /* PRLI failed */ 2121 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 2122 "2754 PRLI failure DID:%06X Status:x%x/x%x, " 2123 "data: x%x\n", 2124 ndlp->nlp_DID, irsp->ulpStatus, 2125 irsp->un.ulpWord[4], ndlp->fc4_prli_sent); 2126 2127 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2128 if (lpfc_error_lost_link(irsp)) 2129 goto out; 2130 else 2131 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2132 NLP_EVT_CMPL_PRLI); 2133 } else 2134 /* Good status, call state machine. However, if another 2135 * PRLI is outstanding, don't call the state machine 2136 * because final disposition to Mapped or Unmapped is 2137 * completed there. 2138 */ 2139 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2140 NLP_EVT_CMPL_PRLI); 2141 2142 out: 2143 lpfc_els_free_iocb(phba, cmdiocb); 2144 return; 2145 } 2146 2147 /** 2148 * lpfc_issue_els_prli - Issue a prli iocb command for a vport 2149 * @vport: pointer to a host virtual N_Port data structure. 2150 * @ndlp: pointer to a node-list data structure. 2151 * @retry: number of retries to the command IOCB. 2152 * 2153 * This routine issues a Process Login (PRLI) ELS command for the 2154 * @vport. The PRLI service parameters are set up in the payload of the 2155 * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine 2156 * is put to the IOCB completion callback func field before invoking the 2157 * routine lpfc_sli_issue_iocb() to send out PRLI command. 2158 * 2159 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 2160 * will be incremented by 1 for holding the ndlp and the reference to ndlp 2161 * will be stored into the context1 field of the IOCB for the completion 2162 * callback function to the PRLI ELS command. 
2163 * 2164 * Return code 2165 * 0 - successfully issued prli iocb command for @vport 2166 * 1 - failed to issue prli iocb command for @vport 2167 **/ 2168 int 2169 lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2170 uint8_t retry) 2171 { 2172 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2173 struct lpfc_hba *phba = vport->phba; 2174 PRLI *npr; 2175 struct lpfc_nvme_prli *npr_nvme; 2176 struct lpfc_iocbq *elsiocb; 2177 uint8_t *pcmd; 2178 uint16_t cmdsize; 2179 u32 local_nlp_type, elscmd; 2180 2181 /* 2182 * If we are in RSCN mode, the FC4 types supported from a 2183 * previous GFT_ID command may not be accurate. So, if we 2184 * are a NVME Initiator, always look for the possibility of 2185 * the remote NPort beng a NVME Target. 2186 */ 2187 if (phba->sli_rev == LPFC_SLI_REV4 && 2188 vport->fc_flag & FC_RSCN_MODE && 2189 vport->nvmei_support) 2190 ndlp->nlp_fc4_type |= NLP_FC4_NVME; 2191 local_nlp_type = ndlp->nlp_fc4_type; 2192 2193 send_next_prli: 2194 if (local_nlp_type & NLP_FC4_FCP) { 2195 /* Payload is 4 + 16 = 20 x14 bytes. */ 2196 cmdsize = (sizeof(uint32_t) + sizeof(PRLI)); 2197 elscmd = ELS_CMD_PRLI; 2198 } else if (local_nlp_type & NLP_FC4_NVME) { 2199 /* Payload is 4 + 20 = 24 x18 bytes. */ 2200 cmdsize = (sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli)); 2201 elscmd = ELS_CMD_NVMEPRLI; 2202 } else { 2203 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2204 "3083 Unknown FC_TYPE x%x ndlp x%06x\n", 2205 ndlp->nlp_fc4_type, ndlp->nlp_DID); 2206 return 1; 2207 } 2208 2209 /* SLI3 ports don't support NVME. If this rport is a strict NVME 2210 * FC4 type, implicitly LOGO. 2211 */ 2212 if (phba->sli_rev == LPFC_SLI_REV3 && 2213 ndlp->nlp_fc4_type == NLP_FC4_NVME) { 2214 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2215 "3088 Rport fc4 type 0x%x not supported by SLI3 adapter\n", 2216 ndlp->nlp_type); 2217 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 2218 return 1; 2219 } 2220 2221 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2222 ndlp->nlp_DID, elscmd); 2223 if (!elsiocb) 2224 return 1; 2225 2226 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2227 2228 /* For PRLI request, remainder of payload is service parameters */ 2229 memset(pcmd, 0, cmdsize); 2230 2231 if (local_nlp_type & NLP_FC4_FCP) { 2232 /* Remainder of payload is FCP PRLI parameter page. 2233 * Note: this data structure is defined as 2234 * BE/LE in the structure definition so no 2235 * byte swap call is made. 2236 */ 2237 *((uint32_t *)(pcmd)) = ELS_CMD_PRLI; 2238 pcmd += sizeof(uint32_t); 2239 npr = (PRLI *)pcmd; 2240 2241 /* 2242 * If our firmware version is 3.20 or later, 2243 * set the following bits for FC-TAPE support. 2244 */ 2245 if (phba->vpd.rev.feaLevelHigh >= 0x02) { 2246 npr->ConfmComplAllowed = 1; 2247 npr->Retry = 1; 2248 npr->TaskRetryIdReq = 1; 2249 } 2250 npr->estabImagePair = 1; 2251 npr->readXferRdyDis = 1; 2252 if (vport->cfg_first_burst_size) 2253 npr->writeXferRdyDis = 1; 2254 2255 /* For FCP support */ 2256 npr->prliType = PRLI_FCP_TYPE; 2257 npr->initiatorFunc = 1; 2258 elsiocb->iocb_flag |= LPFC_PRLI_FCP_REQ; 2259 2260 /* Remove FCP type - processed. */ 2261 local_nlp_type &= ~NLP_FC4_FCP; 2262 } else if (local_nlp_type & NLP_FC4_NVME) { 2263 /* Remainder of payload is NVME PRLI parameter page. 2264 * This data structure is the newer definition that 2265 * uses bf macros so a byte swap is required. 
2266 */ 2267 *((uint32_t *)(pcmd)) = ELS_CMD_NVMEPRLI; 2268 pcmd += sizeof(uint32_t); 2269 npr_nvme = (struct lpfc_nvme_prli *)pcmd; 2270 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); 2271 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ 2272 2273 /* Only initiators request first burst. */ 2274 if ((phba->cfg_nvme_enable_fb) && 2275 !phba->nvmet_support) 2276 bf_set(prli_fba, npr_nvme, 1); 2277 2278 if (phba->nvmet_support) { 2279 bf_set(prli_tgt, npr_nvme, 1); 2280 bf_set(prli_disc, npr_nvme, 1); 2281 2282 } else { 2283 bf_set(prli_init, npr_nvme, 1); 2284 } 2285 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); 2286 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); 2287 elsiocb->iocb_flag |= LPFC_PRLI_NVME_REQ; 2288 2289 /* Remove NVME type - processed. */ 2290 local_nlp_type &= ~NLP_FC4_NVME; 2291 } 2292 2293 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2294 "Issue PRLI: did:x%x", 2295 ndlp->nlp_DID, 0, 0); 2296 2297 phba->fc_stat.elsXmitPRLI++; 2298 elsiocb->iocb_cmpl = lpfc_cmpl_els_prli; 2299 spin_lock_irq(shost->host_lock); 2300 ndlp->nlp_flag |= NLP_PRLI_SND; 2301 spin_unlock_irq(shost->host_lock); 2302 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == 2303 IOCB_ERROR) { 2304 spin_lock_irq(shost->host_lock); 2305 ndlp->nlp_flag &= ~NLP_PRLI_SND; 2306 spin_unlock_irq(shost->host_lock); 2307 lpfc_els_free_iocb(phba, elsiocb); 2308 return 1; 2309 } 2310 2311 /* The vport counters are used for lpfc_scan_finished, but 2312 * the ndlp is used to track outstanding PRLIs for different 2313 * FC4 types. 2314 */ 2315 vport->fc_prli_sent++; 2316 ndlp->fc4_prli_sent++; 2317 2318 /* The driver supports 2 FC4 types. Make sure 2319 * a PRLI is issued for all types before exiting. 2320 */ 2321 if (phba->sli_rev == LPFC_SLI_REV4 && 2322 local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME)) 2323 goto send_next_prli; 2324 2325 return 0; 2326 } 2327 2328 /** 2329 * lpfc_rscn_disc - Perform rscn discovery for a vport 2330 * @vport: pointer to a host virtual N_Port data structure. 2331 * 2332 * This routine performs Registration State Change Notification (RSCN) 2333 * discovery for a @vport. If the @vport's node port recovery count is not 2334 * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all 2335 * the nodes that need recovery. If none of the PLOGI were needed through 2336 * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be 2337 * invoked to check and handle possible more RSCN came in during the period 2338 * of processing the current ones. 2339 **/ 2340 static void 2341 lpfc_rscn_disc(struct lpfc_vport *vport) 2342 { 2343 lpfc_can_disctmo(vport); 2344 2345 /* RSCN discovery */ 2346 /* go thru NPR nodes and issue ELS PLOGIs */ 2347 if (vport->fc_npr_cnt) 2348 if (lpfc_els_disc_plogi(vport)) 2349 return; 2350 2351 lpfc_end_rscn(vport); 2352 } 2353 2354 /** 2355 * lpfc_adisc_done - Complete the adisc phase of discovery 2356 * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs. 2357 * 2358 * This function is called when the final ADISC is completed during discovery. 2359 * This function handles clearing link attention or issuing reg_vpi depending 2360 * on whether npiv is enabled. This function also kicks off the PLOGI phase of 2361 * discovery. 2362 * This function is called with no locks held. 
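 *
 * It is reached from lpfc_more_adisc() once the count of outstanding ADISCs
 * drains to zero, roughly:
 *
 *	if (!vport->num_disc_nodes)
 *		lpfc_adisc_done(vport);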
2363 **/ 2364 static void 2365 lpfc_adisc_done(struct lpfc_vport *vport) 2366 { 2367 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2368 struct lpfc_hba *phba = vport->phba; 2369 2370 /* 2371 * For NPIV, cmpl_reg_vpi will set port_state to READY, 2372 * and continue discovery. 2373 */ 2374 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 2375 !(vport->fc_flag & FC_RSCN_MODE) && 2376 (phba->sli_rev < LPFC_SLI_REV4)) { 2377 /* The ADISCs are complete. Doesn't matter if they 2378 * succeeded or failed because the ADISC completion 2379 * routine guarantees to call the state machine and 2380 * the RPI is either unregistered (failed ADISC response) 2381 * or the RPI is still valid and the node is marked 2382 * mapped for a target. The exchanges should be in the 2383 * correct state. This code is specific to SLI3. 2384 */ 2385 lpfc_issue_clear_la(phba, vport); 2386 lpfc_issue_reg_vpi(phba, vport); 2387 return; 2388 } 2389 /* 2390 * For SLI2, we need to set port_state to READY 2391 * and continue discovery. 2392 */ 2393 if (vport->port_state < LPFC_VPORT_READY) { 2394 /* If we get here, there is nothing to ADISC */ 2395 lpfc_issue_clear_la(phba, vport); 2396 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) { 2397 vport->num_disc_nodes = 0; 2398 /* go thru NPR list, issue ELS PLOGIs */ 2399 if (vport->fc_npr_cnt) 2400 lpfc_els_disc_plogi(vport); 2401 if (!vport->num_disc_nodes) { 2402 spin_lock_irq(shost->host_lock); 2403 vport->fc_flag &= ~FC_NDISC_ACTIVE; 2404 spin_unlock_irq(shost->host_lock); 2405 lpfc_can_disctmo(vport); 2406 lpfc_end_rscn(vport); 2407 } 2408 } 2409 vport->port_state = LPFC_VPORT_READY; 2410 } else 2411 lpfc_rscn_disc(vport); 2412 } 2413 2414 /** 2415 * lpfc_more_adisc - Issue more adisc as needed 2416 * @vport: pointer to a host virtual N_Port data structure. 2417 * 2418 * This routine determines whether there are more ndlps on a @vport 2419 * node list need to have Address Discover (ADISC) issued. If so, it will 2420 * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's 2421 * remaining nodes which need to have ADISC sent. 2422 **/ 2423 void 2424 lpfc_more_adisc(struct lpfc_vport *vport) 2425 { 2426 if (vport->num_disc_nodes) 2427 vport->num_disc_nodes--; 2428 /* Continue discovery with <num_disc_nodes> ADISCs to go */ 2429 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2430 "0210 Continue discovery with %d ADISCs to go " 2431 "Data: x%x x%x x%x\n", 2432 vport->num_disc_nodes, vport->fc_adisc_cnt, 2433 vport->fc_flag, vport->port_state); 2434 /* Check to see if there are more ADISCs to be sent */ 2435 if (vport->fc_flag & FC_NLP_MORE) { 2436 lpfc_set_disctmo(vport); 2437 /* go thru NPR nodes and issue any remaining ELS ADISCs */ 2438 lpfc_els_disc_adisc(vport); 2439 } 2440 if (!vport->num_disc_nodes) 2441 lpfc_adisc_done(vport); 2442 return; 2443 } 2444 2445 /** 2446 * lpfc_cmpl_els_adisc - Completion callback function for adisc 2447 * @phba: pointer to lpfc hba data structure. 2448 * @cmdiocb: pointer to lpfc command iocb data structure. 2449 * @rspiocb: pointer to lpfc response iocb data structure. 2450 * 2451 * This routine is the completion function for issuing the Address Discover 2452 * (ADISC) command. It first checks to see whether link went down during 2453 * the discovery process. If so, the node will be marked as node port 2454 * recovery for issuing discover IOCB by the link attention handler and 2455 * exit. Otherwise, the response status is checked. 
If error was reported 2456 * in the response status, the ADISC command shall be retried by invoking 2457 * the lpfc_els_retry() routine. Otherwise, if no error was reported in 2458 * the response status, the state machine is invoked to set transition 2459 * with respect to NLP_EVT_CMPL_ADISC event. 2460 **/ 2461 static void 2462 lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2463 struct lpfc_iocbq *rspiocb) 2464 { 2465 struct lpfc_vport *vport = cmdiocb->vport; 2466 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2467 IOCB_t *irsp; 2468 struct lpfc_nodelist *ndlp; 2469 int disc; 2470 2471 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2472 cmdiocb->context_un.rsp_iocb = rspiocb; 2473 2474 irsp = &(rspiocb->iocb); 2475 ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 2476 2477 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2478 "ADISC cmpl: status:x%x/x%x did:x%x", 2479 irsp->ulpStatus, irsp->un.ulpWord[4], 2480 ndlp->nlp_DID); 2481 2482 /* Since ndlp can be freed in the disc state machine, note if this node 2483 * is being used during discovery. 2484 */ 2485 spin_lock_irq(shost->host_lock); 2486 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 2487 ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC); 2488 spin_unlock_irq(shost->host_lock); 2489 /* ADISC completes to NPort <nlp_DID> */ 2490 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2491 "0104 ADISC completes to NPort x%x " 2492 "Data: x%x x%x x%x x%x x%x\n", 2493 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 2494 irsp->ulpTimeout, disc, vport->num_disc_nodes); 2495 /* Check to see if link went down during discovery */ 2496 if (lpfc_els_chk_latt(vport)) { 2497 spin_lock_irq(shost->host_lock); 2498 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2499 spin_unlock_irq(shost->host_lock); 2500 goto out; 2501 } 2502 2503 if (irsp->ulpStatus) { 2504 /* Check for retry */ 2505 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2506 /* ELS command is being retried */ 2507 if (disc) { 2508 spin_lock_irq(shost->host_lock); 2509 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2510 spin_unlock_irq(shost->host_lock); 2511 lpfc_set_disctmo(vport); 2512 } 2513 goto out; 2514 } 2515 /* ADISC failed */ 2516 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 2517 "2755 ADISC failure DID:%06X Status:x%x/x%x\n", 2518 ndlp->nlp_DID, irsp->ulpStatus, 2519 irsp->un.ulpWord[4]); 2520 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2521 if (!lpfc_error_lost_link(irsp)) 2522 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2523 NLP_EVT_CMPL_ADISC); 2524 } else 2525 /* Good status, call state machine */ 2526 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2527 NLP_EVT_CMPL_ADISC); 2528 2529 /* Check to see if there are more ADISCs to be sent */ 2530 if (disc && vport->num_disc_nodes) 2531 lpfc_more_adisc(vport); 2532 out: 2533 lpfc_els_free_iocb(phba, cmdiocb); 2534 return; 2535 } 2536 2537 /** 2538 * lpfc_issue_els_adisc - Issue an address discover iocb to an node on a vport 2539 * @vport: pointer to a virtual N_Port data structure. 2540 * @ndlp: pointer to a node-list data structure. 2541 * @retry: number of retries to the command IOCB. 2542 * 2543 * This routine issues an Address Discover (ADISC) for an @ndlp on a 2544 * @vport. It prepares the payload of the ADISC ELS command, updates the 2545 * and states of the ndlp, and invokes the lpfc_sli_issue_iocb() routine 2546 * to issue the ADISC ELS command. 
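 *
 * A minimal caller-side sketch (illustrative only; the delayed-retry handler
 * later in this file is a real caller):
 *
 *	if (!lpfc_issue_els_adisc(vport, ndlp, retry)) {
 *		ndlp->nlp_prev_state = ndlp->nlp_state;
 *		lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
 *	}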
2547 * 2548 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 2549 * will be incremented by 1 for holding the ndlp and the reference to ndlp 2550 * will be stored into the context1 field of the IOCB for the completion 2551 * callback function to the ADISC ELS command. 2552 * 2553 * Return code 2554 * 0 - successfully issued adisc 2555 * 1 - failed to issue adisc 2556 **/ 2557 int 2558 lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2559 uint8_t retry) 2560 { 2561 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2562 struct lpfc_hba *phba = vport->phba; 2563 ADISC *ap; 2564 struct lpfc_iocbq *elsiocb; 2565 uint8_t *pcmd; 2566 uint16_t cmdsize; 2567 2568 cmdsize = (sizeof(uint32_t) + sizeof(ADISC)); 2569 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2570 ndlp->nlp_DID, ELS_CMD_ADISC); 2571 if (!elsiocb) 2572 return 1; 2573 2574 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2575 2576 /* For ADISC request, remainder of payload is service parameters */ 2577 *((uint32_t *) (pcmd)) = ELS_CMD_ADISC; 2578 pcmd += sizeof(uint32_t); 2579 2580 /* Fill in ADISC payload */ 2581 ap = (ADISC *) pcmd; 2582 ap->hardAL_PA = phba->fc_pref_ALPA; 2583 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 2584 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 2585 ap->DID = be32_to_cpu(vport->fc_myDID); 2586 2587 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2588 "Issue ADISC: did:x%x", 2589 ndlp->nlp_DID, 0, 0); 2590 2591 phba->fc_stat.elsXmitADISC++; 2592 elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc; 2593 spin_lock_irq(shost->host_lock); 2594 ndlp->nlp_flag |= NLP_ADISC_SND; 2595 spin_unlock_irq(shost->host_lock); 2596 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == 2597 IOCB_ERROR) { 2598 spin_lock_irq(shost->host_lock); 2599 ndlp->nlp_flag &= ~NLP_ADISC_SND; 2600 spin_unlock_irq(shost->host_lock); 2601 lpfc_els_free_iocb(phba, elsiocb); 2602 return 1; 2603 } 2604 return 0; 2605 } 2606 2607 /** 2608 * lpfc_cmpl_els_logo - Completion callback function for logo 2609 * @phba: pointer to lpfc hba data structure. 2610 * @cmdiocb: pointer to lpfc command iocb data structure. 2611 * @rspiocb: pointer to lpfc response iocb data structure. 2612 * 2613 * This routine is the completion function for issuing the ELS Logout (LOGO) 2614 * command. If no error status was reported from the LOGO response, the 2615 * state machine of the associated ndlp shall be invoked for transition with 2616 * respect to NLP_EVT_CMPL_LOGO event. Otherwise, if error status was reported, 2617 * the lpfc_els_retry() routine will be invoked to retry the LOGO command. 
2618 **/ 2619 static void 2620 lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2621 struct lpfc_iocbq *rspiocb) 2622 { 2623 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 2624 struct lpfc_vport *vport = ndlp->vport; 2625 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2626 IOCB_t *irsp; 2627 struct lpfcMboxq *mbox; 2628 unsigned long flags; 2629 uint32_t skip_recovery = 0; 2630 2631 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2632 cmdiocb->context_un.rsp_iocb = rspiocb; 2633 2634 irsp = &(rspiocb->iocb); 2635 spin_lock_irq(shost->host_lock); 2636 ndlp->nlp_flag &= ~NLP_LOGO_SND; 2637 spin_unlock_irq(shost->host_lock); 2638 2639 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2640 "LOGO cmpl: status:x%x/x%x did:x%x", 2641 irsp->ulpStatus, irsp->un.ulpWord[4], 2642 ndlp->nlp_DID); 2643 2644 /* LOGO completes to NPort <nlp_DID> */ 2645 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2646 "0105 LOGO completes to NPort x%x " 2647 "Data: x%x x%x x%x x%x\n", 2648 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 2649 irsp->ulpTimeout, vport->num_disc_nodes); 2650 2651 if (lpfc_els_chk_latt(vport)) { 2652 skip_recovery = 1; 2653 goto out; 2654 } 2655 2656 /* Check to see if link went down during discovery */ 2657 if (ndlp->nlp_flag & NLP_TARGET_REMOVE) { 2658 /* NLP_EVT_DEVICE_RM should unregister the RPI 2659 * which should abort all outstanding IOs. 2660 */ 2661 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2662 NLP_EVT_DEVICE_RM); 2663 skip_recovery = 1; 2664 goto out; 2665 } 2666 2667 if (irsp->ulpStatus) { 2668 /* Check for retry */ 2669 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2670 /* ELS command is being retried */ 2671 skip_recovery = 1; 2672 goto out; 2673 } 2674 /* LOGO failed */ 2675 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 2676 "2756 LOGO failure DID:%06X Status:x%x/x%x\n", 2677 ndlp->nlp_DID, irsp->ulpStatus, 2678 irsp->un.ulpWord[4]); 2679 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2680 if (lpfc_error_lost_link(irsp)) { 2681 skip_recovery = 1; 2682 goto out; 2683 } 2684 } 2685 2686 /* Call state machine. This will unregister the rpi if needed. */ 2687 lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO); 2688 2689 out: 2690 lpfc_els_free_iocb(phba, cmdiocb); 2691 /* If we are in pt2pt mode, we could rcv new S_ID on PLOGI */ 2692 if ((vport->fc_flag & FC_PT2PT) && 2693 !(vport->fc_flag & FC_PT2PT_PLOGI)) { 2694 phba->pport->fc_myDID = 0; 2695 2696 if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || 2697 (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { 2698 if (phba->nvmet_support) 2699 lpfc_nvmet_update_targetport(phba); 2700 else 2701 lpfc_nvme_update_localport(phba->pport); 2702 } 2703 2704 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2705 if (mbox) { 2706 lpfc_config_link(phba, mbox); 2707 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 2708 mbox->vport = vport; 2709 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == 2710 MBX_NOT_FINISHED) { 2711 mempool_free(mbox, phba->mbox_mem_pool); 2712 skip_recovery = 1; 2713 } 2714 } 2715 } 2716 2717 /* 2718 * If the node is a target, the handling attempts to recover the port. 2719 * For any other port type, the rpi is unregistered as an implicit 2720 * LOGO. 
2721 */ 2722 if ((ndlp->nlp_type & NLP_FCP_TARGET) && (skip_recovery == 0)) { 2723 lpfc_cancel_retry_delay_tmo(vport, ndlp); 2724 spin_lock_irqsave(shost->host_lock, flags); 2725 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2726 spin_unlock_irqrestore(shost->host_lock, flags); 2727 2728 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2729 "3187 LOGO completes to NPort x%x: Start " 2730 "Recovery Data: x%x x%x x%x x%x\n", 2731 ndlp->nlp_DID, irsp->ulpStatus, 2732 irsp->un.ulpWord[4], irsp->ulpTimeout, 2733 vport->num_disc_nodes); 2734 lpfc_disc_start(vport); 2735 } 2736 return; 2737 } 2738 2739 /** 2740 * lpfc_issue_els_logo - Issue a logo to an node on a vport 2741 * @vport: pointer to a virtual N_Port data structure. 2742 * @ndlp: pointer to a node-list data structure. 2743 * @retry: number of retries to the command IOCB. 2744 * 2745 * This routine constructs and issues an ELS Logout (LOGO) iocb command 2746 * to a remote node, referred by an @ndlp on a @vport. It constructs the 2747 * payload of the IOCB, properly sets up the @ndlp state, and invokes the 2748 * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command. 2749 * 2750 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 2751 * will be incremented by 1 for holding the ndlp and the reference to ndlp 2752 * will be stored into the context1 field of the IOCB for the completion 2753 * callback function to the LOGO ELS command. 2754 * 2755 * Return code 2756 * 0 - successfully issued logo 2757 * 1 - failed to issue logo 2758 **/ 2759 int 2760 lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2761 uint8_t retry) 2762 { 2763 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2764 struct lpfc_hba *phba = vport->phba; 2765 struct lpfc_iocbq *elsiocb; 2766 uint8_t *pcmd; 2767 uint16_t cmdsize; 2768 int rc; 2769 2770 spin_lock_irq(shost->host_lock); 2771 if (ndlp->nlp_flag & NLP_LOGO_SND) { 2772 spin_unlock_irq(shost->host_lock); 2773 return 0; 2774 } 2775 spin_unlock_irq(shost->host_lock); 2776 2777 cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name); 2778 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2779 ndlp->nlp_DID, ELS_CMD_LOGO); 2780 if (!elsiocb) 2781 return 1; 2782 2783 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2784 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 2785 pcmd += sizeof(uint32_t); 2786 2787 /* Fill in LOGO payload */ 2788 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 2789 pcmd += sizeof(uint32_t); 2790 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 2791 2792 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2793 "Issue LOGO: did:x%x", 2794 ndlp->nlp_DID, 0, 0); 2795 2796 /* 2797 * If we are issuing a LOGO, we may try to recover the remote NPort 2798 * by issuing a PLOGI later. Even though we issue ELS cmds by the 2799 * VPI, if we have a valid RPI, and that RPI gets unreg'ed while 2800 * that ELS command is in-flight, the HBA returns a IOERR_INVALID_RPI 2801 * for that ELS cmd. To avoid this situation, lets get rid of the 2802 * RPI right now, before any ELS cmds are sent. 
2803 */ 2804 spin_lock_irq(shost->host_lock); 2805 ndlp->nlp_flag |= NLP_ISSUE_LOGO; 2806 spin_unlock_irq(shost->host_lock); 2807 if (lpfc_unreg_rpi(vport, ndlp)) { 2808 lpfc_els_free_iocb(phba, elsiocb); 2809 return 0; 2810 } 2811 2812 phba->fc_stat.elsXmitLOGO++; 2813 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo; 2814 spin_lock_irq(shost->host_lock); 2815 ndlp->nlp_flag |= NLP_LOGO_SND; 2816 ndlp->nlp_flag &= ~NLP_ISSUE_LOGO; 2817 spin_unlock_irq(shost->host_lock); 2818 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2819 2820 if (rc == IOCB_ERROR) { 2821 spin_lock_irq(shost->host_lock); 2822 ndlp->nlp_flag &= ~NLP_LOGO_SND; 2823 spin_unlock_irq(shost->host_lock); 2824 lpfc_els_free_iocb(phba, elsiocb); 2825 return 1; 2826 } 2827 return 0; 2828 } 2829 2830 /** 2831 * lpfc_cmpl_els_cmd - Completion callback function for generic els command 2832 * @phba: pointer to lpfc hba data structure. 2833 * @cmdiocb: pointer to lpfc command iocb data structure. 2834 * @rspiocb: pointer to lpfc response iocb data structure. 2835 * 2836 * This routine is a generic completion callback function for ELS commands. 2837 * Specifically, it is the callback function which does not need to perform 2838 * any command specific operations. It is currently used by the ELS command 2839 * issuing routines for the ELS State Change Request (SCR), 2840 * lpfc_issue_els_scr(), and the ELS Fibre Channel Address Resolution 2841 * Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). Other than 2842 * certain debug loggings, this callback function simply invokes the 2843 * lpfc_els_chk_latt() routine to check whether link went down during the 2844 * discovery process. 2845 **/ 2846 static void 2847 lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2848 struct lpfc_iocbq *rspiocb) 2849 { 2850 struct lpfc_vport *vport = cmdiocb->vport; 2851 IOCB_t *irsp; 2852 2853 irsp = &rspiocb->iocb; 2854 2855 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2856 "ELS cmd cmpl: status:x%x/x%x did:x%x", 2857 irsp->ulpStatus, irsp->un.ulpWord[4], 2858 irsp->un.elsreq64.remoteID); 2859 /* ELS cmd tag <ulpIoTag> completes */ 2860 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2861 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n", 2862 irsp->ulpIoTag, irsp->ulpStatus, 2863 irsp->un.ulpWord[4], irsp->ulpTimeout); 2864 /* Check to see if link went down during discovery */ 2865 lpfc_els_chk_latt(vport); 2866 lpfc_els_free_iocb(phba, cmdiocb); 2867 return; 2868 } 2869 2870 /** 2871 * lpfc_issue_els_scr - Issue a scr to an node on a vport 2872 * @vport: pointer to a host virtual N_Port data structure. 2873 * @nportid: N_Port identifier to the remote node. 2874 * @retry: number of retries to the command IOCB. 2875 * 2876 * This routine issues a State Change Request (SCR) to a fabric node 2877 * on a @vport. The remote node @nportid is passed into the function. It 2878 * first search the @vport node list to find the matching ndlp. If no such 2879 * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An 2880 * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb() 2881 * routine is invoked to send the SCR IOCB. 2882 * 2883 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 2884 * will be incremented by 1 for holding the ndlp and the reference to ndlp 2885 * will be stored into the context1 field of the IOCB for the completion 2886 * callback function to the SCR ELS command. 
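 *
 * A minimal caller-side sketch (illustrative only; @nportid stands in for
 * whatever fabric controller address the caller targets, and the log text
 * is a placeholder rather than a real driver message):
 *
 *	if (lpfc_issue_els_scr(vport, nportid, 0))
 *		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
 *				 "Failed to issue SCR to x%x\n", nportid);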
2887 * 2888 * Return code 2889 * 0 - Successfully issued scr command 2890 * 1 - Failed to issue scr command 2891 **/ 2892 int 2893 lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) 2894 { 2895 struct lpfc_hba *phba = vport->phba; 2896 struct lpfc_iocbq *elsiocb; 2897 uint8_t *pcmd; 2898 uint16_t cmdsize; 2899 struct lpfc_nodelist *ndlp; 2900 2901 cmdsize = (sizeof(uint32_t) + sizeof(SCR)); 2902 2903 ndlp = lpfc_findnode_did(vport, nportid); 2904 if (!ndlp) { 2905 ndlp = lpfc_nlp_init(vport, nportid); 2906 if (!ndlp) 2907 return 1; 2908 lpfc_enqueue_node(vport, ndlp); 2909 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 2910 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 2911 if (!ndlp) 2912 return 1; 2913 } 2914 2915 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2916 ndlp->nlp_DID, ELS_CMD_SCR); 2917 2918 if (!elsiocb) { 2919 /* This will trigger the release of the node just 2920 * allocated 2921 */ 2922 lpfc_nlp_put(ndlp); 2923 return 1; 2924 } 2925 2926 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2927 2928 *((uint32_t *) (pcmd)) = ELS_CMD_SCR; 2929 pcmd += sizeof(uint32_t); 2930 2931 /* For SCR, remainder of payload is SCR parameter page */ 2932 memset(pcmd, 0, sizeof(SCR)); 2933 ((SCR *) pcmd)->Function = SCR_FUNC_FULL; 2934 2935 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2936 "Issue SCR: did:x%x", 2937 ndlp->nlp_DID, 0, 0); 2938 2939 phba->fc_stat.elsXmitSCR++; 2940 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; 2941 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == 2942 IOCB_ERROR) { 2943 /* The additional lpfc_nlp_put will cause the following 2944 * lpfc_els_free_iocb routine to trigger the release of 2945 * the node. 2946 */ 2947 lpfc_nlp_put(ndlp); 2948 lpfc_els_free_iocb(phba, elsiocb); 2949 return 1; 2950 } 2951 /* This will cause the callback-function lpfc_cmpl_els_cmd to 2952 * trigger the release of the node. 2953 */ 2954 2955 lpfc_nlp_put(ndlp); 2956 return 0; 2957 } 2958 2959 /** 2960 * lpfc_issue_els_farpr - Issue a farpr to a node on a vport 2961 * @vport: pointer to a host virtual N_Port data structure. 2962 * @nportid: N_Port identifier to the remote node. 2963 * @retry: number of retries to the command IOCB. 2964 * 2965 * This routine issues a Fibre Channel Address Resolution Response 2966 * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid) 2967 * is passed into the function. It first searches the @vport node list to find 2968 * the matching ndlp. If no such ndlp is found, a new ndlp shall be created 2969 * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the 2970 * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command. 2971 * 2972 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 2973 * will be incremented by 1 for holding the ndlp and the reference to ndlp 2974 * will be stored into the context1 field of the IOCB for the completion 2975 * callback function to the FARPR ELS command.
2976 * 2977 * Return code 2978 * 0 - Successfully issued farpr command 2979 * 1 - Failed to issue farpr command 2980 **/ 2981 static int 2982 lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) 2983 { 2984 struct lpfc_hba *phba = vport->phba; 2985 struct lpfc_iocbq *elsiocb; 2986 FARP *fp; 2987 uint8_t *pcmd; 2988 uint32_t *lp; 2989 uint16_t cmdsize; 2990 struct lpfc_nodelist *ondlp; 2991 struct lpfc_nodelist *ndlp; 2992 2993 cmdsize = (sizeof(uint32_t) + sizeof(FARP)); 2994 2995 ndlp = lpfc_findnode_did(vport, nportid); 2996 if (!ndlp) { 2997 ndlp = lpfc_nlp_init(vport, nportid); 2998 if (!ndlp) 2999 return 1; 3000 lpfc_enqueue_node(vport, ndlp); 3001 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 3002 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 3003 if (!ndlp) 3004 return 1; 3005 } 3006 3007 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3008 ndlp->nlp_DID, ELS_CMD_RNID); 3009 if (!elsiocb) { 3010 /* This will trigger the release of the node just 3011 * allocated 3012 */ 3013 lpfc_nlp_put(ndlp); 3014 return 1; 3015 } 3016 3017 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3018 3019 *((uint32_t *) (pcmd)) = ELS_CMD_FARPR; 3020 pcmd += sizeof(uint32_t); 3021 3022 /* Fill in FARPR payload */ 3023 fp = (FARP *) (pcmd); 3024 memset(fp, 0, sizeof(FARP)); 3025 lp = (uint32_t *) pcmd; 3026 *lp++ = be32_to_cpu(nportid); 3027 *lp++ = be32_to_cpu(vport->fc_myDID); 3028 fp->Rflags = 0; 3029 fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE); 3030 3031 memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name)); 3032 memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 3033 ondlp = lpfc_findnode_did(vport, nportid); 3034 if (ondlp && NLP_CHK_NODE_ACT(ondlp)) { 3035 memcpy(&fp->OportName, &ondlp->nlp_portname, 3036 sizeof(struct lpfc_name)); 3037 memcpy(&fp->OnodeName, &ondlp->nlp_nodename, 3038 sizeof(struct lpfc_name)); 3039 } 3040 3041 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3042 "Issue FARPR: did:x%x", 3043 ndlp->nlp_DID, 0, 0); 3044 3045 phba->fc_stat.elsXmitFARPR++; 3046 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; 3047 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == 3048 IOCB_ERROR) { 3049 /* The additional lpfc_nlp_put will cause the following 3050 * lpfc_els_free_iocb routine to trigger the release of 3051 * the node. 3052 */ 3053 lpfc_nlp_put(ndlp); 3054 lpfc_els_free_iocb(phba, elsiocb); 3055 return 1; 3056 } 3057 /* This will cause the callback-function lpfc_cmpl_els_cmd to 3058 * trigger the release of the node. 3059 */ 3060 lpfc_nlp_put(ndlp); 3061 return 0; 3062 } 3063 3064 /** 3065 * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry 3066 * @vport: pointer to a host virtual N_Port data structure. 3067 * @nlp: pointer to a node-list data structure. 3068 * 3069 * This routine cancels the timer with a delayed IOCB-command retry for 3070 * a @vport's @ndlp. It stops the timer for the delayed function retrial and 3071 * removes the ELS retry event if it presents. In addition, if the 3072 * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB 3073 * commands are sent for the @vport's nodes that require issuing discovery 3074 * ADISC. 
3075 **/ 3076 void 3077 lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) 3078 { 3079 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3080 struct lpfc_work_evt *evtp; 3081 3082 if (!(nlp->nlp_flag & NLP_DELAY_TMO)) 3083 return; 3084 spin_lock_irq(shost->host_lock); 3085 nlp->nlp_flag &= ~NLP_DELAY_TMO; 3086 spin_unlock_irq(shost->host_lock); 3087 del_timer_sync(&nlp->nlp_delayfunc); 3088 nlp->nlp_last_elscmd = 0; 3089 if (!list_empty(&nlp->els_retry_evt.evt_listp)) { 3090 list_del_init(&nlp->els_retry_evt.evt_listp); 3091 /* Decrement nlp reference count held for the delayed retry */ 3092 evtp = &nlp->els_retry_evt; 3093 lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1); 3094 } 3095 if (nlp->nlp_flag & NLP_NPR_2B_DISC) { 3096 spin_lock_irq(shost->host_lock); 3097 nlp->nlp_flag &= ~NLP_NPR_2B_DISC; 3098 spin_unlock_irq(shost->host_lock); 3099 if (vport->num_disc_nodes) { 3100 if (vport->port_state < LPFC_VPORT_READY) { 3101 /* Check if there are more ADISCs to be sent */ 3102 lpfc_more_adisc(vport); 3103 } else { 3104 /* Check if there are more PLOGIs to be sent */ 3105 lpfc_more_plogi(vport); 3106 if (vport->num_disc_nodes == 0) { 3107 spin_lock_irq(shost->host_lock); 3108 vport->fc_flag &= ~FC_NDISC_ACTIVE; 3109 spin_unlock_irq(shost->host_lock); 3110 lpfc_can_disctmo(vport); 3111 lpfc_end_rscn(vport); 3112 } 3113 } 3114 } 3115 } 3116 return; 3117 } 3118 3119 /** 3120 * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer 3121 * @ptr: holder for the pointer to the timer function associated data (ndlp). 3122 * 3123 * This routine is invoked by the ndlp delayed-function timer to check 3124 * whether there is any pending ELS retry event(s) with the node. If not, it 3125 * simply returns. Otherwise, if there is at least one ELS delayed event, it 3126 * adds the delayed events to the HBA work list and invokes the 3127 * lpfc_worker_wake_up() routine to wake up worker thread to process the 3128 * event. Note that lpfc_nlp_get() is called before posting the event to 3129 * the work list to hold reference count of ndlp so that it guarantees the 3130 * reference to ndlp will still be available when the worker thread gets 3131 * to the event associated with the ndlp. 3132 **/ 3133 void 3134 lpfc_els_retry_delay(unsigned long ptr) 3135 { 3136 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr; 3137 struct lpfc_vport *vport = ndlp->vport; 3138 struct lpfc_hba *phba = vport->phba; 3139 unsigned long flags; 3140 struct lpfc_work_evt *evtp = &ndlp->els_retry_evt; 3141 3142 spin_lock_irqsave(&phba->hbalock, flags); 3143 if (!list_empty(&evtp->evt_listp)) { 3144 spin_unlock_irqrestore(&phba->hbalock, flags); 3145 return; 3146 } 3147 3148 /* We need to hold the node by incrementing the reference 3149 * count until the queued work is done 3150 */ 3151 evtp->evt_arg1 = lpfc_nlp_get(ndlp); 3152 if (evtp->evt_arg1) { 3153 evtp->evt = LPFC_EVT_ELS_RETRY; 3154 list_add_tail(&evtp->evt_listp, &phba->work_list); 3155 lpfc_worker_wake_up(phba); 3156 } 3157 spin_unlock_irqrestore(&phba->hbalock, flags); 3158 return; 3159 } 3160 3161 /** 3162 * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function 3163 * @ndlp: pointer to a node-list data structure. 3164 * 3165 * This routine is the worker-thread handler for processing the @ndlp delayed 3166 * event(s), posted by the lpfc_els_retry_delay() routine. 
It simply retrieves 3167 * the last ELS command from the associated ndlp and invokes the proper ELS 3168 * function according to the delayed ELS command to retry the command. 3169 **/ 3170 void 3171 lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) 3172 { 3173 struct lpfc_vport *vport = ndlp->vport; 3174 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3175 uint32_t cmd, retry; 3176 3177 spin_lock_irq(shost->host_lock); 3178 cmd = ndlp->nlp_last_elscmd; 3179 ndlp->nlp_last_elscmd = 0; 3180 3181 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { 3182 spin_unlock_irq(shost->host_lock); 3183 return; 3184 } 3185 3186 ndlp->nlp_flag &= ~NLP_DELAY_TMO; 3187 spin_unlock_irq(shost->host_lock); 3188 /* 3189 * If a discovery event re-added nlp_delayfunc after timer 3190 * firing and before processing the timer, cancel the 3191 * nlp_delayfunc. 3192 */ 3193 del_timer_sync(&ndlp->nlp_delayfunc); 3194 retry = ndlp->nlp_retry; 3195 ndlp->nlp_retry = 0; 3196 3197 switch (cmd) { 3198 case ELS_CMD_FLOGI: 3199 lpfc_issue_els_flogi(vport, ndlp, retry); 3200 break; 3201 case ELS_CMD_PLOGI: 3202 if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) { 3203 ndlp->nlp_prev_state = ndlp->nlp_state; 3204 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 3205 } 3206 break; 3207 case ELS_CMD_ADISC: 3208 if (!lpfc_issue_els_adisc(vport, ndlp, retry)) { 3209 ndlp->nlp_prev_state = ndlp->nlp_state; 3210 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 3211 } 3212 break; 3213 case ELS_CMD_PRLI: 3214 case ELS_CMD_NVMEPRLI: 3215 if (!lpfc_issue_els_prli(vport, ndlp, retry)) { 3216 ndlp->nlp_prev_state = ndlp->nlp_state; 3217 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 3218 } 3219 break; 3220 case ELS_CMD_LOGO: 3221 if (!lpfc_issue_els_logo(vport, ndlp, retry)) { 3222 ndlp->nlp_prev_state = ndlp->nlp_state; 3223 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 3224 } 3225 break; 3226 case ELS_CMD_FDISC: 3227 if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)) 3228 lpfc_issue_els_fdisc(vport, ndlp, retry); 3229 break; 3230 } 3231 return; 3232 } 3233 3234 /** 3235 * lpfc_els_retry - Make retry decision on an els command iocb 3236 * @phba: pointer to lpfc hba data structure. 3237 * @cmdiocb: pointer to lpfc command iocb data structure. 3238 * @rspiocb: pointer to lpfc response iocb data structure. 3239 * 3240 * This routine makes a retry decision on an ELS command IOCB, which has 3241 * failed. The following ELS IOCBs use this function for retrying the command 3242 * when a previously issued command responded with an error status: FLOGI, PLOGI, 3243 * PRLI, ADISC, LOGO, and FDISC. Based on the ELS command type and the 3244 * returned error status, it makes the decision whether a retry shall be 3245 * issued for the command, and whether a retry shall be made immediately or 3246 * delayed. In the former case, the corresponding ELS command issuing-function 3247 * is called to retry the command. In the latter case, the ELS command shall 3248 * be posted to the ndlp delayed event and the delayed function timer set on the 3249 * ndlp for the delayed command issuing.
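 *
 * How the ELS completion handlers consume the result (the pattern used in
 * the PLOGI completion path earlier in this file):
 *
 *	if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
 *		/* ELS command is being retried; skip the state machine */
 *		goto out;
 *	}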
3250 * 3251 * Return code 3252 * 0 - No retry of els command is made 3253 * 1 - Immediate or delayed retry of els command is made 3254 **/ 3255 static int 3256 lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3257 struct lpfc_iocbq *rspiocb) 3258 { 3259 struct lpfc_vport *vport = cmdiocb->vport; 3260 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3261 IOCB_t *irsp = &rspiocb->iocb; 3262 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 3263 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 3264 uint32_t *elscmd; 3265 struct ls_rjt stat; 3266 int retry = 0, maxretry = lpfc_max_els_tries, delay = 0; 3267 int logerr = 0; 3268 uint32_t cmd = 0; 3269 uint32_t did; 3270 3271 3272 /* Note: context2 may be 0 for internal driver abort 3273 * of delays ELS command. 3274 */ 3275 3276 if (pcmd && pcmd->virt) { 3277 elscmd = (uint32_t *) (pcmd->virt); 3278 cmd = *elscmd++; 3279 } 3280 3281 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) 3282 did = ndlp->nlp_DID; 3283 else { 3284 /* We should only hit this case for retrying PLOGI */ 3285 did = irsp->un.elsreq64.remoteID; 3286 ndlp = lpfc_findnode_did(vport, did); 3287 if ((!ndlp || !NLP_CHK_NODE_ACT(ndlp)) 3288 && (cmd != ELS_CMD_PLOGI)) 3289 return 1; 3290 } 3291 3292 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3293 "Retry ELS: wd7:x%x wd4:x%x did:x%x", 3294 *(((uint32_t *) irsp) + 7), irsp->un.ulpWord[4], ndlp->nlp_DID); 3295 3296 switch (irsp->ulpStatus) { 3297 case IOSTAT_FCP_RSP_ERROR: 3298 break; 3299 case IOSTAT_REMOTE_STOP: 3300 if (phba->sli_rev == LPFC_SLI_REV4) { 3301 /* This IO was aborted by the target, we don't 3302 * know the rxid and because we did not send the 3303 * ABTS we cannot generate and RRQ. 3304 */ 3305 lpfc_set_rrq_active(phba, ndlp, 3306 cmdiocb->sli4_lxritag, 0, 0); 3307 } 3308 break; 3309 case IOSTAT_LOCAL_REJECT: 3310 switch ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK)) { 3311 case IOERR_LOOP_OPEN_FAILURE: 3312 if (cmd == ELS_CMD_FLOGI) { 3313 if (PCI_DEVICE_ID_HORNET == 3314 phba->pcidev->device) { 3315 phba->fc_topology = LPFC_TOPOLOGY_LOOP; 3316 phba->pport->fc_myDID = 0; 3317 phba->alpa_map[0] = 0; 3318 phba->alpa_map[1] = 0; 3319 } 3320 } 3321 if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0) 3322 delay = 1000; 3323 retry = 1; 3324 break; 3325 3326 case IOERR_ILLEGAL_COMMAND: 3327 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 3328 "0124 Retry illegal cmd x%x " 3329 "retry:x%x delay:x%x\n", 3330 cmd, cmdiocb->retry, delay); 3331 retry = 1; 3332 /* All command's retry policy */ 3333 maxretry = 8; 3334 if (cmdiocb->retry > 2) 3335 delay = 1000; 3336 break; 3337 3338 case IOERR_NO_RESOURCES: 3339 logerr = 1; /* HBA out of resources */ 3340 retry = 1; 3341 if (cmdiocb->retry > 100) 3342 delay = 100; 3343 maxretry = 250; 3344 break; 3345 3346 case IOERR_ILLEGAL_FRAME: 3347 delay = 100; 3348 retry = 1; 3349 break; 3350 3351 case IOERR_SEQUENCE_TIMEOUT: 3352 case IOERR_INVALID_RPI: 3353 if (cmd == ELS_CMD_PLOGI && 3354 did == NameServer_DID) { 3355 /* Continue forever if plogi to */ 3356 /* the nameserver fails */ 3357 maxretry = 0; 3358 delay = 100; 3359 } 3360 retry = 1; 3361 break; 3362 } 3363 break; 3364 3365 case IOSTAT_NPORT_RJT: 3366 case IOSTAT_FABRIC_RJT: 3367 if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) { 3368 retry = 1; 3369 break; 3370 } 3371 break; 3372 3373 case IOSTAT_NPORT_BSY: 3374 case IOSTAT_FABRIC_BSY: 3375 logerr = 1; /* Fabric / Remote NPort out of resources */ 3376 retry = 1; 3377 break; 3378 3379 case IOSTAT_LS_RJT: 3380 stat.un.lsRjtError = 
be32_to_cpu(irsp->un.ulpWord[4]); 3381 /* Added for Vendor specifc support 3382 * Just keep retrying for these Rsn / Exp codes 3383 */ 3384 switch (stat.un.b.lsRjtRsnCode) { 3385 case LSRJT_UNABLE_TPC: 3386 /* The driver has a VALID PLOGI but the rport has 3387 * rejected the PRLI - can't do it now. Delay 3388 * for 1 second and try again - don't care about 3389 * the explanation. 3390 */ 3391 if (cmd == ELS_CMD_PRLI || cmd == ELS_CMD_NVMEPRLI) { 3392 delay = 1000; 3393 maxretry = lpfc_max_els_tries + 1; 3394 retry = 1; 3395 break; 3396 } 3397 3398 /* Legacy bug fix code for targets with PLOGI delays. */ 3399 if (stat.un.b.lsRjtRsnCodeExp == 3400 LSEXP_CMD_IN_PROGRESS) { 3401 if (cmd == ELS_CMD_PLOGI) { 3402 delay = 1000; 3403 maxretry = 48; 3404 } 3405 retry = 1; 3406 break; 3407 } 3408 if (stat.un.b.lsRjtRsnCodeExp == 3409 LSEXP_CANT_GIVE_DATA) { 3410 if (cmd == ELS_CMD_PLOGI) { 3411 delay = 1000; 3412 maxretry = 48; 3413 } 3414 retry = 1; 3415 break; 3416 } 3417 if (cmd == ELS_CMD_PLOGI) { 3418 delay = 1000; 3419 maxretry = lpfc_max_els_tries + 1; 3420 retry = 1; 3421 break; 3422 } 3423 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 3424 (cmd == ELS_CMD_FDISC) && 3425 (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){ 3426 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 3427 "0125 FDISC Failed (x%x). " 3428 "Fabric out of resources\n", 3429 stat.un.lsRjtError); 3430 lpfc_vport_set_state(vport, 3431 FC_VPORT_NO_FABRIC_RSCS); 3432 } 3433 break; 3434 3435 case LSRJT_LOGICAL_BSY: 3436 if ((cmd == ELS_CMD_PLOGI) || 3437 (cmd == ELS_CMD_PRLI) || 3438 (cmd == ELS_CMD_NVMEPRLI)) { 3439 delay = 1000; 3440 maxretry = 48; 3441 } else if (cmd == ELS_CMD_FDISC) { 3442 /* FDISC retry policy */ 3443 maxretry = 48; 3444 if (cmdiocb->retry >= 32) 3445 delay = 1000; 3446 } 3447 retry = 1; 3448 break; 3449 3450 case LSRJT_LOGICAL_ERR: 3451 /* There are some cases where switches return this 3452 * error when they are not ready and should be returning 3453 * Logical Busy. We should delay every time. 3454 */ 3455 if (cmd == ELS_CMD_FDISC && 3456 stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) { 3457 maxretry = 3; 3458 delay = 1000; 3459 retry = 1; 3460 } else if (cmd == ELS_CMD_FLOGI && 3461 stat.un.b.lsRjtRsnCodeExp == 3462 LSEXP_NOTHING_MORE) { 3463 vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf; 3464 retry = 1; 3465 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 3466 "0820 FLOGI Failed (x%x). " 3467 "BBCredit Not Supported\n", 3468 stat.un.lsRjtError); 3469 } 3470 break; 3471 3472 case LSRJT_PROTOCOL_ERR: 3473 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 3474 (cmd == ELS_CMD_FDISC) && 3475 ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) || 3476 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID)) 3477 ) { 3478 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 3479 "0122 FDISC Failed (x%x). " 3480 "Fabric Detected Bad WWN\n", 3481 stat.un.lsRjtError); 3482 lpfc_vport_set_state(vport, 3483 FC_VPORT_FABRIC_REJ_WWN); 3484 } 3485 break; 3486 case LSRJT_VENDOR_UNIQUE: 3487 if ((stat.un.b.vendorUnique == 0x45) && 3488 (cmd == ELS_CMD_FLOGI)) { 3489 goto out_retry; 3490 } 3491 break; 3492 case LSRJT_CMD_UNSUPPORTED: 3493 /* lpfc nvmet returns this type of LS_RJT when it 3494 * receives an FCP PRLI because lpfc nvmet only 3495 * support NVME. ELS request is terminated for FCP4 3496 * on this rport. 
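                         * In that case the code below sets NLP_FCP_PRLI_RJT
                         * on the node and returns without scheduling a
                         * retry.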
3497 */ 3498 if (stat.un.b.lsRjtRsnCodeExp == 3499 LSEXP_REQ_UNSUPPORTED && cmd == ELS_CMD_PRLI) { 3500 spin_lock_irq(shost->host_lock); 3501 ndlp->nlp_flag |= NLP_FCP_PRLI_RJT; 3502 spin_unlock_irq(shost->host_lock); 3503 retry = 0; 3504 goto out_retry; 3505 } 3506 break; 3507 } 3508 break; 3509 3510 case IOSTAT_INTERMED_RSP: 3511 case IOSTAT_BA_RJT: 3512 break; 3513 3514 default: 3515 break; 3516 } 3517 3518 if (did == FDMI_DID) 3519 retry = 1; 3520 3521 if ((cmd == ELS_CMD_FLOGI) && 3522 (phba->fc_topology != LPFC_TOPOLOGY_LOOP) && 3523 !lpfc_error_lost_link(irsp)) { 3524 /* FLOGI retry policy */ 3525 retry = 1; 3526 /* retry FLOGI forever */ 3527 if (phba->link_flag != LS_LOOPBACK_MODE) 3528 maxretry = 0; 3529 else 3530 maxretry = 2; 3531 3532 if (cmdiocb->retry >= 100) 3533 delay = 5000; 3534 else if (cmdiocb->retry >= 32) 3535 delay = 1000; 3536 } else if ((cmd == ELS_CMD_FDISC) && !lpfc_error_lost_link(irsp)) { 3537 /* retry FDISCs every second up to devloss */ 3538 retry = 1; 3539 maxretry = vport->cfg_devloss_tmo; 3540 delay = 1000; 3541 } 3542 3543 cmdiocb->retry++; 3544 if (maxretry && (cmdiocb->retry >= maxretry)) { 3545 phba->fc_stat.elsRetryExceeded++; 3546 retry = 0; 3547 } 3548 3549 if ((vport->load_flag & FC_UNLOADING) != 0) 3550 retry = 0; 3551 3552 out_retry: 3553 if (retry) { 3554 if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) { 3555 /* Stop retrying PLOGI and FDISC if in FCF discovery */ 3556 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 3557 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3558 "2849 Stop retry ELS command " 3559 "x%x to remote NPORT x%x, " 3560 "Data: x%x x%x\n", cmd, did, 3561 cmdiocb->retry, delay); 3562 return 0; 3563 } 3564 } 3565 3566 /* Retry ELS command <elsCmd> to remote NPORT <did> */ 3567 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3568 "0107 Retry ELS command x%x to remote " 3569 "NPORT x%x Data: x%x x%x\n", 3570 cmd, did, cmdiocb->retry, delay); 3571 3572 if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) && 3573 ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) || 3574 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) != 3575 IOERR_NO_RESOURCES))) { 3576 /* Don't reset timer for no resources */ 3577 3578 /* If discovery / RSCN timer is running, reset it */ 3579 if (timer_pending(&vport->fc_disctmo) || 3580 (vport->fc_flag & FC_RSCN_MODE)) 3581 lpfc_set_disctmo(vport); 3582 } 3583 3584 phba->fc_stat.elsXmitRetry++; 3585 if (ndlp && NLP_CHK_NODE_ACT(ndlp) && delay) { 3586 phba->fc_stat.elsDelayRetry++; 3587 ndlp->nlp_retry = cmdiocb->retry; 3588 3589 /* delay is specified in milliseconds */ 3590 mod_timer(&ndlp->nlp_delayfunc, 3591 jiffies + msecs_to_jiffies(delay)); 3592 spin_lock_irq(shost->host_lock); 3593 ndlp->nlp_flag |= NLP_DELAY_TMO; 3594 spin_unlock_irq(shost->host_lock); 3595 3596 ndlp->nlp_prev_state = ndlp->nlp_state; 3597 if ((cmd == ELS_CMD_PRLI) || 3598 (cmd == ELS_CMD_NVMEPRLI)) 3599 lpfc_nlp_set_state(vport, ndlp, 3600 NLP_STE_PRLI_ISSUE); 3601 else 3602 lpfc_nlp_set_state(vport, ndlp, 3603 NLP_STE_NPR_NODE); 3604 ndlp->nlp_last_elscmd = cmd; 3605 3606 return 1; 3607 } 3608 switch (cmd) { 3609 case ELS_CMD_FLOGI: 3610 lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry); 3611 return 1; 3612 case ELS_CMD_FDISC: 3613 lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry); 3614 return 1; 3615 case ELS_CMD_PLOGI: 3616 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { 3617 ndlp->nlp_prev_state = ndlp->nlp_state; 3618 lpfc_nlp_set_state(vport, ndlp, 3619 NLP_STE_PLOGI_ISSUE); 3620 } 3621 lpfc_issue_els_plogi(vport, did, cmdiocb->retry); 3622 return 1; 3623 case 
ELS_CMD_ADISC: 3624 ndlp->nlp_prev_state = ndlp->nlp_state; 3625 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 3626 lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry); 3627 return 1; 3628 case ELS_CMD_PRLI: 3629 case ELS_CMD_NVMEPRLI: 3630 ndlp->nlp_prev_state = ndlp->nlp_state; 3631 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 3632 lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry); 3633 return 1; 3634 case ELS_CMD_LOGO: 3635 ndlp->nlp_prev_state = ndlp->nlp_state; 3636 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 3637 lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry); 3638 return 1; 3639 } 3640 } 3641 /* No retry ELS command <elsCmd> to remote NPORT <did> */ 3642 if (logerr) { 3643 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 3644 "0137 No retry ELS command x%x to remote " 3645 "NPORT x%x: Out of Resources: Error:x%x/%x\n", 3646 cmd, did, irsp->ulpStatus, 3647 irsp->un.ulpWord[4]); 3648 } 3649 else { 3650 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3651 "0108 No retry ELS command x%x to remote " 3652 "NPORT x%x Retried:%d Error:x%x/%x\n", 3653 cmd, did, cmdiocb->retry, irsp->ulpStatus, 3654 irsp->un.ulpWord[4]); 3655 } 3656 return 0; 3657 } 3658 3659 /** 3660 * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb 3661 * @phba: pointer to lpfc hba data structure. 3662 * @buf_ptr1: pointer to the lpfc DMA buffer data structure. 3663 * 3664 * This routine releases the lpfc DMA (Direct Memory Access) buffer(s) 3665 * associated with a command IOCB back to the lpfc DMA buffer pool. It first 3666 * checks to see whether there is a lpfc DMA buffer associated with the 3667 * response of the command IOCB. If so, it will be released before releasing 3668 * the lpfc DMA buffer associated with the IOCB itself. 3669 * 3670 * Return code 3671 * 0 - Successfully released lpfc DMA buffer (currently, always return 0) 3672 **/ 3673 static int 3674 lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1) 3675 { 3676 struct lpfc_dmabuf *buf_ptr; 3677 3678 /* Free the response before processing the command. */ 3679 if (!list_empty(&buf_ptr1->list)) { 3680 list_remove_head(&buf_ptr1->list, buf_ptr, 3681 struct lpfc_dmabuf, 3682 list); 3683 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 3684 kfree(buf_ptr); 3685 } 3686 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys); 3687 kfree(buf_ptr1); 3688 return 0; 3689 } 3690 3691 /** 3692 * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl 3693 * @phba: pointer to lpfc hba data structure. 3694 * @buf_ptr: pointer to the lpfc dma buffer data structure. 3695 * 3696 * This routine releases the lpfc Direct Memory Access (DMA) buffer 3697 * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer 3698 * pool. 3699 * 3700 * Return code 3701 * 0 - Successfully released lpfc DMA buffer (currently, always return 0) 3702 **/ 3703 static int 3704 lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr) 3705 { 3706 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 3707 kfree(buf_ptr); 3708 return 0; 3709 } 3710 3711 /** 3712 * lpfc_els_free_iocb - Free a command iocb and its associated resources 3713 * @phba: pointer to lpfc hba data structure. 3714 * @elsiocb: pointer to lpfc els command iocb data structure. 3715 * 3716 * This routine frees a command IOCB and its associated resources. 
The 3717 * command IOCB data structure contains the reference to various associated 3718 * resources, these fields must be set to NULL if the associated reference 3719 * not present: 3720 * context1 - reference to ndlp 3721 * context2 - reference to cmd 3722 * context2->next - reference to rsp 3723 * context3 - reference to bpl 3724 * 3725 * It first properly decrements the reference count held on ndlp for the 3726 * IOCB completion callback function. If LPFC_DELAY_MEM_FREE flag is not 3727 * set, it invokes the lpfc_els_free_data() routine to release the Direct 3728 * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it 3729 * adds the DMA buffer the @phba data structure for the delayed release. 3730 * If reference to the Buffer Pointer List (BPL) is present, the 3731 * lpfc_els_free_bpl() routine is invoked to release the DMA memory 3732 * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is 3733 * invoked to release the IOCB data structure back to @phba IOCBQ list. 3734 * 3735 * Return code 3736 * 0 - Success (currently, always return 0) 3737 **/ 3738 int 3739 lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb) 3740 { 3741 struct lpfc_dmabuf *buf_ptr, *buf_ptr1; 3742 struct lpfc_nodelist *ndlp; 3743 3744 ndlp = (struct lpfc_nodelist *)elsiocb->context1; 3745 if (ndlp) { 3746 if (ndlp->nlp_flag & NLP_DEFER_RM) { 3747 lpfc_nlp_put(ndlp); 3748 3749 /* If the ndlp is not being used by another discovery 3750 * thread, free it. 3751 */ 3752 if (!lpfc_nlp_not_used(ndlp)) { 3753 /* If ndlp is being used by another discovery 3754 * thread, just clear NLP_DEFER_RM 3755 */ 3756 ndlp->nlp_flag &= ~NLP_DEFER_RM; 3757 } 3758 } 3759 else 3760 lpfc_nlp_put(ndlp); 3761 elsiocb->context1 = NULL; 3762 } 3763 /* context2 = cmd, context2->next = rsp, context3 = bpl */ 3764 if (elsiocb->context2) { 3765 if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) { 3766 /* Firmware could still be in progress of DMAing 3767 * payload, so don't free data buffer till after 3768 * a hbeat. 3769 */ 3770 elsiocb->iocb_flag &= ~LPFC_DELAY_MEM_FREE; 3771 buf_ptr = elsiocb->context2; 3772 elsiocb->context2 = NULL; 3773 if (buf_ptr) { 3774 buf_ptr1 = NULL; 3775 spin_lock_irq(&phba->hbalock); 3776 if (!list_empty(&buf_ptr->list)) { 3777 list_remove_head(&buf_ptr->list, 3778 buf_ptr1, struct lpfc_dmabuf, 3779 list); 3780 INIT_LIST_HEAD(&buf_ptr1->list); 3781 list_add_tail(&buf_ptr1->list, 3782 &phba->elsbuf); 3783 phba->elsbuf_cnt++; 3784 } 3785 INIT_LIST_HEAD(&buf_ptr->list); 3786 list_add_tail(&buf_ptr->list, &phba->elsbuf); 3787 phba->elsbuf_cnt++; 3788 spin_unlock_irq(&phba->hbalock); 3789 } 3790 } else { 3791 buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2; 3792 lpfc_els_free_data(phba, buf_ptr1); 3793 elsiocb->context2 = NULL; 3794 } 3795 } 3796 3797 if (elsiocb->context3) { 3798 buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3; 3799 lpfc_els_free_bpl(phba, buf_ptr); 3800 elsiocb->context3 = NULL; 3801 } 3802 lpfc_sli_release_iocbq(phba, elsiocb); 3803 return 0; 3804 } 3805 3806 /** 3807 * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response 3808 * @phba: pointer to lpfc hba data structure. 3809 * @cmdiocb: pointer to lpfc command iocb data structure. 3810 * @rspiocb: pointer to lpfc response iocb data structure. 3811 * 3812 * This routine is the completion callback function to the Logout (LOGO) 3813 * Accept (ACC) Response ELS command. This routine is invoked to indicate 3814 * the completion of the LOGO process. 
It invokes the lpfc_nlp_not_used() to 3815 * release the ndlp if it has the last reference remaining (reference count 3816 * is 1). If succeeded (meaning ndlp released), it sets the IOCB context1 3817 * field to NULL to inform the following lpfc_els_free_iocb() routine no 3818 * ndlp reference count needs to be decremented. Otherwise, the ndlp 3819 * reference use-count shall be decremented by the lpfc_els_free_iocb() 3820 * routine. Finally, the lpfc_els_free_iocb() is invoked to release the 3821 * IOCB data structure. 3822 **/ 3823 static void 3824 lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3825 struct lpfc_iocbq *rspiocb) 3826 { 3827 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 3828 struct lpfc_vport *vport = cmdiocb->vport; 3829 IOCB_t *irsp; 3830 3831 irsp = &rspiocb->iocb; 3832 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 3833 "ACC LOGO cmpl: status:x%x/x%x did:x%x", 3834 irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID); 3835 /* ACC to LOGO completes to NPort <nlp_DID> */ 3836 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3837 "0109 ACC to LOGO completes to NPort x%x " 3838 "Data: x%x x%x x%x\n", 3839 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 3840 ndlp->nlp_rpi); 3841 3842 if (ndlp->nlp_state == NLP_STE_NPR_NODE) { 3843 /* NPort Recovery mode or node is just allocated */ 3844 if (!lpfc_nlp_not_used(ndlp)) { 3845 /* If the ndlp is being used by another discovery 3846 * thread, just unregister the RPI. 3847 */ 3848 lpfc_unreg_rpi(vport, ndlp); 3849 } else { 3850 /* Indicate the node has already released, should 3851 * not reference to it from within lpfc_els_free_iocb. 3852 */ 3853 cmdiocb->context1 = NULL; 3854 } 3855 } 3856 3857 /* 3858 * The driver received a LOGO from the rport and has ACK'd it. 3859 * At this point, the driver is done so release the IOCB 3860 */ 3861 lpfc_els_free_iocb(phba, cmdiocb); 3862 } 3863 3864 /** 3865 * lpfc_mbx_cmpl_dflt_rpi - Completion callbk func for unreg dflt rpi mbox cmd 3866 * @phba: pointer to lpfc hba data structure. 3867 * @pmb: pointer to the driver internal queue element for mailbox command. 3868 * 3869 * This routine is the completion callback function for unregister default 3870 * RPI (Remote Port Index) mailbox command to the @phba. It simply releases 3871 * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and 3872 * decrements the ndlp reference count held for this completion callback 3873 * function. After that, it invokes the lpfc_nlp_not_used() to check 3874 * whether there is only one reference left on the ndlp. If so, it will 3875 * perform one more decrement and trigger the release of the ndlp. 3876 **/ 3877 void 3878 lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 3879 { 3880 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 3881 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 3882 3883 pmb->context1 = NULL; 3884 pmb->context2 = NULL; 3885 3886 lpfc_mbuf_free(phba, mp->virt, mp->phys); 3887 kfree(mp); 3888 mempool_free(pmb, phba->mbox_mem_pool); 3889 if (ndlp) { 3890 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, 3891 "0006 rpi%x DID:%x flg:%x %d map:%x %p\n", 3892 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, 3893 kref_read(&ndlp->kref), 3894 ndlp->nlp_usg_map, ndlp); 3895 if (NLP_CHK_NODE_ACT(ndlp)) { 3896 lpfc_nlp_put(ndlp); 3897 /* This is the end of the default RPI cleanup logic for 3898 * this ndlp. 
If no other discovery threads are using
                         * this ndlp, free all resources associated with it.
                         */
                        lpfc_nlp_not_used(ndlp);
                } else {
                        lpfc_drop_node(ndlp->vport, ndlp);
                }
        }

        return;
}

/**
 * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is the completion callback function for an ELS Response IOCB
 * command. In the normal case, this callback function just properly sets the
 * nlp_flag bitmap in the ndlp data structure; if the mbox command reference
 * field in the command IOCB is not NULL, the referred mailbox command will
 * be sent out, and then the lpfc_els_free_iocb() routine is invoked to
 * release the IOCB. Under error conditions, such as when a LS_RJT is
 * returned or a link down event occurred during the discovery, the
 * lpfc_nlp_not_used() routine shall be invoked to try to release the ndlp if
 * no other threads are currently referring to it.
 **/
static void
lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                  struct lpfc_iocbq *rspiocb)
{
        struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
        struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
        struct Scsi_Host *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
        IOCB_t *irsp;
        uint8_t *pcmd;
        LPFC_MBOXQ_t *mbox = NULL;
        struct lpfc_dmabuf *mp = NULL;
        uint32_t ls_rjt = 0;

        irsp = &rspiocb->iocb;

        if (cmdiocb->context_un.mbox)
                mbox = cmdiocb->context_un.mbox;

        /* First determine if this is a LS_RJT cmpl. Note, this callback
         * function can have cmdiocb->context1 (ndlp) field set to NULL.
         */
        pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
        if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
            (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
                /* A LS_RJT associated with Default RPI cleanup has its own
                 * separate code path.
                 */
                if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI))
                        ls_rjt = 1;
        }

        /* Check to see if link went down during discovery */
        if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || lpfc_els_chk_latt(vport)) {
                if (mbox) {
                        mp = (struct lpfc_dmabuf *) mbox->context1;
                        if (mp) {
                                lpfc_mbuf_free(phba, mp->virt, mp->phys);
                                kfree(mp);
                        }
                        mempool_free(mbox, phba->mbox_mem_pool);
                }
                if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
                    (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
                        if (lpfc_nlp_not_used(ndlp)) {
                                ndlp = NULL;
                                /* Indicate the node has already been
                                 * released; it should not be referenced
                                 * from within lpfc_els_free_iocb.
3974 */ 3975 cmdiocb->context1 = NULL; 3976 } 3977 goto out; 3978 } 3979 3980 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 3981 "ELS rsp cmpl: status:x%x/x%x did:x%x", 3982 irsp->ulpStatus, irsp->un.ulpWord[4], 3983 cmdiocb->iocb.un.elsreq64.remoteID); 3984 /* ELS response tag <ulpIoTag> completes */ 3985 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3986 "0110 ELS response tag x%x completes " 3987 "Data: x%x x%x x%x x%x x%x x%x x%x\n", 3988 cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus, 3989 rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout, 3990 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 3991 ndlp->nlp_rpi); 3992 if (mbox) { 3993 if ((rspiocb->iocb.ulpStatus == 0) 3994 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { 3995 if (!lpfc_unreg_rpi(vport, ndlp) && 3996 (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || 3997 ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE)) { 3998 lpfc_printf_vlog(vport, KERN_INFO, 3999 LOG_DISCOVERY, 4000 "0314 PLOGI recov DID x%x " 4001 "Data: x%x x%x x%x\n", 4002 ndlp->nlp_DID, ndlp->nlp_state, 4003 ndlp->nlp_rpi, ndlp->nlp_flag); 4004 mp = mbox->context1; 4005 if (mp) { 4006 lpfc_mbuf_free(phba, mp->virt, 4007 mp->phys); 4008 kfree(mp); 4009 } 4010 mempool_free(mbox, phba->mbox_mem_pool); 4011 goto out; 4012 } 4013 4014 /* Increment reference count to ndlp to hold the 4015 * reference to ndlp for the callback function. 4016 */ 4017 mbox->context2 = lpfc_nlp_get(ndlp); 4018 mbox->vport = vport; 4019 if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) { 4020 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG; 4021 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 4022 } 4023 else { 4024 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; 4025 ndlp->nlp_prev_state = ndlp->nlp_state; 4026 lpfc_nlp_set_state(vport, ndlp, 4027 NLP_STE_REG_LOGIN_ISSUE); 4028 } 4029 4030 ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; 4031 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 4032 != MBX_NOT_FINISHED) 4033 goto out; 4034 4035 /* Decrement the ndlp reference count we 4036 * set for this failed mailbox command. 4037 */ 4038 lpfc_nlp_put(ndlp); 4039 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 4040 4041 /* ELS rsp: Cannot issue reg_login for <NPortid> */ 4042 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 4043 "0138 ELS rsp: Cannot issue reg_login for x%x " 4044 "Data: x%x x%x x%x\n", 4045 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 4046 ndlp->nlp_rpi); 4047 4048 if (lpfc_nlp_not_used(ndlp)) { 4049 ndlp = NULL; 4050 /* Indicate node has already been released, 4051 * should not reference to it from within 4052 * the routine lpfc_els_free_iocb. 4053 */ 4054 cmdiocb->context1 = NULL; 4055 } 4056 } else { 4057 /* Do not drop node for lpfc_els_abort'ed ELS cmds */ 4058 if (!lpfc_error_lost_link(irsp) && 4059 ndlp->nlp_flag & NLP_ACC_REGLOGIN) { 4060 if (lpfc_nlp_not_used(ndlp)) { 4061 ndlp = NULL; 4062 /* Indicate node has already been 4063 * released, should not reference 4064 * to it from within the routine 4065 * lpfc_els_free_iocb. 4066 */ 4067 cmdiocb->context1 = NULL; 4068 } 4069 } 4070 } 4071 mp = (struct lpfc_dmabuf *) mbox->context1; 4072 if (mp) { 4073 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4074 kfree(mp); 4075 } 4076 mempool_free(mbox, phba->mbox_mem_pool); 4077 } 4078 out: 4079 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { 4080 spin_lock_irq(shost->host_lock); 4081 ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI); 4082 spin_unlock_irq(shost->host_lock); 4083 4084 /* If the node is not being used by another discovery thread, 4085 * and we are sending a reject, we are done with it. 
4086 * Release driver reference count here and free associated 4087 * resources. 4088 */ 4089 if (ls_rjt) 4090 if (lpfc_nlp_not_used(ndlp)) 4091 /* Indicate node has already been released, 4092 * should not reference to it from within 4093 * the routine lpfc_els_free_iocb. 4094 */ 4095 cmdiocb->context1 = NULL; 4096 4097 } 4098 4099 lpfc_els_free_iocb(phba, cmdiocb); 4100 return; 4101 } 4102 4103 /** 4104 * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command 4105 * @vport: pointer to a host virtual N_Port data structure. 4106 * @flag: the els command code to be accepted. 4107 * @oldiocb: pointer to the original lpfc command iocb data structure. 4108 * @ndlp: pointer to a node-list data structure. 4109 * @mbox: pointer to the driver internal queue element for mailbox command. 4110 * 4111 * This routine prepares and issues an Accept (ACC) response IOCB 4112 * command. It uses the @flag to properly set up the IOCB field for the 4113 * specific ACC response command to be issued and invokes the 4114 * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a 4115 * @mbox pointer is passed in, it will be put into the context_un.mbox 4116 * field of the IOCB for the completion callback function to issue the 4117 * mailbox command to the HBA later when callback is invoked. 4118 * 4119 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 4120 * will be incremented by 1 for holding the ndlp and the reference to ndlp 4121 * will be stored into the context1 field of the IOCB for the completion 4122 * callback function to the corresponding response ELS IOCB command. 4123 * 4124 * Return code 4125 * 0 - Successfully issued acc response 4126 * 1 - Failed to issue acc response 4127 **/ 4128 int 4129 lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, 4130 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 4131 LPFC_MBOXQ_t *mbox) 4132 { 4133 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4134 struct lpfc_hba *phba = vport->phba; 4135 IOCB_t *icmd; 4136 IOCB_t *oldcmd; 4137 struct lpfc_iocbq *elsiocb; 4138 uint8_t *pcmd; 4139 struct serv_parm *sp; 4140 uint16_t cmdsize; 4141 int rc; 4142 ELS_PKT *els_pkt_ptr; 4143 4144 oldcmd = &oldiocb->iocb; 4145 4146 switch (flag) { 4147 case ELS_CMD_ACC: 4148 cmdsize = sizeof(uint32_t); 4149 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 4150 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 4151 if (!elsiocb) { 4152 spin_lock_irq(shost->host_lock); 4153 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 4154 spin_unlock_irq(shost->host_lock); 4155 return 1; 4156 } 4157 4158 icmd = &elsiocb->iocb; 4159 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 4160 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 4161 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 4162 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 4163 pcmd += sizeof(uint32_t); 4164 4165 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4166 "Issue ACC: did:x%x flg:x%x", 4167 ndlp->nlp_DID, ndlp->nlp_flag, 0); 4168 break; 4169 case ELS_CMD_FLOGI: 4170 case ELS_CMD_PLOGI: 4171 cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t)); 4172 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 4173 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 4174 if (!elsiocb) 4175 return 1; 4176 4177 icmd = &elsiocb->iocb; 4178 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 4179 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 4180 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 4181 4182 if (mbox) 4183 elsiocb->context_un.mbox = 
mbox; 4184 4185 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 4186 pcmd += sizeof(uint32_t); 4187 sp = (struct serv_parm *)pcmd; 4188 4189 if (flag == ELS_CMD_FLOGI) { 4190 /* Copy the received service parameters back */ 4191 memcpy(sp, &phba->fc_fabparam, 4192 sizeof(struct serv_parm)); 4193 4194 /* Clear the F_Port bit */ 4195 sp->cmn.fPort = 0; 4196 4197 /* Mark all class service parameters as invalid */ 4198 sp->cls1.classValid = 0; 4199 sp->cls2.classValid = 0; 4200 sp->cls3.classValid = 0; 4201 sp->cls4.classValid = 0; 4202 4203 /* Copy our worldwide names */ 4204 memcpy(&sp->portName, &vport->fc_sparam.portName, 4205 sizeof(struct lpfc_name)); 4206 memcpy(&sp->nodeName, &vport->fc_sparam.nodeName, 4207 sizeof(struct lpfc_name)); 4208 } else { 4209 memcpy(pcmd, &vport->fc_sparam, 4210 sizeof(struct serv_parm)); 4211 4212 sp->cmn.valid_vendor_ver_level = 0; 4213 memset(sp->un.vendorVersion, 0, 4214 sizeof(sp->un.vendorVersion)); 4215 sp->cmn.bbRcvSizeMsb &= 0xF; 4216 4217 /* If our firmware supports this feature, convey that 4218 * info to the target using the vendor specific field. 4219 */ 4220 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) { 4221 sp->cmn.valid_vendor_ver_level = 1; 4222 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID); 4223 sp->un.vv.flags = 4224 cpu_to_be32(LPFC_VV_SUPPRESS_RSP); 4225 } 4226 } 4227 4228 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4229 "Issue ACC FLOGI/PLOGI: did:x%x flg:x%x", 4230 ndlp->nlp_DID, ndlp->nlp_flag, 0); 4231 break; 4232 case ELS_CMD_PRLO: 4233 cmdsize = sizeof(uint32_t) + sizeof(PRLO); 4234 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 4235 ndlp, ndlp->nlp_DID, ELS_CMD_PRLO); 4236 if (!elsiocb) 4237 return 1; 4238 4239 icmd = &elsiocb->iocb; 4240 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 4241 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 4242 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 4243 4244 memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt, 4245 sizeof(uint32_t) + sizeof(PRLO)); 4246 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC; 4247 els_pkt_ptr = (ELS_PKT *) pcmd; 4248 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED; 4249 4250 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4251 "Issue ACC PRLO: did:x%x flg:x%x", 4252 ndlp->nlp_DID, ndlp->nlp_flag, 0); 4253 break; 4254 default: 4255 return 1; 4256 } 4257 /* Xmit ELS ACC response tag <ulpIoTag> */ 4258 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4259 "0128 Xmit ELS ACC response tag x%x, XRI: x%x, " 4260 "DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x " 4261 "fc_flag x%x\n", 4262 elsiocb->iotag, elsiocb->iocb.ulpContext, 4263 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 4264 ndlp->nlp_rpi, vport->fc_flag); 4265 if (ndlp->nlp_flag & NLP_LOGO_ACC) { 4266 spin_lock_irq(shost->host_lock); 4267 if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED || 4268 ndlp->nlp_flag & NLP_REG_LOGIN_SEND)) 4269 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 4270 spin_unlock_irq(shost->host_lock); 4271 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc; 4272 } else { 4273 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 4274 } 4275 4276 phba->fc_stat.elsXmitACC++; 4277 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 4278 if (rc == IOCB_ERROR) { 4279 lpfc_els_free_iocb(phba, elsiocb); 4280 return 1; 4281 } 4282 return 0; 4283 } 4284 4285 /** 4286 * lpfc_els_rsp_reject - Propare and issue a rjt response iocb command 4287 * @vport: pointer to a virtual N_Port data structure. 
4288 * @rejectError: 4289 * @oldiocb: pointer to the original lpfc command iocb data structure. 4290 * @ndlp: pointer to a node-list data structure. 4291 * @mbox: pointer to the driver internal queue element for mailbox command. 4292 * 4293 * This routine prepares and issue an Reject (RJT) response IOCB 4294 * command. If a @mbox pointer is passed in, it will be put into the 4295 * context_un.mbox field of the IOCB for the completion callback function 4296 * to issue to the HBA later. 4297 * 4298 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 4299 * will be incremented by 1 for holding the ndlp and the reference to ndlp 4300 * will be stored into the context1 field of the IOCB for the completion 4301 * callback function to the reject response ELS IOCB command. 4302 * 4303 * Return code 4304 * 0 - Successfully issued reject response 4305 * 1 - Failed to issue reject response 4306 **/ 4307 int 4308 lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, 4309 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 4310 LPFC_MBOXQ_t *mbox) 4311 { 4312 struct lpfc_hba *phba = vport->phba; 4313 IOCB_t *icmd; 4314 IOCB_t *oldcmd; 4315 struct lpfc_iocbq *elsiocb; 4316 uint8_t *pcmd; 4317 uint16_t cmdsize; 4318 int rc; 4319 4320 cmdsize = 2 * sizeof(uint32_t); 4321 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 4322 ndlp->nlp_DID, ELS_CMD_LS_RJT); 4323 if (!elsiocb) 4324 return 1; 4325 4326 icmd = &elsiocb->iocb; 4327 oldcmd = &oldiocb->iocb; 4328 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 4329 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 4330 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 4331 4332 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; 4333 pcmd += sizeof(uint32_t); 4334 *((uint32_t *) (pcmd)) = rejectError; 4335 4336 if (mbox) 4337 elsiocb->context_un.mbox = mbox; 4338 4339 /* Xmit ELS RJT <err> response tag <ulpIoTag> */ 4340 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4341 "0129 Xmit ELS RJT x%x response tag x%x " 4342 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " 4343 "rpi x%x\n", 4344 rejectError, elsiocb->iotag, 4345 elsiocb->iocb.ulpContext, ndlp->nlp_DID, 4346 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); 4347 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4348 "Issue LS_RJT: did:x%x flg:x%x err:x%x", 4349 ndlp->nlp_DID, ndlp->nlp_flag, rejectError); 4350 4351 phba->fc_stat.elsXmitLSRJT++; 4352 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 4353 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 4354 4355 if (rc == IOCB_ERROR) { 4356 lpfc_els_free_iocb(phba, elsiocb); 4357 return 1; 4358 } 4359 return 0; 4360 } 4361 4362 /** 4363 * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd 4364 * @vport: pointer to a virtual N_Port data structure. 4365 * @oldiocb: pointer to the original lpfc command iocb data structure. 4366 * @ndlp: pointer to a node-list data structure. 4367 * 4368 * This routine prepares and issues an Accept (ACC) response to Address 4369 * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB 4370 * and invokes the lpfc_sli_issue_iocb() routine to send out the command. 4371 * 4372 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 4373 * will be incremented by 1 for holding the ndlp and the reference to ndlp 4374 * will be stored into the context1 field of the IOCB for the completion 4375 * callback function to the ADISC Accept response ELS IOCB command. 
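 *
 * The ACC payload built here consists of the ELS_CMD_ACC word followed by an
 * ADISC parameter block carrying the hard address (hardAL_PA), the port and
 * node world wide names, and the N_Port ID of this vport, as assembled in
 * the routine body below.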
4376 * 4377 * Return code 4378 * 0 - Successfully issued acc adisc response 4379 * 1 - Failed to issue adisc acc response 4380 **/ 4381 int 4382 lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 4383 struct lpfc_nodelist *ndlp) 4384 { 4385 struct lpfc_hba *phba = vport->phba; 4386 ADISC *ap; 4387 IOCB_t *icmd, *oldcmd; 4388 struct lpfc_iocbq *elsiocb; 4389 uint8_t *pcmd; 4390 uint16_t cmdsize; 4391 int rc; 4392 4393 cmdsize = sizeof(uint32_t) + sizeof(ADISC); 4394 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 4395 ndlp->nlp_DID, ELS_CMD_ACC); 4396 if (!elsiocb) 4397 return 1; 4398 4399 icmd = &elsiocb->iocb; 4400 oldcmd = &oldiocb->iocb; 4401 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 4402 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 4403 4404 /* Xmit ADISC ACC response tag <ulpIoTag> */ 4405 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4406 "0130 Xmit ADISC ACC response iotag x%x xri: " 4407 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n", 4408 elsiocb->iotag, elsiocb->iocb.ulpContext, 4409 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 4410 ndlp->nlp_rpi); 4411 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 4412 4413 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 4414 pcmd += sizeof(uint32_t); 4415 4416 ap = (ADISC *) (pcmd); 4417 ap->hardAL_PA = phba->fc_pref_ALPA; 4418 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 4419 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 4420 ap->DID = be32_to_cpu(vport->fc_myDID); 4421 4422 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4423 "Issue ACC ADISC: did:x%x flg:x%x", 4424 ndlp->nlp_DID, ndlp->nlp_flag, 0); 4425 4426 phba->fc_stat.elsXmitACC++; 4427 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 4428 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 4429 if (rc == IOCB_ERROR) { 4430 lpfc_els_free_iocb(phba, elsiocb); 4431 return 1; 4432 } 4433 return 0; 4434 } 4435 4436 /** 4437 * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd 4438 * @vport: pointer to a virtual N_Port data structure. 4439 * @oldiocb: pointer to the original lpfc command iocb data structure. 4440 * @ndlp: pointer to a node-list data structure. 4441 * 4442 * This routine prepares and issues an Accept (ACC) response to Process 4443 * Login (PRLI) ELS command. It simply prepares the payload of the IOCB 4444 * and invokes the lpfc_sli_issue_iocb() routine to send out the command. 4445 * 4446 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 4447 * will be incremented by 1 for holding the ndlp and the reference to ndlp 4448 * will be stored into the context1 field of the IOCB for the completion 4449 * callback function to the PRLI Accept response ELS IOCB command. 4450 * 4451 * Return code 4452 * 0 - Successfully issued acc prli response 4453 * 1 - Failed to issue acc prli response 4454 **/ 4455 int 4456 lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 4457 struct lpfc_nodelist *ndlp) 4458 { 4459 struct lpfc_hba *phba = vport->phba; 4460 PRLI *npr; 4461 struct lpfc_nvme_prli *npr_nvme; 4462 lpfc_vpd_t *vpd; 4463 IOCB_t *icmd; 4464 IOCB_t *oldcmd; 4465 struct lpfc_iocbq *elsiocb; 4466 uint8_t *pcmd; 4467 uint16_t cmdsize; 4468 uint32_t prli_fc4_req, *req_payload; 4469 struct lpfc_dmabuf *req_buf; 4470 int rc; 4471 u32 elsrspcmd; 4472 4473 /* Need the incoming PRLI payload to determine if the ACC is for an 4474 * FC4 or NVME PRLI type. The PRLI type is at word 1. 
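         * The FC-4 type is carried in the most significant byte of that
         * word; the code below uses it to choose between a PRLI (FCP)
         * accept page and an lpfc_nvme_prli accept page, and to size the
         * response accordingly.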
4475 */ 4476 req_buf = (struct lpfc_dmabuf *)oldiocb->context2; 4477 req_payload = (((uint32_t *)req_buf->virt) + 1); 4478 4479 /* PRLI type payload is at byte 3 for FCP or NVME. */ 4480 prli_fc4_req = be32_to_cpu(*req_payload); 4481 prli_fc4_req = (prli_fc4_req >> 24) & 0xff; 4482 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4483 "6127 PRLI_ACC: Req Type x%x, Word1 x%08x\n", 4484 prli_fc4_req, *((uint32_t *)req_payload)); 4485 4486 if (prli_fc4_req == PRLI_FCP_TYPE) { 4487 cmdsize = sizeof(uint32_t) + sizeof(PRLI); 4488 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)); 4489 } else if (prli_fc4_req & PRLI_NVME_TYPE) { 4490 cmdsize = sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli); 4491 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_NVMEPRLI & ~ELS_RSP_MASK)); 4492 } else { 4493 return 1; 4494 } 4495 4496 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 4497 ndlp->nlp_DID, elsrspcmd); 4498 if (!elsiocb) 4499 return 1; 4500 4501 icmd = &elsiocb->iocb; 4502 oldcmd = &oldiocb->iocb; 4503 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 4504 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 4505 4506 /* Xmit PRLI ACC response tag <ulpIoTag> */ 4507 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4508 "0131 Xmit PRLI ACC response tag x%x xri x%x, " 4509 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 4510 elsiocb->iotag, elsiocb->iocb.ulpContext, 4511 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 4512 ndlp->nlp_rpi); 4513 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 4514 memset(pcmd, 0, cmdsize); 4515 4516 *((uint32_t *)(pcmd)) = elsrspcmd; 4517 pcmd += sizeof(uint32_t); 4518 4519 /* For PRLI, remainder of payload is PRLI parameter page */ 4520 vpd = &phba->vpd; 4521 4522 if (prli_fc4_req == PRLI_FCP_TYPE) { 4523 /* 4524 * If the remote port is a target and our firmware version 4525 * is 3.20 or later, set the following bits for FC-TAPE 4526 * support. 4527 */ 4528 npr = (PRLI *) pcmd; 4529 if ((ndlp->nlp_type & NLP_FCP_TARGET) && 4530 (vpd->rev.feaLevelHigh >= 0x02)) { 4531 npr->ConfmComplAllowed = 1; 4532 npr->Retry = 1; 4533 npr->TaskRetryIdReq = 1; 4534 } 4535 npr->acceptRspCode = PRLI_REQ_EXECUTED; 4536 npr->estabImagePair = 1; 4537 npr->readXferRdyDis = 1; 4538 npr->ConfmComplAllowed = 1; 4539 npr->prliType = PRLI_FCP_TYPE; 4540 npr->initiatorFunc = 1; 4541 } else if (prli_fc4_req & PRLI_NVME_TYPE) { 4542 /* Respond with an NVME PRLI Type */ 4543 npr_nvme = (struct lpfc_nvme_prli *) pcmd; 4544 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); 4545 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ 4546 bf_set(prli_acc_rsp_code, npr_nvme, PRLI_REQ_EXECUTED); 4547 if (phba->nvmet_support) { 4548 bf_set(prli_tgt, npr_nvme, 1); 4549 bf_set(prli_disc, npr_nvme, 1); 4550 if (phba->cfg_nvme_enable_fb) { 4551 bf_set(prli_fba, npr_nvme, 1); 4552 4553 /* TBD. Target mode needs to post buffers 4554 * that support the configured first burst 4555 * byte size. 
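                                 * For now the advertised first burst size
                                 * is taken directly from cfg_nvmet_fb_size
                                 * in the bf_set() call below.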
4556 */ 4557 bf_set(prli_fb_sz, npr_nvme, 4558 phba->cfg_nvmet_fb_size); 4559 } 4560 } else { 4561 bf_set(prli_init, npr_nvme, 1); 4562 } 4563 4564 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 4565 "6015 NVME issue PRLI ACC word1 x%08x " 4566 "word4 x%08x word5 x%08x flag x%x, " 4567 "fcp_info x%x nlp_type x%x\n", 4568 npr_nvme->word1, npr_nvme->word4, 4569 npr_nvme->word5, ndlp->nlp_flag, 4570 ndlp->nlp_fcp_info, ndlp->nlp_type); 4571 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); 4572 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); 4573 npr_nvme->word5 = cpu_to_be32(npr_nvme->word5); 4574 } else 4575 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 4576 "6128 Unknown FC_TYPE x%x x%x ndlp x%06x\n", 4577 prli_fc4_req, ndlp->nlp_fc4_type, 4578 ndlp->nlp_DID); 4579 4580 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4581 "Issue ACC PRLI: did:x%x flg:x%x", 4582 ndlp->nlp_DID, ndlp->nlp_flag, 0); 4583 4584 phba->fc_stat.elsXmitACC++; 4585 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 4586 4587 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 4588 if (rc == IOCB_ERROR) { 4589 lpfc_els_free_iocb(phba, elsiocb); 4590 return 1; 4591 } 4592 return 0; 4593 } 4594 4595 /** 4596 * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command 4597 * @vport: pointer to a virtual N_Port data structure. 4598 * @format: rnid command format. 4599 * @oldiocb: pointer to the original lpfc command iocb data structure. 4600 * @ndlp: pointer to a node-list data structure. 4601 * 4602 * This routine issues a Request Node Identification Data (RNID) Accept 4603 * (ACC) response. It constructs the RNID ACC response command according to 4604 * the proper @format and then calls the lpfc_sli_issue_iocb() routine to 4605 * issue the response. Note that this command does not need to hold the ndlp 4606 * reference count for the callback. So, the ndlp reference count taken by 4607 * the lpfc_prep_els_iocb() routine is put back and the context1 field of 4608 * IOCB is set to NULL to indicate to the lpfc_els_free_iocb() routine that 4609 * there is no ndlp reference available. 4610 * 4611 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 4612 * will be incremented by 1 for holding the ndlp and the reference to ndlp 4613 * will be stored into the context1 field of the IOCB for the completion 4614 * callback function. However, for the RNID Accept Response ELS command, 4615 * this is undone later by this routine after the IOCB is allocated. 
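 *
 * The response payload assembled below is the ELS_CMD_ACC word followed by
 * an RNID block: the common identification data (the port and node names,
 * with CommonLen set to the size of those two names) and, when the requester
 * asked for format RNID_TOPOLOGY_DISC, an additional topology discovery
 * block describing this HBA port.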
4616 * 4617 * Return code 4618 * 0 - Successfully issued acc rnid response 4619 * 1 - Failed to issue acc rnid response 4620 **/ 4621 static int 4622 lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, 4623 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 4624 { 4625 struct lpfc_hba *phba = vport->phba; 4626 RNID *rn; 4627 IOCB_t *icmd, *oldcmd; 4628 struct lpfc_iocbq *elsiocb; 4629 uint8_t *pcmd; 4630 uint16_t cmdsize; 4631 int rc; 4632 4633 cmdsize = sizeof(uint32_t) + sizeof(uint32_t) 4634 + (2 * sizeof(struct lpfc_name)); 4635 if (format) 4636 cmdsize += sizeof(RNID_TOP_DISC); 4637 4638 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 4639 ndlp->nlp_DID, ELS_CMD_ACC); 4640 if (!elsiocb) 4641 return 1; 4642 4643 icmd = &elsiocb->iocb; 4644 oldcmd = &oldiocb->iocb; 4645 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 4646 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 4647 4648 /* Xmit RNID ACC response tag <ulpIoTag> */ 4649 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4650 "0132 Xmit RNID ACC response tag x%x xri x%x\n", 4651 elsiocb->iotag, elsiocb->iocb.ulpContext); 4652 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 4653 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 4654 pcmd += sizeof(uint32_t); 4655 4656 memset(pcmd, 0, sizeof(RNID)); 4657 rn = (RNID *) (pcmd); 4658 rn->Format = format; 4659 rn->CommonLen = (2 * sizeof(struct lpfc_name)); 4660 memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 4661 memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 4662 switch (format) { 4663 case 0: 4664 rn->SpecificLen = 0; 4665 break; 4666 case RNID_TOPOLOGY_DISC: 4667 rn->SpecificLen = sizeof(RNID_TOP_DISC); 4668 memcpy(&rn->un.topologyDisc.portName, 4669 &vport->fc_portname, sizeof(struct lpfc_name)); 4670 rn->un.topologyDisc.unitType = RNID_HBA; 4671 rn->un.topologyDisc.physPort = 0; 4672 rn->un.topologyDisc.attachedNodes = 0; 4673 break; 4674 default: 4675 rn->CommonLen = 0; 4676 rn->SpecificLen = 0; 4677 break; 4678 } 4679 4680 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4681 "Issue ACC RNID: did:x%x flg:x%x", 4682 ndlp->nlp_DID, ndlp->nlp_flag, 0); 4683 4684 phba->fc_stat.elsXmitACC++; 4685 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 4686 4687 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 4688 if (rc == IOCB_ERROR) { 4689 lpfc_els_free_iocb(phba, elsiocb); 4690 return 1; 4691 } 4692 return 0; 4693 } 4694 4695 /** 4696 * lpfc_els_clear_rrq - Clear the rq that this rrq describes. 4697 * @vport: pointer to a virtual N_Port data structure. 4698 * @iocb: pointer to the lpfc command iocb data structure. 4699 * @ndlp: pointer to a node-list data structure. 
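 *
 * This routine handles a received RRQ (Reinstate Recovery Qualifier) ELS. It
 * decodes the originator and responder exchange IDs from the RRQ payload,
 * selects the exchange ID that belongs to this port (the OX_ID when this
 * port's DID matches the RRQ originator, otherwise the RX_ID), looks up the
 * matching active RRQ with lpfc_get_active_rrq() and, if one is found,
 * clears it with lpfc_clr_rrq_active().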
4700 * 4701 * Return 4702 **/ 4703 static void 4704 lpfc_els_clear_rrq(struct lpfc_vport *vport, 4705 struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp) 4706 { 4707 struct lpfc_hba *phba = vport->phba; 4708 uint8_t *pcmd; 4709 struct RRQ *rrq; 4710 uint16_t rxid; 4711 uint16_t xri; 4712 struct lpfc_node_rrq *prrq; 4713 4714 4715 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) iocb->context2)->virt); 4716 pcmd += sizeof(uint32_t); 4717 rrq = (struct RRQ *)pcmd; 4718 rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg); 4719 rxid = bf_get(rrq_rxid, rrq); 4720 4721 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4722 "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x" 4723 " x%x x%x\n", 4724 be32_to_cpu(bf_get(rrq_did, rrq)), 4725 bf_get(rrq_oxid, rrq), 4726 rxid, 4727 iocb->iotag, iocb->iocb.ulpContext); 4728 4729 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4730 "Clear RRQ: did:x%x flg:x%x exchg:x%.08x", 4731 ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg); 4732 if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq))) 4733 xri = bf_get(rrq_oxid, rrq); 4734 else 4735 xri = rxid; 4736 prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID); 4737 if (prrq) 4738 lpfc_clr_rrq_active(phba, xri, prrq); 4739 return; 4740 } 4741 4742 /** 4743 * lpfc_els_rsp_echo_acc - Issue echo acc response 4744 * @vport: pointer to a virtual N_Port data structure. 4745 * @data: pointer to echo data to return in the accept. 4746 * @oldiocb: pointer to the original lpfc command iocb data structure. 4747 * @ndlp: pointer to a node-list data structure. 4748 * 4749 * Return code 4750 * 0 - Successfully issued acc echo response 4751 * 1 - Failed to issue acc echo response 4752 **/ 4753 static int 4754 lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data, 4755 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 4756 { 4757 struct lpfc_hba *phba = vport->phba; 4758 struct lpfc_iocbq *elsiocb; 4759 uint8_t *pcmd; 4760 uint16_t cmdsize; 4761 int rc; 4762 4763 cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len; 4764 4765 /* The accumulated length can exceed the BPL_SIZE. For 4766 * now, use this as the limit 4767 */ 4768 if (cmdsize > LPFC_BPL_SIZE) 4769 cmdsize = LPFC_BPL_SIZE; 4770 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 4771 ndlp->nlp_DID, ELS_CMD_ACC); 4772 if (!elsiocb) 4773 return 1; 4774 4775 elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext; /* Xri / rx_id */ 4776 elsiocb->iocb.unsli3.rcvsli3.ox_id = oldiocb->iocb.unsli3.rcvsli3.ox_id; 4777 4778 /* Xmit ECHO ACC response tag <ulpIoTag> */ 4779 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4780 "2876 Xmit ECHO ACC response tag x%x xri x%x\n", 4781 elsiocb->iotag, elsiocb->iocb.ulpContext); 4782 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 4783 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 4784 pcmd += sizeof(uint32_t); 4785 memcpy(pcmd, data, cmdsize - sizeof(uint32_t)); 4786 4787 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4788 "Issue ACC ECHO: did:x%x flg:x%x", 4789 ndlp->nlp_DID, ndlp->nlp_flag, 0); 4790 4791 phba->fc_stat.elsXmitACC++; 4792 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 4793 4794 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 4795 if (rc == IOCB_ERROR) { 4796 lpfc_els_free_iocb(phba, elsiocb); 4797 return 1; 4798 } 4799 return 0; 4800 } 4801 4802 /** 4803 * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport 4804 * @vport: pointer to a host virtual N_Port data structure. 
 *
 * This routine issues Address Discover (ADISC) ELS commands to those
 * N_Ports which are in node port recovery state and for which ADISC has not
 * yet been issued on the @vport. Each time an ELS ADISC IOCB is issued by
 * invoking the lpfc_issue_els_adisc() routine, the per-@vport discovery
 * count (num_disc_nodes) is incremented. If num_disc_nodes reaches the
 * pre-configured threshold (cfg_discovery_threads), the FC_NLP_MORE bit is
 * set in the @vport fc_flag and issuing of the remaining ADISC IOCBs is
 * deferred until it is picked up later. Otherwise, once all the ndlps of the
 * @vport have been walked and no ADISC IOCB remains to be issued, the
 * FC_NLP_MORE bit is cleared from the @vport fc_flag to indicate that no
 * more ADISCs need to be sent.
 *
 * Return code
 *   The number of N_Ports with adisc issued.
 **/
int
lpfc_els_disc_adisc(struct lpfc_vport *vport)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct lpfc_nodelist *ndlp, *next_ndlp;
        int sentadisc = 0;

        /* go thru NPR nodes and issue any remaining ELS ADISCs */
        list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
                if (!NLP_CHK_NODE_ACT(ndlp))
                        continue;
                if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
                    (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
                    (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) {
                        spin_lock_irq(shost->host_lock);
                        ndlp->nlp_flag &= ~NLP_NPR_ADISC;
                        spin_unlock_irq(shost->host_lock);
                        ndlp->nlp_prev_state = ndlp->nlp_state;
                        lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
                        lpfc_issue_els_adisc(vport, ndlp, 0);
                        sentadisc++;
                        vport->num_disc_nodes++;
                        if (vport->num_disc_nodes >=
                            vport->cfg_discovery_threads) {
                                spin_lock_irq(shost->host_lock);
                                vport->fc_flag |= FC_NLP_MORE;
                                spin_unlock_irq(shost->host_lock);
                                break;
                        }
                }
        }
        if (sentadisc == 0) {
                spin_lock_irq(shost->host_lock);
                vport->fc_flag &= ~FC_NLP_MORE;
                spin_unlock_irq(shost->host_lock);
        }
        return sentadisc;
}

/**
 * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports
 * which are in node port recovery state on the @vport. Each time an ELS
 * PLOGI IOCB is issued by invoking the lpfc_issue_els_plogi() routine, the
 * per-@vport discovery count (num_disc_nodes) is incremented. If
 * num_disc_nodes reaches the pre-configured threshold
 * (cfg_discovery_threads), the FC_NLP_MORE bit is set in the @vport fc_flag
 * and issuing of the remaining PLOGI IOCBs is deferred until it is picked up
 * later. Otherwise, once all the ndlps of the @vport have been walked and no
 * PLOGI IOCB remains to be issued, the FC_NLP_MORE bit is cleared to
 * indicate that no more PLOGIs need to be sent.
 *
 * Return code
 *   The number of N_Ports with plogi issued.
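 *
 * As a usage note (an illustrative sketch, not a prescribed call sequence):
 * discovery completion paths such as lpfc_more_plogi() are expected to call
 * this routine again while FC_NLP_MORE remains set, for example:
 *
 *      if (vport->fc_flag & FC_NLP_MORE)
 *              sentplogi = lpfc_els_disc_plogi(vport);
 *
 * so that the NPR nodes are worked through in batches of at most
 * cfg_discovery_threads outstanding PLOGIs.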
4878 **/ 4879 int 4880 lpfc_els_disc_plogi(struct lpfc_vport *vport) 4881 { 4882 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4883 struct lpfc_nodelist *ndlp, *next_ndlp; 4884 int sentplogi = 0; 4885 4886 /* go thru NPR nodes and issue any remaining ELS PLOGIs */ 4887 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 4888 if (!NLP_CHK_NODE_ACT(ndlp)) 4889 continue; 4890 if (ndlp->nlp_state == NLP_STE_NPR_NODE && 4891 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && 4892 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 && 4893 (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) { 4894 ndlp->nlp_prev_state = ndlp->nlp_state; 4895 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 4896 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 4897 sentplogi++; 4898 vport->num_disc_nodes++; 4899 if (vport->num_disc_nodes >= 4900 vport->cfg_discovery_threads) { 4901 spin_lock_irq(shost->host_lock); 4902 vport->fc_flag |= FC_NLP_MORE; 4903 spin_unlock_irq(shost->host_lock); 4904 break; 4905 } 4906 } 4907 } 4908 if (sentplogi) { 4909 lpfc_set_disctmo(vport); 4910 } 4911 else { 4912 spin_lock_irq(shost->host_lock); 4913 vport->fc_flag &= ~FC_NLP_MORE; 4914 spin_unlock_irq(shost->host_lock); 4915 } 4916 return sentplogi; 4917 } 4918 4919 static uint32_t 4920 lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc, 4921 uint32_t word0) 4922 { 4923 4924 desc->tag = cpu_to_be32(RDP_LINK_SERVICE_DESC_TAG); 4925 desc->payload.els_req = word0; 4926 desc->length = cpu_to_be32(sizeof(desc->payload)); 4927 4928 return sizeof(struct fc_rdp_link_service_desc); 4929 } 4930 4931 static uint32_t 4932 lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc, 4933 uint8_t *page_a0, uint8_t *page_a2) 4934 { 4935 uint16_t wavelength; 4936 uint16_t temperature; 4937 uint16_t rx_power; 4938 uint16_t tx_bias; 4939 uint16_t tx_power; 4940 uint16_t vcc; 4941 uint16_t flag = 0; 4942 struct sff_trasnceiver_codes_byte4 *trasn_code_byte4; 4943 struct sff_trasnceiver_codes_byte5 *trasn_code_byte5; 4944 4945 desc->tag = cpu_to_be32(RDP_SFP_DESC_TAG); 4946 4947 trasn_code_byte4 = (struct sff_trasnceiver_codes_byte4 *) 4948 &page_a0[SSF_TRANSCEIVER_CODE_B4]; 4949 trasn_code_byte5 = (struct sff_trasnceiver_codes_byte5 *) 4950 &page_a0[SSF_TRANSCEIVER_CODE_B5]; 4951 4952 if ((trasn_code_byte4->fc_sw_laser) || 4953 (trasn_code_byte5->fc_sw_laser_sl) || 4954 (trasn_code_byte5->fc_sw_laser_sn)) { /* check if its short WL */ 4955 flag |= (SFP_FLAG_PT_SWLASER << SFP_FLAG_PT_SHIFT); 4956 } else if (trasn_code_byte4->fc_lw_laser) { 4957 wavelength = (page_a0[SSF_WAVELENGTH_B1] << 8) | 4958 page_a0[SSF_WAVELENGTH_B0]; 4959 if (wavelength == SFP_WAVELENGTH_LC1310) 4960 flag |= SFP_FLAG_PT_LWLASER_LC1310 << SFP_FLAG_PT_SHIFT; 4961 if (wavelength == SFP_WAVELENGTH_LL1550) 4962 flag |= SFP_FLAG_PT_LWLASER_LL1550 << SFP_FLAG_PT_SHIFT; 4963 } 4964 /* check if its SFP+ */ 4965 flag |= ((page_a0[SSF_IDENTIFIER] == SFF_PG0_IDENT_SFP) ? 4966 SFP_FLAG_CT_SFP_PLUS : SFP_FLAG_CT_UNKNOWN) 4967 << SFP_FLAG_CT_SHIFT; 4968 4969 /* check if its OPTICAL */ 4970 flag |= ((page_a0[SSF_CONNECTOR] == SFF_PG0_CONNECTOR_LC) ? 
4971 SFP_FLAG_IS_OPTICAL_PORT : 0) 4972 << SFP_FLAG_IS_OPTICAL_SHIFT; 4973 4974 temperature = (page_a2[SFF_TEMPERATURE_B1] << 8 | 4975 page_a2[SFF_TEMPERATURE_B0]); 4976 vcc = (page_a2[SFF_VCC_B1] << 8 | 4977 page_a2[SFF_VCC_B0]); 4978 tx_power = (page_a2[SFF_TXPOWER_B1] << 8 | 4979 page_a2[SFF_TXPOWER_B0]); 4980 tx_bias = (page_a2[SFF_TX_BIAS_CURRENT_B1] << 8 | 4981 page_a2[SFF_TX_BIAS_CURRENT_B0]); 4982 rx_power = (page_a2[SFF_RXPOWER_B1] << 8 | 4983 page_a2[SFF_RXPOWER_B0]); 4984 desc->sfp_info.temperature = cpu_to_be16(temperature); 4985 desc->sfp_info.rx_power = cpu_to_be16(rx_power); 4986 desc->sfp_info.tx_bias = cpu_to_be16(tx_bias); 4987 desc->sfp_info.tx_power = cpu_to_be16(tx_power); 4988 desc->sfp_info.vcc = cpu_to_be16(vcc); 4989 4990 desc->sfp_info.flags = cpu_to_be16(flag); 4991 desc->length = cpu_to_be32(sizeof(desc->sfp_info)); 4992 4993 return sizeof(struct fc_rdp_sfp_desc); 4994 } 4995 4996 static uint32_t 4997 lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc, 4998 READ_LNK_VAR *stat) 4999 { 5000 uint32_t type; 5001 5002 desc->tag = cpu_to_be32(RDP_LINK_ERROR_STATUS_DESC_TAG); 5003 5004 type = VN_PT_PHY_PF_PORT << VN_PT_PHY_SHIFT; 5005 5006 desc->info.port_type = cpu_to_be32(type); 5007 5008 desc->info.link_status.link_failure_cnt = 5009 cpu_to_be32(stat->linkFailureCnt); 5010 desc->info.link_status.loss_of_synch_cnt = 5011 cpu_to_be32(stat->lossSyncCnt); 5012 desc->info.link_status.loss_of_signal_cnt = 5013 cpu_to_be32(stat->lossSignalCnt); 5014 desc->info.link_status.primitive_seq_proto_err = 5015 cpu_to_be32(stat->primSeqErrCnt); 5016 desc->info.link_status.invalid_trans_word = 5017 cpu_to_be32(stat->invalidXmitWord); 5018 desc->info.link_status.invalid_crc_cnt = cpu_to_be32(stat->crcCnt); 5019 5020 desc->length = cpu_to_be32(sizeof(desc->info)); 5021 5022 return sizeof(struct fc_rdp_link_error_status_desc); 5023 } 5024 5025 static uint32_t 5026 lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat, 5027 struct lpfc_vport *vport) 5028 { 5029 uint32_t bbCredit; 5030 5031 desc->tag = cpu_to_be32(RDP_BBC_DESC_TAG); 5032 5033 bbCredit = vport->fc_sparam.cmn.bbCreditLsb | 5034 (vport->fc_sparam.cmn.bbCreditMsb << 8); 5035 desc->bbc_info.port_bbc = cpu_to_be32(bbCredit); 5036 if (vport->phba->fc_topology != LPFC_TOPOLOGY_LOOP) { 5037 bbCredit = vport->phba->fc_fabparam.cmn.bbCreditLsb | 5038 (vport->phba->fc_fabparam.cmn.bbCreditMsb << 8); 5039 desc->bbc_info.attached_port_bbc = cpu_to_be32(bbCredit); 5040 } else { 5041 desc->bbc_info.attached_port_bbc = 0; 5042 } 5043 5044 desc->bbc_info.rtt = 0; 5045 desc->length = cpu_to_be32(sizeof(desc->bbc_info)); 5046 5047 return sizeof(struct fc_rdp_bbc_desc); 5048 } 5049 5050 static uint32_t 5051 lpfc_rdp_res_oed_temp_desc(struct lpfc_hba *phba, 5052 struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2) 5053 { 5054 uint32_t flags = 0; 5055 5056 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 5057 5058 desc->oed_info.hi_alarm = page_a2[SSF_TEMP_HIGH_ALARM]; 5059 desc->oed_info.lo_alarm = page_a2[SSF_TEMP_LOW_ALARM]; 5060 desc->oed_info.hi_warning = page_a2[SSF_TEMP_HIGH_WARNING]; 5061 desc->oed_info.lo_warning = page_a2[SSF_TEMP_LOW_WARNING]; 5062 5063 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TEMPERATURE) 5064 flags |= RDP_OET_HIGH_ALARM; 5065 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TEMPERATURE) 5066 flags |= RDP_OET_LOW_ALARM; 5067 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TEMPERATURE) 5068 flags |= RDP_OET_HIGH_WARNING; 5069 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TEMPERATURE) 5070 
flags |= RDP_OET_LOW_WARNING; 5071 5072 flags |= ((0xf & RDP_OED_TEMPERATURE) << RDP_OED_TYPE_SHIFT); 5073 desc->oed_info.function_flags = cpu_to_be32(flags); 5074 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 5075 return sizeof(struct fc_rdp_oed_sfp_desc); 5076 } 5077 5078 static uint32_t 5079 lpfc_rdp_res_oed_voltage_desc(struct lpfc_hba *phba, 5080 struct fc_rdp_oed_sfp_desc *desc, 5081 uint8_t *page_a2) 5082 { 5083 uint32_t flags = 0; 5084 5085 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 5086 5087 desc->oed_info.hi_alarm = page_a2[SSF_VOLTAGE_HIGH_ALARM]; 5088 desc->oed_info.lo_alarm = page_a2[SSF_VOLTAGE_LOW_ALARM]; 5089 desc->oed_info.hi_warning = page_a2[SSF_VOLTAGE_HIGH_WARNING]; 5090 desc->oed_info.lo_warning = page_a2[SSF_VOLTAGE_LOW_WARNING]; 5091 5092 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_VOLTAGE) 5093 flags |= RDP_OET_HIGH_ALARM; 5094 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_VOLTAGE) 5095 flags |= RDP_OET_LOW_ALARM; 5096 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_VOLTAGE) 5097 flags |= RDP_OET_HIGH_WARNING; 5098 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_VOLTAGE) 5099 flags |= RDP_OET_LOW_WARNING; 5100 5101 flags |= ((0xf & RDP_OED_VOLTAGE) << RDP_OED_TYPE_SHIFT); 5102 desc->oed_info.function_flags = cpu_to_be32(flags); 5103 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 5104 return sizeof(struct fc_rdp_oed_sfp_desc); 5105 } 5106 5107 static uint32_t 5108 lpfc_rdp_res_oed_txbias_desc(struct lpfc_hba *phba, 5109 struct fc_rdp_oed_sfp_desc *desc, 5110 uint8_t *page_a2) 5111 { 5112 uint32_t flags = 0; 5113 5114 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 5115 5116 desc->oed_info.hi_alarm = page_a2[SSF_BIAS_HIGH_ALARM]; 5117 desc->oed_info.lo_alarm = page_a2[SSF_BIAS_LOW_ALARM]; 5118 desc->oed_info.hi_warning = page_a2[SSF_BIAS_HIGH_WARNING]; 5119 desc->oed_info.lo_warning = page_a2[SSF_BIAS_LOW_WARNING]; 5120 5121 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXBIAS) 5122 flags |= RDP_OET_HIGH_ALARM; 5123 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXBIAS) 5124 flags |= RDP_OET_LOW_ALARM; 5125 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXBIAS) 5126 flags |= RDP_OET_HIGH_WARNING; 5127 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXBIAS) 5128 flags |= RDP_OET_LOW_WARNING; 5129 5130 flags |= ((0xf & RDP_OED_TXBIAS) << RDP_OED_TYPE_SHIFT); 5131 desc->oed_info.function_flags = cpu_to_be32(flags); 5132 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 5133 return sizeof(struct fc_rdp_oed_sfp_desc); 5134 } 5135 5136 static uint32_t 5137 lpfc_rdp_res_oed_txpower_desc(struct lpfc_hba *phba, 5138 struct fc_rdp_oed_sfp_desc *desc, 5139 uint8_t *page_a2) 5140 { 5141 uint32_t flags = 0; 5142 5143 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 5144 5145 desc->oed_info.hi_alarm = page_a2[SSF_TXPOWER_HIGH_ALARM]; 5146 desc->oed_info.lo_alarm = page_a2[SSF_TXPOWER_LOW_ALARM]; 5147 desc->oed_info.hi_warning = page_a2[SSF_TXPOWER_HIGH_WARNING]; 5148 desc->oed_info.lo_warning = page_a2[SSF_TXPOWER_LOW_WARNING]; 5149 5150 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXPOWER) 5151 flags |= RDP_OET_HIGH_ALARM; 5152 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXPOWER) 5153 flags |= RDP_OET_LOW_ALARM; 5154 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXPOWER) 5155 flags |= RDP_OET_HIGH_WARNING; 5156 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXPOWER) 5157 flags |= RDP_OET_LOW_WARNING; 5158 5159 flags |= ((0xf & RDP_OED_TXPOWER) << RDP_OED_TYPE_SHIFT); 5160 desc->oed_info.function_flags = cpu_to_be32(flags); 5161 desc->length = 
cpu_to_be32(sizeof(desc->oed_info)); 5162 return sizeof(struct fc_rdp_oed_sfp_desc); 5163 } 5164 5165 5166 static uint32_t 5167 lpfc_rdp_res_oed_rxpower_desc(struct lpfc_hba *phba, 5168 struct fc_rdp_oed_sfp_desc *desc, 5169 uint8_t *page_a2) 5170 { 5171 uint32_t flags = 0; 5172 5173 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 5174 5175 desc->oed_info.hi_alarm = page_a2[SSF_RXPOWER_HIGH_ALARM]; 5176 desc->oed_info.lo_alarm = page_a2[SSF_RXPOWER_LOW_ALARM]; 5177 desc->oed_info.hi_warning = page_a2[SSF_RXPOWER_HIGH_WARNING]; 5178 desc->oed_info.lo_warning = page_a2[SSF_RXPOWER_LOW_WARNING]; 5179 5180 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_RXPOWER) 5181 flags |= RDP_OET_HIGH_ALARM; 5182 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_RXPOWER) 5183 flags |= RDP_OET_LOW_ALARM; 5184 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_RXPOWER) 5185 flags |= RDP_OET_HIGH_WARNING; 5186 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_RXPOWER) 5187 flags |= RDP_OET_LOW_WARNING; 5188 5189 flags |= ((0xf & RDP_OED_RXPOWER) << RDP_OED_TYPE_SHIFT); 5190 desc->oed_info.function_flags = cpu_to_be32(flags); 5191 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 5192 return sizeof(struct fc_rdp_oed_sfp_desc); 5193 } 5194 5195 static uint32_t 5196 lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc, 5197 uint8_t *page_a0, struct lpfc_vport *vport) 5198 { 5199 desc->tag = cpu_to_be32(RDP_OPD_DESC_TAG); 5200 memcpy(desc->opd_info.vendor_name, &page_a0[SSF_VENDOR_NAME], 16); 5201 memcpy(desc->opd_info.model_number, &page_a0[SSF_VENDOR_PN], 16); 5202 memcpy(desc->opd_info.serial_number, &page_a0[SSF_VENDOR_SN], 16); 5203 memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 4); 5204 memcpy(desc->opd_info.date, &page_a0[SSF_DATE_CODE], 8); 5205 desc->length = cpu_to_be32(sizeof(desc->opd_info)); 5206 return sizeof(struct fc_rdp_opd_sfp_desc); 5207 } 5208 5209 static uint32_t 5210 lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat) 5211 { 5212 if (bf_get(lpfc_read_link_stat_gec2, stat) == 0) 5213 return 0; 5214 desc->tag = cpu_to_be32(RDP_FEC_DESC_TAG); 5215 5216 desc->info.CorrectedBlocks = 5217 cpu_to_be32(stat->fecCorrBlkCount); 5218 desc->info.UncorrectableBlocks = 5219 cpu_to_be32(stat->fecUncorrBlkCount); 5220 5221 desc->length = cpu_to_be32(sizeof(desc->info)); 5222 5223 return sizeof(struct fc_fec_rdp_desc); 5224 } 5225 5226 static uint32_t 5227 lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba) 5228 { 5229 uint16_t rdp_cap = 0; 5230 uint16_t rdp_speed; 5231 5232 desc->tag = cpu_to_be32(RDP_PORT_SPEED_DESC_TAG); 5233 5234 switch (phba->fc_linkspeed) { 5235 case LPFC_LINK_SPEED_1GHZ: 5236 rdp_speed = RDP_PS_1GB; 5237 break; 5238 case LPFC_LINK_SPEED_2GHZ: 5239 rdp_speed = RDP_PS_2GB; 5240 break; 5241 case LPFC_LINK_SPEED_4GHZ: 5242 rdp_speed = RDP_PS_4GB; 5243 break; 5244 case LPFC_LINK_SPEED_8GHZ: 5245 rdp_speed = RDP_PS_8GB; 5246 break; 5247 case LPFC_LINK_SPEED_10GHZ: 5248 rdp_speed = RDP_PS_10GB; 5249 break; 5250 case LPFC_LINK_SPEED_16GHZ: 5251 rdp_speed = RDP_PS_16GB; 5252 break; 5253 case LPFC_LINK_SPEED_32GHZ: 5254 rdp_speed = RDP_PS_32GB; 5255 break; 5256 default: 5257 rdp_speed = RDP_PS_UNKNOWN; 5258 break; 5259 } 5260 5261 desc->info.port_speed.speed = cpu_to_be16(rdp_speed); 5262 5263 if (phba->lmt & LMT_32Gb) 5264 rdp_cap |= RDP_PS_32GB; 5265 if (phba->lmt & LMT_16Gb) 5266 rdp_cap |= RDP_PS_16GB; 5267 if (phba->lmt & LMT_10Gb) 5268 rdp_cap |= RDP_PS_10GB; 5269 if (phba->lmt & LMT_8Gb) 5270 rdp_cap |= RDP_PS_8GB; 5271 if (phba->lmt & LMT_4Gb) 
		rdp_cap |= RDP_PS_4GB;
	if (phba->lmt & LMT_2Gb)
		rdp_cap |= RDP_PS_2GB;
	if (phba->lmt & LMT_1Gb)
		rdp_cap |= RDP_PS_1GB;

	if (rdp_cap == 0)
		rdp_cap = RDP_CAP_UNKNOWN;
	if (phba->cfg_link_speed != LPFC_USER_LINK_SPEED_AUTO)
		rdp_cap |= RDP_CAP_USER_CONFIGURED;

	desc->info.port_speed.capabilities = cpu_to_be16(rdp_cap);
	desc->length = cpu_to_be32(sizeof(desc->info));
	return sizeof(struct fc_rdp_port_speed_desc);
}

static uint32_t
lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc,
			     struct lpfc_vport *vport)
{
	desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG);

	memcpy(desc->port_names.wwnn, &vport->fc_nodename,
	       sizeof(desc->port_names.wwnn));

	memcpy(desc->port_names.wwpn, &vport->fc_portname,
	       sizeof(desc->port_names.wwpn));

	desc->length = cpu_to_be32(sizeof(desc->port_names));
	return sizeof(struct fc_rdp_port_name_desc);
}

static uint32_t
lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc,
			struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG);
	if (vport->fc_flag & FC_FABRIC) {
		memcpy(desc->port_names.wwnn, &vport->fabric_nodename,
		       sizeof(desc->port_names.wwnn));

		memcpy(desc->port_names.wwpn, &vport->fabric_portname,
		       sizeof(desc->port_names.wwpn));
	} else {  /* Point to Point */
		memcpy(desc->port_names.wwnn, &ndlp->nlp_nodename,
		       sizeof(desc->port_names.wwnn));

		memcpy(desc->port_names.wwpn, &ndlp->nlp_portname,
		       sizeof(desc->port_names.wwpn));
	}

	desc->length = cpu_to_be32(sizeof(desc->port_names));
	return sizeof(struct fc_rdp_port_name_desc);
}

static void
lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
		  int status)
{
	struct lpfc_nodelist *ndlp = rdp_context->ndlp;
	struct lpfc_vport *vport = ndlp->vport;
	struct lpfc_iocbq *elsiocb;
	struct ulp_bde64 *bpl;
	IOCB_t *icmd;
	uint8_t *pcmd;
	struct ls_rjt *stat;
	struct fc_rdp_res_frame *rdp_res;
	uint32_t cmdsize, len;
	uint16_t *flag_ptr;
	int rc;

	if (status != SUCCESS)
		goto error;

	/* This will change once we know the true size of the RDP payload */
	cmdsize = sizeof(struct fc_rdp_res_frame);

	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize,
				     lpfc_max_els_tries, rdp_context->ndlp,
				     rdp_context->ndlp->nlp_DID, ELS_CMD_ACC);
	lpfc_nlp_put(ndlp);
	if (!elsiocb)
		goto free_rdp_context;

	icmd = &elsiocb->iocb;
	icmd->ulpContext = rdp_context->rx_id;
	icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "2171 Xmit RDP response tag x%x xri x%x, "
			 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x",
			 elsiocb->iotag, elsiocb->iocb.ulpContext,
			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
			 ndlp->nlp_rpi);
	rdp_res = (struct fc_rdp_res_frame *)
		(((struct lpfc_dmabuf *) elsiocb->context2)->virt);
	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
	memset(pcmd, 0, sizeof(struct fc_rdp_res_frame));
	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;

	/* Update Alarm and Warning */
	flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_ALARM_FLAGS);
	phba->sfp_alarm |= *flag_ptr;
	flag_ptr = (uint16_t *)(rdp_context->page_a2 +
SSF_WARNING_FLAGS); 5378 phba->sfp_warning |= *flag_ptr; 5379 5380 /* For RDP payload */ 5381 len = 8; 5382 len += lpfc_rdp_res_link_service((struct fc_rdp_link_service_desc *) 5383 (len + pcmd), ELS_CMD_RDP); 5384 5385 len += lpfc_rdp_res_sfp_desc((struct fc_rdp_sfp_desc *)(len + pcmd), 5386 rdp_context->page_a0, rdp_context->page_a2); 5387 len += lpfc_rdp_res_speed((struct fc_rdp_port_speed_desc *)(len + pcmd), 5388 phba); 5389 len += lpfc_rdp_res_link_error((struct fc_rdp_link_error_status_desc *) 5390 (len + pcmd), &rdp_context->link_stat); 5391 len += lpfc_rdp_res_diag_port_names((struct fc_rdp_port_name_desc *) 5392 (len + pcmd), vport); 5393 len += lpfc_rdp_res_attach_port_names((struct fc_rdp_port_name_desc *) 5394 (len + pcmd), vport, ndlp); 5395 len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd), 5396 &rdp_context->link_stat); 5397 /* Check if nport is logged, BZ190632 */ 5398 if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) 5399 goto lpfc_skip_descriptor; 5400 5401 len += lpfc_rdp_res_bbc_desc((struct fc_rdp_bbc_desc *)(len + pcmd), 5402 &rdp_context->link_stat, vport); 5403 len += lpfc_rdp_res_oed_temp_desc(phba, 5404 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 5405 rdp_context->page_a2); 5406 len += lpfc_rdp_res_oed_voltage_desc(phba, 5407 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 5408 rdp_context->page_a2); 5409 len += lpfc_rdp_res_oed_txbias_desc(phba, 5410 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 5411 rdp_context->page_a2); 5412 len += lpfc_rdp_res_oed_txpower_desc(phba, 5413 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 5414 rdp_context->page_a2); 5415 len += lpfc_rdp_res_oed_rxpower_desc(phba, 5416 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 5417 rdp_context->page_a2); 5418 len += lpfc_rdp_res_opd_desc((struct fc_rdp_opd_sfp_desc *)(len + pcmd), 5419 rdp_context->page_a0, vport); 5420 5421 lpfc_skip_descriptor: 5422 rdp_res->length = cpu_to_be32(len - 8); 5423 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5424 5425 /* Now that we know the true size of the payload, update the BPL */ 5426 bpl = (struct ulp_bde64 *) 5427 (((struct lpfc_dmabuf *)(elsiocb->context3))->virt); 5428 bpl->tus.f.bdeSize = len; 5429 bpl->tus.f.bdeFlags = 0; 5430 bpl->tus.w = le32_to_cpu(bpl->tus.w); 5431 5432 phba->fc_stat.elsXmitACC++; 5433 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5434 if (rc == IOCB_ERROR) 5435 lpfc_els_free_iocb(phba, elsiocb); 5436 5437 kfree(rdp_context); 5438 5439 return; 5440 error: 5441 cmdsize = 2 * sizeof(uint32_t); 5442 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, lpfc_max_els_tries, 5443 ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT); 5444 lpfc_nlp_put(ndlp); 5445 if (!elsiocb) 5446 goto free_rdp_context; 5447 5448 icmd = &elsiocb->iocb; 5449 icmd->ulpContext = rdp_context->rx_id; 5450 icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id; 5451 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5452 5453 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; 5454 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t)); 5455 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 5456 5457 phba->fc_stat.elsXmitLSRJT++; 5458 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5459 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5460 5461 if (rc == IOCB_ERROR) 5462 lpfc_els_free_iocb(phba, elsiocb); 5463 free_rdp_context: 5464 kfree(rdp_context); 5465 } 5466 5467 static int 5468 lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context) 5469 { 5470 LPFC_MBOXQ_t *mbox = NULL; 5471 int rc; 5472 5473 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 
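	/*
	 * Rough sketch of the collection sequence, as described in the
	 * lpfc_els_rcv_rdp() comment below and wired up here: this mailbox
	 * dumps SFF-8472 page A0 first; its completion handler,
	 * lpfc_mbx_cmpl_rdp_page_a0, is expected to chain the page A2 dump
	 * and a READ_LNK_STAT, and finally call rdp_context->cmpl
	 * (lpfc_els_rdp_cmpl) to assemble and transmit the RDP ACC payload.
	 */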
5474 if (!mbox) { 5475 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS, 5476 "7105 failed to allocate mailbox memory"); 5477 return 1; 5478 } 5479 5480 if (lpfc_sli4_dump_page_a0(phba, mbox)) 5481 goto prep_mbox_fail; 5482 mbox->vport = rdp_context->ndlp->vport; 5483 mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0; 5484 mbox->context2 = (struct lpfc_rdp_context *) rdp_context; 5485 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 5486 if (rc == MBX_NOT_FINISHED) 5487 goto issue_mbox_fail; 5488 5489 return 0; 5490 5491 prep_mbox_fail: 5492 issue_mbox_fail: 5493 mempool_free(mbox, phba->mbox_mem_pool); 5494 return 1; 5495 } 5496 5497 /* 5498 * lpfc_els_rcv_rdp - Process an unsolicited RDP ELS. 5499 * @vport: pointer to a host virtual N_Port data structure. 5500 * @cmdiocb: pointer to lpfc command iocb data structure. 5501 * @ndlp: pointer to a node-list data structure. 5502 * 5503 * This routine processes an unsolicited RDP(Read Diagnostic Parameters) 5504 * IOCB. First, the payload of the unsolicited RDP is checked. 5505 * Then it will (1) send MBX_DUMP_MEMORY, Embedded DMP_LMSD sub command TYPE-3 5506 * for Page A0, (2) send MBX_DUMP_MEMORY, DMP_LMSD for Page A2, 5507 * (3) send MBX_READ_LNK_STAT to get link stat, (4) Call lpfc_els_rdp_cmpl 5508 * gather all data and send RDP response. 5509 * 5510 * Return code 5511 * 0 - Sent the acc response 5512 * 1 - Sent the reject response. 5513 */ 5514 static int 5515 lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5516 struct lpfc_nodelist *ndlp) 5517 { 5518 struct lpfc_hba *phba = vport->phba; 5519 struct lpfc_dmabuf *pcmd; 5520 uint8_t rjt_err, rjt_expl = LSEXP_NOTHING_MORE; 5521 struct fc_rdp_req_frame *rdp_req; 5522 struct lpfc_rdp_context *rdp_context; 5523 IOCB_t *cmd = NULL; 5524 struct ls_rjt stat; 5525 5526 if (phba->sli_rev < LPFC_SLI_REV4 || 5527 bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 5528 LPFC_SLI_INTF_IF_TYPE_2) { 5529 rjt_err = LSRJT_UNABLE_TPC; 5530 rjt_expl = LSEXP_REQ_UNSUPPORTED; 5531 goto error; 5532 } 5533 5534 if (phba->sli_rev < LPFC_SLI_REV4 || (phba->hba_flag & HBA_FCOE_MODE)) { 5535 rjt_err = LSRJT_UNABLE_TPC; 5536 rjt_expl = LSEXP_REQ_UNSUPPORTED; 5537 goto error; 5538 } 5539 5540 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 5541 rdp_req = (struct fc_rdp_req_frame *) pcmd->virt; 5542 5543 5544 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5545 "2422 ELS RDP Request " 5546 "dec len %d tag x%x port_id %d len %d\n", 5547 be32_to_cpu(rdp_req->rdp_des_length), 5548 be32_to_cpu(rdp_req->nport_id_desc.tag), 5549 be32_to_cpu(rdp_req->nport_id_desc.nport_id), 5550 be32_to_cpu(rdp_req->nport_id_desc.length)); 5551 5552 if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED) && 5553 !phba->cfg_enable_SmartSAN) { 5554 rjt_err = LSRJT_UNABLE_TPC; 5555 rjt_expl = LSEXP_PORT_LOGIN_REQ; 5556 goto error; 5557 } 5558 if (sizeof(struct fc_rdp_nport_desc) != 5559 be32_to_cpu(rdp_req->rdp_des_length)) 5560 goto rjt_logerr; 5561 if (RDP_N_PORT_DESC_TAG != be32_to_cpu(rdp_req->nport_id_desc.tag)) 5562 goto rjt_logerr; 5563 if (RDP_NPORT_ID_SIZE != 5564 be32_to_cpu(rdp_req->nport_id_desc.length)) 5565 goto rjt_logerr; 5566 rdp_context = kzalloc(sizeof(struct lpfc_rdp_context), GFP_KERNEL); 5567 if (!rdp_context) { 5568 rjt_err = LSRJT_UNABLE_TPC; 5569 goto error; 5570 } 5571 5572 cmd = &cmdiocb->iocb; 5573 rdp_context->ndlp = lpfc_nlp_get(ndlp); 5574 rdp_context->ox_id = cmd->unsli3.rcvsli3.ox_id; 5575 rdp_context->rx_id = cmd->ulpContext; 5576 rdp_context->cmpl = lpfc_els_rdp_cmpl; 5577 if 
(lpfc_get_rdp_info(phba, rdp_context)) { 5578 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_ELS, 5579 "2423 Unable to send mailbox"); 5580 kfree(rdp_context); 5581 rjt_err = LSRJT_UNABLE_TPC; 5582 lpfc_nlp_put(ndlp); 5583 goto error; 5584 } 5585 5586 return 0; 5587 5588 rjt_logerr: 5589 rjt_err = LSRJT_LOGICAL_ERR; 5590 5591 error: 5592 memset(&stat, 0, sizeof(stat)); 5593 stat.un.b.lsRjtRsnCode = rjt_err; 5594 stat.un.b.lsRjtRsnCodeExp = rjt_expl; 5595 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 5596 return 1; 5597 } 5598 5599 5600 static void 5601 lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 5602 { 5603 MAILBOX_t *mb; 5604 IOCB_t *icmd; 5605 uint8_t *pcmd; 5606 struct lpfc_iocbq *elsiocb; 5607 struct lpfc_nodelist *ndlp; 5608 struct ls_rjt *stat; 5609 union lpfc_sli4_cfg_shdr *shdr; 5610 struct lpfc_lcb_context *lcb_context; 5611 struct fc_lcb_res_frame *lcb_res; 5612 uint32_t cmdsize, shdr_status, shdr_add_status; 5613 int rc; 5614 5615 mb = &pmb->u.mb; 5616 lcb_context = (struct lpfc_lcb_context *)pmb->context1; 5617 ndlp = lcb_context->ndlp; 5618 pmb->context1 = NULL; 5619 pmb->context2 = NULL; 5620 5621 shdr = (union lpfc_sli4_cfg_shdr *) 5622 &pmb->u.mqe.un.beacon_config.header.cfg_shdr; 5623 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 5624 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 5625 5626 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX, 5627 "0194 SET_BEACON_CONFIG mailbox " 5628 "completed with status x%x add_status x%x," 5629 " mbx status x%x\n", 5630 shdr_status, shdr_add_status, mb->mbxStatus); 5631 5632 if (mb->mbxStatus && !(shdr_status && 5633 shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE)) { 5634 mempool_free(pmb, phba->mbox_mem_pool); 5635 goto error; 5636 } 5637 5638 mempool_free(pmb, phba->mbox_mem_pool); 5639 cmdsize = sizeof(struct fc_lcb_res_frame); 5640 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 5641 lpfc_max_els_tries, ndlp, 5642 ndlp->nlp_DID, ELS_CMD_ACC); 5643 5644 /* Decrement the ndlp reference count from previous mbox command */ 5645 lpfc_nlp_put(ndlp); 5646 5647 if (!elsiocb) 5648 goto free_lcb_context; 5649 5650 lcb_res = (struct fc_lcb_res_frame *) 5651 (((struct lpfc_dmabuf *)elsiocb->context2)->virt); 5652 5653 icmd = &elsiocb->iocb; 5654 icmd->ulpContext = lcb_context->rx_id; 5655 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; 5656 5657 pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt); 5658 *((uint32_t *)(pcmd)) = ELS_CMD_ACC; 5659 lcb_res->lcb_sub_command = lcb_context->sub_command; 5660 lcb_res->lcb_type = lcb_context->type; 5661 lcb_res->lcb_frequency = lcb_context->frequency; 5662 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5663 phba->fc_stat.elsXmitACC++; 5664 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5665 if (rc == IOCB_ERROR) 5666 lpfc_els_free_iocb(phba, elsiocb); 5667 5668 kfree(lcb_context); 5669 return; 5670 5671 error: 5672 cmdsize = sizeof(struct fc_lcb_res_frame); 5673 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 5674 lpfc_max_els_tries, ndlp, 5675 ndlp->nlp_DID, ELS_CMD_LS_RJT); 5676 lpfc_nlp_put(ndlp); 5677 if (!elsiocb) 5678 goto free_lcb_context; 5679 5680 icmd = &elsiocb->iocb; 5681 icmd->ulpContext = lcb_context->rx_id; 5682 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; 5683 pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt); 5684 5685 *((uint32_t *)(pcmd)) = ELS_CMD_LS_RJT; 5686 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t)); 5687 stat->un.b.lsRjtRsnCode = 
LSRJT_UNABLE_TPC; 5688 5689 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5690 phba->fc_stat.elsXmitLSRJT++; 5691 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5692 if (rc == IOCB_ERROR) 5693 lpfc_els_free_iocb(phba, elsiocb); 5694 free_lcb_context: 5695 kfree(lcb_context); 5696 } 5697 5698 static int 5699 lpfc_sli4_set_beacon(struct lpfc_vport *vport, 5700 struct lpfc_lcb_context *lcb_context, 5701 uint32_t beacon_state) 5702 { 5703 struct lpfc_hba *phba = vport->phba; 5704 LPFC_MBOXQ_t *mbox = NULL; 5705 uint32_t len; 5706 int rc; 5707 5708 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5709 if (!mbox) 5710 return 1; 5711 5712 len = sizeof(struct lpfc_mbx_set_beacon_config) - 5713 sizeof(struct lpfc_sli4_cfg_mhdr); 5714 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 5715 LPFC_MBOX_OPCODE_SET_BEACON_CONFIG, len, 5716 LPFC_SLI4_MBX_EMBED); 5717 mbox->context1 = (void *)lcb_context; 5718 mbox->vport = phba->pport; 5719 mbox->mbox_cmpl = lpfc_els_lcb_rsp; 5720 bf_set(lpfc_mbx_set_beacon_port_num, &mbox->u.mqe.un.beacon_config, 5721 phba->sli4_hba.physical_port); 5722 bf_set(lpfc_mbx_set_beacon_state, &mbox->u.mqe.un.beacon_config, 5723 beacon_state); 5724 bf_set(lpfc_mbx_set_beacon_port_type, &mbox->u.mqe.un.beacon_config, 1); 5725 bf_set(lpfc_mbx_set_beacon_duration, &mbox->u.mqe.un.beacon_config, 0); 5726 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 5727 if (rc == MBX_NOT_FINISHED) { 5728 mempool_free(mbox, phba->mbox_mem_pool); 5729 return 1; 5730 } 5731 5732 return 0; 5733 } 5734 5735 5736 /** 5737 * lpfc_els_rcv_lcb - Process an unsolicited LCB 5738 * @vport: pointer to a host virtual N_Port data structure. 5739 * @cmdiocb: pointer to lpfc command iocb data structure. 5740 * @ndlp: pointer to a node-list data structure. 5741 * 5742 * This routine processes an unsolicited LCB(LINK CABLE BEACON) IOCB. 5743 * First, the payload of the unsolicited LCB is checked. 5744 * Then based on Subcommand beacon will either turn on or off. 5745 * 5746 * Return code 5747 * 0 - Sent the acc response 5748 * 1 - Sent the reject response. 
5749 **/ 5750 static int 5751 lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5752 struct lpfc_nodelist *ndlp) 5753 { 5754 struct lpfc_hba *phba = vport->phba; 5755 struct lpfc_dmabuf *pcmd; 5756 uint8_t *lp; 5757 struct fc_lcb_request_frame *beacon; 5758 struct lpfc_lcb_context *lcb_context; 5759 uint8_t state, rjt_err; 5760 struct ls_rjt stat; 5761 5762 pcmd = (struct lpfc_dmabuf *)cmdiocb->context2; 5763 lp = (uint8_t *)pcmd->virt; 5764 beacon = (struct fc_lcb_request_frame *)pcmd->virt; 5765 5766 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5767 "0192 ELS LCB Data x%x x%x x%x x%x sub x%x " 5768 "type x%x frequency %x duration x%x\n", 5769 lp[0], lp[1], lp[2], 5770 beacon->lcb_command, 5771 beacon->lcb_sub_command, 5772 beacon->lcb_type, 5773 beacon->lcb_frequency, 5774 be16_to_cpu(beacon->lcb_duration)); 5775 5776 if (phba->sli_rev < LPFC_SLI_REV4 || 5777 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 5778 LPFC_SLI_INTF_IF_TYPE_2)) { 5779 rjt_err = LSRJT_CMD_UNSUPPORTED; 5780 goto rjt; 5781 } 5782 5783 if (phba->hba_flag & HBA_FCOE_MODE) { 5784 rjt_err = LSRJT_CMD_UNSUPPORTED; 5785 goto rjt; 5786 } 5787 if (beacon->lcb_sub_command != LPFC_LCB_ON && 5788 beacon->lcb_sub_command != LPFC_LCB_OFF) { 5789 rjt_err = LSRJT_CMD_UNSUPPORTED; 5790 goto rjt; 5791 } 5792 if (beacon->lcb_sub_command == LPFC_LCB_ON && 5793 be16_to_cpu(beacon->lcb_duration) != 0) { 5794 rjt_err = LSRJT_CMD_UNSUPPORTED; 5795 goto rjt; 5796 } 5797 5798 lcb_context = kmalloc(sizeof(*lcb_context), GFP_KERNEL); 5799 if (!lcb_context) { 5800 rjt_err = LSRJT_UNABLE_TPC; 5801 goto rjt; 5802 } 5803 5804 state = (beacon->lcb_sub_command == LPFC_LCB_ON) ? 1 : 0; 5805 lcb_context->sub_command = beacon->lcb_sub_command; 5806 lcb_context->type = beacon->lcb_type; 5807 lcb_context->frequency = beacon->lcb_frequency; 5808 lcb_context->ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id; 5809 lcb_context->rx_id = cmdiocb->iocb.ulpContext; 5810 lcb_context->ndlp = lpfc_nlp_get(ndlp); 5811 if (lpfc_sli4_set_beacon(vport, lcb_context, state)) { 5812 lpfc_printf_vlog(ndlp->vport, KERN_ERR, 5813 LOG_ELS, "0193 failed to send mail box"); 5814 kfree(lcb_context); 5815 lpfc_nlp_put(ndlp); 5816 rjt_err = LSRJT_UNABLE_TPC; 5817 goto rjt; 5818 } 5819 return 0; 5820 rjt: 5821 memset(&stat, 0, sizeof(stat)); 5822 stat.un.b.lsRjtRsnCode = rjt_err; 5823 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 5824 return 1; 5825 } 5826 5827 5828 /** 5829 * lpfc_els_flush_rscn - Clean up any rscn activities with a vport 5830 * @vport: pointer to a host virtual N_Port data structure. 5831 * 5832 * This routine cleans up any Registration State Change Notification 5833 * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the 5834 * @vport together with the host_lock is used to prevent multiple thread 5835 * trying to access the RSCN array on a same @vport at the same time. 
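 *
 * The serialization is a simple flag token taken under the host lock; a
 * rough sketch of the pattern, shared with lpfc_rscn_payload_check() and
 * lpfc_els_rcv_rscn():
 *
 *   spin_lock_irq(shost->host_lock);
 *   if (vport->fc_rscn_flush) {        (another walker owns the list)
 *           spin_unlock_irq(shost->host_lock);
 *           return;
 *   }
 *   vport->fc_rscn_flush = 1;          (take the token)
 *   spin_unlock_irq(shost->host_lock);
 *   ... walk or free vport->fc_rscn_id_list ...
 *   vport->fc_rscn_flush = 0;          (release the token)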
5836 **/ 5837 void 5838 lpfc_els_flush_rscn(struct lpfc_vport *vport) 5839 { 5840 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 5841 struct lpfc_hba *phba = vport->phba; 5842 int i; 5843 5844 spin_lock_irq(shost->host_lock); 5845 if (vport->fc_rscn_flush) { 5846 /* Another thread is walking fc_rscn_id_list on this vport */ 5847 spin_unlock_irq(shost->host_lock); 5848 return; 5849 } 5850 /* Indicate we are walking lpfc_els_flush_rscn on this vport */ 5851 vport->fc_rscn_flush = 1; 5852 spin_unlock_irq(shost->host_lock); 5853 5854 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 5855 lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]); 5856 vport->fc_rscn_id_list[i] = NULL; 5857 } 5858 spin_lock_irq(shost->host_lock); 5859 vport->fc_rscn_id_cnt = 0; 5860 vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY); 5861 spin_unlock_irq(shost->host_lock); 5862 lpfc_can_disctmo(vport); 5863 /* Indicate we are done walking this fc_rscn_id_list */ 5864 vport->fc_rscn_flush = 0; 5865 } 5866 5867 /** 5868 * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did 5869 * @vport: pointer to a host virtual N_Port data structure. 5870 * @did: remote destination port identifier. 5871 * 5872 * This routine checks whether there is any pending Registration State 5873 * Configuration Notification (RSCN) to a @did on @vport. 5874 * 5875 * Return code 5876 * None zero - The @did matched with a pending rscn 5877 * 0 - not able to match @did with a pending rscn 5878 **/ 5879 int 5880 lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did) 5881 { 5882 D_ID ns_did; 5883 D_ID rscn_did; 5884 uint32_t *lp; 5885 uint32_t payload_len, i; 5886 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 5887 5888 ns_did.un.word = did; 5889 5890 /* Never match fabric nodes for RSCNs */ 5891 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 5892 return 0; 5893 5894 /* If we are doing a FULL RSCN rediscovery, match everything */ 5895 if (vport->fc_flag & FC_RSCN_DISCOVERY) 5896 return did; 5897 5898 spin_lock_irq(shost->host_lock); 5899 if (vport->fc_rscn_flush) { 5900 /* Another thread is walking fc_rscn_id_list on this vport */ 5901 spin_unlock_irq(shost->host_lock); 5902 return 0; 5903 } 5904 /* Indicate we are walking fc_rscn_id_list on this vport */ 5905 vport->fc_rscn_flush = 1; 5906 spin_unlock_irq(shost->host_lock); 5907 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 5908 lp = vport->fc_rscn_id_list[i]->virt; 5909 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); 5910 payload_len -= sizeof(uint32_t); /* take off word 0 */ 5911 while (payload_len) { 5912 rscn_did.un.word = be32_to_cpu(*lp++); 5913 payload_len -= sizeof(uint32_t); 5914 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) { 5915 case RSCN_ADDRESS_FORMAT_PORT: 5916 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 5917 && (ns_did.un.b.area == rscn_did.un.b.area) 5918 && (ns_did.un.b.id == rscn_did.un.b.id)) 5919 goto return_did_out; 5920 break; 5921 case RSCN_ADDRESS_FORMAT_AREA: 5922 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 5923 && (ns_did.un.b.area == rscn_did.un.b.area)) 5924 goto return_did_out; 5925 break; 5926 case RSCN_ADDRESS_FORMAT_DOMAIN: 5927 if (ns_did.un.b.domain == rscn_did.un.b.domain) 5928 goto return_did_out; 5929 break; 5930 case RSCN_ADDRESS_FORMAT_FABRIC: 5931 goto return_did_out; 5932 } 5933 } 5934 } 5935 /* Indicate we are done with walking fc_rscn_id_list on this vport */ 5936 vport->fc_rscn_flush = 0; 5937 return 0; 5938 return_did_out: 5939 /* Indicate we are done with walking fc_rscn_id_list on this 
vport */ 5940 vport->fc_rscn_flush = 0; 5941 return did; 5942 } 5943 5944 /** 5945 * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn 5946 * @vport: pointer to a host virtual N_Port data structure. 5947 * 5948 * This routine sends recovery (NLP_EVT_DEVICE_RECOVERY) event to the 5949 * state machine for a @vport's nodes that are with pending RSCN (Registration 5950 * State Change Notification). 5951 * 5952 * Return code 5953 * 0 - Successful (currently alway return 0) 5954 **/ 5955 static int 5956 lpfc_rscn_recovery_check(struct lpfc_vport *vport) 5957 { 5958 struct lpfc_nodelist *ndlp = NULL; 5959 5960 /* Move all affected nodes by pending RSCNs to NPR state. */ 5961 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 5962 if (!NLP_CHK_NODE_ACT(ndlp) || 5963 (ndlp->nlp_state == NLP_STE_UNUSED_NODE) || 5964 !lpfc_rscn_payload_check(vport, ndlp->nlp_DID)) 5965 continue; 5966 5967 /* NVME Target mode does not do RSCN Recovery. */ 5968 if (vport->phba->nvmet_support) 5969 continue; 5970 5971 lpfc_disc_state_machine(vport, ndlp, NULL, 5972 NLP_EVT_DEVICE_RECOVERY); 5973 lpfc_cancel_retry_delay_tmo(vport, ndlp); 5974 } 5975 return 0; 5976 } 5977 5978 /** 5979 * lpfc_send_rscn_event - Send an RSCN event to management application 5980 * @vport: pointer to a host virtual N_Port data structure. 5981 * @cmdiocb: pointer to lpfc command iocb data structure. 5982 * 5983 * lpfc_send_rscn_event sends an RSCN netlink event to management 5984 * applications. 5985 */ 5986 static void 5987 lpfc_send_rscn_event(struct lpfc_vport *vport, 5988 struct lpfc_iocbq *cmdiocb) 5989 { 5990 struct lpfc_dmabuf *pcmd; 5991 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 5992 uint32_t *payload_ptr; 5993 uint32_t payload_len; 5994 struct lpfc_rscn_event_header *rscn_event_data; 5995 5996 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 5997 payload_ptr = (uint32_t *) pcmd->virt; 5998 payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK); 5999 6000 rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) + 6001 payload_len, GFP_KERNEL); 6002 if (!rscn_event_data) { 6003 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 6004 "0147 Failed to allocate memory for RSCN event\n"); 6005 return; 6006 } 6007 rscn_event_data->event_type = FC_REG_RSCN_EVENT; 6008 rscn_event_data->payload_length = payload_len; 6009 memcpy(rscn_event_data->rscn_payload, payload_ptr, 6010 payload_len); 6011 6012 fc_host_post_vendor_event(shost, 6013 fc_get_event_number(), 6014 sizeof(struct lpfc_rscn_event_header) + payload_len, 6015 (char *)rscn_event_data, 6016 LPFC_NL_VENDOR_ID); 6017 6018 kfree(rscn_event_data); 6019 } 6020 6021 /** 6022 * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb 6023 * @vport: pointer to a host virtual N_Port data structure. 6024 * @cmdiocb: pointer to lpfc command iocb data structure. 6025 * @ndlp: pointer to a node-list data structure. 6026 * 6027 * This routine processes an unsolicited RSCN (Registration State Change 6028 * Notification) IOCB. First, the payload of the unsolicited RSCN is walked 6029 * to invoke fc_host_post_event() routine to the FC transport layer. If the 6030 * discover state machine is about to begin discovery, it just accepts the 6031 * RSCN and the discovery process will satisfy the RSCN. If this RSCN only 6032 * contains N_Port IDs for other vports on this HBA, it just accepts the 6033 * RSCN and ignore processing it. 
If the state machine is in the recovery 6034 * state, the fc_rscn_id_list of this @vport is walked and the 6035 * lpfc_rscn_recovery_check() routine is invoked to send recovery event for 6036 * all nodes that match RSCN payload. Otherwise, the lpfc_els_handle_rscn() 6037 * routine is invoked to handle the RSCN event. 6038 * 6039 * Return code 6040 * 0 - Just sent the acc response 6041 * 1 - Sent the acc response and waited for name server completion 6042 **/ 6043 static int 6044 lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 6045 struct lpfc_nodelist *ndlp) 6046 { 6047 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6048 struct lpfc_hba *phba = vport->phba; 6049 struct lpfc_dmabuf *pcmd; 6050 uint32_t *lp, *datap; 6051 uint32_t payload_len, length, nportid, *cmd; 6052 int rscn_cnt; 6053 int rscn_id = 0, hba_id = 0; 6054 int i; 6055 6056 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 6057 lp = (uint32_t *) pcmd->virt; 6058 6059 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); 6060 payload_len -= sizeof(uint32_t); /* take off word 0 */ 6061 /* RSCN received */ 6062 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6063 "0214 RSCN received Data: x%x x%x x%x x%x\n", 6064 vport->fc_flag, payload_len, *lp, 6065 vport->fc_rscn_id_cnt); 6066 6067 /* Send an RSCN event to the management application */ 6068 lpfc_send_rscn_event(vport, cmdiocb); 6069 6070 for (i = 0; i < payload_len/sizeof(uint32_t); i++) 6071 fc_host_post_event(shost, fc_get_event_number(), 6072 FCH_EVT_RSCN, lp[i]); 6073 6074 /* If we are about to begin discovery, just ACC the RSCN. 6075 * Discovery processing will satisfy it. 6076 */ 6077 if (vport->port_state <= LPFC_NS_QRY) { 6078 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6079 "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x", 6080 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 6081 6082 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 6083 return 0; 6084 } 6085 6086 /* If this RSCN just contains NPortIDs for other vports on this HBA, 6087 * just ACC and ignore it. 
6088 */ 6089 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 6090 !(vport->cfg_peer_port_login)) { 6091 i = payload_len; 6092 datap = lp; 6093 while (i > 0) { 6094 nportid = *datap++; 6095 nportid = ((be32_to_cpu(nportid)) & Mask_DID); 6096 i -= sizeof(uint32_t); 6097 rscn_id++; 6098 if (lpfc_find_vport_by_did(phba, nportid)) 6099 hba_id++; 6100 } 6101 if (rscn_id == hba_id) { 6102 /* ALL NPortIDs in RSCN are on HBA */ 6103 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6104 "0219 Ignore RSCN " 6105 "Data: x%x x%x x%x x%x\n", 6106 vport->fc_flag, payload_len, 6107 *lp, vport->fc_rscn_id_cnt); 6108 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6109 "RCV RSCN vport: did:x%x/ste:x%x flg:x%x", 6110 ndlp->nlp_DID, vport->port_state, 6111 ndlp->nlp_flag); 6112 6113 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, 6114 ndlp, NULL); 6115 return 0; 6116 } 6117 } 6118 6119 spin_lock_irq(shost->host_lock); 6120 if (vport->fc_rscn_flush) { 6121 /* Another thread is walking fc_rscn_id_list on this vport */ 6122 vport->fc_flag |= FC_RSCN_DISCOVERY; 6123 spin_unlock_irq(shost->host_lock); 6124 /* Send back ACC */ 6125 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 6126 return 0; 6127 } 6128 /* Indicate we are walking fc_rscn_id_list on this vport */ 6129 vport->fc_rscn_flush = 1; 6130 spin_unlock_irq(shost->host_lock); 6131 /* Get the array count after successfully have the token */ 6132 rscn_cnt = vport->fc_rscn_id_cnt; 6133 /* If we are already processing an RSCN, save the received 6134 * RSCN payload buffer, cmdiocb->context2 to process later. 6135 */ 6136 if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) { 6137 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6138 "RCV RSCN defer: did:x%x/ste:x%x flg:x%x", 6139 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 6140 6141 spin_lock_irq(shost->host_lock); 6142 vport->fc_flag |= FC_RSCN_DEFERRED; 6143 if ((rscn_cnt < FC_MAX_HOLD_RSCN) && 6144 !(vport->fc_flag & FC_RSCN_DISCOVERY)) { 6145 vport->fc_flag |= FC_RSCN_MODE; 6146 spin_unlock_irq(shost->host_lock); 6147 if (rscn_cnt) { 6148 cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt; 6149 length = be32_to_cpu(*cmd & ~ELS_CMD_MASK); 6150 } 6151 if ((rscn_cnt) && 6152 (payload_len + length <= LPFC_BPL_SIZE)) { 6153 *cmd &= ELS_CMD_MASK; 6154 *cmd |= cpu_to_be32(payload_len + length); 6155 memcpy(((uint8_t *)cmd) + length, lp, 6156 payload_len); 6157 } else { 6158 vport->fc_rscn_id_list[rscn_cnt] = pcmd; 6159 vport->fc_rscn_id_cnt++; 6160 /* If we zero, cmdiocb->context2, the calling 6161 * routine will not try to free it. 
6162 */ 6163 cmdiocb->context2 = NULL; 6164 } 6165 /* Deferred RSCN */ 6166 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6167 "0235 Deferred RSCN " 6168 "Data: x%x x%x x%x\n", 6169 vport->fc_rscn_id_cnt, vport->fc_flag, 6170 vport->port_state); 6171 } else { 6172 vport->fc_flag |= FC_RSCN_DISCOVERY; 6173 spin_unlock_irq(shost->host_lock); 6174 /* ReDiscovery RSCN */ 6175 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6176 "0234 ReDiscovery RSCN " 6177 "Data: x%x x%x x%x\n", 6178 vport->fc_rscn_id_cnt, vport->fc_flag, 6179 vport->port_state); 6180 } 6181 /* Indicate we are done walking fc_rscn_id_list on this vport */ 6182 vport->fc_rscn_flush = 0; 6183 /* Send back ACC */ 6184 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 6185 /* send RECOVERY event for ALL nodes that match RSCN payload */ 6186 lpfc_rscn_recovery_check(vport); 6187 spin_lock_irq(shost->host_lock); 6188 vport->fc_flag &= ~FC_RSCN_DEFERRED; 6189 spin_unlock_irq(shost->host_lock); 6190 return 0; 6191 } 6192 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6193 "RCV RSCN: did:x%x/ste:x%x flg:x%x", 6194 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 6195 6196 spin_lock_irq(shost->host_lock); 6197 vport->fc_flag |= FC_RSCN_MODE; 6198 spin_unlock_irq(shost->host_lock); 6199 vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd; 6200 /* Indicate we are done walking fc_rscn_id_list on this vport */ 6201 vport->fc_rscn_flush = 0; 6202 /* 6203 * If we zero, cmdiocb->context2, the calling routine will 6204 * not try to free it. 6205 */ 6206 cmdiocb->context2 = NULL; 6207 lpfc_set_disctmo(vport); 6208 /* Send back ACC */ 6209 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 6210 /* send RECOVERY event for ALL nodes that match RSCN payload */ 6211 lpfc_rscn_recovery_check(vport); 6212 return lpfc_els_handle_rscn(vport); 6213 } 6214 6215 /** 6216 * lpfc_els_handle_rscn - Handle rscn for a vport 6217 * @vport: pointer to a host virtual N_Port data structure. 6218 * 6219 * This routine handles the Registration State Configuration Notification 6220 * (RSCN) for a @vport. If login to NameServer does not exist, a new ndlp shall 6221 * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise, 6222 * if the ndlp to NameServer exists, a Common Transport (CT) command to the 6223 * NameServer shall be issued. If CT command to the NameServer fails to be 6224 * issued, the lpfc_els_flush_rscn() routine shall be invoked to clean up any 6225 * RSCN activities with the @vport. 6226 * 6227 * Return code 6228 * 0 - Cleaned up rscn on the @vport 6229 * 1 - Wait for plogi to name server before proceed 6230 **/ 6231 int 6232 lpfc_els_handle_rscn(struct lpfc_vport *vport) 6233 { 6234 struct lpfc_nodelist *ndlp; 6235 6236 /* Ignore RSCN if the port is being torn down. */ 6237 if (vport->load_flag & FC_UNLOADING) { 6238 lpfc_els_flush_rscn(vport); 6239 return 0; 6240 } 6241 6242 /* Start timer for RSCN processing */ 6243 lpfc_set_disctmo(vport); 6244 6245 /* RSCN processed */ 6246 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6247 "0215 RSCN processed Data: x%x x%x x%x x%x\n", 6248 vport->fc_flag, 0, vport->fc_rscn_id_cnt, 6249 vport->port_state); 6250 6251 /* To process RSCN, first compare RSCN data with NameServer */ 6252 vport->fc_ns_retry = 0; 6253 vport->num_disc_nodes = 0; 6254 6255 ndlp = lpfc_findnode_did(vport, NameServer_DID); 6256 if (ndlp && NLP_CHK_NODE_ACT(ndlp) 6257 && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { 6258 /* Good ndlp, issue CT Request to NameServer. 
Need to
		 * know how many gidfts were issued.  If none, then just
		 * flush the RSCN.  Otherwise, the outstanding requests
		 * need to complete.
		 */
		vport->gidft_inp = 0;
		if (lpfc_issue_gidft(vport) > 0)
			return 1;
	} else {
		/* Nameserver login in question.  Revalidate. */
		if (ndlp) {
			ndlp = lpfc_enable_node(vport, ndlp,
						NLP_STE_PLOGI_ISSUE);
			if (!ndlp) {
				lpfc_els_flush_rscn(vport);
				return 0;
			}
			ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
		} else {
			ndlp = lpfc_nlp_init(vport, NameServer_DID);
			if (!ndlp) {
				lpfc_els_flush_rscn(vport);
				return 0;
			}
			ndlp->nlp_prev_state = ndlp->nlp_state;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
		}
		ndlp->nlp_type |= NLP_FABRIC;
		lpfc_issue_els_plogi(vport, NameServer_DID, 0);
		/* Wait for NameServer login cmpl before we can
		 * continue
		 */
		return 1;
	}

	lpfc_els_flush_rscn(vport);
	return 0;
}

/**
 * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb
 * @vport: pointer to a host virtual N_Port data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine processes a Fabric Login (FLOGI) IOCB received as an ELS
 * unsolicited event. An unsolicited FLOGI can be received in a point-to-
 * point topology. Since an unsolicited FLOGI should not be received in loop
 * mode, any unsolicited FLOGI received in loop mode shall be ignored. The
 * lpfc_check_sparm() routine is invoked to check the parameters in the
 * unsolicited FLOGI. If parameter validation fails, the routine
 * lpfc_els_rsp_reject() shall be called with the reject reason code set to
 * LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the
 * FLOGI shall be compared with the Port WWN of the @vport to determine
 * which port will initiate PLOGI. The party with the higher lexicographical
 * value shall have higher priority (as the winning port) and will initiate
 * PLOGI and communicate Port_IDs (Addresses) for both nodes in PLOGI. The
 * result of this is marked in the @vport fc_flag field with FC_PT2PT_PLOGI,
 * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI.
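 *
 * The comparison itself is a byte-wise memcmp() of the two eight-byte port
 * names; a rough sketch of the three outcomes handled below:
 *
 *   rc = memcmp(&vport->fc_portname, &sp->portName,
 *               sizeof(struct lpfc_name));
 *   if (rc == 0)        (same WWPN: our own FLOGI looped back; restart
 *                        the link on SLI-3 or abort the outstanding
 *                        FLOGI on SLI-4)
 *   else if (rc > 0)    (we win: fc_myDID = PT2PT_LocalID, set
 *                        FC_PT2PT_PLOGI and initiate PLOGI)
 *   else                (we lose: fc_myDID = PT2PT_RemoteID and wait
 *                        for the remote port's PLOGI)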
6317 * 6318 * Return code 6319 * 0 - Successfully processed the unsolicited flogi 6320 * 1 - Failed to process the unsolicited flogi 6321 **/ 6322 static int 6323 lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 6324 struct lpfc_nodelist *ndlp) 6325 { 6326 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6327 struct lpfc_hba *phba = vport->phba; 6328 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 6329 uint32_t *lp = (uint32_t *) pcmd->virt; 6330 IOCB_t *icmd = &cmdiocb->iocb; 6331 struct serv_parm *sp; 6332 LPFC_MBOXQ_t *mbox; 6333 uint32_t cmd, did; 6334 int rc; 6335 uint32_t fc_flag = 0; 6336 uint32_t port_state = 0; 6337 6338 cmd = *lp++; 6339 sp = (struct serv_parm *) lp; 6340 6341 /* FLOGI received */ 6342 6343 lpfc_set_disctmo(vport); 6344 6345 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 6346 /* We should never receive a FLOGI in loop mode, ignore it */ 6347 did = icmd->un.elsreq64.remoteID; 6348 6349 /* An FLOGI ELS command <elsCmd> was received from DID <did> in 6350 Loop Mode */ 6351 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 6352 "0113 An FLOGI ELS command x%x was " 6353 "received from DID x%x in Loop Mode\n", 6354 cmd, did); 6355 return 1; 6356 } 6357 6358 (void) lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1); 6359 6360 /* 6361 * If our portname is greater than the remote portname, 6362 * then we initiate Nport login. 6363 */ 6364 6365 rc = memcmp(&vport->fc_portname, &sp->portName, 6366 sizeof(struct lpfc_name)); 6367 6368 if (!rc) { 6369 if (phba->sli_rev < LPFC_SLI_REV4) { 6370 mbox = mempool_alloc(phba->mbox_mem_pool, 6371 GFP_KERNEL); 6372 if (!mbox) 6373 return 1; 6374 lpfc_linkdown(phba); 6375 lpfc_init_link(phba, mbox, 6376 phba->cfg_topology, 6377 phba->cfg_link_speed); 6378 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; 6379 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 6380 mbox->vport = vport; 6381 rc = lpfc_sli_issue_mbox(phba, mbox, 6382 MBX_NOWAIT); 6383 lpfc_set_loopback_flag(phba); 6384 if (rc == MBX_NOT_FINISHED) 6385 mempool_free(mbox, phba->mbox_mem_pool); 6386 return 1; 6387 } 6388 6389 /* abort the flogi coming back to ourselves 6390 * due to external loopback on the port. 6391 */ 6392 lpfc_els_abort_flogi(phba); 6393 return 0; 6394 6395 } else if (rc > 0) { /* greater than */ 6396 spin_lock_irq(shost->host_lock); 6397 vport->fc_flag |= FC_PT2PT_PLOGI; 6398 spin_unlock_irq(shost->host_lock); 6399 6400 /* If we have the high WWPN we can assign our own 6401 * myDID; otherwise, we have to WAIT for a PLOGI 6402 * from the remote NPort to find out what it 6403 * will be. 6404 */ 6405 vport->fc_myDID = PT2PT_LocalID; 6406 } else { 6407 vport->fc_myDID = PT2PT_RemoteID; 6408 } 6409 6410 /* 6411 * The vport state should go to LPFC_FLOGI only 6412 * AFTER we issue a FLOGI, not receive one. 6413 */ 6414 spin_lock_irq(shost->host_lock); 6415 fc_flag = vport->fc_flag; 6416 port_state = vport->port_state; 6417 vport->fc_flag |= FC_PT2PT; 6418 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 6419 spin_unlock_irq(shost->host_lock); 6420 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6421 "3311 Rcv Flogi PS x%x new PS x%x " 6422 "fc_flag x%x new fc_flag x%x\n", 6423 port_state, vport->port_state, 6424 fc_flag, vport->fc_flag); 6425 6426 /* 6427 * We temporarily set fc_myDID to make it look like we are 6428 * a Fabric. This is done just so we end up with the right 6429 * did / sid on the FLOGI ACC rsp. 
6430 */ 6431 did = vport->fc_myDID; 6432 vport->fc_myDID = Fabric_DID; 6433 6434 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); 6435 6436 /* Send back ACC */ 6437 lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, cmdiocb, ndlp, NULL); 6438 6439 /* Now lets put fc_myDID back to what its supposed to be */ 6440 vport->fc_myDID = did; 6441 6442 return 0; 6443 } 6444 6445 /** 6446 * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb 6447 * @vport: pointer to a host virtual N_Port data structure. 6448 * @cmdiocb: pointer to lpfc command iocb data structure. 6449 * @ndlp: pointer to a node-list data structure. 6450 * 6451 * This routine processes Request Node Identification Data (RNID) IOCB 6452 * received as an ELS unsolicited event. Only when the RNID specified format 6453 * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data) 6454 * present, this routine will invoke the lpfc_els_rsp_rnid_acc() routine to 6455 * Accept (ACC) the RNID ELS command. All the other RNID formats are 6456 * rejected by invoking the lpfc_els_rsp_reject() routine. 6457 * 6458 * Return code 6459 * 0 - Successfully processed rnid iocb (currently always return 0) 6460 **/ 6461 static int 6462 lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 6463 struct lpfc_nodelist *ndlp) 6464 { 6465 struct lpfc_dmabuf *pcmd; 6466 uint32_t *lp; 6467 RNID *rn; 6468 struct ls_rjt stat; 6469 uint32_t cmd; 6470 6471 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 6472 lp = (uint32_t *) pcmd->virt; 6473 6474 cmd = *lp++; 6475 rn = (RNID *) lp; 6476 6477 /* RNID received */ 6478 6479 switch (rn->Format) { 6480 case 0: 6481 case RNID_TOPOLOGY_DISC: 6482 /* Send back ACC */ 6483 lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp); 6484 break; 6485 default: 6486 /* Reject this request because format not supported */ 6487 stat.un.b.lsRjtRsvd0 = 0; 6488 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 6489 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 6490 stat.un.b.vendorUnique = 0; 6491 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 6492 NULL); 6493 } 6494 return 0; 6495 } 6496 6497 /** 6498 * lpfc_els_rcv_echo - Process an unsolicited echo iocb 6499 * @vport: pointer to a host virtual N_Port data structure. 6500 * @cmdiocb: pointer to lpfc command iocb data structure. 6501 * @ndlp: pointer to a node-list data structure. 6502 * 6503 * Return code 6504 * 0 - Successfully processed echo iocb (currently always return 0) 6505 **/ 6506 static int 6507 lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 6508 struct lpfc_nodelist *ndlp) 6509 { 6510 uint8_t *pcmd; 6511 6512 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt); 6513 6514 /* skip over first word of echo command to find echo data */ 6515 pcmd += sizeof(uint32_t); 6516 6517 lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp); 6518 return 0; 6519 } 6520 6521 /** 6522 * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb 6523 * @vport: pointer to a host virtual N_Port data structure. 6524 * @cmdiocb: pointer to lpfc command iocb data structure. 6525 * @ndlp: pointer to a node-list data structure. 6526 * 6527 * This routine processes a Link Incident Report Registration(LIRR) IOCB 6528 * received as an ELS unsolicited event. Currently, this function just invokes 6529 * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally. 
 *
 * Return code
 *   0 - Successfully processed lirr iocb (currently always returns 0)
 **/
static int
lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
		  struct lpfc_nodelist *ndlp)
{
	struct ls_rjt stat;

	/* For now, unconditionally reject this command */
	stat.un.b.lsRjtRsvd0 = 0;
	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
	stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
	stat.un.b.vendorUnique = 0;
	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
	return 0;
}

/**
 * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb
 * @vport: pointer to a host virtual N_Port data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB
 * received as an ELS unsolicited event. A request to RRQ shall only
 * be accepted if the Originator Nx_Port N_Port_ID or the Responder
 * Nx_Port N_Port_ID of the target Exchange is the same as the
 * N_Port_ID of the Nx_Port that makes the request. If the RRQ is
 * not accepted, an LS_RJT with reason code "Unable to perform
 * command request" and reason code explanation "Invalid Originator
 * S_ID" shall be returned. For now, we just unconditionally accept
 * RRQ from the target.
 **/
static void
lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
		 struct lpfc_nodelist *ndlp)
{
	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
	if (vport->phba->sli_rev == LPFC_SLI_REV4)
		lpfc_els_clear_rrq(vport, cmdiocb, ndlp);
}

/**
 * lpfc_els_rsp_rls_acc - Completion callback func for MBX_READ_LNK_STAT mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * This routine is the completion callback function for the MBX_READ_LNK_STAT
 * mailbox command. This callback function is to actually send the Accept
 * (ACC) response to a Read Link Error Status (RLS) unsolicited IOCB event.
 * It collects the link statistics from the completion of the
 * MBX_READ_LNK_STAT mailbox command, constructs the RLS response with the
 * link statistics collected, and then invokes the lpfc_sli_issue_iocb()
 * routine to send the ACC response to the RLS.
 *
 * Note that, in the lpfc_prep_els_iocb() routine, the reference count of ndlp
 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 * will be stored into the context1 field of the IOCB for the completion
 * callback function of the RLS Accept Response ELS IOCB command.
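 *
 * The exchange identifiers for the response are carried in pmb->context1
 * and unpacked below, roughly:
 *
 *   rxid = (uint16_t)((unsigned long)pmb->context1 & 0xffff);
 *   oxid = (uint16_t)(((unsigned long)pmb->context1 >> 16) & 0xffff);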
6591 * 6592 **/ 6593 static void 6594 lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 6595 { 6596 MAILBOX_t *mb; 6597 IOCB_t *icmd; 6598 struct RLS_RSP *rls_rsp; 6599 uint8_t *pcmd; 6600 struct lpfc_iocbq *elsiocb; 6601 struct lpfc_nodelist *ndlp; 6602 uint16_t oxid; 6603 uint16_t rxid; 6604 uint32_t cmdsize; 6605 6606 mb = &pmb->u.mb; 6607 6608 ndlp = (struct lpfc_nodelist *) pmb->context2; 6609 rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff); 6610 oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff); 6611 pmb->context1 = NULL; 6612 pmb->context2 = NULL; 6613 6614 if (mb->mbxStatus) { 6615 mempool_free(pmb, phba->mbox_mem_pool); 6616 return; 6617 } 6618 6619 cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t); 6620 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 6621 lpfc_max_els_tries, ndlp, 6622 ndlp->nlp_DID, ELS_CMD_ACC); 6623 6624 /* Decrement the ndlp reference count from previous mbox command */ 6625 lpfc_nlp_put(ndlp); 6626 6627 if (!elsiocb) { 6628 mempool_free(pmb, phba->mbox_mem_pool); 6629 return; 6630 } 6631 6632 icmd = &elsiocb->iocb; 6633 icmd->ulpContext = rxid; 6634 icmd->unsli3.rcvsli3.ox_id = oxid; 6635 6636 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 6637 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 6638 pcmd += sizeof(uint32_t); /* Skip past command */ 6639 rls_rsp = (struct RLS_RSP *)pcmd; 6640 6641 rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt); 6642 rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt); 6643 rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt); 6644 rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt); 6645 rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord); 6646 rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt); 6647 mempool_free(pmb, phba->mbox_mem_pool); 6648 /* Xmit ELS RLS ACC response tag <ulpIoTag> */ 6649 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 6650 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, " 6651 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 6652 elsiocb->iotag, elsiocb->iocb.ulpContext, 6653 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 6654 ndlp->nlp_rpi); 6655 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 6656 phba->fc_stat.elsXmitACC++; 6657 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR) 6658 lpfc_els_free_iocb(phba, elsiocb); 6659 } 6660 6661 /** 6662 * lpfc_els_rsp_rps_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd 6663 * @phba: pointer to lpfc hba data structure. 6664 * @pmb: pointer to the driver internal queue element for mailbox command. 6665 * 6666 * This routine is the completion callback function for the MBX_READ_LNK_STAT 6667 * mailbox command. This callback function is to actually send the Accept 6668 * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It 6669 * collects the link statistics from the completion of the MBX_READ_LNK_STAT 6670 * mailbox command, constructs the RPS response with the link statistics 6671 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC 6672 * response to the RPS. 6673 * 6674 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 6675 * will be incremented by 1 for holding the ndlp and the reference to ndlp 6676 * will be stored into the context1 field of the IOCB for the completion 6677 * callback function to the RPS Accept Response ELS IOCB command. 
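 *
 * The RPS port status word is derived from the current topology and fabric
 * state before the link counters are copied in; a sketch of that
 * composition, abridged from the function body below (bit values as used by
 * the driver):
 *
 *	status = (phba->fc_topology != LPFC_TOPOLOGY_LOOP) ? 0x10 : 0x8;
 *	if (phba->pport->fc_flag & FC_FABRIC)
 *		status |= 0x4;
 *	rps_rsp->portStatus = cpu_to_be16(status);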
6678 * 6679 **/ 6680 static void 6681 lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 6682 { 6683 MAILBOX_t *mb; 6684 IOCB_t *icmd; 6685 RPS_RSP *rps_rsp; 6686 uint8_t *pcmd; 6687 struct lpfc_iocbq *elsiocb; 6688 struct lpfc_nodelist *ndlp; 6689 uint16_t status; 6690 uint16_t oxid; 6691 uint16_t rxid; 6692 uint32_t cmdsize; 6693 6694 mb = &pmb->u.mb; 6695 6696 ndlp = (struct lpfc_nodelist *) pmb->context2; 6697 rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff); 6698 oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff); 6699 pmb->context1 = NULL; 6700 pmb->context2 = NULL; 6701 6702 if (mb->mbxStatus) { 6703 mempool_free(pmb, phba->mbox_mem_pool); 6704 return; 6705 } 6706 6707 cmdsize = sizeof(RPS_RSP) + sizeof(uint32_t); 6708 6709 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 6710 lpfc_max_els_tries, ndlp, 6711 ndlp->nlp_DID, ELS_CMD_ACC); 6712 6713 /* Decrement the ndlp reference count from previous mbox command */ 6714 lpfc_nlp_put(ndlp); 6715 6716 if (!elsiocb) { 6717 mempool_free(pmb, phba->mbox_mem_pool); 6718 return; } 6719 icmd = &elsiocb->iocb; 6720 icmd->ulpContext = rxid; 6721 icmd->unsli3.rcvsli3.ox_id = oxid; 6722 6723 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 6724 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 6725 pcmd += sizeof(uint32_t); /* Skip past command */ 6726 rps_rsp = (RPS_RSP *)pcmd; 6727 6728 if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) 6729 status = 0x10; 6730 else 6731 status = 0x8; 6732 if (phba->pport->fc_flag & FC_FABRIC) 6733 status |= 0x4; 6734 6735 rps_rsp->rsvd1 = 0; 6736 rps_rsp->portStatus = cpu_to_be16(status); 6737 rps_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt); 6738 rps_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt); 6739 rps_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt); 6740 rps_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt); 6741 rps_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord); 6742 rps_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt); mempool_free(pmb, phba->mbox_mem_pool); 6743 /* Xmit ELS RPS ACC response tag <ulpIoTag> */ 6744 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 6745 "0118 Xmit ELS RPS ACC response tag x%x xri x%x, " 6746 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 6747 elsiocb->iotag, elsiocb->iocb.ulpContext, 6748 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 6749 ndlp->nlp_rpi); 6750 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 6751 phba->fc_stat.elsXmitACC++; 6752 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR) 6753 lpfc_els_free_iocb(phba, elsiocb); 6754 return; 6755 } 6756 6757 /** 6758 * lpfc_els_rcv_rls - Process an unsolicited rls iocb 6759 * @vport: pointer to a host virtual N_Port data structure. 6760 * @cmdiocb: pointer to lpfc command iocb data structure. 6761 * @ndlp: pointer to a node-list data structure. 6762 * 6763 * This routine processes a Read Link Error Status Block (RLS) IOCB received 6764 * as an ELS unsolicited event. It first checks the remote port state. If the 6765 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE 6766 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject 6767 * response. Otherwise, it issues the MBX_READ_LNK_STAT mailbox command 6768 * for reading the HBA link statistics. The callback function, 6769 * lpfc_els_rsp_rls_acc(), set on the MBX_READ_LNK_STAT mailbox command, 6770 * then sends out the RLS Accept (ACC) response.
6771 * 6772 * Return codes 6773 * 0 - Successfully processed rls iocb (currently always return 0) 6774 **/ 6775 static int 6776 lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 6777 struct lpfc_nodelist *ndlp) 6778 { 6779 struct lpfc_hba *phba = vport->phba; 6780 LPFC_MBOXQ_t *mbox; 6781 struct ls_rjt stat; 6782 6783 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 6784 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) 6785 /* reject the unsolicited RLS request and done with it */ 6786 goto reject_out; 6787 6788 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC); 6789 if (mbox) { 6790 lpfc_read_lnk_stat(phba, mbox); 6791 mbox->context1 = (void *)((unsigned long) 6792 ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) | 6793 cmdiocb->iocb.ulpContext)); /* rx_id */ 6794 mbox->context2 = lpfc_nlp_get(ndlp); 6795 mbox->vport = vport; 6796 mbox->mbox_cmpl = lpfc_els_rsp_rls_acc; 6797 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 6798 != MBX_NOT_FINISHED) 6799 /* Mbox completion will send ELS Response */ 6800 return 0; 6801 /* Decrement reference count used for the failed mbox 6802 * command. 6803 */ 6804 lpfc_nlp_put(ndlp); 6805 mempool_free(mbox, phba->mbox_mem_pool); 6806 } 6807 reject_out: 6808 /* issue rejection response */ 6809 stat.un.b.lsRjtRsvd0 = 0; 6810 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 6811 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 6812 stat.un.b.vendorUnique = 0; 6813 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 6814 return 0; 6815 } 6816 6817 /** 6818 * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb 6819 * @vport: pointer to a host virtual N_Port data structure. 6820 * @cmdiocb: pointer to lpfc command iocb data structure. 6821 * @ndlp: pointer to a node-list data structure. 6822 * 6823 * This routine processes a Read Timeout Value (RTV) IOCB received as an 6824 * ELS unsolicited event. It first checks the remote port state. If the 6825 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE 6826 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject 6827 * response. Otherwise, it sends the Accept (ACC) response to the Read Timeout 6828 * Value (RTV) unsolicited IOCB event. 6829 * 6830 * Note that, in the lpfc_prep_els_iocb() routine, the reference count of ndlp 6831 * will be incremented by 1 for holding the ndlp and the reference to ndlp 6832 * will be stored into the context1 field of the IOCB for the completion 6833 * callback function to the RTV Accept Response ELS IOCB command.
6834 * 6835 * Return codes 6836 * 0 - Successfully processed rtv iocb (1 if the response iocb could not be allocated) 6837 **/ 6838 static int 6839 lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 6840 struct lpfc_nodelist *ndlp) 6841 { 6842 struct lpfc_hba *phba = vport->phba; 6843 struct ls_rjt stat; 6844 struct RTV_RSP *rtv_rsp; 6845 uint8_t *pcmd; 6846 struct lpfc_iocbq *elsiocb; 6847 uint32_t cmdsize; 6848 6849 6850 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 6851 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) 6852 /* reject the unsolicited RTV request and done with it */ 6853 goto reject_out; 6854 6855 cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t); 6856 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 6857 lpfc_max_els_tries, ndlp, 6858 ndlp->nlp_DID, ELS_CMD_ACC); 6859 6860 if (!elsiocb) 6861 return 1; 6862 6863 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 6864 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 6865 pcmd += sizeof(uint32_t); /* Skip past command */ 6866 6867 /* use the command's xri in the response */ 6868 elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext; /* Xri / rx_id */ 6869 elsiocb->iocb.unsli3.rcvsli3.ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id; 6870 6871 rtv_rsp = (struct RTV_RSP *)pcmd; 6872 6873 /* populate RTV payload */ 6874 rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */ 6875 rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov); 6876 bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0); 6877 bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */ 6878 rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov); 6879 6880 /* Xmit ELS RTV ACC response tag <ulpIoTag> */ 6881 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 6882 "2875 Xmit ELS RTV ACC response tag x%x xri x%x, " 6883 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, " 6884 "Data: x%x x%x x%x\n", 6885 elsiocb->iotag, elsiocb->iocb.ulpContext, 6886 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 6887 ndlp->nlp_rpi, 6888 rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov); 6889 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 6890 phba->fc_stat.elsXmitACC++; 6891 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR) 6892 lpfc_els_free_iocb(phba, elsiocb); 6893 return 0; 6894 6895 reject_out: 6896 /* issue rejection response */ 6897 stat.un.b.lsRjtRsvd0 = 0; 6898 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 6899 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 6900 stat.un.b.vendorUnique = 0; 6901 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 6902 return 0; 6903 } 6904 6905 /** * lpfc_els_rcv_rps - Process an unsolicited rps iocb 6906 * @vport: pointer to a host virtual N_Port data structure. 6907 * @cmdiocb: pointer to lpfc command iocb data structure. 6908 * @ndlp: pointer to a node-list data structure. 6909 * 6910 * This routine processes Read Port Status (RPS) IOCB received as an 6911 * ELS unsolicited event. It first checks the remote port state. If the 6912 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE 6913 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject 6914 * response. Otherwise, it issues the MBX_READ_LNK_STAT mailbox command 6915 * for reading the HBA link statistics. The callback function, 6916 * lpfc_els_rsp_rps_acc(), set on the MBX_READ_LNK_STAT mailbox command, 6917 * then sends out the RPS Accept (ACC) response.
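 *
 * The RPS payload selects what is being queried through a 4-bit flag
 * (0: the receiving port itself, 1: a port number, 2: a port WWN, as
 * interpreted here). A sketch of the match performed below, abridged from
 * the function body:
 *
 *	flag = be32_to_cpu(*lp++) & 0xf;
 *	if (flag == 0 ||
 *	    (flag == 1 && be32_to_cpu(rps->un.portNum) == 0) ||
 *	    (flag == 2 && !memcmp(&rps->un.portName, &vport->fc_portname,
 *				  sizeof(struct lpfc_name))))
 *		issue MBX_READ_LNK_STAT; its completion sends the ACC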
6918 * 6919 * Return codes 6920 * 0 - Successfully processed rps iocb (currently always return 0) 6921 **/ 6922 static int 6923 lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 6924 struct lpfc_nodelist *ndlp) 6925 { 6926 struct lpfc_hba *phba = vport->phba; 6927 uint32_t *lp; 6928 uint8_t flag; 6929 LPFC_MBOXQ_t *mbox; 6930 struct lpfc_dmabuf *pcmd; 6931 RPS *rps; 6932 struct ls_rjt stat; 6933 6934 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 6935 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) 6936 /* reject the unsolicited RPS request and done with it */ 6937 goto reject_out; 6938 6939 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 6940 lp = (uint32_t *) pcmd->virt; 6941 flag = (be32_to_cpu(*lp++) & 0xf); 6942 rps = (RPS *) lp; 6943 6944 if ((flag == 0) || 6945 ((flag == 1) && (be32_to_cpu(rps->un.portNum) == 0)) || 6946 ((flag == 2) && (memcmp(&rps->un.portName, &vport->fc_portname, 6947 sizeof(struct lpfc_name)) == 0))) { 6948 6949 printk("Fix me....\n"); 6950 dump_stack(); 6951 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC); 6952 if (mbox) { 6953 lpfc_read_lnk_stat(phba, mbox); 6954 mbox->context1 = (void *)((unsigned long) 6955 ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) | 6956 cmdiocb->iocb.ulpContext)); /* rx_id */ 6957 mbox->context2 = lpfc_nlp_get(ndlp); 6958 mbox->vport = vport; 6959 mbox->mbox_cmpl = lpfc_els_rsp_rps_acc; 6960 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 6961 != MBX_NOT_FINISHED) 6962 /* Mbox completion will send ELS Response */ 6963 return 0; 6964 /* Decrement reference count used for the failed mbox 6965 * command. 6966 */ 6967 lpfc_nlp_put(ndlp); 6968 mempool_free(mbox, phba->mbox_mem_pool); 6969 } 6970 } 6971 6972 reject_out: 6973 /* issue rejection response */ 6974 stat.un.b.lsRjtRsvd0 = 0; 6975 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 6976 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 6977 stat.un.b.vendorUnique = 0; 6978 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 6979 return 0; 6980 } 6981 6982 /** * lpfc_issue_els_rrq - Issue an ELS RRQ (Reinstate Recovery Qualifier) command 6983 * @vport: pointer to a host virtual N_Port data structure. 6984 * @ndlp: pointer to a node-list data structure. 6985 * @did: DID of the target. 6986 * @rrq: Pointer to the rrq struct. 6987 * 6988 * Build an ELS RRQ command and send it to the target. If the IOCB is issued 6989 * successfully, the completion handler will clear the RRQ. 6990 * 6991 * Return codes 6992 * 0 - Successfully sent rrq els iocb. 6993 * 1 - Failed to send rrq els iocb.
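 *
 * The RRQ payload identifies the exchange being reinstated; a sketch of how
 * it is filled in below, abridged from the function body:
 *
 *	bf_set(rrq_oxid, els_rrq, phba->sli4_hba.xri_ids[rrq->xritag]);
 *	bf_set(rrq_rxid, els_rrq, rrq->rxid);
 *	bf_set(rrq_did, els_rrq, vport->fc_myDID);
 *	els_rrq->rrq = cpu_to_be32(els_rrq->rrq);
 *	els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg);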
6994 **/ 6995 static int 6996 lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 6997 uint32_t did, struct lpfc_node_rrq *rrq) 6998 { 6999 struct lpfc_hba *phba = vport->phba; 7000 struct RRQ *els_rrq; 7001 struct lpfc_iocbq *elsiocb; 7002 uint8_t *pcmd; 7003 uint16_t cmdsize; 7004 int ret; 7005 7006 7007 if (ndlp != rrq->ndlp) 7008 ndlp = rrq->ndlp; 7009 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) 7010 return 1; 7011 7012 /* If ndlp is not NULL, we will bump the reference count on it */ 7013 cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ)); 7014 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did, 7015 ELS_CMD_RRQ); 7016 if (!elsiocb) 7017 return 1; 7018 7019 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 7020 7021 /* For RRQ request, remainder of payload is Exchange IDs */ 7022 *((uint32_t *) (pcmd)) = ELS_CMD_RRQ; 7023 pcmd += sizeof(uint32_t); 7024 els_rrq = (struct RRQ *) pcmd; 7025 7026 bf_set(rrq_oxid, els_rrq, phba->sli4_hba.xri_ids[rrq->xritag]); 7027 bf_set(rrq_rxid, els_rrq, rrq->rxid); 7028 bf_set(rrq_did, els_rrq, vport->fc_myDID); 7029 els_rrq->rrq = cpu_to_be32(els_rrq->rrq); 7030 els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg); 7031 7032 7033 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 7034 "Issue RRQ: did:x%x", 7035 did, rrq->xritag, rrq->rxid); 7036 elsiocb->context_un.rrq = rrq; 7037 elsiocb->iocb_cmpl = lpfc_cmpl_els_rrq; 7038 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7039 7040 if (ret == IOCB_ERROR) { 7041 lpfc_els_free_iocb(phba, elsiocb); 7042 return 1; 7043 } 7044 return 0; 7045 } 7046 7047 /** 7048 * lpfc_send_rrq - Sends ELS RRQ if needed. 7049 * @phba: pointer to lpfc hba data structure. 7050 * @rrq: pointer to the active rrq. 7051 * 7052 * This routine will call the lpfc_issue_els_rrq if the rrq is 7053 * still active for the xri. If this function returns a failure then 7054 * the caller needs to clean up the RRQ by calling lpfc_clr_active_rrq. 7055 * 7056 * Returns 0 Success. 7057 * 1 Failure. 7058 **/ 7059 int 7060 lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq) 7061 { 7062 struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport, 7063 rrq->nlp_DID); 7064 if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag)) 7065 return lpfc_issue_els_rrq(rrq->vport, ndlp, 7066 rrq->nlp_DID, rrq); 7067 else 7068 return 1; 7069 } 7070 7071 /** 7072 * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command 7073 * @vport: pointer to a host virtual N_Port data structure. 7074 * @cmdsize: size of the ELS command. 7075 * @oldiocb: pointer to the original lpfc command iocb data structure. 7076 * @ndlp: pointer to a node-list data structure. 7077 * 7078 * This routine issuees an Accept (ACC) Read Port List (RPL) ELS command. 7079 * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL. 7080 * 7081 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 7082 * will be incremented by 1 for holding the ndlp and the reference to ndlp 7083 * will be stored into the context1 field of the IOCB for the completion 7084 * callback function to the RPL Accept Response ELS command. 
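 *
 * The first word of the ACC carries the ELS_CMD_ACC command code with the
 * RPL payload length folded into its second half; a sketch of how that word
 * is written below, abridged from the function body:
 *
 *	*((uint32_t *)pcmd) = ELS_CMD_ACC;
 *	pcmd += sizeof(uint16_t);
 *	*((uint16_t *)pcmd) = be16_to_cpu(cmdsize);
 *	pcmd += sizeof(uint16_t);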
7085 * 7086 * Return code 7087 * 0 - Successfully issued ACC RPL ELS command 7088 * 1 - Failed to issue ACC RPL ELS command 7089 **/ 7090 static int 7091 lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize, 7092 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 7093 { 7094 struct lpfc_hba *phba = vport->phba; 7095 IOCB_t *icmd, *oldcmd; 7096 RPL_RSP rpl_rsp; 7097 struct lpfc_iocbq *elsiocb; 7098 uint8_t *pcmd; 7099 7100 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 7101 ndlp->nlp_DID, ELS_CMD_ACC); 7102 7103 if (!elsiocb) 7104 return 1; 7105 7106 icmd = &elsiocb->iocb; 7107 oldcmd = &oldiocb->iocb; 7108 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 7109 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 7110 7111 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 7112 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 7113 pcmd += sizeof(uint16_t); 7114 *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize); 7115 pcmd += sizeof(uint16_t); 7116 7117 /* Setup the RPL ACC payload */ 7118 rpl_rsp.listLen = be32_to_cpu(1); 7119 rpl_rsp.index = 0; 7120 rpl_rsp.port_num_blk.portNum = 0; 7121 rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID); 7122 memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname, 7123 sizeof(struct lpfc_name)); 7124 memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t)); 7125 /* Xmit ELS RPL ACC response tag <ulpIoTag> */ 7126 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7127 "0120 Xmit ELS RPL ACC response tag x%x " 7128 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " 7129 "rpi x%x\n", 7130 elsiocb->iotag, elsiocb->iocb.ulpContext, 7131 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 7132 ndlp->nlp_rpi); 7133 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 7134 phba->fc_stat.elsXmitACC++; 7135 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == 7136 IOCB_ERROR) { 7137 lpfc_els_free_iocb(phba, elsiocb); 7138 return 1; 7139 } 7140 return 0; 7141 } 7142 7143 /** 7144 * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb 7145 * @vport: pointer to a host virtual N_Port data structure. 7146 * @cmdiocb: pointer to lpfc command iocb data structure. 7147 * @ndlp: pointer to a node-list data structure. 7148 * 7149 * This routine processes Read Port List (RPL) IOCB received as an ELS 7150 * unsolicited event. It first checks the remote port state. If the remote 7151 * port is not in NLP_STE_UNMAPPED_NODE and NLP_STE_MAPPED_NODE states, it 7152 * invokes the lpfc_els_rsp_reject() routine to send reject response. 7153 * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine 7154 * to accept the RPL. 
7155 * 7156 * Return code 7157 * 0 - Successfully processed rpl iocb (currently always return 0) 7158 **/ 7159 static int 7160 lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7161 struct lpfc_nodelist *ndlp) 7162 { 7163 struct lpfc_dmabuf *pcmd; 7164 uint32_t *lp; 7165 uint32_t maxsize; 7166 uint16_t cmdsize; 7167 RPL *rpl; 7168 struct ls_rjt stat; 7169 7170 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 7171 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { 7172 /* issue rejection response */ 7173 stat.un.b.lsRjtRsvd0 = 0; 7174 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 7175 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 7176 stat.un.b.vendorUnique = 0; 7177 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 7178 NULL); 7179 /* rejected the unsolicited RPL request and done with it */ 7180 return 0; 7181 } 7182 7183 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 7184 lp = (uint32_t *) pcmd->virt; 7185 rpl = (RPL *) (lp + 1); 7186 maxsize = be32_to_cpu(rpl->maxsize); 7187 7188 /* We support only one port */ 7189 if ((rpl->index == 0) && 7190 ((maxsize == 0) || 7191 ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) { 7192 cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP); 7193 } else { 7194 cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t); 7195 } 7196 lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp); 7197 7198 return 0; 7199 } 7200 7201 /** 7202 * lpfc_els_rcv_farp - Process an unsolicited farp request els command 7203 * @vport: pointer to a virtual N_Port data structure. 7204 * @cmdiocb: pointer to lpfc command iocb data structure. 7205 * @ndlp: pointer to a node-list data structure. 7206 * 7207 * This routine processes Fibre Channel Address Resolution Protocol 7208 * (FARP) Request IOCB received as an ELS unsolicited event. Currently, 7209 * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such, 7210 * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the 7211 * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the 7212 * remote PortName is compared against the FC PortName stored in the @vport 7213 * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is 7214 * compared against the FC NodeName stored in the @vport data structure. 7215 * If any of these matches and the FARP_REQUEST_FARPR flag is set in the 7216 * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is 7217 * invoked to send out FARP Response to the remote node. Before sending the 7218 * FARP Response, however, the FARP_REQUEST_PLOGI flag is check in the FARP 7219 * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi() 7220 * routine is invoked to log into the remote port first. 
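 *
 * A sketch of the match-mode check and the WWPN compare, abridged from the
 * function body below (the WWNN compare is analogous):
 *
 *	if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT))
 *		return 0;	unsupported match mode, silently ignored
 *	if ((fp->Mflags & FARP_MATCH_PORT) &&
 *	    !memcmp(&fp->RportName, &vport->fc_portname,
 *		    sizeof(struct lpfc_name)))
 *		cnt = 1;	this port is being looked up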
7221 * 7222 * Return code 7223 * 0 - Either the FARP Match Mode not supported or successfully processed 7224 **/ 7225 static int 7226 lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7227 struct lpfc_nodelist *ndlp) 7228 { 7229 struct lpfc_dmabuf *pcmd; 7230 uint32_t *lp; 7231 IOCB_t *icmd; 7232 FARP *fp; 7233 uint32_t cmd, cnt, did; 7234 7235 icmd = &cmdiocb->iocb; 7236 did = icmd->un.elsreq64.remoteID; 7237 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 7238 lp = (uint32_t *) pcmd->virt; 7239 7240 cmd = *lp++; 7241 fp = (FARP *) lp; 7242 /* FARP-REQ received from DID <did> */ 7243 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7244 "0601 FARP-REQ received from DID x%x\n", did); 7245 /* We will only support match on WWPN or WWNN */ 7246 if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) { 7247 return 0; 7248 } 7249 7250 cnt = 0; 7251 /* If this FARP command is searching for my portname */ 7252 if (fp->Mflags & FARP_MATCH_PORT) { 7253 if (memcmp(&fp->RportName, &vport->fc_portname, 7254 sizeof(struct lpfc_name)) == 0) 7255 cnt = 1; 7256 } 7257 7258 /* If this FARP command is searching for my nodename */ 7259 if (fp->Mflags & FARP_MATCH_NODE) { 7260 if (memcmp(&fp->RnodeName, &vport->fc_nodename, 7261 sizeof(struct lpfc_name)) == 0) 7262 cnt = 1; 7263 } 7264 7265 if (cnt) { 7266 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || 7267 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) { 7268 /* Log back into the node before sending the FARP. */ 7269 if (fp->Rflags & FARP_REQUEST_PLOGI) { 7270 ndlp->nlp_prev_state = ndlp->nlp_state; 7271 lpfc_nlp_set_state(vport, ndlp, 7272 NLP_STE_PLOGI_ISSUE); 7273 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 7274 } 7275 7276 /* Send a FARP response to that node */ 7277 if (fp->Rflags & FARP_REQUEST_FARPR) 7278 lpfc_issue_els_farpr(vport, did, 0); 7279 } 7280 } 7281 return 0; 7282 } 7283 7284 /** 7285 * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb 7286 * @vport: pointer to a host virtual N_Port data structure. 7287 * @cmdiocb: pointer to lpfc command iocb data structure. 7288 * @ndlp: pointer to a node-list data structure. 7289 * 7290 * This routine processes Fibre Channel Address Resolution Protocol 7291 * Response (FARPR) IOCB received as an ELS unsolicited event. It simply 7292 * invokes the lpfc_els_rsp_acc() routine to the remote node to accept 7293 * the FARP response request. 7294 * 7295 * Return code 7296 * 0 - Successfully processed FARPR IOCB (currently always return 0) 7297 **/ 7298 static int 7299 lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7300 struct lpfc_nodelist *ndlp) 7301 { 7302 struct lpfc_dmabuf *pcmd; 7303 uint32_t *lp; 7304 IOCB_t *icmd; 7305 uint32_t cmd, did; 7306 7307 icmd = &cmdiocb->iocb; 7308 did = icmd->un.elsreq64.remoteID; 7309 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 7310 lp = (uint32_t *) pcmd->virt; 7311 7312 cmd = *lp++; 7313 /* FARP-RSP received from DID <did> */ 7314 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7315 "0600 FARP-RSP received from DID x%x\n", did); 7316 /* ACCEPT the Farp resp request */ 7317 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 7318 7319 return 0; 7320 } 7321 7322 /** 7323 * lpfc_els_rcv_fan - Process an unsolicited fan iocb command 7324 * @vport: pointer to a host virtual N_Port data structure. 7325 * @cmdiocb: pointer to lpfc command iocb data structure. 7326 * @fan_ndlp: pointer to a node-list data structure. 
7327 * 7328 * This routine processes a Fabric Address Notification (FAN) IOCB 7329 * command received as an ELS unsolicited event. The FAN ELS command will 7330 * only be processed on a physical port (i.e., the @vport represents the 7331 * physical port). The fabric NodeName and PortName from the FAN IOCB are 7332 * compared against those in the phba data structure. If any of those is 7333 * different, the lpfc_initial_flogi() routine is invoked to initialize 7334 * Fabric Login (FLOGI) to the fabric to start the discover over. Otherwise, 7335 * if both of those are identical, the lpfc_issue_fabric_reglogin() routine 7336 * is invoked to register login to the fabric. 7337 * 7338 * Return code 7339 * 0 - Successfully processed fan iocb (currently always return 0). 7340 **/ 7341 static int 7342 lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7343 struct lpfc_nodelist *fan_ndlp) 7344 { 7345 struct lpfc_hba *phba = vport->phba; 7346 uint32_t *lp; 7347 FAN *fp; 7348 7349 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n"); 7350 lp = (uint32_t *)((struct lpfc_dmabuf *)cmdiocb->context2)->virt; 7351 fp = (FAN *) ++lp; 7352 /* FAN received; Fan does not have a reply sequence */ 7353 if ((vport == phba->pport) && 7354 (vport->port_state == LPFC_LOCAL_CFG_LINK)) { 7355 if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName, 7356 sizeof(struct lpfc_name))) || 7357 (memcmp(&phba->fc_fabparam.portName, &fp->FportName, 7358 sizeof(struct lpfc_name)))) { 7359 /* This port has switched fabrics. FLOGI is required */ 7360 lpfc_issue_init_vfi(vport); 7361 } else { 7362 /* FAN verified - skip FLOGI */ 7363 vport->fc_myDID = vport->fc_prevDID; 7364 if (phba->sli_rev < LPFC_SLI_REV4) 7365 lpfc_issue_fabric_reglogin(vport); 7366 else { 7367 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7368 "3138 Need register VFI: (x%x/%x)\n", 7369 vport->fc_prevDID, vport->fc_myDID); 7370 lpfc_issue_reg_vfi(vport); 7371 } 7372 } 7373 } 7374 return 0; 7375 } 7376 7377 /** 7378 * lpfc_els_timeout - Handler funciton to the els timer 7379 * @ptr: holder for the timer function associated data. 7380 * 7381 * This routine is invoked by the ELS timer after timeout. It posts the ELS 7382 * timer timeout event by setting the WORKER_ELS_TMO bit to the work port 7383 * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake 7384 * up the worker thread. It is for the worker thread to invoke the routine 7385 * lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO. 7386 **/ 7387 void 7388 lpfc_els_timeout(unsigned long ptr) 7389 { 7390 struct lpfc_vport *vport = (struct lpfc_vport *) ptr; 7391 struct lpfc_hba *phba = vport->phba; 7392 uint32_t tmo_posted; 7393 unsigned long iflag; 7394 7395 spin_lock_irqsave(&vport->work_port_lock, iflag); 7396 tmo_posted = vport->work_port_events & WORKER_ELS_TMO; 7397 if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING))) 7398 vport->work_port_events |= WORKER_ELS_TMO; 7399 spin_unlock_irqrestore(&vport->work_port_lock, iflag); 7400 7401 if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING))) 7402 lpfc_worker_wake_up(phba); 7403 return; 7404 } 7405 7406 7407 /** 7408 * lpfc_els_timeout_handler - Process an els timeout event 7409 * @vport: pointer to a virtual N_Port data structure. 7410 * 7411 * This routine is the actual handler function that processes an ELS timeout 7412 * event. 
It walks the ELS ring to get and abort all the IOCBs (except the 7413 * ABORT/CLOSE/FARP/FARPR/FDISC), which are associated with the @vport by 7414 * invoking the lpfc_sli_issue_abort_iotag() routine. 7415 **/ 7416 void 7417 lpfc_els_timeout_handler(struct lpfc_vport *vport) 7418 { 7419 struct lpfc_hba *phba = vport->phba; 7420 struct lpfc_sli_ring *pring; 7421 struct lpfc_iocbq *tmp_iocb, *piocb; 7422 IOCB_t *cmd = NULL; 7423 struct lpfc_dmabuf *pcmd; 7424 uint32_t els_command = 0; 7425 uint32_t timeout; 7426 uint32_t remote_ID = 0xffffffff; 7427 LIST_HEAD(abort_list); 7428 7429 7430 timeout = (uint32_t)(phba->fc_ratov << 1); 7431 7432 pring = lpfc_phba_elsring(phba); 7433 7434 if ((phba->pport->load_flag & FC_UNLOADING)) 7435 return; 7436 spin_lock_irq(&phba->hbalock); 7437 if (phba->sli_rev == LPFC_SLI_REV4) 7438 spin_lock(&pring->ring_lock); 7439 7440 if ((phba->pport->load_flag & FC_UNLOADING)) { 7441 if (phba->sli_rev == LPFC_SLI_REV4) 7442 spin_unlock(&pring->ring_lock); 7443 spin_unlock_irq(&phba->hbalock); 7444 return; 7445 } 7446 7447 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 7448 cmd = &piocb->iocb; 7449 7450 if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 || 7451 piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || 7452 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) 7453 continue; 7454 7455 if (piocb->vport != vport) 7456 continue; 7457 7458 pcmd = (struct lpfc_dmabuf *) piocb->context2; 7459 if (pcmd) 7460 els_command = *(uint32_t *) (pcmd->virt); 7461 7462 if (els_command == ELS_CMD_FARP || 7463 els_command == ELS_CMD_FARPR || 7464 els_command == ELS_CMD_FDISC) 7465 continue; 7466 7467 if (piocb->drvrTimeout > 0) { 7468 if (piocb->drvrTimeout >= timeout) 7469 piocb->drvrTimeout -= timeout; 7470 else 7471 piocb->drvrTimeout = 0; 7472 continue; 7473 } 7474 7475 remote_ID = 0xffffffff; 7476 if (cmd->ulpCommand != CMD_GEN_REQUEST64_CR) 7477 remote_ID = cmd->un.elsreq64.remoteID; 7478 else { 7479 struct lpfc_nodelist *ndlp; 7480 ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext); 7481 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) 7482 remote_ID = ndlp->nlp_DID; 7483 } 7484 list_add_tail(&piocb->dlist, &abort_list); 7485 } 7486 if (phba->sli_rev == LPFC_SLI_REV4) 7487 spin_unlock(&pring->ring_lock); 7488 spin_unlock_irq(&phba->hbalock); 7489 7490 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { 7491 cmd = &piocb->iocb; 7492 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 7493 "0127 ELS timeout Data: x%x x%x x%x " 7494 "x%x\n", els_command, 7495 remote_ID, cmd->ulpCommand, cmd->ulpIoTag); 7496 spin_lock_irq(&phba->hbalock); 7497 list_del_init(&piocb->dlist); 7498 lpfc_sli_issue_abort_iotag(phba, pring, piocb); 7499 spin_unlock_irq(&phba->hbalock); 7500 } 7501 7502 if (!list_empty(&pring->txcmplq)) 7503 if (!(phba->pport->load_flag & FC_UNLOADING)) 7504 mod_timer(&vport->els_tmofunc, 7505 jiffies + msecs_to_jiffies(1000 * timeout)); 7506 } 7507 7508 /** 7509 * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport 7510 * @vport: pointer to a host virtual N_Port data structure. 7511 * 7512 * This routine is used to clean up all the outstanding ELS commands on a 7513 * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport() 7514 * routine. After that, it walks the ELS transmit queue to remove all the 7515 * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. 
For 7516 * the IOCBs with a non-NULL completion callback function, the callback 7517 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and 7518 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion 7519 * callback function, the IOCB will simply be released. Finally, it walks 7520 * the ELS transmit completion queue to issue an abort IOCB to any transmit 7521 * completion queue IOCB that is associated with the @vport and is not 7522 * an IOCB from libdfc (i.e., the management plane IOCBs that are not 7523 * part of the discovery state machine) out to HBA by invoking the 7524 * lpfc_sli_issue_abort_iotag() routine. Note that this function issues the 7525 * abort IOCB to any transmit completion queueed IOCB, it does not guarantee 7526 * the IOCBs are aborted when this function returns. 7527 **/ 7528 void 7529 lpfc_els_flush_cmd(struct lpfc_vport *vport) 7530 { 7531 LIST_HEAD(abort_list); 7532 struct lpfc_hba *phba = vport->phba; 7533 struct lpfc_sli_ring *pring; 7534 struct lpfc_iocbq *tmp_iocb, *piocb; 7535 IOCB_t *cmd = NULL; 7536 7537 lpfc_fabric_abort_vport(vport); 7538 /* 7539 * For SLI3, only the hbalock is required. But SLI4 needs to coordinate 7540 * with the ring insert operation. Because lpfc_sli_issue_abort_iotag 7541 * ultimately grabs the ring_lock, the driver must splice the list into 7542 * a working list and release the locks before calling the abort. 7543 */ 7544 spin_lock_irq(&phba->hbalock); 7545 pring = lpfc_phba_elsring(phba); 7546 7547 /* Bail out if we've no ELS wq, like in PCI error recovery case. */ 7548 if (unlikely(!pring)) { 7549 spin_unlock_irq(&phba->hbalock); 7550 return; 7551 } 7552 7553 if (phba->sli_rev == LPFC_SLI_REV4) 7554 spin_lock(&pring->ring_lock); 7555 7556 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 7557 if (piocb->iocb_flag & LPFC_IO_LIBDFC) 7558 continue; 7559 7560 if (piocb->vport != vport) 7561 continue; 7562 list_add_tail(&piocb->dlist, &abort_list); 7563 } 7564 if (phba->sli_rev == LPFC_SLI_REV4) 7565 spin_unlock(&pring->ring_lock); 7566 spin_unlock_irq(&phba->hbalock); 7567 /* Abort each iocb on the aborted list and remove the dlist links. 
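 * The dlist member is only the staging link; each iocb stays queued on the
 * txcmplq via its list member until lpfc_sli_issue_abort_iotag() is called.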
*/ 7568 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { 7569 spin_lock_irq(&phba->hbalock); 7570 list_del_init(&piocb->dlist); 7571 lpfc_sli_issue_abort_iotag(phba, pring, piocb); 7572 spin_unlock_irq(&phba->hbalock); 7573 } 7574 if (!list_empty(&abort_list)) 7575 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 7576 "3387 abort list for txq not empty\n"); 7577 INIT_LIST_HEAD(&abort_list); 7578 7579 spin_lock_irq(&phba->hbalock); 7580 if (phba->sli_rev == LPFC_SLI_REV4) 7581 spin_lock(&pring->ring_lock); 7582 7583 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) { 7584 cmd = &piocb->iocb; 7585 7586 if (piocb->iocb_flag & LPFC_IO_LIBDFC) { 7587 continue; 7588 } 7589 7590 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */ 7591 if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN || 7592 cmd->ulpCommand == CMD_QUE_RING_BUF64_CN || 7593 cmd->ulpCommand == CMD_CLOSE_XRI_CN || 7594 cmd->ulpCommand == CMD_ABORT_XRI_CN) 7595 continue; 7596 7597 if (piocb->vport != vport) 7598 continue; 7599 7600 list_del_init(&piocb->list); 7601 list_add_tail(&piocb->list, &abort_list); 7602 } 7603 if (phba->sli_rev == LPFC_SLI_REV4) 7604 spin_unlock(&pring->ring_lock); 7605 spin_unlock_irq(&phba->hbalock); 7606 7607 /* Cancell all the IOCBs from the completions list */ 7608 lpfc_sli_cancel_iocbs(phba, &abort_list, 7609 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); 7610 7611 return; 7612 } 7613 7614 /** 7615 * lpfc_els_flush_all_cmd - Clean up all the outstanding els commands to a HBA 7616 * @phba: pointer to lpfc hba data structure. 7617 * 7618 * This routine is used to clean up all the outstanding ELS commands on a 7619 * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba() 7620 * routine. After that, it walks the ELS transmit queue to remove all the 7621 * IOCBs to the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For 7622 * the IOCBs with the completion callback function associated, the callback 7623 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and 7624 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without the completion 7625 * callback function associated, the IOCB will simply be released. Finally, 7626 * it walks the ELS transmit completion queue to issue an abort IOCB to any 7627 * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the 7628 * management plane IOCBs that are not part of the discovery state machine) 7629 * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine. 7630 **/ 7631 void 7632 lpfc_els_flush_all_cmd(struct lpfc_hba *phba) 7633 { 7634 struct lpfc_vport *vport; 7635 list_for_each_entry(vport, &phba->port_list, listentry) 7636 lpfc_els_flush_cmd(vport); 7637 7638 return; 7639 } 7640 7641 /** 7642 * lpfc_send_els_failure_event - Posts an ELS command failure event 7643 * @phba: Pointer to hba context object. 7644 * @cmdiocbp: Pointer to command iocb which reported error. 7645 * @rspiocbp: Pointer to response iocb which reported error. 7646 * 7647 * This function sends an event when there is an ELS command 7648 * failure. 
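 *
 * For an LS_RJT completion the reason and explanation codes are unpacked
 * from word 4 of the response IOCB; a sketch of that decode, abridged from
 * the function body:
 *
 *	stat.un.lsRjtError = be32_to_cpu(rspiocbp->iocb.un.ulpWord[4]);
 *	lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode;
 *	lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp;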
7649 **/ 7650 void 7651 lpfc_send_els_failure_event(struct lpfc_hba *phba, 7652 struct lpfc_iocbq *cmdiocbp, 7653 struct lpfc_iocbq *rspiocbp) 7654 { 7655 struct lpfc_vport *vport = cmdiocbp->vport; 7656 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7657 struct lpfc_lsrjt_event lsrjt_event; 7658 struct lpfc_fabric_event_header fabric_event; 7659 struct ls_rjt stat; 7660 struct lpfc_nodelist *ndlp; 7661 uint32_t *pcmd; 7662 7663 ndlp = cmdiocbp->context1; 7664 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) 7665 return; 7666 7667 if (rspiocbp->iocb.ulpStatus == IOSTAT_LS_RJT) { 7668 lsrjt_event.header.event_type = FC_REG_ELS_EVENT; 7669 lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV; 7670 memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname, 7671 sizeof(struct lpfc_name)); 7672 memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename, 7673 sizeof(struct lpfc_name)); 7674 pcmd = (uint32_t *) (((struct lpfc_dmabuf *) 7675 cmdiocbp->context2)->virt); 7676 lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0; 7677 stat.un.lsRjtError = be32_to_cpu(rspiocbp->iocb.un.ulpWord[4]); 7678 lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode; 7679 lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp; 7680 fc_host_post_vendor_event(shost, 7681 fc_get_event_number(), 7682 sizeof(lsrjt_event), 7683 (char *)&lsrjt_event, 7684 LPFC_NL_VENDOR_ID); 7685 return; 7686 } 7687 if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) || 7688 (rspiocbp->iocb.ulpStatus == IOSTAT_FABRIC_BSY)) { 7689 fabric_event.event_type = FC_REG_FABRIC_EVENT; 7690 if (rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) 7691 fabric_event.subcategory = LPFC_EVENT_PORT_BUSY; 7692 else 7693 fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY; 7694 memcpy(fabric_event.wwpn, &ndlp->nlp_portname, 7695 sizeof(struct lpfc_name)); 7696 memcpy(fabric_event.wwnn, &ndlp->nlp_nodename, 7697 sizeof(struct lpfc_name)); 7698 fc_host_post_vendor_event(shost, 7699 fc_get_event_number(), 7700 sizeof(fabric_event), 7701 (char *)&fabric_event, 7702 LPFC_NL_VENDOR_ID); 7703 return; 7704 } 7705 7706 } 7707 7708 /** 7709 * lpfc_send_els_event - Posts unsolicited els event 7710 * @vport: Pointer to vport object. 7711 * @ndlp: Pointer FC node object. 7712 * @cmd: ELS command code. 7713 * 7714 * This function posts an event when there is an incoming 7715 * unsolicited ELS command. 
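 *
 * For a LOGO the sender's port name is copied out of the received payload
 * (command word, then N_Port ID, then the 8-byte WWPN); a sketch of the
 * copy, abridged from the function body:
 *
 *	memcpy(logo_data->logo_wwpn, &payload[2], sizeof(struct lpfc_name));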
7716 **/ 7717 static void 7718 lpfc_send_els_event(struct lpfc_vport *vport, 7719 struct lpfc_nodelist *ndlp, 7720 uint32_t *payload) 7721 { 7722 struct lpfc_els_event_header *els_data = NULL; 7723 struct lpfc_logo_event *logo_data = NULL; 7724 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7725 7726 if (*payload == ELS_CMD_LOGO) { 7727 logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL); 7728 if (!logo_data) { 7729 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 7730 "0148 Failed to allocate memory " 7731 "for LOGO event\n"); 7732 return; 7733 } 7734 els_data = &logo_data->header; 7735 } else { 7736 els_data = kmalloc(sizeof(struct lpfc_els_event_header), 7737 GFP_KERNEL); 7738 if (!els_data) { 7739 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 7740 "0149 Failed to allocate memory " 7741 "for ELS event\n"); 7742 return; 7743 } 7744 } 7745 els_data->event_type = FC_REG_ELS_EVENT; 7746 switch (*payload) { 7747 case ELS_CMD_PLOGI: 7748 els_data->subcategory = LPFC_EVENT_PLOGI_RCV; 7749 break; 7750 case ELS_CMD_PRLO: 7751 els_data->subcategory = LPFC_EVENT_PRLO_RCV; 7752 break; 7753 case ELS_CMD_ADISC: 7754 els_data->subcategory = LPFC_EVENT_ADISC_RCV; 7755 break; 7756 case ELS_CMD_LOGO: 7757 els_data->subcategory = LPFC_EVENT_LOGO_RCV; 7758 /* Copy the WWPN in the LOGO payload */ 7759 memcpy(logo_data->logo_wwpn, &payload[2], 7760 sizeof(struct lpfc_name)); 7761 break; 7762 default: 7763 kfree(els_data); 7764 return; 7765 } 7766 memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name)); 7767 memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name)); 7768 if (*payload == ELS_CMD_LOGO) { 7769 fc_host_post_vendor_event(shost, 7770 fc_get_event_number(), 7771 sizeof(struct lpfc_logo_event), 7772 (char *)logo_data, 7773 LPFC_NL_VENDOR_ID); 7774 kfree(logo_data); 7775 } else { 7776 fc_host_post_vendor_event(shost, 7777 fc_get_event_number(), 7778 sizeof(struct lpfc_els_event_header), 7779 (char *)els_data, 7780 LPFC_NL_VENDOR_ID); 7781 kfree(els_data); 7782 } 7783 7784 return; 7785 } 7786 7787 7788 /** 7789 * lpfc_els_unsol_buffer - Process an unsolicited event data buffer 7790 * @phba: pointer to lpfc hba data structure. 7791 * @pring: pointer to a SLI ring. 7792 * @vport: pointer to a host virtual N_Port data structure. 7793 * @elsiocb: pointer to lpfc els command iocb data structure. 7794 * 7795 * This routine is used for processing the IOCB associated with a unsolicited 7796 * event. It first determines whether there is an existing ndlp that matches 7797 * the DID from the unsolicited IOCB. If not, it will create a new one with 7798 * the DID from the unsolicited IOCB. The ELS command from the unsolicited 7799 * IOCB is then used to invoke the proper routine and to set up proper state 7800 * of the discovery state machine. 
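 *
 * Newly created or re-enabled nodes are flagged as fabric entities when the
 * source DID falls in the well-known fabric address range; a sketch of the
 * test, abridged from the function body:
 *
 *	if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
 *		ndlp->nlp_type |= NLP_FABRIC;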
7801 **/ 7802 static void 7803 lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 7804 struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb) 7805 { 7806 struct Scsi_Host *shost; 7807 struct lpfc_nodelist *ndlp; 7808 struct ls_rjt stat; 7809 uint32_t *payload; 7810 uint32_t cmd, did, newnode; 7811 uint8_t rjt_exp, rjt_err = 0; 7812 IOCB_t *icmd = &elsiocb->iocb; 7813 7814 if (!vport || !(elsiocb->context2)) 7815 goto dropit; 7816 7817 newnode = 0; 7818 payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt; 7819 cmd = *payload; 7820 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0) 7821 lpfc_post_buffer(phba, pring, 1); 7822 7823 did = icmd->un.rcvels.remoteID; 7824 if (icmd->ulpStatus) { 7825 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 7826 "RCV Unsol ELS: status:x%x/x%x did:x%x", 7827 icmd->ulpStatus, icmd->un.ulpWord[4], did); 7828 goto dropit; 7829 } 7830 7831 /* Check to see if link went down during discovery */ 7832 if (lpfc_els_chk_latt(vport)) 7833 goto dropit; 7834 7835 /* Ignore traffic received during vport shutdown. */ 7836 if (vport->load_flag & FC_UNLOADING) 7837 goto dropit; 7838 7839 /* If NPort discovery is delayed drop incoming ELS */ 7840 if ((vport->fc_flag & FC_DISC_DELAYED) && 7841 (cmd != ELS_CMD_PLOGI)) 7842 goto dropit; 7843 7844 ndlp = lpfc_findnode_did(vport, did); 7845 if (!ndlp) { 7846 /* Cannot find existing Fabric ndlp, so allocate a new one */ 7847 ndlp = lpfc_nlp_init(vport, did); 7848 if (!ndlp) 7849 goto dropit; 7850 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 7851 newnode = 1; 7852 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 7853 ndlp->nlp_type |= NLP_FABRIC; 7854 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 7855 ndlp = lpfc_enable_node(vport, ndlp, 7856 NLP_STE_UNUSED_NODE); 7857 if (!ndlp) 7858 goto dropit; 7859 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 7860 newnode = 1; 7861 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 7862 ndlp->nlp_type |= NLP_FABRIC; 7863 } else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 7864 /* This is similar to the new node path */ 7865 ndlp = lpfc_nlp_get(ndlp); 7866 if (!ndlp) 7867 goto dropit; 7868 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 7869 newnode = 1; 7870 } 7871 7872 phba->fc_stat.elsRcvFrame++; 7873 7874 /* 7875 * Do not process any unsolicited ELS commands 7876 * if the ndlp is in DEV_LOSS 7877 */ 7878 shost = lpfc_shost_from_vport(vport); 7879 spin_lock_irq(shost->host_lock); 7880 if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) { 7881 spin_unlock_irq(shost->host_lock); 7882 goto dropit; 7883 } 7884 spin_unlock_irq(shost->host_lock); 7885 7886 elsiocb->context1 = lpfc_nlp_get(ndlp); 7887 elsiocb->vport = vport; 7888 7889 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) { 7890 cmd &= ELS_CMD_MASK; 7891 } 7892 /* ELS command <elsCmd> received from NPORT <did> */ 7893 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7894 "0112 ELS command x%x received from NPORT x%x " 7895 "Data: x%x x%x x%x x%x\n", 7896 cmd, did, vport->port_state, vport->fc_flag, 7897 vport->fc_myDID, vport->fc_prevDID); 7898 7899 /* reject till our FLOGI completes */ 7900 if ((vport->port_state < LPFC_FABRIC_CFG_LINK) && 7901 (cmd != ELS_CMD_FLOGI)) { 7902 rjt_err = LSRJT_LOGICAL_BSY; 7903 rjt_exp = LSEXP_NOTHING_MORE; 7904 goto lsrjt; 7905 } 7906 7907 switch (cmd) { 7908 case ELS_CMD_PLOGI: 7909 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 7910 "RCV PLOGI: did:x%x/ste:x%x flg:x%x", 7911 did, vport->port_state, ndlp->nlp_flag); 7912 7913 phba->fc_stat.elsRcvPLOGI++; 7914 ndlp = 
lpfc_plogi_confirm_nport(phba, payload, ndlp); 7915 if (phba->sli_rev == LPFC_SLI_REV4 && 7916 (phba->pport->fc_flag & FC_PT2PT)) { 7917 vport->fc_prevDID = vport->fc_myDID; 7918 /* Our DID needs to be updated before registering 7919 * the vfi. This is done in lpfc_rcv_plogi but 7920 * that is called after the reg_vfi. 7921 */ 7922 vport->fc_myDID = elsiocb->iocb.un.rcvels.parmRo; 7923 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7924 "3312 Remote port assigned DID x%x " 7925 "%x\n", vport->fc_myDID, 7926 vport->fc_prevDID); 7927 } 7928 7929 lpfc_send_els_event(vport, ndlp, payload); 7930 7931 /* If Nport discovery is delayed, reject PLOGIs */ 7932 if (vport->fc_flag & FC_DISC_DELAYED) { 7933 rjt_err = LSRJT_UNABLE_TPC; 7934 rjt_exp = LSEXP_NOTHING_MORE; 7935 break; 7936 } 7937 7938 if (vport->port_state < LPFC_DISC_AUTH) { 7939 if (!(phba->pport->fc_flag & FC_PT2PT) || 7940 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { 7941 rjt_err = LSRJT_UNABLE_TPC; 7942 rjt_exp = LSEXP_NOTHING_MORE; 7943 break; 7944 } 7945 } 7946 7947 spin_lock_irq(shost->host_lock); 7948 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE; 7949 spin_unlock_irq(shost->host_lock); 7950 7951 lpfc_disc_state_machine(vport, ndlp, elsiocb, 7952 NLP_EVT_RCV_PLOGI); 7953 7954 break; 7955 case ELS_CMD_FLOGI: 7956 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 7957 "RCV FLOGI: did:x%x/ste:x%x flg:x%x", 7958 did, vport->port_state, ndlp->nlp_flag); 7959 7960 phba->fc_stat.elsRcvFLOGI++; 7961 lpfc_els_rcv_flogi(vport, elsiocb, ndlp); 7962 if (newnode) 7963 lpfc_nlp_put(ndlp); 7964 break; 7965 case ELS_CMD_LOGO: 7966 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 7967 "RCV LOGO: did:x%x/ste:x%x flg:x%x", 7968 did, vport->port_state, ndlp->nlp_flag); 7969 7970 phba->fc_stat.elsRcvLOGO++; 7971 lpfc_send_els_event(vport, ndlp, payload); 7972 if (vport->port_state < LPFC_DISC_AUTH) { 7973 rjt_err = LSRJT_UNABLE_TPC; 7974 rjt_exp = LSEXP_NOTHING_MORE; 7975 break; 7976 } 7977 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO); 7978 break; 7979 case ELS_CMD_PRLO: 7980 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 7981 "RCV PRLO: did:x%x/ste:x%x flg:x%x", 7982 did, vport->port_state, ndlp->nlp_flag); 7983 7984 phba->fc_stat.elsRcvPRLO++; 7985 lpfc_send_els_event(vport, ndlp, payload); 7986 if (vport->port_state < LPFC_DISC_AUTH) { 7987 rjt_err = LSRJT_UNABLE_TPC; 7988 rjt_exp = LSEXP_NOTHING_MORE; 7989 break; 7990 } 7991 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO); 7992 break; 7993 case ELS_CMD_LCB: 7994 phba->fc_stat.elsRcvLCB++; 7995 lpfc_els_rcv_lcb(vport, elsiocb, ndlp); 7996 break; 7997 case ELS_CMD_RDP: 7998 phba->fc_stat.elsRcvRDP++; 7999 lpfc_els_rcv_rdp(vport, elsiocb, ndlp); 8000 break; 8001 case ELS_CMD_RSCN: 8002 phba->fc_stat.elsRcvRSCN++; 8003 lpfc_els_rcv_rscn(vport, elsiocb, ndlp); 8004 if (newnode) 8005 lpfc_nlp_put(ndlp); 8006 break; 8007 case ELS_CMD_ADISC: 8008 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8009 "RCV ADISC: did:x%x/ste:x%x flg:x%x", 8010 did, vport->port_state, ndlp->nlp_flag); 8011 8012 lpfc_send_els_event(vport, ndlp, payload); 8013 phba->fc_stat.elsRcvADISC++; 8014 if (vport->port_state < LPFC_DISC_AUTH) { 8015 rjt_err = LSRJT_UNABLE_TPC; 8016 rjt_exp = LSEXP_NOTHING_MORE; 8017 break; 8018 } 8019 lpfc_disc_state_machine(vport, ndlp, elsiocb, 8020 NLP_EVT_RCV_ADISC); 8021 break; 8022 case ELS_CMD_PDISC: 8023 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8024 "RCV PDISC: did:x%x/ste:x%x flg:x%x", 8025 did, vport->port_state, ndlp->nlp_flag); 8026 8027 
phba->fc_stat.elsRcvPDISC++; 8028 if (vport->port_state < LPFC_DISC_AUTH) { 8029 rjt_err = LSRJT_UNABLE_TPC; 8030 rjt_exp = LSEXP_NOTHING_MORE; 8031 break; 8032 } 8033 lpfc_disc_state_machine(vport, ndlp, elsiocb, 8034 NLP_EVT_RCV_PDISC); 8035 break; 8036 case ELS_CMD_FARPR: 8037 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8038 "RCV FARPR: did:x%x/ste:x%x flg:x%x", 8039 did, vport->port_state, ndlp->nlp_flag); 8040 8041 phba->fc_stat.elsRcvFARPR++; 8042 lpfc_els_rcv_farpr(vport, elsiocb, ndlp); 8043 break; 8044 case ELS_CMD_FARP: 8045 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8046 "RCV FARP: did:x%x/ste:x%x flg:x%x", 8047 did, vport->port_state, ndlp->nlp_flag); 8048 8049 phba->fc_stat.elsRcvFARP++; 8050 lpfc_els_rcv_farp(vport, elsiocb, ndlp); 8051 break; 8052 case ELS_CMD_FAN: 8053 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8054 "RCV FAN: did:x%x/ste:x%x flg:x%x", 8055 did, vport->port_state, ndlp->nlp_flag); 8056 8057 phba->fc_stat.elsRcvFAN++; 8058 lpfc_els_rcv_fan(vport, elsiocb, ndlp); 8059 break; 8060 case ELS_CMD_PRLI: 8061 case ELS_CMD_NVMEPRLI: 8062 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8063 "RCV PRLI: did:x%x/ste:x%x flg:x%x", 8064 did, vport->port_state, ndlp->nlp_flag); 8065 8066 phba->fc_stat.elsRcvPRLI++; 8067 if ((vport->port_state < LPFC_DISC_AUTH) && 8068 (vport->fc_flag & FC_FABRIC)) { 8069 rjt_err = LSRJT_UNABLE_TPC; 8070 rjt_exp = LSEXP_NOTHING_MORE; 8071 break; 8072 } 8073 8074 /* NVMET accepts NVME PRLI only. Reject FCP PRLI */ 8075 if (cmd == ELS_CMD_PRLI && phba->nvmet_support) { 8076 rjt_err = LSRJT_CMD_UNSUPPORTED; 8077 rjt_exp = LSEXP_REQ_UNSUPPORTED; 8078 break; 8079 } 8080 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI); 8081 break; 8082 case ELS_CMD_LIRR: 8083 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8084 "RCV LIRR: did:x%x/ste:x%x flg:x%x", 8085 did, vport->port_state, ndlp->nlp_flag); 8086 8087 phba->fc_stat.elsRcvLIRR++; 8088 lpfc_els_rcv_lirr(vport, elsiocb, ndlp); 8089 if (newnode) 8090 lpfc_nlp_put(ndlp); 8091 break; 8092 case ELS_CMD_RLS: 8093 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8094 "RCV RLS: did:x%x/ste:x%x flg:x%x", 8095 did, vport->port_state, ndlp->nlp_flag); 8096 8097 phba->fc_stat.elsRcvRLS++; 8098 lpfc_els_rcv_rls(vport, elsiocb, ndlp); 8099 if (newnode) 8100 lpfc_nlp_put(ndlp); 8101 break; 8102 case ELS_CMD_RPS: 8103 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8104 "RCV RPS: did:x%x/ste:x%x flg:x%x", 8105 did, vport->port_state, ndlp->nlp_flag); 8106 8107 phba->fc_stat.elsRcvRPS++; 8108 lpfc_els_rcv_rps(vport, elsiocb, ndlp); 8109 if (newnode) 8110 lpfc_nlp_put(ndlp); 8111 break; 8112 case ELS_CMD_RPL: 8113 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8114 "RCV RPL: did:x%x/ste:x%x flg:x%x", 8115 did, vport->port_state, ndlp->nlp_flag); 8116 8117 phba->fc_stat.elsRcvRPL++; 8118 lpfc_els_rcv_rpl(vport, elsiocb, ndlp); 8119 if (newnode) 8120 lpfc_nlp_put(ndlp); 8121 break; 8122 case ELS_CMD_RNID: 8123 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8124 "RCV RNID: did:x%x/ste:x%x flg:x%x", 8125 did, vport->port_state, ndlp->nlp_flag); 8126 8127 phba->fc_stat.elsRcvRNID++; 8128 lpfc_els_rcv_rnid(vport, elsiocb, ndlp); 8129 if (newnode) 8130 lpfc_nlp_put(ndlp); 8131 break; 8132 case ELS_CMD_RTV: 8133 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8134 "RCV RTV: did:x%x/ste:x%x flg:x%x", 8135 did, vport->port_state, ndlp->nlp_flag); 8136 phba->fc_stat.elsRcvRTV++; 8137 lpfc_els_rcv_rtv(vport, elsiocb, ndlp); 8138 if 
(newnode) 8139 lpfc_nlp_put(ndlp); 8140 break; 8141 case ELS_CMD_RRQ: 8142 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8143 "RCV RRQ: did:x%x/ste:x%x flg:x%x", 8144 did, vport->port_state, ndlp->nlp_flag); 8145 8146 phba->fc_stat.elsRcvRRQ++; 8147 lpfc_els_rcv_rrq(vport, elsiocb, ndlp); 8148 if (newnode) 8149 lpfc_nlp_put(ndlp); 8150 break; 8151 case ELS_CMD_ECHO: 8152 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8153 "RCV ECHO: did:x%x/ste:x%x flg:x%x", 8154 did, vport->port_state, ndlp->nlp_flag); 8155 8156 phba->fc_stat.elsRcvECHO++; 8157 lpfc_els_rcv_echo(vport, elsiocb, ndlp); 8158 if (newnode) 8159 lpfc_nlp_put(ndlp); 8160 break; 8161 case ELS_CMD_REC: 8162 /* receive this due to exchange closed */ 8163 rjt_err = LSRJT_UNABLE_TPC; 8164 rjt_exp = LSEXP_INVALID_OX_RX; 8165 break; 8166 default: 8167 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8168 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x", 8169 cmd, did, vport->port_state); 8170 8171 /* Unsupported ELS command, reject */ 8172 rjt_err = LSRJT_CMD_UNSUPPORTED; 8173 rjt_exp = LSEXP_NOTHING_MORE; 8174 8175 /* Unknown ELS command <elsCmd> received from NPORT <did> */ 8176 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 8177 "0115 Unknown ELS command x%x " 8178 "received from NPORT x%x\n", cmd, did); 8179 if (newnode) 8180 lpfc_nlp_put(ndlp); 8181 break; 8182 } 8183 8184 lsrjt: 8185 /* check if need to LS_RJT received ELS cmd */ 8186 if (rjt_err) { 8187 memset(&stat, 0, sizeof(stat)); 8188 stat.un.b.lsRjtRsnCode = rjt_err; 8189 stat.un.b.lsRjtRsnCodeExp = rjt_exp; 8190 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp, 8191 NULL); 8192 } 8193 8194 lpfc_nlp_put(elsiocb->context1); 8195 elsiocb->context1 = NULL; 8196 return; 8197 8198 dropit: 8199 if (vport && !(vport->load_flag & FC_UNLOADING)) 8200 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 8201 "0111 Dropping received ELS cmd " 8202 "Data: x%x x%x x%x\n", 8203 icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout); 8204 phba->fc_stat.elsRcvDrop++; 8205 } 8206 8207 /** 8208 * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring 8209 * @phba: pointer to lpfc hba data structure. 8210 * @pring: pointer to a SLI ring. 8211 * @elsiocb: pointer to lpfc els iocb data structure. 8212 * 8213 * This routine is used to process an unsolicited event received from a SLI 8214 * (Service Level Interface) ring. The actual processing of the data buffer 8215 * associated with the unsolicited event is done by invoking the routine 8216 * lpfc_els_unsol_buffer() after properly set up the iocb buffer from the 8217 * SLI ring on which the unsolicited event was received. 
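 *
 * With NPIV enabled the target vport is resolved from the VPI carried in
 * the receive IOCB before the buffer is handed to lpfc_els_unsol_buffer();
 * a sketch of that lookup, abridged from the function body:
 *
 *	if (icmd->unsli3.rcvsli3.vpi == 0xffff)
 *		vport = phba->pport;
 *	else
 *		vport = lpfc_find_vport_by_vpid(phba,
 *						icmd->unsli3.rcvsli3.vpi);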
8218 **/ 8219 void 8220 lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 8221 struct lpfc_iocbq *elsiocb) 8222 { 8223 struct lpfc_vport *vport = phba->pport; 8224 IOCB_t *icmd = &elsiocb->iocb; 8225 dma_addr_t paddr; 8226 struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2; 8227 struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3; 8228 8229 elsiocb->context1 = NULL; 8230 elsiocb->context2 = NULL; 8231 elsiocb->context3 = NULL; 8232 8233 if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) { 8234 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); 8235 } else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT && 8236 (icmd->un.ulpWord[4] & IOERR_PARAM_MASK) == 8237 IOERR_RCV_BUFFER_WAITING) { 8238 phba->fc_stat.NoRcvBuf++; 8239 /* Not enough posted buffers; Try posting more buffers */ 8240 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) 8241 lpfc_post_buffer(phba, pring, 0); 8242 return; 8243 } 8244 8245 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 8246 (icmd->ulpCommand == CMD_IOCB_RCV_ELS64_CX || 8247 icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 8248 if (icmd->unsli3.rcvsli3.vpi == 0xffff) 8249 vport = phba->pport; 8250 else 8251 vport = lpfc_find_vport_by_vpid(phba, 8252 icmd->unsli3.rcvsli3.vpi); 8253 } 8254 8255 /* If there are no BDEs associated 8256 * with this IOCB, there is nothing to do. 8257 */ 8258 if (icmd->ulpBdeCount == 0) 8259 return; 8260 8261 /* type of ELS cmd is first 32bit word 8262 * in packet 8263 */ 8264 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 8265 elsiocb->context2 = bdeBuf1; 8266 } else { 8267 paddr = getPaddr(icmd->un.cont64[0].addrHigh, 8268 icmd->un.cont64[0].addrLow); 8269 elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring, 8270 paddr); 8271 } 8272 8273 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb); 8274 /* 8275 * The different unsolicited event handlers would tell us 8276 * if they are done with "mp" by setting context2 to NULL. 8277 */ 8278 if (elsiocb->context2) { 8279 lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2); 8280 elsiocb->context2 = NULL; 8281 } 8282 8283 /* RCV_ELS64_CX provide for 2 BDEs - process 2nd if included */ 8284 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) && 8285 icmd->ulpBdeCount == 2) { 8286 elsiocb->context2 = bdeBuf2; 8287 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb); 8288 /* free mp if we are done with it */ 8289 if (elsiocb->context2) { 8290 lpfc_in_buf_free(phba, elsiocb->context2); 8291 elsiocb->context2 = NULL; 8292 } 8293 } 8294 } 8295 8296 static void 8297 lpfc_start_fdmi(struct lpfc_vport *vport) 8298 { 8299 struct lpfc_nodelist *ndlp; 8300 8301 /* If this is the first time, allocate an ndlp and initialize 8302 * it. Otherwise, make sure the node is enabled and then do the 8303 * login. 8304 */ 8305 ndlp = lpfc_findnode_did(vport, FDMI_DID); 8306 if (!ndlp) { 8307 ndlp = lpfc_nlp_init(vport, FDMI_DID); 8308 if (ndlp) { 8309 ndlp->nlp_type |= NLP_FABRIC; 8310 } else { 8311 return; 8312 } 8313 } 8314 if (!NLP_CHK_NODE_ACT(ndlp)) 8315 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE); 8316 8317 if (ndlp) { 8318 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 8319 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 8320 } 8321 } 8322 8323 /** 8324 * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr 8325 * @phba: pointer to lpfc hba data structure. 8326 * @vport: pointer to a virtual N_Port data structure. 8327 * 8328 * This routine issues a Port Login (PLOGI) to the Name Server with 8329 * State Change Request (SCR) for a @vport. 
This routine will create an 8330 * ndlp for the Name Server associated to the @vport if such node does 8331 * not already exist. The PLOGI to Name Server is issued by invoking the 8332 * lpfc_issue_els_plogi() routine. If Fabric-Device Management Interface 8333 * (FDMI) is configured to the @vport, a FDMI node will be created and 8334 * the PLOGI to FDMI is issued by invoking lpfc_issue_els_plogi() routine. 8335 **/ 8336 void 8337 lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) 8338 { 8339 struct lpfc_nodelist *ndlp; 8340 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 8341 8342 /* 8343 * If lpfc_delay_discovery parameter is set and the clean address 8344 * bit is cleared and fc fabric parameters chenged, delay FC NPort 8345 * discovery. 8346 */ 8347 spin_lock_irq(shost->host_lock); 8348 if (vport->fc_flag & FC_DISC_DELAYED) { 8349 spin_unlock_irq(shost->host_lock); 8350 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 8351 "3334 Delay fc port discovery for %d seconds\n", 8352 phba->fc_ratov); 8353 mod_timer(&vport->delayed_disc_tmo, 8354 jiffies + msecs_to_jiffies(1000 * phba->fc_ratov)); 8355 return; 8356 } 8357 spin_unlock_irq(shost->host_lock); 8358 8359 ndlp = lpfc_findnode_did(vport, NameServer_DID); 8360 if (!ndlp) { 8361 ndlp = lpfc_nlp_init(vport, NameServer_DID); 8362 if (!ndlp) { 8363 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 8364 lpfc_disc_start(vport); 8365 return; 8366 } 8367 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 8368 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 8369 "0251 NameServer login: no memory\n"); 8370 return; 8371 } 8372 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 8373 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 8374 if (!ndlp) { 8375 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 8376 lpfc_disc_start(vport); 8377 return; 8378 } 8379 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 8380 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 8381 "0348 NameServer login: node freed\n"); 8382 return; 8383 } 8384 } 8385 ndlp->nlp_type |= NLP_FABRIC; 8386 8387 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 8388 8389 if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) { 8390 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 8391 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 8392 "0252 Cannot issue NameServer login\n"); 8393 return; 8394 } 8395 8396 if ((phba->cfg_enable_SmartSAN || 8397 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) && 8398 (vport->load_flag & FC_ALLOW_FDMI)) 8399 lpfc_start_fdmi(vport); 8400 } 8401 8402 /** 8403 * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport 8404 * @phba: pointer to lpfc hba data structure. 8405 * @pmb: pointer to the driver internal queue element for mailbox command. 8406 * 8407 * This routine is the completion callback function to register new vport 8408 * mailbox command. If the new vport mailbox command completes successfully, 8409 * the fabric registration login shall be performed on physical port (the 8410 * new vport created is actually a physical port, with VPI 0) or the port 8411 * login to Name Server for State Change Request (SCR) will be performed 8412 * on virtual port (real virtual port, with VPI greater than 0). 
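 *
 * On a REG_VPI mailbox error: an SLI4 update (upd) failure simply releases
 * the ndlp reference; status 0x11 (unsupported feature), 0x9602 (link event
 * since CLEAR_LA) or 0x9603 (max_vpi exceeded) abandons the registration and
 * marks the vport FC_VPORT_FAILED; status 0x20 (invalid VPI) re-issues an
 * INIT_VPI mailbox command; any other status unregisters the VPI and
 * restarts fabric discovery with FLOGI, FDISC or INIT_VFI as appropriate.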
8413 **/ 8414 static void 8415 lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 8416 { 8417 struct lpfc_vport *vport = pmb->vport; 8418 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 8419 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 8420 MAILBOX_t *mb = &pmb->u.mb; 8421 int rc; 8422 8423 spin_lock_irq(shost->host_lock); 8424 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 8425 spin_unlock_irq(shost->host_lock); 8426 8427 if (mb->mbxStatus) { 8428 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 8429 "0915 Register VPI failed : Status: x%x" 8430 " upd bit: x%x \n", mb->mbxStatus, 8431 mb->un.varRegVpi.upd); 8432 if (phba->sli_rev == LPFC_SLI_REV4 && 8433 mb->un.varRegVpi.upd) 8434 goto mbox_err_exit ; 8435 8436 switch (mb->mbxStatus) { 8437 case 0x11: /* unsupported feature */ 8438 case 0x9603: /* max_vpi exceeded */ 8439 case 0x9602: /* Link event since CLEAR_LA */ 8440 /* giving up on vport registration */ 8441 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 8442 spin_lock_irq(shost->host_lock); 8443 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 8444 spin_unlock_irq(shost->host_lock); 8445 lpfc_can_disctmo(vport); 8446 break; 8447 /* If reg_vpi fail with invalid VPI status, re-init VPI */ 8448 case 0x20: 8449 spin_lock_irq(shost->host_lock); 8450 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 8451 spin_unlock_irq(shost->host_lock); 8452 lpfc_init_vpi(phba, pmb, vport->vpi); 8453 pmb->vport = vport; 8454 pmb->mbox_cmpl = lpfc_init_vpi_cmpl; 8455 rc = lpfc_sli_issue_mbox(phba, pmb, 8456 MBX_NOWAIT); 8457 if (rc == MBX_NOT_FINISHED) { 8458 lpfc_printf_vlog(vport, 8459 KERN_ERR, LOG_MBOX, 8460 "2732 Failed to issue INIT_VPI" 8461 " mailbox command\n"); 8462 } else { 8463 lpfc_nlp_put(ndlp); 8464 return; 8465 } 8466 8467 default: 8468 /* Try to recover from this error */ 8469 if (phba->sli_rev == LPFC_SLI_REV4) 8470 lpfc_sli4_unreg_all_rpis(vport); 8471 lpfc_mbx_unreg_vpi(vport); 8472 spin_lock_irq(shost->host_lock); 8473 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 8474 spin_unlock_irq(shost->host_lock); 8475 if (mb->mbxStatus == MBX_NOT_FINISHED) 8476 break; 8477 if ((vport->port_type == LPFC_PHYSICAL_PORT) && 8478 !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) { 8479 if (phba->sli_rev == LPFC_SLI_REV4) 8480 lpfc_issue_init_vfi(vport); 8481 else 8482 lpfc_initial_flogi(vport); 8483 } else { 8484 lpfc_initial_fdisc(vport); 8485 } 8486 break; 8487 } 8488 } else { 8489 spin_lock_irq(shost->host_lock); 8490 vport->vpi_state |= LPFC_VPI_REGISTERED; 8491 spin_unlock_irq(shost->host_lock); 8492 if (vport == phba->pport) { 8493 if (phba->sli_rev < LPFC_SLI_REV4) 8494 lpfc_issue_fabric_reglogin(vport); 8495 else { 8496 /* 8497 * If the physical port is instantiated using 8498 * FDISC, do not start vport discovery. 8499 */ 8500 if (vport->port_state != LPFC_FDISC) 8501 lpfc_start_fdiscs(phba); 8502 lpfc_do_scr_ns_plogi(phba, vport); 8503 } 8504 } else 8505 lpfc_do_scr_ns_plogi(phba, vport); 8506 } 8507 mbox_err_exit: 8508 /* Now, we decrement the ndlp reference count held for this 8509 * callback function 8510 */ 8511 lpfc_nlp_put(ndlp); 8512 8513 mempool_free(pmb, phba->mbox_mem_pool); 8514 return; 8515 } 8516 8517 /** 8518 * lpfc_register_new_vport - Register a new vport with a HBA 8519 * @phba: pointer to lpfc hba data structure. 8520 * @vport: pointer to a host virtual N_Port data structure. 8521 * @ndlp: pointer to a node-list data structure. 8522 * 8523 * This routine registers the @vport as a new virtual port with a HBA. 
8524 * It is done through a registering vpi mailbox command. 8525 **/ 8526 void 8527 lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport, 8528 struct lpfc_nodelist *ndlp) 8529 { 8530 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 8531 LPFC_MBOXQ_t *mbox; 8532 8533 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 8534 if (mbox) { 8535 lpfc_reg_vpi(vport, mbox); 8536 mbox->vport = vport; 8537 mbox->context2 = lpfc_nlp_get(ndlp); 8538 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport; 8539 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 8540 == MBX_NOT_FINISHED) { 8541 /* mailbox command not success, decrement ndlp 8542 * reference count for this command 8543 */ 8544 lpfc_nlp_put(ndlp); 8545 mempool_free(mbox, phba->mbox_mem_pool); 8546 8547 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 8548 "0253 Register VPI: Can't send mbox\n"); 8549 goto mbox_err_exit; 8550 } 8551 } else { 8552 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 8553 "0254 Register VPI: no memory\n"); 8554 goto mbox_err_exit; 8555 } 8556 return; 8557 8558 mbox_err_exit: 8559 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 8560 spin_lock_irq(shost->host_lock); 8561 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 8562 spin_unlock_irq(shost->host_lock); 8563 return; 8564 } 8565 8566 /** 8567 * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer 8568 * @phba: pointer to lpfc hba data structure. 8569 * 8570 * This routine cancels the retry delay timers to all the vports. 8571 **/ 8572 void 8573 lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba) 8574 { 8575 struct lpfc_vport **vports; 8576 struct lpfc_nodelist *ndlp; 8577 uint32_t link_state; 8578 int i; 8579 8580 /* Treat this failure as linkdown for all vports */ 8581 link_state = phba->link_state; 8582 lpfc_linkdown(phba); 8583 phba->link_state = link_state; 8584 8585 vports = lpfc_create_vport_work_array(phba); 8586 8587 if (vports) { 8588 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 8589 ndlp = lpfc_findnode_did(vports[i], Fabric_DID); 8590 if (ndlp) 8591 lpfc_cancel_retry_delay_tmo(vports[i], ndlp); 8592 lpfc_els_flush_cmd(vports[i]); 8593 } 8594 lpfc_destroy_vport_work_array(phba, vports); 8595 } 8596 } 8597 8598 /** 8599 * lpfc_retry_pport_discovery - Start timer to retry FLOGI. 8600 * @phba: pointer to lpfc hba data structure. 8601 * 8602 * This routine abort all pending discovery commands and 8603 * start a timer to retry FLOGI for the physical port 8604 * discovery. 8605 **/ 8606 void 8607 lpfc_retry_pport_discovery(struct lpfc_hba *phba) 8608 { 8609 struct lpfc_nodelist *ndlp; 8610 struct Scsi_Host *shost; 8611 8612 /* Cancel the all vports retry delay retry timers */ 8613 lpfc_cancel_all_vport_retry_delay_timer(phba); 8614 8615 /* If fabric require FLOGI, then re-instantiate physical login */ 8616 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); 8617 if (!ndlp) 8618 return; 8619 8620 shost = lpfc_shost_from_vport(phba->pport); 8621 mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); 8622 spin_lock_irq(shost->host_lock); 8623 ndlp->nlp_flag |= NLP_DELAY_TMO; 8624 spin_unlock_irq(shost->host_lock); 8625 ndlp->nlp_last_elscmd = ELS_CMD_FLOGI; 8626 phba->pport->port_state = LPFC_FLOGI; 8627 return; 8628 } 8629 8630 /** 8631 * lpfc_fabric_login_reqd - Check if FLOGI required. 8632 * @phba: pointer to lpfc hba data structure. 8633 * @cmdiocb: pointer to FDISC command iocb. 8634 * @rspiocb: pointer to FDISC response iocb. 
8635 *
8636 * This routine checks if a FLOGI is required for FDISC
8637 * to succeed.
8638 **/
8639 static int
8640 lpfc_fabric_login_reqd(struct lpfc_hba *phba,
8641 struct lpfc_iocbq *cmdiocb,
8642 struct lpfc_iocbq *rspiocb)
8643 {
8644
8645 if ((rspiocb->iocb.ulpStatus != IOSTAT_FABRIC_RJT) ||
8646 (rspiocb->iocb.un.ulpWord[4] != RJT_LOGIN_REQUIRED))
8647 return 0;
8648 else
8649 return 1;
8650 }
8651
8652 /**
8653 * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command
8654 * @phba: pointer to lpfc hba data structure.
8655 * @cmdiocb: pointer to lpfc command iocb data structure.
8656 * @rspiocb: pointer to lpfc response iocb data structure.
8657 *
8658 * This routine is the completion callback function to a Fabric Discover
8659 * (FDISC) ELS command. Since all the FDISC ELS commands are issued
8660 * single threaded, each FDISC completion callback function will reset
8661 * the discovery timer for all vports so that the timers do not time out
8662 * unnecessarily. The function checks the FDISC IOCB status. If an error is
8663 * detected, the vport will be set to FC_VPORT_FAILED state. Otherwise, the
8664 * vport will be set to FC_VPORT_ACTIVE state. It then checks whether the DID
8665 * assigned to the vport has been changed with the completion of the FDISC
8666 * command. If so, both RPI (Remote Port Index) and VPI (Virtual Port Index)
8667 * are unregistered from the HBA, and then the lpfc_register_new_vport()
8668 * routine is invoked to register the new vport with the HBA. Otherwise, the
8669 * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name
8670 * Server for State Change Request (SCR).
8671 **/
8672 static void
8673 lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
8674 struct lpfc_iocbq *rspiocb)
8675 {
8676 struct lpfc_vport *vport = cmdiocb->vport;
8677 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
8678 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
8679 struct lpfc_nodelist *np;
8680 struct lpfc_nodelist *next_np;
8681 IOCB_t *irsp = &rspiocb->iocb;
8682 struct lpfc_iocbq *piocb;
8683 struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
8684 struct serv_parm *sp;
8685 uint8_t fabric_param_changed;
8686
8687 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
8688 "0123 FDISC completes. x%x/x%x prevDID: x%x\n",
8689 irsp->ulpStatus, irsp->un.ulpWord[4],
8690 vport->fc_prevDID);
8691 /* Since all FDISCs are being single threaded, we
8692 * must reset the discovery timer for ALL vports
8693 * waiting to send FDISC when one completes.
8694 */
8695 list_for_each_entry(piocb, &phba->fabric_iocb_list, list) {
8696 lpfc_set_disctmo(piocb->vport);
8697 }
8698
8699 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
8700 "FDISC cmpl: status:x%x/x%x prevdid:x%x",
8701 irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID);
8702
8703 if (irsp->ulpStatus) {
8704
8705 if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) {
8706 lpfc_retry_pport_discovery(phba);
8707 goto out;
8708 }
8709
8710 /* Check for retry */
8711 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
8712 goto out;
8713 /* FDISC failed */
8714 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
8715 "0126 FDISC failed.
(x%x/x%x)\n", 8716 irsp->ulpStatus, irsp->un.ulpWord[4]); 8717 goto fdisc_failed; 8718 } 8719 spin_lock_irq(shost->host_lock); 8720 vport->fc_flag &= ~FC_VPORT_CVL_RCVD; 8721 vport->fc_flag &= ~FC_VPORT_LOGO_RCVD; 8722 vport->fc_flag |= FC_FABRIC; 8723 if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP) 8724 vport->fc_flag |= FC_PUBLIC_LOOP; 8725 spin_unlock_irq(shost->host_lock); 8726 8727 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID; 8728 lpfc_vport_set_state(vport, FC_VPORT_ACTIVE); 8729 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 8730 if (!prsp) 8731 goto out; 8732 sp = prsp->virt + sizeof(uint32_t); 8733 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp); 8734 memcpy(&vport->fabric_portname, &sp->portName, 8735 sizeof(struct lpfc_name)); 8736 memcpy(&vport->fabric_nodename, &sp->nodeName, 8737 sizeof(struct lpfc_name)); 8738 if (fabric_param_changed && 8739 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 8740 /* If our NportID changed, we need to ensure all 8741 * remaining NPORTs get unreg_login'ed so we can 8742 * issue unreg_vpi. 8743 */ 8744 list_for_each_entry_safe(np, next_np, 8745 &vport->fc_nodes, nlp_listp) { 8746 if (!NLP_CHK_NODE_ACT(ndlp) || 8747 (np->nlp_state != NLP_STE_NPR_NODE) || 8748 !(np->nlp_flag & NLP_NPR_ADISC)) 8749 continue; 8750 spin_lock_irq(shost->host_lock); 8751 np->nlp_flag &= ~NLP_NPR_ADISC; 8752 spin_unlock_irq(shost->host_lock); 8753 lpfc_unreg_rpi(vport, np); 8754 } 8755 lpfc_cleanup_pending_mbox(vport); 8756 8757 if (phba->sli_rev == LPFC_SLI_REV4) 8758 lpfc_sli4_unreg_all_rpis(vport); 8759 8760 lpfc_mbx_unreg_vpi(vport); 8761 spin_lock_irq(shost->host_lock); 8762 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 8763 if (phba->sli_rev == LPFC_SLI_REV4) 8764 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 8765 else 8766 vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG; 8767 spin_unlock_irq(shost->host_lock); 8768 } else if ((phba->sli_rev == LPFC_SLI_REV4) && 8769 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 8770 /* 8771 * Driver needs to re-reg VPI in order for f/w 8772 * to update the MAC address. 8773 */ 8774 lpfc_register_new_vport(phba, vport, ndlp); 8775 goto out; 8776 } 8777 8778 if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI) 8779 lpfc_issue_init_vpi(vport); 8780 else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) 8781 lpfc_register_new_vport(phba, vport, ndlp); 8782 else 8783 lpfc_do_scr_ns_plogi(phba, vport); 8784 goto out; 8785 fdisc_failed: 8786 if (vport->fc_vport && 8787 (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS)) 8788 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 8789 /* Cancel discovery timer */ 8790 lpfc_can_disctmo(vport); 8791 lpfc_nlp_put(ndlp); 8792 out: 8793 lpfc_els_free_iocb(phba, cmdiocb); 8794 } 8795 8796 /** 8797 * lpfc_issue_els_fdisc - Issue a fdisc iocb command 8798 * @vport: pointer to a virtual N_Port data structure. 8799 * @ndlp: pointer to a node-list data structure. 8800 * @retry: number of retries to the command IOCB. 8801 * 8802 * This routine prepares and issues a Fabric Discover (FDISC) IOCB to 8803 * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb() 8804 * routine to issue the IOCB, which makes sure only one outstanding fabric 8805 * IOCB will be sent off HBA at any given time. 8806 * 8807 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 8808 * will be incremented by 1 for holding the ndlp and the reference to ndlp 8809 * will be stored into the context1 field of the IOCB for the completion 8810 * callback function to the FDISC ELS command. 
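 *
 * The FDISC payload built here is the ELS command word followed by a copy
 * of the physical port's service parameters, with E_D_TOV, R_A_TOV and the
 * class 1 validity bit cleared, the sequential delivery bit set for class 2
 * and class 3, and the @vport's WWPN and WWNN copied over the port name and
 * node name fields.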
8811 * 8812 * Return code 8813 * 0 - Successfully issued fdisc iocb command 8814 * 1 - Failed to issue fdisc iocb command 8815 **/ 8816 static int 8817 lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 8818 uint8_t retry) 8819 { 8820 struct lpfc_hba *phba = vport->phba; 8821 IOCB_t *icmd; 8822 struct lpfc_iocbq *elsiocb; 8823 struct serv_parm *sp; 8824 uint8_t *pcmd; 8825 uint16_t cmdsize; 8826 int did = ndlp->nlp_DID; 8827 int rc; 8828 8829 vport->port_state = LPFC_FDISC; 8830 vport->fc_myDID = 0; 8831 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 8832 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 8833 ELS_CMD_FDISC); 8834 if (!elsiocb) { 8835 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 8836 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 8837 "0255 Issue FDISC: no IOCB\n"); 8838 return 1; 8839 } 8840 8841 icmd = &elsiocb->iocb; 8842 icmd->un.elsreq64.myID = 0; 8843 icmd->un.elsreq64.fl = 1; 8844 8845 /* 8846 * SLI3 ports require a different context type value than SLI4. 8847 * Catch SLI3 ports here and override the prep. 8848 */ 8849 if (phba->sli_rev == LPFC_SLI_REV3) { 8850 icmd->ulpCt_h = 1; 8851 icmd->ulpCt_l = 0; 8852 } 8853 8854 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 8855 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC; 8856 pcmd += sizeof(uint32_t); /* CSP Word 1 */ 8857 memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm)); 8858 sp = (struct serv_parm *) pcmd; 8859 /* Setup CSPs accordingly for Fabric */ 8860 sp->cmn.e_d_tov = 0; 8861 sp->cmn.w2.r_a_tov = 0; 8862 sp->cmn.virtual_fabric_support = 0; 8863 sp->cls1.classValid = 0; 8864 sp->cls2.seqDelivery = 1; 8865 sp->cls3.seqDelivery = 1; 8866 8867 pcmd += sizeof(uint32_t); /* CSP Word 2 */ 8868 pcmd += sizeof(uint32_t); /* CSP Word 3 */ 8869 pcmd += sizeof(uint32_t); /* CSP Word 4 */ 8870 pcmd += sizeof(uint32_t); /* Port Name */ 8871 memcpy(pcmd, &vport->fc_portname, 8); 8872 pcmd += sizeof(uint32_t); /* Node Name */ 8873 pcmd += sizeof(uint32_t); /* Node Name */ 8874 memcpy(pcmd, &vport->fc_nodename, 8); 8875 sp->cmn.valid_vendor_ver_level = 0; 8876 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); 8877 lpfc_set_disctmo(vport); 8878 8879 phba->fc_stat.elsXmitFDISC++; 8880 elsiocb->iocb_cmpl = lpfc_cmpl_els_fdisc; 8881 8882 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 8883 "Issue FDISC: did:x%x", 8884 did, 0, 0); 8885 8886 rc = lpfc_issue_fabric_iocb(phba, elsiocb); 8887 if (rc == IOCB_ERROR) { 8888 lpfc_els_free_iocb(phba, elsiocb); 8889 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 8890 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 8891 "0256 Issue FDISC: Cannot send IOCB\n"); 8892 return 1; 8893 } 8894 lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING); 8895 return 0; 8896 } 8897 8898 /** 8899 * lpfc_cmpl_els_npiv_logo - Completion function with vport logo 8900 * @phba: pointer to lpfc hba data structure. 8901 * @cmdiocb: pointer to lpfc command iocb data structure. 8902 * @rspiocb: pointer to lpfc response iocb data structure. 8903 * 8904 * This routine is the completion callback function to the issuing of a LOGO 8905 * ELS command off a vport. It frees the command IOCB and then decrement the 8906 * reference count held on ndlp for this completion function, indicating that 8907 * the reference to the ndlp is no long needed. 
Note that the 8908 * lpfc_els_free_iocb() routine decrements the ndlp reference held for this 8909 * callback function and an additional explicit ndlp reference decrementation 8910 * will trigger the actual release of the ndlp. 8911 **/ 8912 static void 8913 lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 8914 struct lpfc_iocbq *rspiocb) 8915 { 8916 struct lpfc_vport *vport = cmdiocb->vport; 8917 IOCB_t *irsp; 8918 struct lpfc_nodelist *ndlp; 8919 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 8920 8921 ndlp = (struct lpfc_nodelist *)cmdiocb->context1; 8922 irsp = &rspiocb->iocb; 8923 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 8924 "LOGO npiv cmpl: status:x%x/x%x did:x%x", 8925 irsp->ulpStatus, irsp->un.ulpWord[4], irsp->un.rcvels.remoteID); 8926 8927 lpfc_els_free_iocb(phba, cmdiocb); 8928 vport->unreg_vpi_cmpl = VPORT_ERROR; 8929 8930 /* Trigger the release of the ndlp after logo */ 8931 lpfc_nlp_put(ndlp); 8932 8933 /* NPIV LOGO completes to NPort <nlp_DID> */ 8934 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8935 "2928 NPIV LOGO completes to NPort x%x " 8936 "Data: x%x x%x x%x x%x\n", 8937 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 8938 irsp->ulpTimeout, vport->num_disc_nodes); 8939 8940 if (irsp->ulpStatus == IOSTAT_SUCCESS) { 8941 spin_lock_irq(shost->host_lock); 8942 vport->fc_flag &= ~FC_NDISC_ACTIVE; 8943 vport->fc_flag &= ~FC_FABRIC; 8944 spin_unlock_irq(shost->host_lock); 8945 lpfc_can_disctmo(vport); 8946 } 8947 } 8948 8949 /** 8950 * lpfc_issue_els_npiv_logo - Issue a logo off a vport 8951 * @vport: pointer to a virtual N_Port data structure. 8952 * @ndlp: pointer to a node-list data structure. 8953 * 8954 * This routine issues a LOGO ELS command to an @ndlp off a @vport. 8955 * 8956 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 8957 * will be incremented by 1 for holding the ndlp and the reference to ndlp 8958 * will be stored into the context1 field of the IOCB for the completion 8959 * callback function to the LOGO ELS command. 
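 *
 * The LOGO payload laid down in the command buffer is:
 *   word 0    - ELS_CMD_LOGO
 *   word 1    - the @vport's N_Port ID (fc_myDID)
 *   words 2-3 - the @vport's WWPN (fc_portname)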
8960 * 8961 * Return codes 8962 * 0 - Successfully issued logo off the @vport 8963 * 1 - Failed to issue logo off the @vport 8964 **/ 8965 int 8966 lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 8967 { 8968 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 8969 struct lpfc_hba *phba = vport->phba; 8970 struct lpfc_iocbq *elsiocb; 8971 uint8_t *pcmd; 8972 uint16_t cmdsize; 8973 8974 cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name); 8975 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID, 8976 ELS_CMD_LOGO); 8977 if (!elsiocb) 8978 return 1; 8979 8980 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 8981 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 8982 pcmd += sizeof(uint32_t); 8983 8984 /* Fill in LOGO payload */ 8985 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 8986 pcmd += sizeof(uint32_t); 8987 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 8988 8989 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 8990 "Issue LOGO npiv did:x%x flg:x%x", 8991 ndlp->nlp_DID, ndlp->nlp_flag, 0); 8992 8993 elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo; 8994 spin_lock_irq(shost->host_lock); 8995 ndlp->nlp_flag |= NLP_LOGO_SND; 8996 spin_unlock_irq(shost->host_lock); 8997 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == 8998 IOCB_ERROR) { 8999 spin_lock_irq(shost->host_lock); 9000 ndlp->nlp_flag &= ~NLP_LOGO_SND; 9001 spin_unlock_irq(shost->host_lock); 9002 lpfc_els_free_iocb(phba, elsiocb); 9003 return 1; 9004 } 9005 return 0; 9006 } 9007 9008 /** 9009 * lpfc_fabric_block_timeout - Handler function to the fabric block timer 9010 * @ptr: holder for the timer function associated data. 9011 * 9012 * This routine is invoked by the fabric iocb block timer after 9013 * timeout. It posts the fabric iocb block timeout event by setting the 9014 * WORKER_FABRIC_BLOCK_TMO bit to work port event bitmap and then invokes 9015 * lpfc_worker_wake_up() routine to wake up the worker thread. It is for 9016 * the worker thread to invoke the lpfc_unblock_fabric_iocbs() on the 9017 * posted event WORKER_FABRIC_BLOCK_TMO. 9018 **/ 9019 void 9020 lpfc_fabric_block_timeout(unsigned long ptr) 9021 { 9022 struct lpfc_hba *phba = (struct lpfc_hba *) ptr; 9023 unsigned long iflags; 9024 uint32_t tmo_posted; 9025 9026 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 9027 tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO; 9028 if (!tmo_posted) 9029 phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO; 9030 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); 9031 9032 if (!tmo_posted) 9033 lpfc_worker_wake_up(phba); 9034 return; 9035 } 9036 9037 /** 9038 * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list 9039 * @phba: pointer to lpfc hba data structure. 9040 * 9041 * This routine issues one fabric iocb from the driver internal list to 9042 * the HBA. It first checks whether it's ready to issue one fabric iocb to 9043 * the HBA (whether there is no outstanding fabric iocb). If so, it shall 9044 * remove one pending fabric iocb from the driver internal list and invokes 9045 * lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA. 
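 *
 * If lpfc_sli_issue_iocb() fails for the iocb pulled off the list, the iocb
 * is completed locally with IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED and the
 * routine repeats with the next pending fabric iocb, if any.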
9046 **/ 9047 static void 9048 lpfc_resume_fabric_iocbs(struct lpfc_hba *phba) 9049 { 9050 struct lpfc_iocbq *iocb; 9051 unsigned long iflags; 9052 int ret; 9053 IOCB_t *cmd; 9054 9055 repeat: 9056 iocb = NULL; 9057 spin_lock_irqsave(&phba->hbalock, iflags); 9058 /* Post any pending iocb to the SLI layer */ 9059 if (atomic_read(&phba->fabric_iocb_count) == 0) { 9060 list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb), 9061 list); 9062 if (iocb) 9063 /* Increment fabric iocb count to hold the position */ 9064 atomic_inc(&phba->fabric_iocb_count); 9065 } 9066 spin_unlock_irqrestore(&phba->hbalock, iflags); 9067 if (iocb) { 9068 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl; 9069 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb; 9070 iocb->iocb_flag |= LPFC_IO_FABRIC; 9071 9072 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD, 9073 "Fabric sched1: ste:x%x", 9074 iocb->vport->port_state, 0, 0); 9075 9076 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); 9077 9078 if (ret == IOCB_ERROR) { 9079 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl; 9080 iocb->fabric_iocb_cmpl = NULL; 9081 iocb->iocb_flag &= ~LPFC_IO_FABRIC; 9082 cmd = &iocb->iocb; 9083 cmd->ulpStatus = IOSTAT_LOCAL_REJECT; 9084 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED; 9085 iocb->iocb_cmpl(phba, iocb, iocb); 9086 9087 atomic_dec(&phba->fabric_iocb_count); 9088 goto repeat; 9089 } 9090 } 9091 9092 return; 9093 } 9094 9095 /** 9096 * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command 9097 * @phba: pointer to lpfc hba data structure. 9098 * 9099 * This routine unblocks the issuing fabric iocb command. The function 9100 * will clear the fabric iocb block bit and then invoke the routine 9101 * lpfc_resume_fabric_iocbs() to issue one of the pending fabric iocb 9102 * from the driver internal fabric iocb list. 9103 **/ 9104 void 9105 lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba) 9106 { 9107 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 9108 9109 lpfc_resume_fabric_iocbs(phba); 9110 return; 9111 } 9112 9113 /** 9114 * lpfc_block_fabric_iocbs - Block issuing fabric iocb command 9115 * @phba: pointer to lpfc hba data structure. 9116 * 9117 * This routine blocks the issuing fabric iocb for a specified amount of 9118 * time (currently 100 ms). This is done by set the fabric iocb block bit 9119 * and set up a timeout timer for 100ms. When the block bit is set, no more 9120 * fabric iocb will be issued out of the HBA. 9121 **/ 9122 static void 9123 lpfc_block_fabric_iocbs(struct lpfc_hba *phba) 9124 { 9125 int blocked; 9126 9127 blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 9128 /* Start a timer to unblock fabric iocbs after 100ms */ 9129 if (!blocked) 9130 mod_timer(&phba->fabric_block_timer, 9131 jiffies + msecs_to_jiffies(100)); 9132 9133 return; 9134 } 9135 9136 /** 9137 * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb 9138 * @phba: pointer to lpfc hba data structure. 9139 * @cmdiocb: pointer to lpfc command iocb data structure. 9140 * @rspiocb: pointer to lpfc response iocb data structure. 9141 * 9142 * This routine is the callback function that is put to the fabric iocb's 9143 * callback function pointer (iocb->iocb_cmpl). The original iocb's callback 9144 * function pointer has been stored in iocb->fabric_iocb_cmpl. This callback 9145 * function first restores and invokes the original iocb's callback function 9146 * and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next 9147 * fabric bound iocb from the driver internal fabric iocb list onto the wire. 
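 *
 * If the response is a temporarily-unavailable N_Port/Fabric reject, an
 * N_Port/Fabric busy, or an LS_RJT with reason code "unable to perform
 * command request" or "logical busy", further fabric iocb issuing is blocked
 * for 100ms via lpfc_block_fabric_iocbs(); pending iocbs are then resumed
 * when the block timer expires.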
9148 **/
9149 static void
9150 lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
9151 struct lpfc_iocbq *rspiocb)
9152 {
9153 struct ls_rjt stat;
9154
9155 BUG_ON((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC);
9156
9157 switch (rspiocb->iocb.ulpStatus) {
9158 case IOSTAT_NPORT_RJT:
9159 case IOSTAT_FABRIC_RJT:
9160 if (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
9161 lpfc_block_fabric_iocbs(phba);
9162 }
9163 break;
9164
9165 case IOSTAT_NPORT_BSY:
9166 case IOSTAT_FABRIC_BSY:
9167 lpfc_block_fabric_iocbs(phba);
9168 break;
9169
9170 case IOSTAT_LS_RJT:
9171 stat.un.lsRjtError =
9172 be32_to_cpu(rspiocb->iocb.un.ulpWord[4]);
9173 if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
9174 (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
9175 lpfc_block_fabric_iocbs(phba);
9176 break;
9177 }
9178
9179 BUG_ON(atomic_read(&phba->fabric_iocb_count) == 0);
9180
9181 cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl;
9182 cmdiocb->fabric_iocb_cmpl = NULL;
9183 cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
9184 cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb);
9185
9186 atomic_dec(&phba->fabric_iocb_count);
9187 if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
9188 /* Post any pending iocbs to HBA */
9189 lpfc_resume_fabric_iocbs(phba);
9190 }
9191 }
9192
9193 /**
9194 * lpfc_issue_fabric_iocb - Issue a fabric iocb command
9195 * @phba: pointer to lpfc hba data structure.
9196 * @iocb: pointer to lpfc command iocb data structure.
9197 *
9198 * This routine is used as the top-level API for issuing a fabric iocb command
9199 * such as FLOGI and FDISC. To accommodate certain switch fabrics, this driver
9200 * function makes sure that only one fabric bound iocb will be outstanding at
9201 * any given time. As such, this function will first check to see whether there
9202 * is already an outstanding fabric iocb on the wire. If so, it will put the
9203 * newly issued iocb onto the driver internal fabric iocb list, waiting to be
9204 * issued later. Otherwise, it will issue the iocb on the wire and update the
9205 * fabric iocb count to indicate that there is one fabric iocb on the wire.
9206 *
9207 * Note that this implementation can send fabric IOCBs out of order. The
9208 * problem is that the "ready" check does not include the condition that
9209 * the internal fabric IOCB list is empty. As such, a fabric IOCB issued
9210 * by this routine may "jump" ahead of the fabric IOCBs already on the
9211 * internal list.
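 *
 * For example, if an FDISC is issued while another fabric iocb is still
 * outstanding, the "ready" check fails and the new iocb is queued on
 * phba->fabric_iocb_list; lpfc_cmpl_fabric_iocb() later re-issues it through
 * lpfc_resume_fabric_iocbs().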
9212 *
9213 * Return code
9214 * IOCB_SUCCESS - fabric iocb either put on the list or issued successfully
9215 * IOCB_ERROR - failed to issue fabric iocb
9216 **/
9217 static int
9218 lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
9219 {
9220 unsigned long iflags;
9221 int ready;
9222 int ret;
9223
9224 BUG_ON(atomic_read(&phba->fabric_iocb_count) > 1);
9225
9226 spin_lock_irqsave(&phba->hbalock, iflags);
9227 ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
9228 !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
9229
9230 if (ready)
9231 /* Increment fabric iocb count to hold the position */
9232 atomic_inc(&phba->fabric_iocb_count);
9233 spin_unlock_irqrestore(&phba->hbalock, iflags);
9234 if (ready) {
9235 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
9236 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
9237 iocb->iocb_flag |= LPFC_IO_FABRIC;
9238
9239 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
9240 "Fabric sched2: ste:x%x",
9241 iocb->vport->port_state, 0, 0);
9242
9243 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
9244
9245 if (ret == IOCB_ERROR) {
9246 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
9247 iocb->fabric_iocb_cmpl = NULL;
9248 iocb->iocb_flag &= ~LPFC_IO_FABRIC;
9249 atomic_dec(&phba->fabric_iocb_count);
9250 }
9251 } else {
9252 spin_lock_irqsave(&phba->hbalock, iflags);
9253 list_add_tail(&iocb->list, &phba->fabric_iocb_list);
9254 spin_unlock_irqrestore(&phba->hbalock, iflags);
9255 ret = IOCB_SUCCESS;
9256 }
9257 return ret;
9258 }
9259
9260 /**
9261 * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list
9262 * @vport: pointer to a virtual N_Port data structure.
9263 *
9264 * This routine aborts all the IOCBs associated with a @vport from the
9265 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
9266 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
9267 * list, removes each IOCB associated with the @vport from the list, sets the
9268 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
9269 * associated with the IOCB.
9270 **/
9271 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
9272 {
9273 LIST_HEAD(completions);
9274 struct lpfc_hba *phba = vport->phba;
9275 struct lpfc_iocbq *tmp_iocb, *piocb;
9276
9277 spin_lock_irq(&phba->hbalock);
9278 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
9279 list) {
9280
9281 if (piocb->vport != vport)
9282 continue;
9283
9284 list_move_tail(&piocb->list, &completions);
9285 }
9286 spin_unlock_irq(&phba->hbalock);
9287
9288 /* Cancel all the IOCBs from the completions list */
9289 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
9290 IOERR_SLI_ABORTED);
9291 }
9292
9293 /**
9294 * lpfc_fabric_abort_nport - Abort an ndlp's iocbs from driver fabric iocb list
9295 * @ndlp: pointer to a node-list data structure.
9296 *
9297 * This routine aborts all the IOCBs associated with an @ndlp from the
9298 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
9299 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
9300 * list, removes each IOCB associated with the @ndlp from the list, sets the
9301 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
9302 * associated with the IOCB.
9303 **/ 9304 void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp) 9305 { 9306 LIST_HEAD(completions); 9307 struct lpfc_hba *phba = ndlp->phba; 9308 struct lpfc_iocbq *tmp_iocb, *piocb; 9309 struct lpfc_sli_ring *pring; 9310 9311 pring = lpfc_phba_elsring(phba); 9312 9313 spin_lock_irq(&phba->hbalock); 9314 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, 9315 list) { 9316 if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) { 9317 9318 list_move_tail(&piocb->list, &completions); 9319 } 9320 } 9321 spin_unlock_irq(&phba->hbalock); 9322 9323 /* Cancel all the IOCBs from the completions list */ 9324 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 9325 IOERR_SLI_ABORTED); 9326 } 9327 9328 /** 9329 * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list 9330 * @phba: pointer to lpfc hba data structure. 9331 * 9332 * This routine aborts all the IOCBs currently on the driver internal 9333 * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS 9334 * IOCB ring. This function takes the entire IOCB list off the fabric IOCB 9335 * list, removes IOCBs off the list, set the status feild to 9336 * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with 9337 * the IOCB. 9338 **/ 9339 void lpfc_fabric_abort_hba(struct lpfc_hba *phba) 9340 { 9341 LIST_HEAD(completions); 9342 9343 spin_lock_irq(&phba->hbalock); 9344 list_splice_init(&phba->fabric_iocb_list, &completions); 9345 spin_unlock_irq(&phba->hbalock); 9346 9347 /* Cancel all the IOCBs from the completions list */ 9348 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 9349 IOERR_SLI_ABORTED); 9350 } 9351 9352 /** 9353 * lpfc_sli4_vport_delete_els_xri_aborted -Remove all ndlp references for vport 9354 * @vport: pointer to lpfc vport data structure. 9355 * 9356 * This routine is invoked by the vport cleanup for deletions and the cleanup 9357 * for an ndlp on removal. 9358 **/ 9359 void 9360 lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport) 9361 { 9362 struct lpfc_hba *phba = vport->phba; 9363 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 9364 unsigned long iflag = 0; 9365 9366 spin_lock_irqsave(&phba->hbalock, iflag); 9367 spin_lock(&phba->sli4_hba.sgl_list_lock); 9368 list_for_each_entry_safe(sglq_entry, sglq_next, 9369 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { 9370 if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport) 9371 sglq_entry->ndlp = NULL; 9372 } 9373 spin_unlock(&phba->sli4_hba.sgl_list_lock); 9374 spin_unlock_irqrestore(&phba->hbalock, iflag); 9375 return; 9376 } 9377 9378 /** 9379 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort 9380 * @phba: pointer to lpfc hba data structure. 9381 * @axri: pointer to the els xri abort wcqe structure. 9382 * 9383 * This routine is invoked by the worker thread to process a SLI4 slow-path 9384 * ELS aborted xri. 
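 *
 * If the aborted XRI matches an entry on the aborted ELS sgl list, that sglq
 * is moved back to the free ELS sgl list, marked SGL_FREED, and an RRQ is
 * set active for the exchange; otherwise the active sglq owning the XRI, if
 * any, is marked SGL_XRI_ABORTED.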
9385 **/ 9386 void 9387 lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba, 9388 struct sli4_wcqe_xri_aborted *axri) 9389 { 9390 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); 9391 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); 9392 uint16_t lxri = 0; 9393 9394 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 9395 unsigned long iflag = 0; 9396 struct lpfc_nodelist *ndlp; 9397 struct lpfc_sli_ring *pring; 9398 9399 pring = lpfc_phba_elsring(phba); 9400 9401 spin_lock_irqsave(&phba->hbalock, iflag); 9402 spin_lock(&phba->sli4_hba.sgl_list_lock); 9403 list_for_each_entry_safe(sglq_entry, sglq_next, 9404 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { 9405 if (sglq_entry->sli4_xritag == xri) { 9406 list_del(&sglq_entry->list); 9407 ndlp = sglq_entry->ndlp; 9408 sglq_entry->ndlp = NULL; 9409 list_add_tail(&sglq_entry->list, 9410 &phba->sli4_hba.lpfc_els_sgl_list); 9411 sglq_entry->state = SGL_FREED; 9412 spin_unlock(&phba->sli4_hba.sgl_list_lock); 9413 spin_unlock_irqrestore(&phba->hbalock, iflag); 9414 lpfc_set_rrq_active(phba, ndlp, 9415 sglq_entry->sli4_lxritag, 9416 rxid, 1); 9417 9418 /* Check if TXQ queue needs to be serviced */ 9419 if (!(list_empty(&pring->txq))) 9420 lpfc_worker_wake_up(phba); 9421 return; 9422 } 9423 } 9424 spin_unlock(&phba->sli4_hba.sgl_list_lock); 9425 lxri = lpfc_sli4_xri_inrange(phba, xri); 9426 if (lxri == NO_XRI) { 9427 spin_unlock_irqrestore(&phba->hbalock, iflag); 9428 return; 9429 } 9430 spin_lock(&phba->sli4_hba.sgl_list_lock); 9431 sglq_entry = __lpfc_get_active_sglq(phba, lxri); 9432 if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) { 9433 spin_unlock(&phba->sli4_hba.sgl_list_lock); 9434 spin_unlock_irqrestore(&phba->hbalock, iflag); 9435 return; 9436 } 9437 sglq_entry->state = SGL_XRI_ABORTED; 9438 spin_unlock(&phba->sli4_hba.sgl_list_lock); 9439 spin_unlock_irqrestore(&phba->hbalock, iflag); 9440 return; 9441 } 9442 9443 /* lpfc_sli_abts_recover_port - Recover a port that failed a BLS_ABORT req. 9444 * @vport: pointer to virtual port object. 9445 * @ndlp: nodelist pointer for the impacted node. 9446 * 9447 * The driver calls this routine in response to an SLI4 XRI ABORT CQE 9448 * or an SLI3 ASYNC_STATUS_CN event from the port. For either event, 9449 * the driver is required to send a LOGO to the remote node before it 9450 * attempts to recover its login to the remote node. 9451 */ 9452 void 9453 lpfc_sli_abts_recover_port(struct lpfc_vport *vport, 9454 struct lpfc_nodelist *ndlp) 9455 { 9456 struct Scsi_Host *shost; 9457 struct lpfc_hba *phba; 9458 unsigned long flags = 0; 9459 9460 shost = lpfc_shost_from_vport(vport); 9461 phba = vport->phba; 9462 if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) { 9463 lpfc_printf_log(phba, KERN_INFO, 9464 LOG_SLI, "3093 No rport recovery needed. " 9465 "rport in state 0x%x\n", ndlp->nlp_state); 9466 return; 9467 } 9468 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 9469 "3094 Start rport recovery on shost id 0x%x " 9470 "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x " 9471 "flags 0x%x\n", 9472 shost->host_no, ndlp->nlp_DID, 9473 vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state, 9474 ndlp->nlp_flag); 9475 /* 9476 * The rport is not responding. Remove the FCP-2 flag to prevent 9477 * an ADISC in the follow-up recovery code. 9478 */ 9479 spin_lock_irqsave(shost->host_lock, flags); 9480 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 9481 spin_unlock_irqrestore(shost->host_lock, flags); 9482 lpfc_issue_els_logo(vport, ndlp, 0); 9483 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 9484 } 9485 9486