/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
/* See Fibre Channel protocol T11 FC-LS for details */
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
			  struct lpfc_iocbq *);
static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
				  struct lpfc_iocbq *);
static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
				struct lpfc_nodelist *ndlp, uint8_t retry);
static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
				  struct lpfc_iocbq *iocb);
static void lpfc_cmpl_els_edc(struct lpfc_hba *phba,
			      struct lpfc_iocbq *cmdiocb,
			      struct lpfc_iocbq *rspiocb);
static void lpfc_cmpl_els_uvem(struct lpfc_hba *, struct lpfc_iocbq *,
			       struct lpfc_iocbq *);

static int lpfc_max_els_tries = 3;

static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport);
static void lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max);
static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid);

/**
 * lpfc_els_chk_latt - Check host link attention event for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine checks whether there is an outstanding host link
 * attention event during the discovery process with the @vport. It is done
 * by reading the HBA's Host Attention (HA) register.
 * If there are any host link attention events during this @vport's
 * discovery process, the @vport shall be marked as FC_ABORT_DISCOVERY, a
 * host link attention clear shall be issued if the link state is not
 * already in the host link cleared state, and the return code shall
 * indicate whether a host link attention event had happened.
 *
 * Note that, if either the host link is in state LPFC_LINK_DOWN or the
 * @vport is in state LPFC_VPORT_READY, the request for checking host link
 * attention events will be ignored and the return code shall indicate that
 * no host link attention event had happened.
 *
 * Return codes
 *   0 - no host link attention event happened
 *   1 - host link attention event happened
 **/
int
lpfc_els_chk_latt(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	uint32_t ha_copy;

	if (vport->port_state >= LPFC_VPORT_READY ||
	    phba->link_state == LPFC_LINK_DOWN ||
	    phba->sli_rev > LPFC_SLI_REV3)
		return 0;

	/* Read the HBA Host Attention Register */
	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		return 1;

	if (!(ha_copy & HA_LATT))
		return 0;

	/* Pending Link Event during Discovery */
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0237 Pending Link Event during "
			 "Discovery: State x%x\n",
			 phba->pport->port_state);

	/* CLEAR_LA should re-enable link attention events and
	 * we should then immediately take a LATT event. The
	 * LATT processing should call lpfc_linkdown() which
	 * will cleanup any left over in-progress discovery
	 * events.
	 */
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_ABORT_DISCOVERY;
	spin_unlock_irq(shost->host_lock);

	if (phba->link_state != LPFC_CLEAR_LA)
		lpfc_issue_clear_la(phba, vport);

	return 1;
}

/**
 * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
 * @vport: pointer to a host virtual N_Port data structure.
 * @expect_rsp: flag indicating whether response is expected.
 * @cmd_size: size of the ELS command.
 * @retry: number of retries to the command when it fails.
 * @ndlp: pointer to a node-list data structure.
 * @did: destination identifier.
 * @elscmd: the ELS command code.
 *
 * This routine is used for allocating a lpfc-IOCB data structure from
 * the driver lpfc-IOCB free-list and preparing the IOCB with the parameters
 * passed into the routine for the discovery state machine to issue an
 * Extended Link Service (ELS) command. It is a generic lpfc-IOCB allocation
 * and preparation routine that is used by all the discovery state machine
 * routines; the ELS command-specific fields will be set up later by the
 * individual discovery state machine routines after calling this routine
 * to allocate and prepare a generic IOCB data structure. It fills in the
 * Buffer Descriptor Entries (BDEs) and allocates buffers for both the
 * command payload and the response payload (if expected). The reference
 * count on the ndlp is incremented by 1 and the reference to the ndlp is
 * put into context1 of the IOCB data structure for this IOCB to hold the
 * ndlp reference for the command's callback function to access later.
 *
 * Return code
 *   Pointer to the newly allocated/prepared els iocb data structure
 *   NULL - when els iocb data structure allocation/preparation failed
 **/
struct lpfc_iocbq *
lpfc_prep_els_iocb(struct lpfc_vport *vport, u8 expect_rsp,
		   u16 cmd_size, u8 retry,
		   struct lpfc_nodelist *ndlp, u32 did,
		   u32 elscmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_dmabuf *pcmd, *prsp, *pbuflist, *bmp;
	struct ulp_bde64_le *bpl;
	u32 timeout = 0;

	if (!lpfc_is_link_up(phba))
		return NULL;

	/* Allocate buffer for command iocb */
	elsiocb = lpfc_sli_get_iocbq(phba);
	if (!elsiocb)
		return NULL;

	/*
	 * If this command is for fabric controller and HBA running
	 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
	 */
	if ((did == Fabric_DID) &&
	    (phba->hba_flag & HBA_FIP_SUPPORT) &&
	    ((elscmd == ELS_CMD_FLOGI) ||
	     (elscmd == ELS_CMD_FDISC) ||
	     (elscmd == ELS_CMD_LOGO)))
		switch (elscmd) {
		case ELS_CMD_FLOGI:
			elsiocb->cmd_flag |=
				((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		case ELS_CMD_FDISC:
			elsiocb->cmd_flag |=
				((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		case ELS_CMD_LOGO:
			elsiocb->cmd_flag |=
				((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		}
	else
		elsiocb->cmd_flag &= ~LPFC_FIP_ELS_ID_MASK;

	/* fill in BDEs for command */
	/* Allocate buffer for command payload */
	pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL);
	if (pcmd)
		pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
	if (!pcmd || !pcmd->virt)
		goto els_iocb_free_pcmb_exit;

	INIT_LIST_HEAD(&pcmd->list);

	/* Allocate buffer for response payload */
	if (expect_rsp) {
		prsp = kmalloc(sizeof(*prsp), GFP_KERNEL);
		if (prsp)
			prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
						     &prsp->phys);
		if (!prsp || !prsp->virt)
			goto els_iocb_free_prsp_exit;
		INIT_LIST_HEAD(&prsp->list);
	} else {
		prsp = NULL;
	}

	/* Allocate buffer for Buffer ptr list */
	pbuflist = kmalloc(sizeof(*pbuflist), GFP_KERNEL);
	if (pbuflist)
		pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
						 &pbuflist->phys);
	if (!pbuflist || !pbuflist->virt)
		goto els_iocb_free_pbuf_exit;

	INIT_LIST_HEAD(&pbuflist->list);

	if (expect_rsp) {
		switch (elscmd) {
		case ELS_CMD_FLOGI:
			timeout = FF_DEF_RATOV * 2;
			break;
		case ELS_CMD_LOGO:
			timeout = phba->fc_ratov;
			break;
		default:
			timeout = phba->fc_ratov * 2;
		}

		/* Fill SGE for the num bde count */
		elsiocb->num_bdes = 2;
	}

	if (phba->sli_rev == LPFC_SLI_REV4)
		bmp = pcmd;
	else
		bmp = pbuflist;

	lpfc_sli_prep_els_req_rsp(phba, elsiocb, vport, bmp, cmd_size, did,
				  elscmd, timeout, expect_rsp);

	bpl = (struct ulp_bde64_le *)pbuflist->virt;
	bpl->addr_low = cpu_to_le32(putPaddrLow(pcmd->phys));
	bpl->addr_high = cpu_to_le32(putPaddrHigh(pcmd->phys));
	bpl->type_size = cpu_to_le32(cmd_size);
	bpl->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);

	if (expect_rsp) {
		bpl++;
		bpl->addr_low = cpu_to_le32(putPaddrLow(prsp->phys));
		bpl->addr_high = cpu_to_le32(putPaddrHigh(prsp->phys));
		bpl->type_size = cpu_to_le32(FCELSSIZE);
		bpl->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
	}

	elsiocb->context2 = pcmd;
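	/* Stash the command payload (context2) and the BPL buffer list
	 * (context3) on the iocb so the completion path, e.g.
	 * lpfc_els_free_iocb(), can locate and free them once the ELS
	 * exchange finishes.
	 */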
	elsiocb->context3 = pbuflist;
	elsiocb->retry = retry;
	elsiocb->vport = vport;
	elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;

	if (prsp)
		list_add(&prsp->list, &pcmd->list);
	if (expect_rsp) {
		/* Xmit ELS command <elsCmd> to remote NPORT <did> */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "0116 Xmit ELS command x%x to remote "
				 "NPORT x%x I/O tag: x%x, port state:x%x "
				 "rpi x%x fc_flag:x%x\n",
				 elscmd, did, elsiocb->iotag,
				 vport->port_state, ndlp->nlp_rpi,
				 vport->fc_flag);
	} else {
		/* Xmit ELS response <elsCmd> to remote NPORT <did> */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "0117 Xmit ELS response x%x to remote "
				 "NPORT x%x I/O tag: x%x, size: x%x "
				 "port_state x%x rpi x%x fc_flag x%x\n",
				 elscmd, ndlp->nlp_DID, elsiocb->iotag,
				 cmd_size, vport->port_state,
				 ndlp->nlp_rpi, vport->fc_flag);
	}

	return elsiocb;

els_iocb_free_pbuf_exit:
	if (expect_rsp)
		lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
	kfree(pbuflist);

els_iocb_free_prsp_exit:
	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
	kfree(prsp);

els_iocb_free_pcmb_exit:
	kfree(pcmd);
	lpfc_sli_release_iocbq(phba, elsiocb);
	return NULL;
}

/**
 * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a fabric registration login for a @vport. An
 * active ndlp node with Fabric_DID must already exist for this @vport.
 * The routine invokes two mailbox commands to carry out fabric registration
 * login through the HBA firmware: the first mailbox command requests the
 * HBA to perform link configuration for the @vport; and the second mailbox
 * command requests the HBA to perform the actual fabric registration login
 * with the @vport.
 *
 * Return code
 *   0 - successfully issued fabric registration login for @vport
 *   -ENXIO -- failed to issue fabric registration login for @vport
 **/
int
lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct serv_parm *sp;
	int rc;
	int err = 0;

	sp = &phba->fc_fabparam;
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		err = 1;
		goto fail;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		err = 2;
		goto fail;
	}

	vport->port_state = LPFC_FABRIC_CFG_LINK;
	lpfc_config_link(phba, mbox);
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->vport = vport;

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		err = 3;
		goto fail_free_mbox;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		err = 4;
		goto fail;
	}
	rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
			  ndlp->nlp_rpi);
	if (rc) {
		err = 5;
		goto fail_free_mbox;
	}

	mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
	mbox->vport = vport;
	/* increment the reference count on ndlp to hold reference
	 * for the callback routine.
	 */
	mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
	if (!mbox->ctx_ndlp) {
		err = 6;
		goto fail_no_ndlp;
	}

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		err = 7;
		goto fail_issue_reg_login;
	}

	return 0;

fail_issue_reg_login:
	/* decrement the reference count on ndlp just incremented
	 * for the failed mbox command.
	 */
	lpfc_nlp_put(ndlp);
fail_no_ndlp:
	mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
fail_free_mbox:
	mempool_free(mbox, phba->mbox_mem_pool);

fail:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0249 Cannot issue Register Fabric login: Err %d\n",
			 err);
	return -ENXIO;
}

/**
 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
 * the @vport. This mailbox command is necessary for SLI4 ports only.
 *
 * Return code
 *   0 - successfully issued REG_VFI for @vport
 *   A failure code otherwise.
 **/
int
lpfc_issue_reg_vfi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mboxq = NULL;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *dmabuf = NULL;
	int rc = 0;

	/* move forward in case of SLI4 FC port loopback test and pt2pt mode */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    !(phba->link_flag & LS_LOOPBACK_MODE) &&
	    !(vport->fc_flag & FC_PT2PT)) {
		ndlp = lpfc_findnode_did(vport, Fabric_DID);
		if (!ndlp) {
			rc = -ENODEV;
			goto fail;
		}
	}

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		rc = -ENOMEM;
		goto fail;
	}

	/* Supply CSP's only if we are fabric connect or pt-to-pt connect */
	if ((vport->fc_flag & FC_FABRIC) || (vport->fc_flag & FC_PT2PT)) {
		dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (!dmabuf) {
			rc = -ENOMEM;
			goto fail;
		}
		dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
		if (!dmabuf->virt) {
			rc = -ENOMEM;
			goto fail;
		}
		memcpy(dmabuf->virt, &phba->fc_fabparam,
		       sizeof(struct serv_parm));
	}

	vport->port_state = LPFC_FABRIC_CFG_LINK;
	if (dmabuf)
		lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
	else
		lpfc_reg_vfi(mboxq, vport, 0);

	mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
	mboxq->vport = vport;
	mboxq->ctx_buf = dmabuf;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = -ENXIO;
		goto fail;
	}
	return 0;

fail:
	if (mboxq)
		mempool_free(mboxq, phba->mbox_mem_pool);
	if (dmabuf) {
		if (dmabuf->virt)
			lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}

	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0289 Issue Register VFI failed: Err %d\n", rc);
	return rc;
}

/**
 * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues an UNREG_VFI mailbox with the vfi, vpi, fcfi triplet
 * for the @vport. This mailbox command is necessary for SLI4 ports only.
 *
 * Return code
 *   0 - successfully issued UNREG_VFI for @vport
 *   A failure code otherwise.
 **/
int
lpfc_issue_unreg_vfi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct Scsi_Host *shost;
	LPFC_MBOXQ_t *mboxq;
	int rc;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2556 UNREG_VFI mbox allocation failed "
				"HBA state x%x\n", phba->pport->port_state);
		return -ENOMEM;
	}

	lpfc_unreg_vfi(mboxq, vport);
	mboxq->vport = vport;
	mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl;

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2557 UNREG_VFI issue mbox failed rc x%x "
				"HBA state x%x\n",
				rc, phba->pport->port_state);
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EIO;
	}

	shost = lpfc_shost_from_vport(vport);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VFI_REGISTERED;
	spin_unlock_irq(shost->host_lock);
	return 0;
}

/**
 * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
 * @vport: pointer to a host virtual N_Port data structure.
 * @sp: pointer to service parameter data structure.
 *
 * This routine is called from the FLOGI/FDISC completion handler functions.
 * lpfc_check_clean_addr_bit returns 1 when the FCID, Fabric portname or
 * Fabric nodename has changed in the completion service parameters, else it
 * returns 0. This function also sets a flag in the vport data structure to
 * delay N_Port discovery after the FLOGI/FDISC completion if the Clean
 * Address bit in the FLOGI/FDISC response is cleared and the FCID, Fabric
 * portname or Fabric nodename has changed in the completion service
 * parameters.
 *
 * Return code
 *   0 - FCID, Fabric Nodename and Fabric portname are not changed.
 *   1 - FCID, Fabric Nodename or Fabric portname is changed.
 *
 **/
static uint8_t
lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
			  struct serv_parm *sp)
{
	struct lpfc_hba *phba = vport->phba;
	uint8_t fabric_param_changed = 0;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if ((vport->fc_prevDID != vport->fc_myDID) ||
	    memcmp(&vport->fabric_portname, &sp->portName,
		   sizeof(struct lpfc_name)) ||
	    memcmp(&vport->fabric_nodename, &sp->nodeName,
		   sizeof(struct lpfc_name)) ||
	    (vport->vport_flag & FAWWPN_PARAM_CHG)) {
		fabric_param_changed = 1;
		vport->vport_flag &= ~FAWWPN_PARAM_CHG;
	}
	/*
	 * Word 1 Bit 31 in common service parameter is overloaded.
	 * Word 1 Bit 31 in FLOGI request is multiple NPort request
	 * Word 1 Bit 31 in FLOGI response is clean address bit
	 *
	 * If fabric parameter is changed and clean address bit is
	 * cleared delay nport discovery if
	 * - vport->fc_prevDID != 0 (not initial discovery) OR
	 * - lpfc_delay_discovery module parameter is set.
	 */
	if (fabric_param_changed && !sp->cmn.clean_address_bit &&
	    (vport->fc_prevDID || phba->cfg_delay_discovery)) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_DISC_DELAYED;
		spin_unlock_irq(shost->host_lock);
	}

	return fabric_param_changed;
}


/**
 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 * @ulp_word4: command response value
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into a fabric
 * port in a fabric topology. It properly sets up the parameters to the @ndlp
 * from the IOCB response. It also checks the newly assigned N_Port ID of the
 * @vport against the previously assigned N_Port ID. If it is different from
 * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine
 * is invoked on all the remaining nodes with the @vport to unregister the
 * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin()
 * is invoked to register login to the fabric.
 *
 * Return code
 *   0 - Success (currently, always return 0)
 **/
static int
lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   struct serv_parm *sp, uint32_t ulp_word4)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *np;
	struct lpfc_nodelist *next_np;
	uint8_t fabric_param_changed;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_FABRIC;
	spin_unlock_irq(shost->host_lock);

	phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
	if (sp->cmn.edtovResolution)	/* E_D_TOV ticks are in nanoseconds */
		phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;

	phba->fc_edtovResol = sp->cmn.edtovResolution;
	phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;

	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_PUBLIC_LOOP;
		spin_unlock_irq(shost->host_lock);
	}

	vport->fc_myDID = ulp_word4 & Mask_DID;
	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
	ndlp->nlp_class_sup = 0;
	if (sp->cls1.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS1;
	if (sp->cls2.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS2;
	if (sp->cls3.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS3;
	if (sp->cls4.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS4;
	ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
			     sp->cmn.bbRcvSizeLsb;

	fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
	if (fabric_param_changed) {
		/* Reset FDMI attribute masks based on config parameter */
		if (phba->cfg_enable_SmartSAN ||
		    (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
			/* Setup appropriate attribute masks */
			vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
			if (phba->cfg_enable_SmartSAN)
				vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
			else
				vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
		} else {
			vport->fdmi_hba_mask = 0;
			vport->fdmi_port_mask = 0;
		}

	}
	memcpy(&vport->fabric_portname, &sp->portName,
	       sizeof(struct lpfc_name));
	memcpy(&vport->fabric_nodename, &sp->nodeName,
	       sizeof(struct lpfc_name));
	memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));

	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
		if (sp->cmn.response_multiple_NPort) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_ELS | LOG_VPORT,
					 "1816 FLOGI NPIV supported, "
					 "response data 0x%x\n",
					 sp->cmn.response_multiple_NPort);
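			/* The fabric granted multiple N_Port IDs; record
			 * this so NPIV vports can later be brought up with
			 * FDISC.
			 */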
			spin_lock_irq(&phba->hbalock);
			phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
			spin_unlock_irq(&phba->hbalock);
		} else {
			/* Because we asked f/w for NPIV it still expects us
			 * to call reg_vnpid at least for the physical host
			 */
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_ELS | LOG_VPORT,
					 "1817 Fabric does not support NPIV "
					 "- configuring single port mode.\n");
			spin_lock_irq(&phba->hbalock);
			phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
			spin_unlock_irq(&phba->hbalock);
		}
	}

	/*
	 * For FC we need to do some special processing because of the SLI
	 * Port's default settings of the Common Service Parameters.
	 */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)) {
		/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
		if (fabric_param_changed)
			lpfc_unregister_fcf_prep(phba);

		/* This should just update the VFI CSPs */
		if (vport->fc_flag & FC_VFI_REGISTERED)
			lpfc_issue_reg_vfi(vport);
	}

	if (fabric_param_changed &&
	    !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {

		/* If our NportID changed, we need to ensure all
		 * remaining NPORTs get unreg_login'ed.
		 */
		list_for_each_entry_safe(np, next_np,
					 &vport->fc_nodes, nlp_listp) {
			if ((np->nlp_state != NLP_STE_NPR_NODE) ||
			    !(np->nlp_flag & NLP_NPR_ADISC))
				continue;
			spin_lock_irq(&np->lock);
			np->nlp_flag &= ~NLP_NPR_ADISC;
			spin_unlock_irq(&np->lock);
			lpfc_unreg_rpi(vport, np);
		}
		lpfc_cleanup_pending_mbox(vport);

		if (phba->sli_rev == LPFC_SLI_REV4) {
			lpfc_sli4_unreg_all_rpis(vport);
			lpfc_mbx_unreg_vpi(vport);
			spin_lock_irq(shost->host_lock);
			vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
			spin_unlock_irq(shost->host_lock);
		}

		/*
		 * For SLI3 and SLI4, the VPI needs to be reregistered in
		 * response to this fabric parameter change event.
		 */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
		   !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
		/*
		 * Driver needs to re-reg VPI in order for f/w
		 * to update the MAC address.
		 */
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		lpfc_register_new_vport(phba, vport, ndlp);
		return 0;
	}

	if (phba->sli_rev < LPFC_SLI_REV4) {
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
		    vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
			lpfc_register_new_vport(phba, vport, ndlp);
		else
			lpfc_issue_fabric_reglogin(vport);
	} else {
		ndlp->nlp_type |= NLP_FABRIC;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
		    (vport->vpi_state & LPFC_VPI_REGISTERED)) {
			lpfc_start_fdiscs(phba);
			lpfc_do_scr_ns_plogi(phba, vport);
		} else if (vport->fc_flag & FC_VFI_REGISTERED)
			lpfc_issue_init_vpi(vport);
		else {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
					 "3135 Need register VFI: (x%x/%x)\n",
					 vport->fc_prevDID, vport->fc_myDID);
			lpfc_issue_reg_vfi(vport);
		}
	}
	return 0;
}

/**
 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into an N_Port
 * in a point-to-point topology. First, the @vport's N_Port Name is compared
 * with the received N_Port Name: if the @vport's N_Port Name is greater than
 * the received N_Port Name lexicographically, this node shall assign local
 * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and
 * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise,
 * this node shall just wait for the remote node to issue PLOGI and assign
 * N_Port IDs.
 *
 * Return code
 *   0 - Success
 *   -ENXIO - Fail
 **/
static int
lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  struct serv_parm *sp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
	vport->fc_flag |= FC_PT2PT;
	spin_unlock_irq(shost->host_lock);

	/* If we are pt2pt with another NPort, force NPIV off! */
	phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

	/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
	if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
		lpfc_unregister_fcf_prep(phba);

		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_VFI_REGISTERED;
		spin_unlock_irq(shost->host_lock);
		phba->fc_topology_changed = 0;
	}

	rc = memcmp(&vport->fc_portname, &sp->portName,
		    sizeof(vport->fc_portname));

	if (rc >= 0) {
		/* This side will initiate the PLOGI */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_PT2PT_PLOGI;
		spin_unlock_irq(shost->host_lock);

		/*
		 * N_Port ID cannot be 0, set our Id to LocalID
		 * the other side will be RemoteID.
		 */

		/* not equal */
		if (rc)
			vport->fc_myDID = PT2PT_LocalID;

		/* If not registered with a transport, decrement ndlp reference
		 * count indicating that ndlp can be safely released when other
		 * references are removed.
		 */
		if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)))
			lpfc_nlp_put(ndlp);

		ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
		if (!ndlp) {
			/*
			 * Cannot find existing Fabric ndlp, so allocate a
			 * new one
			 */
			ndlp = lpfc_nlp_init(vport, PT2PT_RemoteID);
			if (!ndlp)
				goto fail;
		}

		memcpy(&ndlp->nlp_portname, &sp->portName,
		       sizeof(struct lpfc_name));
		memcpy(&ndlp->nlp_nodename, &sp->nodeName,
		       sizeof(struct lpfc_name));
		/* Set state will put ndlp onto node list if not already done */
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(&ndlp->lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(&ndlp->lock);

		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!mbox)
			goto fail;

		lpfc_config_link(phba, mbox);

		mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
		mbox->vport = vport;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
			goto fail;
		}
	} else {
		/* This side will wait for the PLOGI. If not registered with
		 * a transport, decrement node reference count indicating that
		 * ndlp can be released when other references are removed.
		 */
		if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)))
			lpfc_nlp_put(ndlp);

		/* Start discovery - this should just do CLEAR_LA */
		lpfc_disc_start(vport);
	}

	return 0;
fail:
	return -ENXIO;
}

/**
 * lpfc_cmpl_els_flogi - Completion callback function for flogi
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is the top-level completion callback function for issuing
 * a Fabric Login (FLOGI) command. If the response IOCB reported an error,
 * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If
 * a retry has been made (either immediately or delayed, with lpfc_els_retry()
 * returning 1), the command IOCB will be released and the function returns.
 * If the retry attempt has been given up (possibly after reaching the maximum
 * number of retries), one additional decrement of the ndlp reference count
 * shall be made before going out, after releasing the command IOCB. This
 * actually releases the remote node (note that lpfc_els_free_iocb() will also
 * invoke one decrement of the ndlp reference count). If no error is reported
 * in the IOCB status, the command Port ID field is used to determine whether
 * this is a point-to-point topology or a fabric topology: if the Port ID
 * field is assigned, it is a fabric topology; otherwise, it is a
 * point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or
 * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the
 * topology-specific completion conditions.
 **/
static void
lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		    struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp = cmdiocb->context1;
	IOCB_t *irsp;
	struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
	struct serv_parm *sp;
	uint16_t fcf_index;
	int rc;
	u32 ulp_status, ulp_word4, tmo;

	/* Check to see if link went down during discovery */
	if (lpfc_els_chk_latt(vport)) {
		/* One additional decrement on node reference count to
		 * trigger the release of the node
		 */
		if (!(ndlp->fc4_xpt_flags & SCSI_XPT_REGD))
			lpfc_nlp_put(ndlp);
		goto out;
	}

	ulp_status = get_job_ulpstatus(phba, rspiocb);
	ulp_word4 = get_job_word4(phba, rspiocb);

	if (phba->sli_rev == LPFC_SLI_REV4) {
		tmo = get_wqe_tmo(cmdiocb);
	} else {
		irsp = &rspiocb->iocb;
		tmo = irsp->ulpTimeout;
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			      "FLOGI cmpl: status:x%x/x%x state:x%x",
			      ulp_status, ulp_word4,
			      vport->port_state);

	if (ulp_status) {
		/*
		 * In case of FIP mode, perform roundrobin FCF failover
		 * due to new FCF discovery
		 */
		if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
		    (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
			if (phba->link_state < LPFC_LINK_UP)
				goto stop_rr_fcf_flogi;
			if ((phba->fcoe_cvl_eventtag_attn ==
			     phba->fcoe_cvl_eventtag) &&
			    (ulp_status == IOSTAT_LOCAL_REJECT) &&
			    ((ulp_word4 & IOERR_PARAM_MASK) ==
			     IOERR_SLI_ABORTED))
				goto stop_rr_fcf_flogi;
			else
				phba->fcoe_cvl_eventtag_attn =
					phba->fcoe_cvl_eventtag;
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
					"2611 FLOGI failed on FCF (x%x), "
"status:x%x/x%x, tmo:x%x, perform " 1016 "roundrobin FCF failover\n", 1017 phba->fcf.current_rec.fcf_indx, 1018 ulp_status, ulp_word4, tmo); 1019 lpfc_sli4_set_fcf_flogi_fail(phba, 1020 phba->fcf.current_rec.fcf_indx); 1021 fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba); 1022 rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index); 1023 if (rc) 1024 goto out; 1025 } 1026 1027 stop_rr_fcf_flogi: 1028 /* FLOGI failure */ 1029 if (!(ulp_status == IOSTAT_LOCAL_REJECT && 1030 ((ulp_word4 & IOERR_PARAM_MASK) == 1031 IOERR_LOOP_OPEN_FAILURE))) 1032 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1033 "2858 FLOGI failure Status:x%x/x%x TMO" 1034 ":x%x Data x%x x%x\n", 1035 ulp_status, ulp_word4, tmo, 1036 phba->hba_flag, phba->fcf.fcf_flag); 1037 1038 /* Check for retry */ 1039 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) 1040 goto out; 1041 1042 lpfc_printf_vlog(vport, KERN_WARNING, LOG_TRACE_EVENT, 1043 "0150 FLOGI failure Status:x%x/x%x " 1044 "xri x%x TMO:x%x refcnt %d\n", 1045 ulp_status, ulp_word4, cmdiocb->sli4_xritag, 1046 tmo, kref_read(&ndlp->kref)); 1047 1048 /* If this is not a loop open failure, bail out */ 1049 if (!(ulp_status == IOSTAT_LOCAL_REJECT && 1050 ((ulp_word4 & IOERR_PARAM_MASK) == 1051 IOERR_LOOP_OPEN_FAILURE))) { 1052 /* FLOGI failure */ 1053 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1054 "0100 FLOGI failure Status:x%x/x%x " 1055 "TMO:x%x\n", 1056 ulp_status, ulp_word4, tmo); 1057 goto flogifail; 1058 } 1059 1060 /* FLOGI failed, so there is no fabric */ 1061 spin_lock_irq(shost->host_lock); 1062 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP | 1063 FC_PT2PT_NO_NVME); 1064 spin_unlock_irq(shost->host_lock); 1065 1066 /* If private loop, then allow max outstanding els to be 1067 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no 1068 * alpa map would take too long otherwise. 1069 */ 1070 if (phba->alpa_map[0] == 0) 1071 vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS; 1072 if ((phba->sli_rev == LPFC_SLI_REV4) && 1073 (!(vport->fc_flag & FC_VFI_REGISTERED) || 1074 (vport->fc_prevDID != vport->fc_myDID) || 1075 phba->fc_topology_changed)) { 1076 if (vport->fc_flag & FC_VFI_REGISTERED) { 1077 if (phba->fc_topology_changed) { 1078 lpfc_unregister_fcf_prep(phba); 1079 spin_lock_irq(shost->host_lock); 1080 vport->fc_flag &= ~FC_VFI_REGISTERED; 1081 spin_unlock_irq(shost->host_lock); 1082 phba->fc_topology_changed = 0; 1083 } else { 1084 lpfc_sli4_unreg_all_rpis(vport); 1085 } 1086 } 1087 1088 /* Do not register VFI if the driver aborted FLOGI */ 1089 if (!lpfc_error_lost_link(ulp_status, ulp_word4)) 1090 lpfc_issue_reg_vfi(vport); 1091 1092 lpfc_nlp_put(ndlp); 1093 goto out; 1094 } 1095 goto flogifail; 1096 } 1097 spin_lock_irq(shost->host_lock); 1098 vport->fc_flag &= ~FC_VPORT_CVL_RCVD; 1099 vport->fc_flag &= ~FC_VPORT_LOGO_RCVD; 1100 spin_unlock_irq(shost->host_lock); 1101 1102 /* 1103 * The FLogI succeeded. Sync the data for the CPU before 1104 * accessing it. 
	 */
	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
	if (!prsp)
		goto out;
	sp = prsp->virt + sizeof(uint32_t);

	/* FLOGI completes successfully */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0101 FLOGI completes successfully, I/O tag:x%x "
			 "xri x%x Data: x%x x%x x%x x%x x%x x%x x%x %d\n",
			 cmdiocb->iotag, cmdiocb->sli4_xritag,
			 ulp_word4, sp->cmn.e_d_tov,
			 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
			 vport->port_state, vport->fc_flag,
			 sp->cmn.priority_tagging, kref_read(&ndlp->kref));

	if (sp->cmn.priority_tagging)
		vport->vmid_flag |= LPFC_VMID_ISSUE_QFPA;

	if (vport->port_state == LPFC_FLOGI) {
		/*
		 * If Common Service Parameters indicate Nport
		 * we are point to point, if Fport we are Fabric.
		 */
		if (sp->cmn.fPort)
			rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp,
							ulp_word4);
		else if (!(phba->hba_flag & HBA_FCOE_MODE))
			rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
		else {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				"2831 FLOGI response with cleared Fabric "
				"bit fcf_index 0x%x "
				"Switch Name %02x%02x%02x%02x%02x%02x%02x%02x "
				"Fabric Name "
				"%02x%02x%02x%02x%02x%02x%02x%02x\n",
				phba->fcf.current_rec.fcf_indx,
				phba->fcf.current_rec.switch_name[0],
				phba->fcf.current_rec.switch_name[1],
				phba->fcf.current_rec.switch_name[2],
				phba->fcf.current_rec.switch_name[3],
				phba->fcf.current_rec.switch_name[4],
				phba->fcf.current_rec.switch_name[5],
				phba->fcf.current_rec.switch_name[6],
				phba->fcf.current_rec.switch_name[7],
				phba->fcf.current_rec.fabric_name[0],
				phba->fcf.current_rec.fabric_name[1],
				phba->fcf.current_rec.fabric_name[2],
				phba->fcf.current_rec.fabric_name[3],
				phba->fcf.current_rec.fabric_name[4],
				phba->fcf.current_rec.fabric_name[5],
				phba->fcf.current_rec.fabric_name[6],
				phba->fcf.current_rec.fabric_name[7]);

			lpfc_nlp_put(ndlp);
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
			spin_unlock_irq(&phba->hbalock);
			phba->fcf.fcf_redisc_attempted = 0; /* reset */
			goto out;
		}
		if (!rc) {
			/* Mark the FCF discovery process done */
			if (phba->hba_flag & HBA_FIP_SUPPORT)
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
						 LOG_ELS,
						 "2769 FLOGI to FCF (x%x) "
						 "completed successfully\n",
						 phba->fcf.current_rec.fcf_indx);
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
			spin_unlock_irq(&phba->hbalock);
			phba->fcf.fcf_redisc_attempted = 0; /* reset */
			goto out;
		}
	} else if (vport->port_state > LPFC_FLOGI &&
		   vport->fc_flag & FC_PT2PT) {
		/*
		 * In a p2p topology, it is possible that discovery has
		 * already progressed, and this completion can be ignored.
		 * Recheck the indicated topology.
		 */
		if (!sp->cmn.fPort)
			goto out;
	}

flogifail:
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
	spin_unlock_irq(&phba->hbalock);

	if (!lpfc_error_lost_link(ulp_status, ulp_word4)) {
		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(vport);

		/* Start discovery */
		lpfc_disc_start(vport);
	} else if (((ulp_status != IOSTAT_LOCAL_REJECT) ||
		    (((ulp_word4 & IOERR_PARAM_MASK) !=
		      IOERR_SLI_ABORTED) &&
		     ((ulp_word4 & IOERR_PARAM_MASK) !=
		      IOERR_SLI_DOWN))) &&
		   (phba->link_state != LPFC_CLEAR_LA)) {
		/* If FLOGI failed enable link interrupt. */
		lpfc_issue_clear_la(phba, vport);
	}
out:
	phba->hba_flag &= ~HBA_FLOGI_OUTSTANDING;
	lpfc_els_free_iocb(phba, cmdiocb);
	lpfc_nlp_put(ndlp);
}

/**
 * lpfc_cmpl_els_link_down - Completion callback function for ELS command
 *                           aborted during a link down
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 */
static void
lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	uint32_t *pcmd;
	uint32_t cmd;
	u32 ulp_status, ulp_word4;

	pcmd = (uint32_t *)(((struct lpfc_dmabuf *)cmdiocb->context2)->virt);
	cmd = *pcmd;

	ulp_status = get_job_ulpstatus(phba, rspiocb);
	ulp_word4 = get_job_word4(phba, rspiocb);

	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"6445 ELS completes after LINK_DOWN: "
			" Status %x/%x cmd x%x flg x%x\n",
			ulp_status, ulp_word4, cmd,
			cmdiocb->cmd_flag);

	if (cmdiocb->cmd_flag & LPFC_IO_FABRIC) {
		cmdiocb->cmd_flag &= ~LPFC_IO_FABRIC;
		atomic_dec(&phba->fabric_iocb_count);
	}
	lpfc_els_free_iocb(phba, cmdiocb);
}

/**
 * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @retry: number of retries to the command IOCB.
 *
 * This routine issues a Fabric Login (FLOGI) Request ELS command
 * for a @vport. The initiator service parameters are put into the payload
 * of the FLOGI Request IOCB, and the top-level callback function pointer
 * to the lpfc_cmpl_els_flogi() routine is put into the IOCB completion
 * callback function field. The lpfc_issue_fabric_iocb routine is invoked
 * to send out the FLOGI ELS command with one outstanding fabric IOCB at a
 * time.
 *
 * Note that the ndlp reference count will be incremented by 1 for holding the
 * ndlp and the reference to ndlp will be stored into the context1 field of
 * the IOCB for the completion callback function to the FLOGI ELS command.
 *
 * Return code
 *   0 - successfully issued flogi iocb for @vport
 *   1 - failed to issue flogi iocb for @vport
 **/
static int
lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		     uint8_t retry)
{
	struct lpfc_hba *phba = vport->phba;
	struct serv_parm *sp;
	union lpfc_wqe128 *wqe = NULL;
	IOCB_t *icmd = NULL;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_iocbq defer_flogi_acc;
	u8 *pcmd, ct;
	uint16_t cmdsize;
	uint32_t tmo, did;
	int rc;

	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
				     ndlp->nlp_DID, ELS_CMD_FLOGI);

	if (!elsiocb)
		return 1;

	wqe = &elsiocb->wqe;
	pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
	icmd = &elsiocb->iocb;

	/* For FLOGI request, remainder of payload is service parameters */
	*((uint32_t *)(pcmd)) = ELS_CMD_FLOGI;
	pcmd += sizeof(uint32_t);
	memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
	sp = (struct serv_parm *)pcmd;

	/* Setup CSPs accordingly for Fabric */
	sp->cmn.e_d_tov = 0;
	sp->cmn.w2.r_a_tov = 0;
	sp->cmn.virtual_fabric_support = 0;
	sp->cls1.classValid = 0;
	if (sp->cmn.fcphLow < FC_PH3)
		sp->cmn.fcphLow = FC_PH3;
	if (sp->cmn.fcphHigh < FC_PH3)
		sp->cmn.fcphHigh = FC_PH3;

	/* Determine if switch supports priority tagging */
	if (phba->cfg_vmid_priority_tagging) {
		sp->cmn.priority_tagging = 1;
		/* lpfc_vmid_host_uuid is combination of wwpn and wwnn */
		if (uuid_is_null((uuid_t *)vport->lpfc_vmid_host_uuid)) {
			memcpy(vport->lpfc_vmid_host_uuid, phba->wwpn,
			       sizeof(phba->wwpn));
			memcpy(&vport->lpfc_vmid_host_uuid[8], phba->wwnn,
			       sizeof(phba->wwnn));
		}
	}

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
		    LPFC_SLI_INTF_IF_TYPE_0) {
			/* FLOGI needs to be 3 for WQE FCFI */
			ct = ((SLI4_CT_FCFI >> 1) & 1) | (SLI4_CT_FCFI & 1);
			bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);

			/* Set the fcfi to the fcfi we registered with */
			bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
			       phba->fcf.fcfi);
		}

		/* Can't do SLI4 class2 without support sequence coalescing */
		sp->cls2.classValid = 0;
		sp->cls2.seqDelivery = 0;
	} else {
		/* Historical, setting sequential-delivery bit for SLI3 */
		sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0;
		sp->cls3.seqDelivery = (sp->cls3.classValid) ? 1 : 0;
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
			sp->cmn.request_multiple_Nport = 1;
			/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
			icmd->ulpCt_h = 1;
			icmd->ulpCt_l = 0;
		} else {
			sp->cmn.request_multiple_Nport = 0;
		}

		if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
			icmd->un.elsreq64.myID = 0;
			icmd->un.elsreq64.fl = 1;
		}
	}

	tmo = phba->fc_ratov;
	phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
	lpfc_set_disctmo(vport);
	phba->fc_ratov = tmo;

	phba->fc_stat.elsXmitFLOGI++;
	elsiocb->cmd_cmpl = lpfc_cmpl_els_flogi;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			      "Issue FLOGI: opt:x%x",
			      phba->sli3_options, 0, 0);

	elsiocb->context1 = lpfc_nlp_get(ndlp);
	if (!elsiocb->context1) {
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
	}

	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
	if (rc == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
		lpfc_nlp_put(ndlp);
		return 1;
	}

	phba->hba_flag |= (HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING);

	/* Check for a deferred FLOGI ACC condition */
	if (phba->defer_flogi_acc_flag) {
		/* lookup ndlp for received FLOGI */
		ndlp = lpfc_findnode_did(vport, 0);
		if (!ndlp)
			return 0;

		did = vport->fc_myDID;
		vport->fc_myDID = Fabric_DID;

		memset(&defer_flogi_acc, 0, sizeof(struct lpfc_iocbq));

		if (phba->sli_rev == LPFC_SLI_REV4) {
			bf_set(wqe_ctxt_tag,
			       &defer_flogi_acc.wqe.xmit_els_rsp.wqe_com,
			       phba->defer_flogi_acc_rx_id);
			bf_set(wqe_rcvoxid,
			       &defer_flogi_acc.wqe.xmit_els_rsp.wqe_com,
			       phba->defer_flogi_acc_ox_id);
		} else {
			icmd = &defer_flogi_acc.iocb;
			icmd->ulpContext = phba->defer_flogi_acc_rx_id;
			icmd->unsli3.rcvsli3.ox_id =
				phba->defer_flogi_acc_ox_id;
		}

		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "3354 Xmit deferred FLOGI ACC: rx_id: x%x,"
				 " ox_id: x%x, hba_flag x%x\n",
				 phba->defer_flogi_acc_rx_id,
				 phba->defer_flogi_acc_ox_id, phba->hba_flag);

		/* Send deferred FLOGI ACC */
		lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, &defer_flogi_acc,
				 ndlp, NULL);

		phba->defer_flogi_acc_flag = false;
		vport->fc_myDID = did;

		/* Decrement ndlp reference count to indicate the node can be
		 * released when other references are removed.
		 */
		lpfc_nlp_put(ndlp);
	}

	return 0;
}

/**
 * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs
 * with a @phba. It walks all the outstanding IOCBs on the txcmplq
 * list and issues an abort IOCB command on each outstanding IOCB that
 * contains an active Fabric_DID ndlp. Note that this function only issues
 * the abort IOCB command on all the outstanding IOCBs; thus, when this
 * function returns, it does not guarantee all the IOCBs are actually aborted.
 *
 * Return code
 *   0 - Successfully issued abort iocb on all outstanding flogis (Always 0)
 **/
int
lpfc_els_abort_flogi(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	struct lpfc_nodelist *ndlp;
	u32 ulp_command;

	/* Abort outstanding I/O on NPort <nlp_DID> */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"0201 Abort outstanding I/O on NPort x%x\n",
			Fabric_DID);

	pring = lpfc_phba_elsring(phba);
	if (unlikely(!pring))
		return -EIO;

	/*
	 * Check the txcmplq for an iocb that matches the nport the driver is
	 * searching for.
	 */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		ulp_command = get_job_cmnd(phba, iocb);
		if (ulp_command == CMD_ELS_REQUEST64_CR) {
			ndlp = (struct lpfc_nodelist *)(iocb->context1);
			if (ndlp && ndlp->nlp_DID == Fabric_DID) {
				if ((phba->pport->fc_flag & FC_PT2PT) &&
				    !(phba->pport->fc_flag & FC_PT2PT_PLOGI))
					iocb->fabric_cmd_cmpl =
						lpfc_ignore_els_cmpl;
				lpfc_sli_issue_abort_iotag(phba, pring, iocb,
							   NULL);
			}
		}
	}
	/* Make sure HBA is alive */
	lpfc_issue_hb_tmo(phba);

	spin_unlock_irq(&phba->hbalock);

	return 0;
}

/**
 * lpfc_initial_flogi - Issue an initial fabric login for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues an initial Fabric Login (FLOGI) for the @vport
 * specified. It first searches for an ndlp with the Fabric_DID (0xfffffe)
 * on the @vport's ndlp list. If no such ndlp is found, it will create an
 * ndlp and put it onto the @vport's ndlp list. If an inactive ndlp is found
 * on the list, it will just be enabled and made active. The
 * lpfc_issue_els_flogi() routine is then invoked with the @vport and the
 * ndlp to perform the FLOGI for the @vport.
 *
 * Return code
 *   0 - failed to issue initial flogi for @vport
 *   1 - successfully issued initial flogi for @vport
 **/
int
lpfc_initial_flogi(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	vport->port_state = LPFC_FLOGI;
	lpfc_set_disctmo(vport);

	/* First look for the Fabric ndlp */
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = lpfc_nlp_init(vport, Fabric_DID);
		if (!ndlp)
			return 0;
		/* Set the node type */
		ndlp->nlp_type |= NLP_FABRIC;

		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	}

	if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
		/* This decrement of reference count to node shall kick off
		 * the release of the node.
		 */
		lpfc_nlp_put(ndlp);
		return 0;
	}
	return 1;
}

/**
 * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues an initial Fabric Discover (FDISC) for the @vport
 * specified. It first searches for an ndlp with the Fabric_DID (0xfffffe)
 * on the @vport's ndlp list. If no such ndlp is found, it will create an
 * ndlp and put it onto the @vport's ndlp list. If an inactive ndlp is found
 * on the list, it will just be enabled and made active.
 * The lpfc_issue_els_fdisc() routine is then invoked with the @vport and
 * the ndlp to perform the FDISC for the @vport.
 *
 * Return code
 *   0 - failed to issue initial fdisc for @vport
 *   1 - successfully issued initial fdisc for @vport
 **/
int
lpfc_initial_fdisc(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	/* First look for the Fabric ndlp */
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = lpfc_nlp_init(vport, Fabric_DID);
		if (!ndlp)
			return 0;

		/* NPIV is only supported in Fabrics. */
		ndlp->nlp_type |= NLP_FABRIC;

		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	}

	if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
		/* decrement node reference count to trigger the release of
		 * the node.
		 */
		lpfc_nlp_put(ndlp);
		return 0;
	}
	return 1;
}

/**
 * lpfc_more_plogi - Check and issue remaining plogis for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine checks whether there are more remaining Port Logins
 * (PLOGI) to be issued for the @vport. If so, it will invoke the routine
 * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes
 * to issue ELS PLOGIs up to the configured discover threads with the
 * @vport (@vport->cfg_discovery_threads). The function also decrements
 * the @vport's num_disc_nodes by 1 if it is not already 0.
 **/
void
lpfc_more_plogi(struct lpfc_vport *vport)
{
	if (vport->num_disc_nodes)
		vport->num_disc_nodes--;

	/* Continue discovery with <num_disc_nodes> PLOGIs to go */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0232 Continue discovery with %d PLOGIs to go "
			 "Data: x%x x%x x%x\n",
			 vport->num_disc_nodes, vport->fc_plogi_cnt,
			 vport->fc_flag, vport->port_state);
	/* Check to see if there are more PLOGIs to be sent */
	if (vport->fc_flag & FC_NLP_MORE)
		/* go thru NPR nodes and issue any remaining ELS PLOGIs */
		lpfc_els_disc_plogi(vport);

	return;
}

/**
 * lpfc_plogi_confirm_nport - Confirm plogi wwpn matches stored ndlp
 * @phba: pointer to lpfc hba data structure.
 * @prsp: pointer to response IOCB payload.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine checks and indicates whether the WWPN of an N_Port, retrieved
 * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_Port.
 * The following cases are considered N_Port confirmed:
 * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on the vport list and
 * matches the WWPN of the N_Port logged into; 3) The @ndlp is not on the
 * vport list but does not have a WWPN assigned either. If the WWPN is
 * confirmed, the pointer to the @ndlp will be returned. If the WWPN is not
 * confirmed: 1) if there is a node on the vport list other than the @ndlp
 * with the same WWPN as the N_Port PLOGI logged into, lpfc_unreg_rpi() will
 * be invoked on that node to release the RPI associated with the node; 2) if
 * there is no node found on the vport list with the same WWPN as the N_Port
 * PLOGI logged into, a new node shall be allocated (or activated).
 * In either case, the parameters of the @ndlp shall be copied to the
 * new_ndlp, the @ndlp shall be released, and the new_ndlp shall be put onto
 * the vport node list with its pointer returned as the confirmed node.
 *
 * Note that before the @ndlp gets "released", the keepDID from the
 * not-matching or inactive "new_ndlp" on the vport node list is assigned to
 * the nlp_DID of the @ndlp. This is because the release of @ndlp actually
 * puts it into an inactive state on the vport node list, and the vport node
 * list management algorithm does not allow two nodes with the same DID.
 *
 * Return code
 *   pointer to the PLOGI N_Port @ndlp
 **/
static struct lpfc_nodelist *
lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
			 struct lpfc_nodelist *ndlp)
{
	struct lpfc_vport *vport = ndlp->vport;
	struct lpfc_nodelist *new_ndlp;
	struct serv_parm *sp;
	uint8_t name[sizeof(struct lpfc_name)];
	uint32_t keepDID = 0, keep_nlp_flag = 0;
	uint32_t keep_new_nlp_flag = 0;
	uint16_t keep_nlp_state;
	u32 keep_nlp_fc4_type = 0;
	struct lpfc_nvme_rport *keep_nrport = NULL;
	unsigned long *active_rrqs_xri_bitmap = NULL;

	/* Fabric nodes can have the same WWPN so we don't bother searching
	 * by WWPN.  Just return the ndlp that was given to us.
	 */
	if (ndlp->nlp_type & NLP_FABRIC)
		return ndlp;

	sp = (struct serv_parm *)((uint8_t *)prsp + sizeof(uint32_t));
	memset(name, 0, sizeof(struct lpfc_name));

	/* Now we find out if the NPort we are logging into, matches the WWPN
	 * we have for that ndlp. If not, we have some work to do.
	 */
	new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);

	/* return immediately if the WWPN matches ndlp */
	if (!new_ndlp || (new_ndlp == ndlp))
		return ndlp;

	/*
	 * Unregister from backend if not done yet. Could have been skipped
	 * due to ADISC
	 */
	lpfc_nlp_unreg_node(vport, new_ndlp);

	if (phba->sli_rev == LPFC_SLI_REV4) {
		active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool,
						       GFP_KERNEL);
		if (active_rrqs_xri_bitmap)
			memset(active_rrqs_xri_bitmap, 0,
			       phba->cfg_rrq_xri_bitmap_sz);
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE,
			 "3178 PLOGI confirm: ndlp x%x x%x x%x: "
			 "new_ndlp x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_fc4_type,
			 (new_ndlp ? new_ndlp->nlp_DID : 0),
			 (new_ndlp ? new_ndlp->nlp_flag : 0),
			 (new_ndlp ? new_ndlp->nlp_fc4_type : 0));

	keepDID = new_ndlp->nlp_DID;

	if (phba->sli_rev == LPFC_SLI_REV4 && active_rrqs_xri_bitmap)
		memcpy(active_rrqs_xri_bitmap, new_ndlp->active_rrqs_xri_bitmap,
		       phba->cfg_rrq_xri_bitmap_sz);

	/* At this point in this routine, we know new_ndlp will be
	 * returned. However, any previous GID_FTs that were done
	 * would have updated nlp_fc4_type in ndlp, so we must ensure
	 * new_ndlp has the right value.
1717 */ 1718 if (vport->fc_flag & FC_FABRIC) { 1719 keep_nlp_fc4_type = new_ndlp->nlp_fc4_type; 1720 new_ndlp->nlp_fc4_type = ndlp->nlp_fc4_type; 1721 } 1722 1723 lpfc_unreg_rpi(vport, new_ndlp); 1724 new_ndlp->nlp_DID = ndlp->nlp_DID; 1725 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state; 1726 if (phba->sli_rev == LPFC_SLI_REV4) 1727 memcpy(new_ndlp->active_rrqs_xri_bitmap, 1728 ndlp->active_rrqs_xri_bitmap, 1729 phba->cfg_rrq_xri_bitmap_sz); 1730 1731 /* Lock both ndlps */ 1732 spin_lock_irq(&ndlp->lock); 1733 spin_lock_irq(&new_ndlp->lock); 1734 keep_new_nlp_flag = new_ndlp->nlp_flag; 1735 keep_nlp_flag = ndlp->nlp_flag; 1736 new_ndlp->nlp_flag = ndlp->nlp_flag; 1737 1738 /* if new_ndlp had NLP_UNREG_INP set, keep it */ 1739 if (keep_new_nlp_flag & NLP_UNREG_INP) 1740 new_ndlp->nlp_flag |= NLP_UNREG_INP; 1741 else 1742 new_ndlp->nlp_flag &= ~NLP_UNREG_INP; 1743 1744 /* if new_ndlp had NLP_RPI_REGISTERED set, keep it */ 1745 if (keep_new_nlp_flag & NLP_RPI_REGISTERED) 1746 new_ndlp->nlp_flag |= NLP_RPI_REGISTERED; 1747 else 1748 new_ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; 1749 1750 /* 1751 * Retain the DROPPED flag. This will take care of the init 1752 * refcount when affecting the state change 1753 */ 1754 if (keep_new_nlp_flag & NLP_DROPPED) 1755 new_ndlp->nlp_flag |= NLP_DROPPED; 1756 else 1757 new_ndlp->nlp_flag &= ~NLP_DROPPED; 1758 1759 ndlp->nlp_flag = keep_new_nlp_flag; 1760 1761 /* if ndlp had NLP_UNREG_INP set, keep it */ 1762 if (keep_nlp_flag & NLP_UNREG_INP) 1763 ndlp->nlp_flag |= NLP_UNREG_INP; 1764 else 1765 ndlp->nlp_flag &= ~NLP_UNREG_INP; 1766 1767 /* if ndlp had NLP_RPI_REGISTERED set, keep it */ 1768 if (keep_nlp_flag & NLP_RPI_REGISTERED) 1769 ndlp->nlp_flag |= NLP_RPI_REGISTERED; 1770 else 1771 ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; 1772 1773 /* 1774 * Retain the DROPPED flag. This will take care of the init 1775 * refcount when affecting the state change 1776 */ 1777 if (keep_nlp_flag & NLP_DROPPED) 1778 ndlp->nlp_flag |= NLP_DROPPED; 1779 else 1780 ndlp->nlp_flag &= ~NLP_DROPPED; 1781 1782 spin_unlock_irq(&new_ndlp->lock); 1783 spin_unlock_irq(&ndlp->lock); 1784 1785 /* Set nlp_states accordingly */ 1786 keep_nlp_state = new_ndlp->nlp_state; 1787 lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state); 1788 1789 /* interchange the nvme remoteport structs */ 1790 keep_nrport = new_ndlp->nrport; 1791 new_ndlp->nrport = ndlp->nrport; 1792 1793 /* Move this back to NPR state */ 1794 if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) { 1795 /* The new_ndlp is replacing ndlp totally, so we need 1796 * to put ndlp on UNUSED list and try to free it. 1797 */ 1798 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1799 "3179 PLOGI confirm NEW: %x %x\n", 1800 new_ndlp->nlp_DID, keepDID); 1801 1802 /* Two ndlps cannot have the same did on the nodelist. 1803 * Note: for this case, ndlp has a NULL WWPN so setting 1804 * the nlp_fc4_type isn't required. 1805 */ 1806 ndlp->nlp_DID = keepDID; 1807 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state); 1808 if (phba->sli_rev == LPFC_SLI_REV4 && 1809 active_rrqs_xri_bitmap) 1810 memcpy(ndlp->active_rrqs_xri_bitmap, 1811 active_rrqs_xri_bitmap, 1812 phba->cfg_rrq_xri_bitmap_sz); 1813 1814 } else { 1815 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1816 "3180 PLOGI confirm SWAP: %x %x\n", 1817 new_ndlp->nlp_DID, keepDID); 1818 1819 lpfc_unreg_rpi(vport, ndlp); 1820 1821 /* Two ndlps cannot have the same did and the fc4 1822 * type must be transferred because the ndlp is in 1823 * flight. 
1824 */ 1825 ndlp->nlp_DID = keepDID; 1826 ndlp->nlp_fc4_type = keep_nlp_fc4_type; 1827 1828 if (phba->sli_rev == LPFC_SLI_REV4 && 1829 active_rrqs_xri_bitmap) 1830 memcpy(ndlp->active_rrqs_xri_bitmap, 1831 active_rrqs_xri_bitmap, 1832 phba->cfg_rrq_xri_bitmap_sz); 1833 1834 /* Since we are switching over to the new_ndlp, 1835 * reset the old ndlp state 1836 */ 1837 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || 1838 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) 1839 keep_nlp_state = NLP_STE_NPR_NODE; 1840 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state); 1841 ndlp->nrport = keep_nrport; 1842 } 1843 1844 /* 1845 * If ndlp is not associated with any rport we can drop it here else 1846 * let dev_loss_tmo_callbk trigger DEVICE_RM event 1847 */ 1848 if (!ndlp->rport && (ndlp->nlp_state == NLP_STE_NPR_NODE)) 1849 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 1850 1851 if (phba->sli_rev == LPFC_SLI_REV4 && 1852 active_rrqs_xri_bitmap) 1853 mempool_free(active_rrqs_xri_bitmap, 1854 phba->active_rrq_pool); 1855 1856 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE, 1857 "3173 PLOGI confirm exit: new_ndlp x%x x%x x%x\n", 1858 new_ndlp->nlp_DID, new_ndlp->nlp_flag, 1859 new_ndlp->nlp_fc4_type); 1860 1861 return new_ndlp; 1862 } 1863 1864 /** 1865 * lpfc_end_rscn - Check and handle more rscn for a vport 1866 * @vport: pointer to a host virtual N_Port data structure. 1867 * 1868 * This routine checks whether more Registration State Change 1869 * Notifications (RSCNs) came in while the discovery state machine was in 1870 * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be 1871 * invoked to handle the additional RSCNs for the @vport. Otherwise, the 1872 * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of 1873 * handling the RSCNs. 1874 **/ 1875 void 1876 lpfc_end_rscn(struct lpfc_vport *vport) 1877 { 1878 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1879 1880 if (vport->fc_flag & FC_RSCN_MODE) { 1881 /* 1882 * Check to see if more RSCNs came in while we were 1883 * processing this one. 1884 */ 1885 if (vport->fc_rscn_id_cnt || 1886 (vport->fc_flag & FC_RSCN_DISCOVERY) != 0) 1887 lpfc_els_handle_rscn(vport); 1888 else { 1889 spin_lock_irq(shost->host_lock); 1890 vport->fc_flag &= ~FC_RSCN_MODE; 1891 spin_unlock_irq(shost->host_lock); 1892 } 1893 } 1894 } 1895 1896 /** 1897 * lpfc_cmpl_els_rrq - Completion handled for els RRQs. 1898 * @phba: pointer to lpfc hba data structure. 1899 * @cmdiocb: pointer to lpfc command iocb data structure. 1900 * @rspiocb: pointer to lpfc response iocb data structure. 1901 * 1902 * This routine will call the clear rrq function to free the rrq and 1903 * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not 1904 * exist then the clear_rrq is still called because the rrq needs to 1905 * be freed. 
1906 **/ 1907 1908 static void 1909 lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1910 struct lpfc_iocbq *rspiocb) 1911 { 1912 struct lpfc_vport *vport = cmdiocb->vport; 1913 struct lpfc_nodelist *ndlp = cmdiocb->context1; 1914 struct lpfc_node_rrq *rrq; 1915 u32 ulp_status = get_job_ulpstatus(phba, rspiocb); 1916 u32 ulp_word4 = get_job_word4(phba, rspiocb); 1917 1918 /* we pass cmdiocb to state machine which needs rspiocb as well */ 1919 rrq = cmdiocb->context_un.rrq; 1920 cmdiocb->context_un.rsp_iocb = rspiocb; 1921 1922 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1923 "RRQ cmpl: status:x%x/x%x did:x%x", 1924 ulp_status, ulp_word4, 1925 get_job_els_rsp64_did(phba, cmdiocb)); 1926 1927 1928 /* rrq completes to NPort <nlp_DID> */ 1929 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1930 "2880 RRQ completes to DID x%x " 1931 "Data: x%x x%x x%x x%x x%x\n", 1932 ndlp->nlp_DID, ulp_status, ulp_word4, 1933 get_wqe_tmo(cmdiocb), rrq->xritag, rrq->rxid); 1934 1935 if (ulp_status) { 1936 /* Check for retry */ 1937 /* RRQ failed Don't print the vport to vport rjts */ 1938 if (ulp_status != IOSTAT_LS_RJT || 1939 (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) && 1940 ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) || 1941 (phba)->pport->cfg_log_verbose & LOG_ELS) 1942 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1943 "2881 RRQ failure DID:%06X Status:" 1944 "x%x/x%x\n", 1945 ndlp->nlp_DID, ulp_status, 1946 ulp_word4); 1947 } 1948 1949 lpfc_clr_rrq_active(phba, rrq->xritag, rrq); 1950 lpfc_els_free_iocb(phba, cmdiocb); 1951 lpfc_nlp_put(ndlp); 1952 return; 1953 } 1954 /** 1955 * lpfc_cmpl_els_plogi - Completion callback function for plogi 1956 * @phba: pointer to lpfc hba data structure. 1957 * @cmdiocb: pointer to lpfc command iocb data structure. 1958 * @rspiocb: pointer to lpfc response iocb data structure. 1959 * 1960 * This routine is the completion callback function for issuing the Port 1961 * Login (PLOGI) command. For PLOGI completion, there must be an active 1962 * ndlp on the vport node list that matches the remote node ID from the 1963 * PLOGI response IOCB. If such ndlp does not exist, the PLOGI is simply 1964 * ignored and command IOCB released. The PLOGI response IOCB status is 1965 * checked for error conditions. If there is error status reported, PLOGI 1966 * retry shall be attempted by invoking the lpfc_els_retry() routine. 1967 * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on 1968 * the ndlp and the NLP_EVT_CMPL_PLOGI state to the Discover State Machine 1969 * (DSM) is set for this PLOGI completion. Finally, it checks whether 1970 * there are additional N_Port nodes with the vport that need to perform 1971 * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue addition 1972 * PLOGIs. 
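 *
 * As an editorial aid, the reject filtering applied in the failure path
 * below can be read as the following sketch (illustration only; it adds no
 * logic beyond what the routine already does). The LS_RJT reason code
 * travels in the upper 16 bits of the word-4 completion status:
 *
 *	if (ulp_status == IOSTAT_LS_RJT &&
 *	    ((ulp_word4 >> 16) == LSRJT_INVALID_CMD ||
 *	     (ulp_word4 >> 16) == LSRJT_UNABLE_TPC) &&
 *	    !(phba->pport->cfg_log_verbose & LOG_ELS))
 *		skip the error message (vport-to-vport reject);
 *	else
 *		log the PLOGI failure as a LOG_TRACE_EVENT message;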
1973 **/ 1974 static void 1975 lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1976 struct lpfc_iocbq *rspiocb) 1977 { 1978 struct lpfc_vport *vport = cmdiocb->vport; 1979 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1980 IOCB_t *irsp; 1981 struct lpfc_nodelist *ndlp, *free_ndlp; 1982 struct lpfc_dmabuf *prsp; 1983 int disc; 1984 struct serv_parm *sp = NULL; 1985 u32 ulp_status, ulp_word4, did, iotag; 1986 1987 /* we pass cmdiocb to state machine which needs rspiocb as well */ 1988 cmdiocb->context_un.rsp_iocb = rspiocb; 1989 1990 ulp_status = get_job_ulpstatus(phba, rspiocb); 1991 ulp_word4 = get_job_word4(phba, rspiocb); 1992 did = get_job_els_rsp64_did(phba, cmdiocb); 1993 1994 if (phba->sli_rev == LPFC_SLI_REV4) { 1995 iotag = get_wqe_reqtag(cmdiocb); 1996 } else { 1997 irsp = &rspiocb->iocb; 1998 iotag = irsp->ulpIoTag; 1999 } 2000 2001 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2002 "PLOGI cmpl: status:x%x/x%x did:x%x", 2003 ulp_status, ulp_word4, did); 2004 2005 ndlp = lpfc_findnode_did(vport, did); 2006 if (!ndlp) { 2007 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2008 "0136 PLOGI completes to NPort x%x " 2009 "with no ndlp. Data: x%x x%x x%x\n", 2010 did, ulp_status, ulp_word4, iotag); 2011 goto out_freeiocb; 2012 } 2013 2014 /* Since ndlp can be freed in the disc state machine, note if this node 2015 * is being used during discovery. 2016 */ 2017 spin_lock_irq(&ndlp->lock); 2018 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 2019 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2020 spin_unlock_irq(&ndlp->lock); 2021 2022 /* PLOGI completes to NPort <nlp_DID> */ 2023 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2024 "0102 PLOGI completes to NPort x%06x " 2025 "Data: x%x x%x x%x x%x x%x\n", 2026 ndlp->nlp_DID, ndlp->nlp_fc4_type, 2027 ulp_status, ulp_word4, 2028 disc, vport->num_disc_nodes); 2029 2030 /* Check to see if link went down during discovery */ 2031 if (lpfc_els_chk_latt(vport)) { 2032 spin_lock_irq(&ndlp->lock); 2033 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2034 spin_unlock_irq(&ndlp->lock); 2035 goto out; 2036 } 2037 2038 if (ulp_status) { 2039 /* Check for retry */ 2040 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2041 /* ELS command is being retried */ 2042 if (disc) { 2043 spin_lock_irq(&ndlp->lock); 2044 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2045 spin_unlock_irq(&ndlp->lock); 2046 } 2047 goto out; 2048 } 2049 /* PLOGI failed Don't print the vport to vport rjts */ 2050 if (ulp_status != IOSTAT_LS_RJT || 2051 (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) && 2052 ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) || 2053 (phba)->pport->cfg_log_verbose & LOG_ELS) 2054 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2055 "2753 PLOGI failure DID:%06X " 2056 "Status:x%x/x%x\n", 2057 ndlp->nlp_DID, ulp_status, 2058 ulp_word4); 2059 2060 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2061 if (!lpfc_error_lost_link(ulp_status, ulp_word4)) 2062 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2063 NLP_EVT_CMPL_PLOGI); 2064 2065 /* If a PLOGI collision occurred, the node needs to continue 2066 * with the reglogin process. 2067 */ 2068 spin_lock_irq(&ndlp->lock); 2069 if ((ndlp->nlp_flag & (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI)) && 2070 ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) { 2071 spin_unlock_irq(&ndlp->lock); 2072 goto out; 2073 } 2074 spin_unlock_irq(&ndlp->lock); 2075 2076 /* No PLOGI collision and the node is not registered with the 2077 * scsi or nvme transport. It is no longer an active node. Just 2078 * start the device remove process. 
2079 */ 2080 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { 2081 spin_lock_irq(&ndlp->lock); 2082 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2083 spin_unlock_irq(&ndlp->lock); 2084 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2085 NLP_EVT_DEVICE_RM); 2086 } 2087 } else { 2088 /* Good status, call state machine */ 2089 prsp = list_entry(((struct lpfc_dmabuf *) 2090 cmdiocb->context2)->list.next, 2091 struct lpfc_dmabuf, list); 2092 ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp); 2093 2094 sp = (struct serv_parm *)((u8 *)prsp->virt + 2095 sizeof(u32)); 2096 2097 ndlp->vmid_support = 0; 2098 if ((phba->cfg_vmid_app_header && sp->cmn.app_hdr_support) || 2099 (phba->cfg_vmid_priority_tagging && 2100 sp->cmn.priority_tagging)) { 2101 lpfc_printf_log(phba, KERN_DEBUG, LOG_ELS, 2102 "4018 app_hdr_support %d tagging %d DID x%x\n", 2103 sp->cmn.app_hdr_support, 2104 sp->cmn.priority_tagging, 2105 ndlp->nlp_DID); 2106 /* if the dest port supports VMID, mark it in ndlp */ 2107 ndlp->vmid_support = 1; 2108 } 2109 2110 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2111 NLP_EVT_CMPL_PLOGI); 2112 } 2113 2114 if (disc && vport->num_disc_nodes) { 2115 /* Check to see if there are more PLOGIs to be sent */ 2116 lpfc_more_plogi(vport); 2117 2118 if (vport->num_disc_nodes == 0) { 2119 spin_lock_irq(shost->host_lock); 2120 vport->fc_flag &= ~FC_NDISC_ACTIVE; 2121 spin_unlock_irq(shost->host_lock); 2122 2123 lpfc_can_disctmo(vport); 2124 lpfc_end_rscn(vport); 2125 } 2126 } 2127 2128 out: 2129 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, 2130 "PLOGI Cmpl PUT: did:x%x refcnt %d", 2131 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2132 2133 out_freeiocb: 2134 /* Release the reference on the original I/O request. */ 2135 free_ndlp = (struct lpfc_nodelist *)cmdiocb->context1; 2136 2137 lpfc_els_free_iocb(phba, cmdiocb); 2138 lpfc_nlp_put(free_ndlp); 2139 return; 2140 } 2141 2142 /** 2143 * lpfc_issue_els_plogi - Issue an plogi iocb command for a vport 2144 * @vport: pointer to a host virtual N_Port data structure. 2145 * @did: destination port identifier. 2146 * @retry: number of retries to the command IOCB. 2147 * 2148 * This routine issues a Port Login (PLOGI) command to a remote N_Port 2149 * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port, 2150 * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list. 2151 * This routine constructs the proper fields of the PLOGI IOCB and invokes 2152 * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command. 2153 * 2154 * Note that the ndlp reference count will be incremented by 1 for holding 2155 * the ndlp and the reference to ndlp will be stored into the context1 field 2156 * of the IOCB for the completion callback function to the PLOGI ELS command. 2157 * 2158 * Return code 2159 * 0 - Successfully issued a plogi for @vport 2160 * 1 - failed to issue a plogi for @vport 2161 **/ 2162 int 2163 lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) 2164 { 2165 struct lpfc_hba *phba = vport->phba; 2166 struct serv_parm *sp; 2167 struct lpfc_nodelist *ndlp; 2168 struct lpfc_iocbq *elsiocb; 2169 uint8_t *pcmd; 2170 uint16_t cmdsize; 2171 int ret; 2172 2173 ndlp = lpfc_findnode_did(vport, did); 2174 if (!ndlp) 2175 return 1; 2176 2177 /* Defer the processing of the issue PLOGI until after the 2178 * outstanding UNREG_RPI mbox command completes, unless we 2179 * are going offline. 
This logic does not apply for Fabric DIDs 2180 */ 2181 if ((ndlp->nlp_flag & NLP_UNREG_INP) && 2182 ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) && 2183 !(vport->fc_flag & FC_OFFLINE_MODE)) { 2184 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2185 "4110 Issue PLOGI x%x deferred " 2186 "on NPort x%x rpi x%x Data: x%px\n", 2187 ndlp->nlp_defer_did, ndlp->nlp_DID, 2188 ndlp->nlp_rpi, ndlp); 2189 2190 /* We can only defer 1st PLOGI */ 2191 if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING) 2192 ndlp->nlp_defer_did = did; 2193 return 0; 2194 } 2195 2196 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 2197 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 2198 ELS_CMD_PLOGI); 2199 if (!elsiocb) 2200 return 1; 2201 2202 spin_lock_irq(&ndlp->lock); 2203 ndlp->nlp_flag &= ~NLP_FCP_PRLI_RJT; 2204 spin_unlock_irq(&ndlp->lock); 2205 2206 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2207 2208 /* For PLOGI request, remainder of payload is service parameters */ 2209 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI; 2210 pcmd += sizeof(uint32_t); 2211 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); 2212 sp = (struct serv_parm *) pcmd; 2213 2214 /* 2215 * If we are a N-port connected to a Fabric, fix-up paramm's so logins 2216 * to device on remote loops work. 2217 */ 2218 if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP)) 2219 sp->cmn.altBbCredit = 1; 2220 2221 if (sp->cmn.fcphLow < FC_PH_4_3) 2222 sp->cmn.fcphLow = FC_PH_4_3; 2223 2224 if (sp->cmn.fcphHigh < FC_PH3) 2225 sp->cmn.fcphHigh = FC_PH3; 2226 2227 sp->cmn.valid_vendor_ver_level = 0; 2228 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); 2229 sp->cmn.bbRcvSizeMsb &= 0xF; 2230 2231 /* Check if the destination port supports VMID */ 2232 ndlp->vmid_support = 0; 2233 if (vport->vmid_priority_tagging) 2234 sp->cmn.priority_tagging = 1; 2235 else if (phba->cfg_vmid_app_header && 2236 bf_get(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags)) 2237 sp->cmn.app_hdr_support = 1; 2238 2239 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2240 "Issue PLOGI: did:x%x", 2241 did, 0, 0); 2242 2243 /* If our firmware supports this feature, convey that 2244 * information to the target using the vendor specific field. 2245 */ 2246 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) { 2247 sp->cmn.valid_vendor_ver_level = 1; 2248 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID); 2249 sp->un.vv.flags = cpu_to_be32(LPFC_VV_SUPPRESS_RSP); 2250 } 2251 2252 phba->fc_stat.elsXmitPLOGI++; 2253 elsiocb->cmd_cmpl = lpfc_cmpl_els_plogi; 2254 2255 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2256 "Issue PLOGI: did:x%x refcnt %d", 2257 did, kref_read(&ndlp->kref), 0); 2258 elsiocb->context1 = lpfc_nlp_get(ndlp); 2259 if (!elsiocb->context1) { 2260 lpfc_els_free_iocb(phba, elsiocb); 2261 return 1; 2262 } 2263 2264 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2265 if (ret) { 2266 lpfc_els_free_iocb(phba, elsiocb); 2267 lpfc_nlp_put(ndlp); 2268 return 1; 2269 } 2270 2271 return 0; 2272 } 2273 2274 /** 2275 * lpfc_cmpl_els_prli - Completion callback function for prli 2276 * @phba: pointer to lpfc hba data structure. 2277 * @cmdiocb: pointer to lpfc command iocb data structure. 2278 * @rspiocb: pointer to lpfc response iocb data structure. 2279 * 2280 * This routine is the completion callback function for a Process Login 2281 * (PRLI) ELS command. The PRLI response IOCB status is checked for error 2282 * status. 
If there is error status reported, PRLI retry shall be attempted 2283 * by invoking the lpfc_els_retry() routine. Otherwise, the state 2284 * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this 2285 * ndlp to mark the PRLI completion. 2286 **/ 2287 static void 2288 lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2289 struct lpfc_iocbq *rspiocb) 2290 { 2291 struct lpfc_vport *vport = cmdiocb->vport; 2292 struct lpfc_nodelist *ndlp; 2293 char *mode; 2294 u32 loglevel; 2295 u32 ulp_status; 2296 u32 ulp_word4; 2297 2298 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2299 cmdiocb->context_un.rsp_iocb = rspiocb; 2300 2301 ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 2302 2303 ulp_status = get_job_ulpstatus(phba, rspiocb); 2304 ulp_word4 = get_job_word4(phba, rspiocb); 2305 2306 spin_lock_irq(&ndlp->lock); 2307 ndlp->nlp_flag &= ~NLP_PRLI_SND; 2308 2309 /* Driver supports multiple FC4 types. Counters matter. */ 2310 vport->fc_prli_sent--; 2311 ndlp->fc4_prli_sent--; 2312 spin_unlock_irq(&ndlp->lock); 2313 2314 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2315 "PRLI cmpl: status:x%x/x%x did:x%x", 2316 ulp_status, ulp_word4, 2317 ndlp->nlp_DID); 2318 2319 /* PRLI completes to NPort <nlp_DID> */ 2320 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2321 "0103 PRLI completes to NPort x%06x " 2322 "Data: x%x x%x x%x x%x\n", 2323 ndlp->nlp_DID, ulp_status, ulp_word4, 2324 vport->num_disc_nodes, ndlp->fc4_prli_sent); 2325 2326 /* Check to see if link went down during discovery */ 2327 if (lpfc_els_chk_latt(vport)) 2328 goto out; 2329 2330 if (ulp_status) { 2331 /* Check for retry */ 2332 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2333 /* ELS command is being retried */ 2334 goto out; 2335 } 2336 2337 /* If we don't send GFT_ID to Fabric, a PRLI error 2338 * could be expected. 2339 */ 2340 if ((vport->fc_flag & FC_FABRIC) || 2341 (vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH)) { 2342 mode = KERN_ERR; 2343 loglevel = LOG_TRACE_EVENT; 2344 } else { 2345 mode = KERN_INFO; 2346 loglevel = LOG_ELS; 2347 } 2348 2349 /* PRLI failed */ 2350 lpfc_printf_vlog(vport, mode, loglevel, 2351 "2754 PRLI failure DID:%06X Status:x%x/x%x, " 2352 "data: x%x\n", 2353 ndlp->nlp_DID, ulp_status, 2354 ulp_word4, ndlp->fc4_prli_sent); 2355 2356 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2357 if (!lpfc_error_lost_link(ulp_status, ulp_word4)) 2358 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2359 NLP_EVT_CMPL_PRLI); 2360 2361 /* 2362 * For P2P topology, retain the node so that PLOGI can be 2363 * attempted on it again. 2364 */ 2365 if (vport->fc_flag & FC_PT2PT) 2366 goto out; 2367 2368 /* As long as this node is not registered with the SCSI 2369 * or NVMe transport and no other PRLIs are outstanding, 2370 * it is no longer an active node. Otherwise devloss 2371 * handles the final cleanup. 2372 */ 2373 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && 2374 !ndlp->fc4_prli_sent) { 2375 spin_lock_irq(&ndlp->lock); 2376 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2377 spin_unlock_irq(&ndlp->lock); 2378 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2379 NLP_EVT_DEVICE_RM); 2380 } 2381 } else { 2382 /* Good status, call state machine. However, if another 2383 * PRLI is outstanding, don't call the state machine 2384 * because final disposition to Mapped or Unmapped is 2385 * completed there. 
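 *
 * Editorial note, summarizing bookkeeping already present in this file
 * (not additional logic): "another PRLI outstanding" is tracked by the
 * counters that lpfc_issue_els_prli() bumps and this completion drops:
 *
 *	issue:       vport->fc_prli_sent++;  ndlp->fc4_prli_sent++;
 *	completion:  vport->fc_prli_sent--;  ndlp->fc4_prli_sent--;
 *
 * so on SLI4 a node that was sent both an FCP PRLI and an NVME PRLI holds
 * fc4_prli_sent == 2 until both completions have been processed.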
2386 */ 2387 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2388 NLP_EVT_CMPL_PRLI); 2389 } 2390 2391 out: 2392 lpfc_els_free_iocb(phba, cmdiocb); 2393 lpfc_nlp_put(ndlp); 2394 return; 2395 } 2396 2397 /** 2398 * lpfc_issue_els_prli - Issue a prli iocb command for a vport 2399 * @vport: pointer to a host virtual N_Port data structure. 2400 * @ndlp: pointer to a node-list data structure. 2401 * @retry: number of retries to the command IOCB. 2402 * 2403 * This routine issues a Process Login (PRLI) ELS command for the 2404 * @vport. The PRLI service parameters are set up in the payload of the 2405 * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine 2406 * is put to the IOCB completion callback func field before invoking the 2407 * routine lpfc_sli_issue_iocb() to send out PRLI command. 2408 * 2409 * Note that the ndlp reference count will be incremented by 1 for holding the 2410 * ndlp and the reference to ndlp will be stored into the context1 field of 2411 * the IOCB for the completion callback function to the PRLI ELS command. 2412 * 2413 * Return code 2414 * 0 - successfully issued prli iocb command for @vport 2415 * 1 - failed to issue prli iocb command for @vport 2416 **/ 2417 int 2418 lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2419 uint8_t retry) 2420 { 2421 int rc = 0; 2422 struct lpfc_hba *phba = vport->phba; 2423 PRLI *npr; 2424 struct lpfc_nvme_prli *npr_nvme; 2425 struct lpfc_iocbq *elsiocb; 2426 uint8_t *pcmd; 2427 uint16_t cmdsize; 2428 u32 local_nlp_type, elscmd; 2429 2430 /* 2431 * If we are in RSCN mode, the FC4 types supported from a 2432 * previous GFT_ID command may not be accurate. So, if we 2433 * are a NVME Initiator, always look for the possibility of 2434 * the remote NPort beng a NVME Target. 2435 */ 2436 if (phba->sli_rev == LPFC_SLI_REV4 && 2437 vport->fc_flag & FC_RSCN_MODE && 2438 vport->nvmei_support) 2439 ndlp->nlp_fc4_type |= NLP_FC4_NVME; 2440 local_nlp_type = ndlp->nlp_fc4_type; 2441 2442 /* This routine will issue 1 or 2 PRLIs, so zero all the ndlp 2443 * fields here before any of them can complete. 2444 */ 2445 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); 2446 ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR); 2447 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 2448 ndlp->nlp_flag &= ~(NLP_FIRSTBURST | NLP_NPR_2B_DISC); 2449 ndlp->nvme_fb_size = 0; 2450 2451 send_next_prli: 2452 if (local_nlp_type & NLP_FC4_FCP) { 2453 /* Payload is 4 + 16 = 20 x14 bytes. */ 2454 cmdsize = (sizeof(uint32_t) + sizeof(PRLI)); 2455 elscmd = ELS_CMD_PRLI; 2456 } else if (local_nlp_type & NLP_FC4_NVME) { 2457 /* Payload is 4 + 20 = 24 x18 bytes. */ 2458 cmdsize = (sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli)); 2459 elscmd = ELS_CMD_NVMEPRLI; 2460 } else { 2461 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2462 "3083 Unknown FC_TYPE x%x ndlp x%06x\n", 2463 ndlp->nlp_fc4_type, ndlp->nlp_DID); 2464 return 1; 2465 } 2466 2467 /* SLI3 ports don't support NVME. If this rport is a strict NVME 2468 * FC4 type, implicitly LOGO. 
2469 */ 2470 if (phba->sli_rev == LPFC_SLI_REV3 && 2471 ndlp->nlp_fc4_type == NLP_FC4_NVME) { 2472 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2473 "3088 Rport fc4 type 0x%x not supported by SLI3 adapter\n", 2474 ndlp->nlp_type); 2475 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 2476 return 1; 2477 } 2478 2479 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2480 ndlp->nlp_DID, elscmd); 2481 if (!elsiocb) 2482 return 1; 2483 2484 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2485 2486 /* For PRLI request, remainder of payload is service parameters */ 2487 memset(pcmd, 0, cmdsize); 2488 2489 if (local_nlp_type & NLP_FC4_FCP) { 2490 /* Remainder of payload is FCP PRLI parameter page. 2491 * Note: this data structure is defined as 2492 * BE/LE in the structure definition so no 2493 * byte swap call is made. 2494 */ 2495 *((uint32_t *)(pcmd)) = ELS_CMD_PRLI; 2496 pcmd += sizeof(uint32_t); 2497 npr = (PRLI *)pcmd; 2498 2499 /* 2500 * If our firmware version is 3.20 or later, 2501 * set the following bits for FC-TAPE support. 2502 */ 2503 if (phba->vpd.rev.feaLevelHigh >= 0x02) { 2504 npr->ConfmComplAllowed = 1; 2505 npr->Retry = 1; 2506 npr->TaskRetryIdReq = 1; 2507 } 2508 npr->estabImagePair = 1; 2509 npr->readXferRdyDis = 1; 2510 if (vport->cfg_first_burst_size) 2511 npr->writeXferRdyDis = 1; 2512 2513 /* For FCP support */ 2514 npr->prliType = PRLI_FCP_TYPE; 2515 npr->initiatorFunc = 1; 2516 elsiocb->cmd_flag |= LPFC_PRLI_FCP_REQ; 2517 2518 /* Remove FCP type - processed. */ 2519 local_nlp_type &= ~NLP_FC4_FCP; 2520 } else if (local_nlp_type & NLP_FC4_NVME) { 2521 /* Remainder of payload is NVME PRLI parameter page. 2522 * This data structure is the newer definition that 2523 * uses bf macros so a byte swap is required. 2524 */ 2525 *((uint32_t *)(pcmd)) = ELS_CMD_NVMEPRLI; 2526 pcmd += sizeof(uint32_t); 2527 npr_nvme = (struct lpfc_nvme_prli *)pcmd; 2528 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); 2529 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ 2530 if (phba->nsler) { 2531 bf_set(prli_nsler, npr_nvme, 1); 2532 bf_set(prli_conf, npr_nvme, 1); 2533 } 2534 2535 /* Only initiators request first burst. */ 2536 if ((phba->cfg_nvme_enable_fb) && 2537 !phba->nvmet_support) 2538 bf_set(prli_fba, npr_nvme, 1); 2539 2540 if (phba->nvmet_support) { 2541 bf_set(prli_tgt, npr_nvme, 1); 2542 bf_set(prli_disc, npr_nvme, 1); 2543 } else { 2544 bf_set(prli_init, npr_nvme, 1); 2545 bf_set(prli_conf, npr_nvme, 1); 2546 } 2547 2548 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); 2549 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); 2550 elsiocb->cmd_flag |= LPFC_PRLI_NVME_REQ; 2551 2552 /* Remove NVME type - processed. */ 2553 local_nlp_type &= ~NLP_FC4_NVME; 2554 } 2555 2556 phba->fc_stat.elsXmitPRLI++; 2557 elsiocb->cmd_cmpl = lpfc_cmpl_els_prli; 2558 spin_lock_irq(&ndlp->lock); 2559 ndlp->nlp_flag |= NLP_PRLI_SND; 2560 2561 /* The vport counters are used for lpfc_scan_finished, but 2562 * the ndlp is used to track outstanding PRLIs for different 2563 * FC4 types. 
2564 */ 2565 vport->fc_prli_sent++; 2566 ndlp->fc4_prli_sent++; 2567 spin_unlock_irq(&ndlp->lock); 2568 2569 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2570 "Issue PRLI: did:x%x refcnt %d", 2571 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2572 elsiocb->context1 = lpfc_nlp_get(ndlp); 2573 if (!elsiocb->context1) { 2574 lpfc_els_free_iocb(phba, elsiocb); 2575 goto err; 2576 } 2577 2578 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2579 if (rc == IOCB_ERROR) { 2580 lpfc_els_free_iocb(phba, elsiocb); 2581 lpfc_nlp_put(ndlp); 2582 goto err; 2583 } 2584 2585 2586 /* The driver supports 2 FC4 types. Make sure 2587 * a PRLI is issued for all types before exiting. 2588 */ 2589 if (phba->sli_rev == LPFC_SLI_REV4 && 2590 local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME)) 2591 goto send_next_prli; 2592 else 2593 return 0; 2594 2595 err: 2596 spin_lock_irq(&ndlp->lock); 2597 ndlp->nlp_flag &= ~NLP_PRLI_SND; 2598 spin_unlock_irq(&ndlp->lock); 2599 return 1; 2600 } 2601 2602 /** 2603 * lpfc_rscn_disc - Perform rscn discovery for a vport 2604 * @vport: pointer to a host virtual N_Port data structure. 2605 * 2606 * This routine performs Registration State Change Notification (RSCN) 2607 * discovery for a @vport. If the @vport's node port recovery count is not 2608 * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all 2609 * the nodes that need recovery. If none of the PLOGI were needed through 2610 * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be 2611 * invoked to check and handle possible more RSCN came in during the period 2612 * of processing the current ones. 2613 **/ 2614 static void 2615 lpfc_rscn_disc(struct lpfc_vport *vport) 2616 { 2617 lpfc_can_disctmo(vport); 2618 2619 /* RSCN discovery */ 2620 /* go thru NPR nodes and issue ELS PLOGIs */ 2621 if (vport->fc_npr_cnt) 2622 if (lpfc_els_disc_plogi(vport)) 2623 return; 2624 2625 lpfc_end_rscn(vport); 2626 } 2627 2628 /** 2629 * lpfc_adisc_done - Complete the adisc phase of discovery 2630 * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs. 2631 * 2632 * This function is called when the final ADISC is completed during discovery. 2633 * This function handles clearing link attention or issuing reg_vpi depending 2634 * on whether npiv is enabled. This function also kicks off the PLOGI phase of 2635 * discovery. 2636 * This function is called with no locks held. 2637 **/ 2638 static void 2639 lpfc_adisc_done(struct lpfc_vport *vport) 2640 { 2641 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2642 struct lpfc_hba *phba = vport->phba; 2643 2644 /* 2645 * For NPIV, cmpl_reg_vpi will set port_state to READY, 2646 * and continue discovery. 2647 */ 2648 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 2649 !(vport->fc_flag & FC_RSCN_MODE) && 2650 (phba->sli_rev < LPFC_SLI_REV4)) { 2651 2652 /* 2653 * If link is down, clear_la and reg_vpi will be done after 2654 * flogi following a link up event 2655 */ 2656 if (!lpfc_is_link_up(phba)) 2657 return; 2658 2659 /* The ADISCs are complete. Doesn't matter if they 2660 * succeeded or failed because the ADISC completion 2661 * routine guarantees to call the state machine and 2662 * the RPI is either unregistered (failed ADISC response) 2663 * or the RPI is still valid and the node is marked 2664 * mapped for a target. The exchanges should be in the 2665 * correct state. This code is specific to SLI3. 
2666 */ 2667 lpfc_issue_clear_la(phba, vport); 2668 lpfc_issue_reg_vpi(phba, vport); 2669 return; 2670 } 2671 /* 2672 * For SLI2, we need to set port_state to READY 2673 * and continue discovery. 2674 */ 2675 if (vport->port_state < LPFC_VPORT_READY) { 2676 /* If we get here, there is nothing to ADISC */ 2677 lpfc_issue_clear_la(phba, vport); 2678 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) { 2679 vport->num_disc_nodes = 0; 2680 /* go thru NPR list, issue ELS PLOGIs */ 2681 if (vport->fc_npr_cnt) 2682 lpfc_els_disc_plogi(vport); 2683 if (!vport->num_disc_nodes) { 2684 spin_lock_irq(shost->host_lock); 2685 vport->fc_flag &= ~FC_NDISC_ACTIVE; 2686 spin_unlock_irq(shost->host_lock); 2687 lpfc_can_disctmo(vport); 2688 lpfc_end_rscn(vport); 2689 } 2690 } 2691 vport->port_state = LPFC_VPORT_READY; 2692 } else 2693 lpfc_rscn_disc(vport); 2694 } 2695 2696 /** 2697 * lpfc_more_adisc - Issue more adisc as needed 2698 * @vport: pointer to a host virtual N_Port data structure. 2699 * 2700 * This routine determines whether there are more ndlps on a @vport 2701 * node list need to have Address Discover (ADISC) issued. If so, it will 2702 * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's 2703 * remaining nodes which need to have ADISC sent. 2704 **/ 2705 void 2706 lpfc_more_adisc(struct lpfc_vport *vport) 2707 { 2708 if (vport->num_disc_nodes) 2709 vport->num_disc_nodes--; 2710 /* Continue discovery with <num_disc_nodes> ADISCs to go */ 2711 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2712 "0210 Continue discovery with %d ADISCs to go " 2713 "Data: x%x x%x x%x\n", 2714 vport->num_disc_nodes, vport->fc_adisc_cnt, 2715 vport->fc_flag, vport->port_state); 2716 /* Check to see if there are more ADISCs to be sent */ 2717 if (vport->fc_flag & FC_NLP_MORE) { 2718 lpfc_set_disctmo(vport); 2719 /* go thru NPR nodes and issue any remaining ELS ADISCs */ 2720 lpfc_els_disc_adisc(vport); 2721 } 2722 if (!vport->num_disc_nodes) 2723 lpfc_adisc_done(vport); 2724 return; 2725 } 2726 2727 /** 2728 * lpfc_cmpl_els_adisc - Completion callback function for adisc 2729 * @phba: pointer to lpfc hba data structure. 2730 * @cmdiocb: pointer to lpfc command iocb data structure. 2731 * @rspiocb: pointer to lpfc response iocb data structure. 2732 * 2733 * This routine is the completion function for issuing the Address Discover 2734 * (ADISC) command. It first checks to see whether link went down during 2735 * the discovery process. If so, the node will be marked as node port 2736 * recovery for issuing discover IOCB by the link attention handler and 2737 * exit. Otherwise, the response status is checked. If error was reported 2738 * in the response status, the ADISC command shall be retried by invoking 2739 * the lpfc_els_retry() routine. Otherwise, if no error was reported in 2740 * the response status, the state machine is invoked to set transition 2741 * with respect to NLP_EVT_CMPL_ADISC event. 
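 *
 * The discovery bookkeeping performed below can be summarized by this
 * sketch (illustration only, mirroring the routine; it introduces no new
 * behavior):
 *
 *	disc = ndlp->nlp_flag & NLP_NPR_2B_DISC;    note, then clear the flag
 *	if (link attention is pending or the ADISC is being retried)
 *		ndlp->nlp_flag |= NLP_NPR_2B_DISC;  restore it for the next pass
 *	else
 *		run the NLP_EVT_CMPL_ADISC (or NLP_EVT_DEVICE_RM) DSM event
 *	if (disc && vport->num_disc_nodes)
 *		lpfc_more_adisc(vport);             issue any remaining ADISCs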
2742 **/ 2743 static void 2744 lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2745 struct lpfc_iocbq *rspiocb) 2746 { 2747 struct lpfc_vport *vport = cmdiocb->vport; 2748 IOCB_t *irsp; 2749 struct lpfc_nodelist *ndlp; 2750 int disc; 2751 u32 ulp_status, ulp_word4, tmo; 2752 2753 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2754 cmdiocb->context_un.rsp_iocb = rspiocb; 2755 2756 ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 2757 2758 ulp_status = get_job_ulpstatus(phba, rspiocb); 2759 ulp_word4 = get_job_word4(phba, rspiocb); 2760 2761 if (phba->sli_rev == LPFC_SLI_REV4) { 2762 tmo = get_wqe_tmo(cmdiocb); 2763 } else { 2764 irsp = &rspiocb->iocb; 2765 tmo = irsp->ulpTimeout; 2766 } 2767 2768 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2769 "ADISC cmpl: status:x%x/x%x did:x%x", 2770 ulp_status, ulp_word4, 2771 ndlp->nlp_DID); 2772 2773 /* Since ndlp can be freed in the disc state machine, note if this node 2774 * is being used during discovery. 2775 */ 2776 spin_lock_irq(&ndlp->lock); 2777 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 2778 ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC); 2779 spin_unlock_irq(&ndlp->lock); 2780 /* ADISC completes to NPort <nlp_DID> */ 2781 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2782 "0104 ADISC completes to NPort x%x " 2783 "Data: x%x x%x x%x x%x x%x\n", 2784 ndlp->nlp_DID, ulp_status, ulp_word4, 2785 tmo, disc, vport->num_disc_nodes); 2786 /* Check to see if link went down during discovery */ 2787 if (lpfc_els_chk_latt(vport)) { 2788 spin_lock_irq(&ndlp->lock); 2789 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2790 spin_unlock_irq(&ndlp->lock); 2791 goto out; 2792 } 2793 2794 if (ulp_status) { 2795 /* Check for retry */ 2796 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2797 /* ELS command is being retried */ 2798 if (disc) { 2799 spin_lock_irq(&ndlp->lock); 2800 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2801 spin_unlock_irq(&ndlp->lock); 2802 lpfc_set_disctmo(vport); 2803 } 2804 goto out; 2805 } 2806 /* ADISC failed */ 2807 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2808 "2755 ADISC failure DID:%06X Status:x%x/x%x\n", 2809 ndlp->nlp_DID, ulp_status, 2810 ulp_word4); 2811 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2812 NLP_EVT_CMPL_ADISC); 2813 2814 /* As long as this node is not registered with the SCSI or NVMe 2815 * transport, it is no longer an active node. Otherwise 2816 * devloss handles the final cleanup. 2817 */ 2818 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { 2819 spin_lock_irq(&ndlp->lock); 2820 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2821 spin_unlock_irq(&ndlp->lock); 2822 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2823 NLP_EVT_DEVICE_RM); 2824 } 2825 } else 2826 /* Good status, call state machine */ 2827 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2828 NLP_EVT_CMPL_ADISC); 2829 2830 /* Check to see if there are more ADISCs to be sent */ 2831 if (disc && vport->num_disc_nodes) 2832 lpfc_more_adisc(vport); 2833 out: 2834 lpfc_els_free_iocb(phba, cmdiocb); 2835 lpfc_nlp_put(ndlp); 2836 return; 2837 } 2838 2839 /** 2840 * lpfc_issue_els_adisc - Issue an address discover iocb to an node on a vport 2841 * @vport: pointer to a virtual N_Port data structure. 2842 * @ndlp: pointer to a node-list data structure. 2843 * @retry: number of retries to the command IOCB. 2844 * 2845 * This routine issues an Address Discover (ADISC) for an @ndlp on a 2846 * @vport. 
It prepares the payload of the ADISC ELS command, updates the
2847 * state of the ndlp, and invokes the lpfc_sli_issue_iocb() routine
2848 * to issue the ADISC ELS command.
2849 *
2850 * Note that the ndlp reference count will be incremented by 1 for holding the
2851 * ndlp and the reference to ndlp will be stored into the context1 field of
2852 * the IOCB for the completion callback function to the ADISC ELS command.
2853 *
2854 * Return code
2855 * 0 - successfully issued adisc
2856 * 1 - failed to issue adisc
2857 **/
2858 int
2859 lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2860 uint8_t retry)
2861 {
2862 int rc = 0;
2863 struct lpfc_hba *phba = vport->phba;
2864 ADISC *ap;
2865 struct lpfc_iocbq *elsiocb;
2866 uint8_t *pcmd;
2867 uint16_t cmdsize;
2868 
2869 cmdsize = (sizeof(uint32_t) + sizeof(ADISC));
2870 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2871 ndlp->nlp_DID, ELS_CMD_ADISC);
2872 if (!elsiocb)
2873 return 1;
2874 
2875 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2876 
2877 /* For ADISC request, remainder of payload is service parameters */
2878 *((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
2879 pcmd += sizeof(uint32_t);
2880 
2881 /* Fill in ADISC payload */
2882 ap = (ADISC *) pcmd;
2883 ap->hardAL_PA = phba->fc_pref_ALPA;
2884 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
2885 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2886 ap->DID = be32_to_cpu(vport->fc_myDID);
2887 
2888 phba->fc_stat.elsXmitADISC++;
2889 elsiocb->cmd_cmpl = lpfc_cmpl_els_adisc;
2890 spin_lock_irq(&ndlp->lock);
2891 ndlp->nlp_flag |= NLP_ADISC_SND;
2892 spin_unlock_irq(&ndlp->lock);
2893 elsiocb->context1 = lpfc_nlp_get(ndlp);
2894 if (!elsiocb->context1) {
2895 lpfc_els_free_iocb(phba, elsiocb);
2896 goto err;
2897 }
2898 
2899 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2900 "Issue ADISC: did:x%x refcnt %d",
2901 ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
2902 
2903 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
2904 if (rc == IOCB_ERROR) {
2905 lpfc_els_free_iocb(phba, elsiocb);
2906 lpfc_nlp_put(ndlp);
2907 goto err;
2908 }
2909 
2910 return 0;
2911 
2912 err:
2913 spin_lock_irq(&ndlp->lock);
2914 ndlp->nlp_flag &= ~NLP_ADISC_SND;
2915 spin_unlock_irq(&ndlp->lock);
2916 return 1;
2917 }
2918 
2919 /**
2920 * lpfc_cmpl_els_logo - Completion callback function for logo
2921 * @phba: pointer to lpfc hba data structure.
2922 * @cmdiocb: pointer to lpfc command iocb data structure.
2923 * @rspiocb: pointer to lpfc response iocb data structure.
2924 *
2925 * This routine is the completion function for issuing the ELS Logout (LOGO)
2926 * command. If no error status was reported from the LOGO response, the
2927 * state machine of the associated ndlp shall be invoked for transition with
2928 * respect to NLP_EVT_CMPL_LOGO event.
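 *
 * The routine also completes the NLP_WAIT_FOR_LOGO handshake: it clears that
 * bit in ndlp->save_flags and, when the bit was set, wakes ndlp->logo_waitq.
 * A caller that needs to block until the LOGO finishes would follow roughly
 * this sketch (hypothetical example for illustration, not additional driver
 * code; the timeout value is arbitrary):
 *
 *	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
 *
 *	ndlp->logo_waitq = &waitq;
 *	spin_lock_irq(&ndlp->lock);
 *	ndlp->save_flags |= NLP_WAIT_FOR_LOGO;
 *	spin_unlock_irq(&ndlp->lock);
 *	lpfc_issue_els_logo(vport, ndlp, 0);
 *	wait_event_timeout(waitq,
 *			   !(ndlp->save_flags & NLP_WAIT_FOR_LOGO),
 *			   msecs_to_jiffies(5000));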
2929 **/ 2930 static void 2931 lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2932 struct lpfc_iocbq *rspiocb) 2933 { 2934 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 2935 struct lpfc_vport *vport = ndlp->vport; 2936 IOCB_t *irsp; 2937 unsigned long flags; 2938 uint32_t skip_recovery = 0; 2939 int wake_up_waiter = 0; 2940 u32 ulp_status; 2941 u32 ulp_word4; 2942 u32 tmo; 2943 2944 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2945 cmdiocb->context_un.rsp_iocb = rspiocb; 2946 2947 ulp_status = get_job_ulpstatus(phba, rspiocb); 2948 ulp_word4 = get_job_word4(phba, rspiocb); 2949 2950 if (phba->sli_rev == LPFC_SLI_REV4) { 2951 tmo = get_wqe_tmo(cmdiocb); 2952 } else { 2953 irsp = &rspiocb->iocb; 2954 tmo = irsp->ulpTimeout; 2955 } 2956 2957 spin_lock_irq(&ndlp->lock); 2958 ndlp->nlp_flag &= ~NLP_LOGO_SND; 2959 if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) { 2960 wake_up_waiter = 1; 2961 ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO; 2962 } 2963 spin_unlock_irq(&ndlp->lock); 2964 2965 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2966 "LOGO cmpl: status:x%x/x%x did:x%x", 2967 ulp_status, ulp_word4, 2968 ndlp->nlp_DID); 2969 2970 /* LOGO completes to NPort <nlp_DID> */ 2971 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2972 "0105 LOGO completes to NPort x%x " 2973 "refcnt %d nflags x%x Data: x%x x%x x%x x%x\n", 2974 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag, 2975 ulp_status, ulp_word4, 2976 tmo, vport->num_disc_nodes); 2977 2978 if (lpfc_els_chk_latt(vport)) { 2979 skip_recovery = 1; 2980 goto out; 2981 } 2982 2983 /* The LOGO will not be retried on failure. A LOGO was 2984 * issued to the remote rport and a ACC or RJT or no Answer are 2985 * all acceptable. Note the failure and move forward with 2986 * discovery. The PLOGI will retry. 2987 */ 2988 if (ulp_status) { 2989 /* LOGO failed */ 2990 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2991 "2756 LOGO failure, No Retry DID:%06X " 2992 "Status:x%x/x%x\n", 2993 ndlp->nlp_DID, ulp_status, 2994 ulp_word4); 2995 2996 if (lpfc_error_lost_link(ulp_status, ulp_word4)) { 2997 skip_recovery = 1; 2998 goto out; 2999 } 3000 } 3001 3002 /* Call state machine. This will unregister the rpi if needed. */ 3003 lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO); 3004 3005 /* The driver sets this flag for an NPIV instance that doesn't want to 3006 * log into the remote port. 3007 */ 3008 if (ndlp->nlp_flag & NLP_TARGET_REMOVE) { 3009 spin_lock_irq(&ndlp->lock); 3010 if (phba->sli_rev == LPFC_SLI_REV4) 3011 ndlp->nlp_flag |= NLP_RELEASE_RPI; 3012 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 3013 spin_unlock_irq(&ndlp->lock); 3014 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 3015 NLP_EVT_DEVICE_RM); 3016 lpfc_els_free_iocb(phba, cmdiocb); 3017 lpfc_nlp_put(ndlp); 3018 3019 /* Presume the node was released. */ 3020 return; 3021 } 3022 3023 out: 3024 /* Driver is done with the IO. */ 3025 lpfc_els_free_iocb(phba, cmdiocb); 3026 lpfc_nlp_put(ndlp); 3027 3028 /* At this point, the LOGO processing is complete. NOTE: For a 3029 * pt2pt topology, we are assuming the NPortID will only change 3030 * on link up processing. For a LOGO / PLOGI initiated by the 3031 * Initiator, we are assuming the NPortID is not going to change. 3032 */ 3033 3034 if (wake_up_waiter && ndlp->logo_waitq) 3035 wake_up(ndlp->logo_waitq); 3036 /* 3037 * If the node is a target, the handling attempts to recover the port. 3038 * For any other port type, the rpi is unregistered as an implicit 3039 * LOGO. 
*/
3041 if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) &&
3042 skip_recovery == 0) {
3043 lpfc_cancel_retry_delay_tmo(vport, ndlp);
3044 spin_lock_irqsave(&ndlp->lock, flags);
3045 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
3046 spin_unlock_irqrestore(&ndlp->lock, flags);
3047 
3048 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3049 "3187 LOGO completes to NPort x%x: Start "
3050 "Recovery Data: x%x x%x x%x x%x\n",
3051 ndlp->nlp_DID, ulp_status,
3052 ulp_word4, tmo,
3053 vport->num_disc_nodes);
3054 lpfc_disc_start(vport);
3055 return;
3056 }
3057 
3058 /* Cleanup path for failed REG_RPI handling. If REG_RPI fails, the
3059 * driver sends a LOGO to the rport to cleanup. For fabric and
3060 * initiator ports, clean up the node as long as the node is not
3061 * registered with the transport.
3062 */
3063 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
3064 spin_lock_irq(&ndlp->lock);
3065 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
3066 spin_unlock_irq(&ndlp->lock);
3067 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
3068 NLP_EVT_DEVICE_RM);
3069 }
3070 }
3071 
3072 /**
3073 * lpfc_issue_els_logo - Issue a logo to a node on a vport
3074 * @vport: pointer to a virtual N_Port data structure.
3075 * @ndlp: pointer to a node-list data structure.
3076 * @retry: number of retries to the command IOCB.
3077 *
3078 * This routine constructs and issues an ELS Logout (LOGO) iocb command
3079 * to a remote node, referred to by an @ndlp on a @vport. It constructs the
3080 * payload of the IOCB, properly sets up the @ndlp state, and invokes the
3081 * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command.
3082 *
3083 * Note that the ndlp reference count will be incremented by 1 for holding the
3084 * ndlp and the reference to ndlp will be stored into the context1 field of
3085 * the IOCB for the completion callback function to the LOGO ELS command.
3086 * 3087 * Callers of this routine are expected to unregister the RPI first 3088 * 3089 * Return code 3090 * 0 - successfully issued logo 3091 * 1 - failed to issue logo 3092 **/ 3093 int 3094 lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 3095 uint8_t retry) 3096 { 3097 struct lpfc_hba *phba = vport->phba; 3098 struct lpfc_iocbq *elsiocb; 3099 uint8_t *pcmd; 3100 uint16_t cmdsize; 3101 int rc; 3102 3103 spin_lock_irq(&ndlp->lock); 3104 if (ndlp->nlp_flag & NLP_LOGO_SND) { 3105 spin_unlock_irq(&ndlp->lock); 3106 return 0; 3107 } 3108 spin_unlock_irq(&ndlp->lock); 3109 3110 cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name); 3111 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3112 ndlp->nlp_DID, ELS_CMD_LOGO); 3113 if (!elsiocb) 3114 return 1; 3115 3116 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3117 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 3118 pcmd += sizeof(uint32_t); 3119 3120 /* Fill in LOGO payload */ 3121 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 3122 pcmd += sizeof(uint32_t); 3123 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 3124 3125 phba->fc_stat.elsXmitLOGO++; 3126 elsiocb->cmd_cmpl = lpfc_cmpl_els_logo; 3127 spin_lock_irq(&ndlp->lock); 3128 ndlp->nlp_flag |= NLP_LOGO_SND; 3129 ndlp->nlp_flag &= ~NLP_ISSUE_LOGO; 3130 spin_unlock_irq(&ndlp->lock); 3131 elsiocb->context1 = lpfc_nlp_get(ndlp); 3132 if (!elsiocb->context1) { 3133 lpfc_els_free_iocb(phba, elsiocb); 3134 goto err; 3135 } 3136 3137 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3138 "Issue LOGO: did:x%x refcnt %d", 3139 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3140 3141 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3142 if (rc == IOCB_ERROR) { 3143 lpfc_els_free_iocb(phba, elsiocb); 3144 lpfc_nlp_put(ndlp); 3145 goto err; 3146 } 3147 3148 spin_lock_irq(&ndlp->lock); 3149 ndlp->nlp_prev_state = ndlp->nlp_state; 3150 spin_unlock_irq(&ndlp->lock); 3151 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 3152 return 0; 3153 3154 err: 3155 spin_lock_irq(&ndlp->lock); 3156 ndlp->nlp_flag &= ~NLP_LOGO_SND; 3157 spin_unlock_irq(&ndlp->lock); 3158 return 1; 3159 } 3160 3161 /** 3162 * lpfc_cmpl_els_cmd - Completion callback function for generic els command 3163 * @phba: pointer to lpfc hba data structure. 3164 * @cmdiocb: pointer to lpfc command iocb data structure. 3165 * @rspiocb: pointer to lpfc response iocb data structure. 3166 * 3167 * This routine is a generic completion callback function for ELS commands. 3168 * Specifically, it is the callback function which does not need to perform 3169 * any command specific operations. It is currently used by the ELS command 3170 * issuing routines for RSCN, lpfc_issue_els_rscn, and the ELS Fibre Channel 3171 * Address Resolution Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). 3172 * Other than certain debug loggings, this callback function simply invokes the 3173 * lpfc_els_chk_latt() routine to check whether link went down during the 3174 * discovery process. 
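 *
 * For reference, an issuing routine that relies on this generic completion
 * only has to wire it up before sending; a condensed sketch of that pattern
 * (illustration only, mirroring the issue routines in this file) is:
 *
 *	elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd;
 *	elsiocb->context1 = lpfc_nlp_get(ndlp);
 *	if (!elsiocb->context1) {
 *		lpfc_els_free_iocb(phba, elsiocb);
 *		return 1;
 *	}
 *	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR) {
 *		lpfc_els_free_iocb(phba, elsiocb);
 *		lpfc_nlp_put(ndlp);
 *		return 1;
 *	}
 *	return 0;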
3175 **/ 3176 static void 3177 lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3178 struct lpfc_iocbq *rspiocb) 3179 { 3180 struct lpfc_vport *vport = cmdiocb->vport; 3181 struct lpfc_nodelist *free_ndlp; 3182 IOCB_t *irsp; 3183 u32 ulp_status, ulp_word4, tmo, did, iotag; 3184 3185 ulp_status = get_job_ulpstatus(phba, rspiocb); 3186 ulp_word4 = get_job_word4(phba, rspiocb); 3187 did = get_job_els_rsp64_did(phba, cmdiocb); 3188 3189 if (phba->sli_rev == LPFC_SLI_REV4) { 3190 tmo = get_wqe_tmo(cmdiocb); 3191 iotag = get_wqe_reqtag(cmdiocb); 3192 } else { 3193 irsp = &rspiocb->iocb; 3194 tmo = irsp->ulpTimeout; 3195 iotag = irsp->ulpIoTag; 3196 } 3197 3198 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3199 "ELS cmd cmpl: status:x%x/x%x did:x%x", 3200 ulp_status, ulp_word4, did); 3201 3202 /* ELS cmd tag <ulpIoTag> completes */ 3203 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3204 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n", 3205 iotag, ulp_status, ulp_word4, tmo); 3206 3207 /* Check to see if link went down during discovery */ 3208 lpfc_els_chk_latt(vport); 3209 3210 free_ndlp = (struct lpfc_nodelist *)cmdiocb->context1; 3211 3212 lpfc_els_free_iocb(phba, cmdiocb); 3213 lpfc_nlp_put(free_ndlp); 3214 } 3215 3216 /** 3217 * lpfc_reg_fab_ctrl_node - RPI register the fabric controller node. 3218 * @vport: pointer to lpfc_vport data structure. 3219 * @fc_ndlp: pointer to the fabric controller (0xfffffd) node. 3220 * 3221 * This routine registers the rpi assigned to the fabric controller 3222 * NPort_ID (0xfffffd) with the port and moves the node to UNMAPPED 3223 * state triggering a registration with the SCSI transport. 3224 * 3225 * This routine is single out because the fabric controller node 3226 * does not receive a PLOGI. This routine is consumed by the 3227 * SCR and RDF ELS commands. Callers are expected to qualify 3228 * with SLI4 first. 3229 **/ 3230 static int 3231 lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp) 3232 { 3233 int rc = 0; 3234 struct lpfc_hba *phba = vport->phba; 3235 struct lpfc_nodelist *ns_ndlp; 3236 LPFC_MBOXQ_t *mbox; 3237 struct lpfc_dmabuf *mp; 3238 3239 if (fc_ndlp->nlp_flag & NLP_RPI_REGISTERED) 3240 return rc; 3241 3242 ns_ndlp = lpfc_findnode_did(vport, NameServer_DID); 3243 if (!ns_ndlp) 3244 return -ENODEV; 3245 3246 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 3247 "0935 %s: Reg FC RPI x%x on FC DID x%x NSSte: x%x\n", 3248 __func__, fc_ndlp->nlp_rpi, fc_ndlp->nlp_DID, 3249 ns_ndlp->nlp_state); 3250 if (ns_ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 3251 return -ENODEV; 3252 3253 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3254 if (!mbox) { 3255 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3256 "0936 %s: no memory for reg_login " 3257 "Data: x%x x%x x%x x%x\n", __func__, 3258 fc_ndlp->nlp_DID, fc_ndlp->nlp_state, 3259 fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi); 3260 return -ENOMEM; 3261 } 3262 rc = lpfc_reg_rpi(phba, vport->vpi, fc_ndlp->nlp_DID, 3263 (u8 *)&vport->fc_sparam, mbox, fc_ndlp->nlp_rpi); 3264 if (rc) { 3265 rc = -EACCES; 3266 goto out; 3267 } 3268 3269 fc_ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; 3270 mbox->mbox_cmpl = lpfc_mbx_cmpl_fc_reg_login; 3271 mbox->ctx_ndlp = lpfc_nlp_get(fc_ndlp); 3272 if (!mbox->ctx_ndlp) { 3273 rc = -ENOMEM; 3274 goto out_mem; 3275 } 3276 3277 mbox->vport = vport; 3278 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 3279 if (rc == MBX_NOT_FINISHED) { 3280 rc = -ENODEV; 3281 lpfc_nlp_put(fc_ndlp); 3282 goto out_mem; 3283 } 3284 /* Success path. Exit. 
*/ 3285 lpfc_nlp_set_state(vport, fc_ndlp, 3286 NLP_STE_REG_LOGIN_ISSUE); 3287 return 0; 3288 3289 out_mem: 3290 fc_ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 3291 mp = (struct lpfc_dmabuf *)mbox->ctx_buf; 3292 lpfc_mbuf_free(phba, mp->virt, mp->phys); 3293 kfree(mp); 3294 3295 out: 3296 mempool_free(mbox, phba->mbox_mem_pool); 3297 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3298 "0938 %s: failed to format reg_login " 3299 "Data: x%x x%x x%x x%x\n", __func__, 3300 fc_ndlp->nlp_DID, fc_ndlp->nlp_state, 3301 fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi); 3302 return rc; 3303 } 3304 3305 /** 3306 * lpfc_cmpl_els_disc_cmd - Completion callback function for Discovery ELS cmd 3307 * @phba: pointer to lpfc hba data structure. 3308 * @cmdiocb: pointer to lpfc command iocb data structure. 3309 * @rspiocb: pointer to lpfc response iocb data structure. 3310 * 3311 * This routine is a generic completion callback function for Discovery ELS cmd. 3312 * Currently used by the ELS command issuing routines for the ELS State Change 3313 * Request (SCR), lpfc_issue_els_scr() and the ELS RDF, lpfc_issue_els_rdf(). 3314 * These commands will be retried once only for ELS timeout errors. 3315 **/ 3316 static void 3317 lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3318 struct lpfc_iocbq *rspiocb) 3319 { 3320 struct lpfc_vport *vport = cmdiocb->vport; 3321 IOCB_t *irsp; 3322 struct lpfc_els_rdf_rsp *prdf; 3323 struct lpfc_dmabuf *pcmd, *prsp; 3324 u32 *pdata; 3325 u32 cmd; 3326 struct lpfc_nodelist *ndlp = cmdiocb->context1; 3327 u32 ulp_status, ulp_word4, tmo, did, iotag; 3328 3329 ulp_status = get_job_ulpstatus(phba, rspiocb); 3330 ulp_word4 = get_job_word4(phba, rspiocb); 3331 did = get_job_els_rsp64_did(phba, cmdiocb); 3332 3333 if (phba->sli_rev == LPFC_SLI_REV4) { 3334 tmo = get_wqe_tmo(cmdiocb); 3335 iotag = get_wqe_reqtag(cmdiocb); 3336 } else { 3337 irsp = &rspiocb->iocb; 3338 tmo = irsp->ulpTimeout; 3339 iotag = irsp->ulpIoTag; 3340 } 3341 3342 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3343 "ELS cmd cmpl: status:x%x/x%x did:x%x", 3344 ulp_status, ulp_word4, did); 3345 3346 /* ELS cmd tag <ulpIoTag> completes */ 3347 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3348 "0217 ELS cmd tag x%x completes Data: x%x x%x x%x x%x\n", 3349 iotag, ulp_status, ulp_word4, tmo, cmdiocb->retry); 3350 3351 pcmd = (struct lpfc_dmabuf *)cmdiocb->context2; 3352 if (!pcmd) 3353 goto out; 3354 3355 pdata = (u32 *)pcmd->virt; 3356 if (!pdata) 3357 goto out; 3358 cmd = *pdata; 3359 3360 /* Only 1 retry for ELS Timeout only */ 3361 if (ulp_status == IOSTAT_LOCAL_REJECT && 3362 ((ulp_word4 & IOERR_PARAM_MASK) == 3363 IOERR_SEQUENCE_TIMEOUT)) { 3364 cmdiocb->retry++; 3365 if (cmdiocb->retry <= 1) { 3366 switch (cmd) { 3367 case ELS_CMD_SCR: 3368 lpfc_issue_els_scr(vport, cmdiocb->retry); 3369 break; 3370 case ELS_CMD_EDC: 3371 lpfc_issue_els_edc(vport, cmdiocb->retry); 3372 break; 3373 case ELS_CMD_RDF: 3374 cmdiocb->context1 = NULL; /* save ndlp refcnt */ 3375 lpfc_issue_els_rdf(vport, cmdiocb->retry); 3376 break; 3377 } 3378 goto out; 3379 } 3380 phba->fc_stat.elsRetryExceeded++; 3381 } 3382 if (cmd == ELS_CMD_EDC) { 3383 /* must be called before checking uplStatus and returning */ 3384 lpfc_cmpl_els_edc(phba, cmdiocb, rspiocb); 3385 return; 3386 } 3387 if (ulp_status) { 3388 /* ELS discovery cmd completes with error */ 3389 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT, 3390 "4203 ELS cmd x%x error: x%x x%X\n", cmd, 3391 ulp_status, ulp_word4); 3392 goto out; 3393 } 3394 3395 
/* The RDF response doesn't have any impact on the running driver
3396 * but the notification descriptors are dumped here for support.
3397 */
3398 if (cmd == ELS_CMD_RDF) {
3399 int i;
3400
3401 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
3402 if (!prsp)
3403 goto out;
3404
3405 prdf = (struct lpfc_els_rdf_rsp *)prsp->virt;
3406 if (!prdf)
3407 goto out;
3408
3409 for (i = 0; i < ELS_RDF_REG_TAG_CNT &&
3410 i < be32_to_cpu(prdf->reg_d1.reg_desc.count); i++)
3411 lpfc_printf_vlog(vport, KERN_INFO,
3412 LOG_ELS | LOG_CGN_MGMT,
3413 "4677 Fabric RDF Notification Grant "
3414 "Data: 0x%08x Reg: %x %x\n",
3415 be32_to_cpu(
3416 prdf->reg_d1.desc_tags[i]),
3417 phba->cgn_reg_signal,
3418 phba->cgn_reg_fpin);
3419 }
3420
3421 out:
3422 /* Check to see if link went down during discovery */
3423 lpfc_els_chk_latt(vport);
3424 lpfc_els_free_iocb(phba, cmdiocb);
3425 lpfc_nlp_put(ndlp);
3426 return;
3427 }
3428
3429 /**
3430 * lpfc_issue_els_scr - Issue an SCR to a node on a vport
3431 * @vport: pointer to a host virtual N_Port data structure.
3432 * @retry: retry counter for the command IOCB.
3433 *
3434 * This routine issues a State Change Request (SCR) to a fabric node
3435 * on a @vport. The remote node is the Fabric Controller (0xfffffd). It
3436 * first searches the @vport node list to find the matching ndlp. If no such
3437 * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An
3438 * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb()
3439 * routine is invoked to send the SCR IOCB.
3440 *
3441 * Note that the ndlp reference count will be incremented by 1 for holding the
3442 * ndlp and the reference to ndlp will be stored into the context1 field of
3443 * the IOCB for the completion callback function to the SCR ELS command.
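 * On an SLI-4 adapter, the Fabric Controller node is also registered with
 * the firmware through lpfc_reg_fab_ctrl_node() before the SCR is sent.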
3444 * 3445 * Return code 3446 * 0 - Successfully issued scr command 3447 * 1 - Failed to issue scr command 3448 **/ 3449 int 3450 lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry) 3451 { 3452 int rc = 0; 3453 struct lpfc_hba *phba = vport->phba; 3454 struct lpfc_iocbq *elsiocb; 3455 uint8_t *pcmd; 3456 uint16_t cmdsize; 3457 struct lpfc_nodelist *ndlp; 3458 3459 cmdsize = (sizeof(uint32_t) + sizeof(SCR)); 3460 3461 ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID); 3462 if (!ndlp) { 3463 ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID); 3464 if (!ndlp) 3465 return 1; 3466 lpfc_enqueue_node(vport, ndlp); 3467 } 3468 3469 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3470 ndlp->nlp_DID, ELS_CMD_SCR); 3471 if (!elsiocb) 3472 return 1; 3473 3474 if (phba->sli_rev == LPFC_SLI_REV4) { 3475 rc = lpfc_reg_fab_ctrl_node(vport, ndlp); 3476 if (rc) { 3477 lpfc_els_free_iocb(phba, elsiocb); 3478 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3479 "0937 %s: Failed to reg fc node, rc %d\n", 3480 __func__, rc); 3481 return 1; 3482 } 3483 } 3484 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3485 3486 *((uint32_t *) (pcmd)) = ELS_CMD_SCR; 3487 pcmd += sizeof(uint32_t); 3488 3489 /* For SCR, remainder of payload is SCR parameter page */ 3490 memset(pcmd, 0, sizeof(SCR)); 3491 ((SCR *) pcmd)->Function = SCR_FUNC_FULL; 3492 3493 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3494 "Issue SCR: did:x%x", 3495 ndlp->nlp_DID, 0, 0); 3496 3497 phba->fc_stat.elsXmitSCR++; 3498 elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd; 3499 elsiocb->context1 = lpfc_nlp_get(ndlp); 3500 if (!elsiocb->context1) { 3501 lpfc_els_free_iocb(phba, elsiocb); 3502 return 1; 3503 } 3504 3505 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3506 "Issue SCR: did:x%x refcnt %d", 3507 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3508 3509 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3510 if (rc == IOCB_ERROR) { 3511 lpfc_els_free_iocb(phba, elsiocb); 3512 lpfc_nlp_put(ndlp); 3513 return 1; 3514 } 3515 3516 return 0; 3517 } 3518 3519 /** 3520 * lpfc_issue_els_rscn - Issue an RSCN to the Fabric Controller (Fabric) 3521 * or the other nport (pt2pt). 3522 * @vport: pointer to a host virtual N_Port data structure. 3523 * @retry: number of retries to the command IOCB. 3524 * 3525 * This routine issues a RSCN to the Fabric Controller (DID 0xFFFFFD) 3526 * when connected to a fabric, or to the remote port when connected 3527 * in point-to-point mode. When sent to the Fabric Controller, it will 3528 * replay the RSCN to registered recipients. 3529 * 3530 * Note that the ndlp reference count will be incremented by 1 for holding the 3531 * ndlp and the reference to ndlp will be stored into the context1 field of 3532 * the IOCB for the completion callback function to the RSCN ELS command. 
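 * The transmitted payload is a single RSCN page carrying this @vport's own
 * N_Port ID (fc_myDID).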
3533 * 3534 * Return code 3535 * 0 - Successfully issued RSCN command 3536 * 1 - Failed to issue RSCN command 3537 **/ 3538 int 3539 lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry) 3540 { 3541 int rc = 0; 3542 struct lpfc_hba *phba = vport->phba; 3543 struct lpfc_iocbq *elsiocb; 3544 struct lpfc_nodelist *ndlp; 3545 struct { 3546 struct fc_els_rscn rscn; 3547 struct fc_els_rscn_page portid; 3548 } *event; 3549 uint32_t nportid; 3550 uint16_t cmdsize = sizeof(*event); 3551 3552 /* Not supported for private loop */ 3553 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP && 3554 !(vport->fc_flag & FC_PUBLIC_LOOP)) 3555 return 1; 3556 3557 if (vport->fc_flag & FC_PT2PT) { 3558 /* find any mapped nport - that would be the other nport */ 3559 ndlp = lpfc_findnode_mapped(vport); 3560 if (!ndlp) 3561 return 1; 3562 } else { 3563 nportid = FC_FID_FCTRL; 3564 /* find the fabric controller node */ 3565 ndlp = lpfc_findnode_did(vport, nportid); 3566 if (!ndlp) { 3567 /* if one didn't exist, make one */ 3568 ndlp = lpfc_nlp_init(vport, nportid); 3569 if (!ndlp) 3570 return 1; 3571 lpfc_enqueue_node(vport, ndlp); 3572 } 3573 } 3574 3575 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3576 ndlp->nlp_DID, ELS_CMD_RSCN_XMT); 3577 3578 if (!elsiocb) 3579 return 1; 3580 3581 event = ((struct lpfc_dmabuf *)elsiocb->context2)->virt; 3582 3583 event->rscn.rscn_cmd = ELS_RSCN; 3584 event->rscn.rscn_page_len = sizeof(struct fc_els_rscn_page); 3585 event->rscn.rscn_plen = cpu_to_be16(cmdsize); 3586 3587 nportid = vport->fc_myDID; 3588 /* appears that page flags must be 0 for fabric to broadcast RSCN */ 3589 event->portid.rscn_page_flags = 0; 3590 event->portid.rscn_fid[0] = (nportid & 0x00FF0000) >> 16; 3591 event->portid.rscn_fid[1] = (nportid & 0x0000FF00) >> 8; 3592 event->portid.rscn_fid[2] = nportid & 0x000000FF; 3593 3594 phba->fc_stat.elsXmitRSCN++; 3595 elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd; 3596 elsiocb->context1 = lpfc_nlp_get(ndlp); 3597 if (!elsiocb->context1) { 3598 lpfc_els_free_iocb(phba, elsiocb); 3599 return 1; 3600 } 3601 3602 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3603 "Issue RSCN: did:x%x", 3604 ndlp->nlp_DID, 0, 0); 3605 3606 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3607 if (rc == IOCB_ERROR) { 3608 lpfc_els_free_iocb(phba, elsiocb); 3609 lpfc_nlp_put(ndlp); 3610 return 1; 3611 } 3612 3613 return 0; 3614 } 3615 3616 /** 3617 * lpfc_issue_els_farpr - Issue a farp to an node on a vport 3618 * @vport: pointer to a host virtual N_Port data structure. 3619 * @nportid: N_Port identifier to the remote node. 3620 * @retry: number of retries to the command IOCB. 3621 * 3622 * This routine issues a Fibre Channel Address Resolution Response 3623 * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid) 3624 * is passed into the function. It first search the @vport node list to find 3625 * the matching ndlp. If no such ndlp is found, a new ndlp shall be created 3626 * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the 3627 * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command. 3628 * 3629 * Note that the ndlp reference count will be incremented by 1 for holding the 3630 * ndlp and the reference to ndlp will be stored into the context1 field of 3631 * the IOCB for the completion callback function to the FARPR ELS command. 
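 * If a node matching @nportid is already known to the @vport, that node's
 * port and node names are copied into the FARPR payload as well.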
3632 * 3633 * Return code 3634 * 0 - Successfully issued farpr command 3635 * 1 - Failed to issue farpr command 3636 **/ 3637 static int 3638 lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) 3639 { 3640 int rc = 0; 3641 struct lpfc_hba *phba = vport->phba; 3642 struct lpfc_iocbq *elsiocb; 3643 FARP *fp; 3644 uint8_t *pcmd; 3645 uint32_t *lp; 3646 uint16_t cmdsize; 3647 struct lpfc_nodelist *ondlp; 3648 struct lpfc_nodelist *ndlp; 3649 3650 cmdsize = (sizeof(uint32_t) + sizeof(FARP)); 3651 3652 ndlp = lpfc_findnode_did(vport, nportid); 3653 if (!ndlp) { 3654 ndlp = lpfc_nlp_init(vport, nportid); 3655 if (!ndlp) 3656 return 1; 3657 lpfc_enqueue_node(vport, ndlp); 3658 } 3659 3660 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3661 ndlp->nlp_DID, ELS_CMD_FARPR); 3662 if (!elsiocb) 3663 return 1; 3664 3665 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3666 3667 *((uint32_t *) (pcmd)) = ELS_CMD_FARPR; 3668 pcmd += sizeof(uint32_t); 3669 3670 /* Fill in FARPR payload */ 3671 fp = (FARP *) (pcmd); 3672 memset(fp, 0, sizeof(FARP)); 3673 lp = (uint32_t *) pcmd; 3674 *lp++ = be32_to_cpu(nportid); 3675 *lp++ = be32_to_cpu(vport->fc_myDID); 3676 fp->Rflags = 0; 3677 fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE); 3678 3679 memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name)); 3680 memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 3681 ondlp = lpfc_findnode_did(vport, nportid); 3682 if (ondlp) { 3683 memcpy(&fp->OportName, &ondlp->nlp_portname, 3684 sizeof(struct lpfc_name)); 3685 memcpy(&fp->OnodeName, &ondlp->nlp_nodename, 3686 sizeof(struct lpfc_name)); 3687 } 3688 3689 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3690 "Issue FARPR: did:x%x", 3691 ndlp->nlp_DID, 0, 0); 3692 3693 phba->fc_stat.elsXmitFARPR++; 3694 elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd; 3695 elsiocb->context1 = lpfc_nlp_get(ndlp); 3696 if (!elsiocb->context1) { 3697 lpfc_els_free_iocb(phba, elsiocb); 3698 return 1; 3699 } 3700 3701 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3702 if (rc == IOCB_ERROR) { 3703 /* The additional lpfc_nlp_put will cause the following 3704 * lpfc_els_free_iocb routine to trigger the release of 3705 * the node. 3706 */ 3707 lpfc_els_free_iocb(phba, elsiocb); 3708 lpfc_nlp_put(ndlp); 3709 return 1; 3710 } 3711 /* This will cause the callback-function lpfc_cmpl_els_cmd to 3712 * trigger the release of the node. 3713 */ 3714 /* Don't release reference count as RDF is likely outstanding */ 3715 return 0; 3716 } 3717 3718 /** 3719 * lpfc_issue_els_rdf - Register for diagnostic functions from the fabric. 3720 * @vport: pointer to a host virtual N_Port data structure. 3721 * @retry: retry counter for the command IOCB. 3722 * 3723 * This routine issues an ELS RDF to the Fabric Controller to register 3724 * for diagnostic functions. 3725 * 3726 * Note that the ndlp reference count will be incremented by 1 for holding the 3727 * ndlp and the reference to ndlp will be stored into the context1 field of 3728 * the IOCB for the completion callback function to the RDF ELS command. 
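 * The request registers for the Link Integrity, Delivery, Peer Congestion
 * and Congestion descriptor tags (ELS_RDF_REG_TAG_CNT tags in total).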
3729 * 3730 * Return code 3731 * 0 - Successfully issued rdf command 3732 * 1 - Failed to issue rdf command 3733 **/ 3734 int 3735 lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry) 3736 { 3737 struct lpfc_hba *phba = vport->phba; 3738 struct lpfc_iocbq *elsiocb; 3739 struct lpfc_els_rdf_req *prdf; 3740 struct lpfc_nodelist *ndlp; 3741 uint16_t cmdsize; 3742 int rc; 3743 3744 cmdsize = sizeof(*prdf); 3745 3746 ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID); 3747 if (!ndlp) { 3748 ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID); 3749 if (!ndlp) 3750 return -ENODEV; 3751 lpfc_enqueue_node(vport, ndlp); 3752 } 3753 3754 /* RDF ELS is not required on an NPIV VN_Port. */ 3755 if (vport->port_type == LPFC_NPIV_PORT) 3756 return -EACCES; 3757 3758 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3759 ndlp->nlp_DID, ELS_CMD_RDF); 3760 if (!elsiocb) 3761 return -ENOMEM; 3762 3763 /* Configure the payload for the supported FPIN events. */ 3764 prdf = (struct lpfc_els_rdf_req *) 3765 (((struct lpfc_dmabuf *)elsiocb->context2)->virt); 3766 memset(prdf, 0, cmdsize); 3767 prdf->rdf.fpin_cmd = ELS_RDF; 3768 prdf->rdf.desc_len = cpu_to_be32(sizeof(struct lpfc_els_rdf_req) - 3769 sizeof(struct fc_els_rdf)); 3770 prdf->reg_d1.reg_desc.desc_tag = cpu_to_be32(ELS_DTAG_FPIN_REGISTER); 3771 prdf->reg_d1.reg_desc.desc_len = cpu_to_be32( 3772 FC_TLV_DESC_LENGTH_FROM_SZ(prdf->reg_d1)); 3773 prdf->reg_d1.reg_desc.count = cpu_to_be32(ELS_RDF_REG_TAG_CNT); 3774 prdf->reg_d1.desc_tags[0] = cpu_to_be32(ELS_DTAG_LNK_INTEGRITY); 3775 prdf->reg_d1.desc_tags[1] = cpu_to_be32(ELS_DTAG_DELIVERY); 3776 prdf->reg_d1.desc_tags[2] = cpu_to_be32(ELS_DTAG_PEER_CONGEST); 3777 prdf->reg_d1.desc_tags[3] = cpu_to_be32(ELS_DTAG_CONGESTION); 3778 3779 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3780 "6444 Xmit RDF to remote NPORT x%x Reg: %x %x\n", 3781 ndlp->nlp_DID, phba->cgn_reg_signal, 3782 phba->cgn_reg_fpin); 3783 3784 phba->cgn_fpin_frequency = LPFC_FPIN_INIT_FREQ; 3785 elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd; 3786 elsiocb->context1 = lpfc_nlp_get(ndlp); 3787 if (!elsiocb->context1) { 3788 lpfc_els_free_iocb(phba, elsiocb); 3789 return -EIO; 3790 } 3791 3792 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3793 "Issue RDF: did:x%x refcnt %d", 3794 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3795 3796 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3797 if (rc == IOCB_ERROR) { 3798 lpfc_els_free_iocb(phba, elsiocb); 3799 lpfc_nlp_put(ndlp); 3800 return -EIO; 3801 } 3802 return 0; 3803 } 3804 3805 /** 3806 * lpfc_els_rcv_rdf - Receive RDF ELS request from the fabric. 3807 * @vport: pointer to a host virtual N_Port data structure. 3808 * @cmdiocb: pointer to lpfc command iocb data structure. 3809 * @ndlp: pointer to a node-list data structure. 3810 * 3811 * A received RDF implies a possible change to fabric supported diagnostic 3812 * functions. This routine sends LS_ACC and then has the Nx_Port issue a new 3813 * RDF request to reregister for supported diagnostic functions. 
3814 *
3815 * Return code
3816 * 0 - Success
3817 * -EIO - Failed to process received RDF
3818 **/
3819 static int
3820 lpfc_els_rcv_rdf(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3821 struct lpfc_nodelist *ndlp)
3822 {
3823 /* Send LS_ACC */
3824 if (lpfc_els_rsp_acc(vport, ELS_CMD_RDF, cmdiocb, ndlp, NULL)) {
3825 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
3826 "1623 Failed to RDF_ACC from x%x for x%x\n",
3827 ndlp->nlp_DID, vport->fc_myDID);
3828 return -EIO;
3829 }
3830
3831 /* Issue a new RDF for reregistering */
3832 if (lpfc_issue_els_rdf(vport, 0)) {
3833 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
3834 "2623 Failed to re register RDF for x%x\n",
3835 vport->fc_myDID);
3836 return -EIO;
3837 }
3838
3839 return 0;
3840 }
3841
3842 /**
3843 * lpfc_least_capable_settings - helper function for EDC rsp processing
3844 * @phba: pointer to lpfc hba data structure.
3845 * @pcgd: pointer to congestion detection descriptor in EDC rsp.
3846 *
3847 * This helper routine determines the least capable setting for
3848 * congestion signals, signal freq, including scale, from the
3849 * congestion detection descriptor in the EDC rsp. The routine
3850 * sets @phba values in preparation for a set_features mailbox.
3851 **/
3852 static void
3853 lpfc_least_capable_settings(struct lpfc_hba *phba,
3854 struct fc_diag_cg_sig_desc *pcgd)
3855 {
3856 u32 rsp_sig_cap = 0, drv_sig_cap = 0;
3857 u32 rsp_sig_freq_cyc = 0, rsp_sig_freq_scale = 0;
3858 struct lpfc_cgn_info *cp;
3859 u32 crc;
3860 u16 sig_freq;
3861
3862 /* Get rsp signal and frequency capabilities. */
3863 rsp_sig_cap = be32_to_cpu(pcgd->xmt_signal_capability);
3864 rsp_sig_freq_cyc = be16_to_cpu(pcgd->xmt_signal_frequency.count);
3865 rsp_sig_freq_scale = be16_to_cpu(pcgd->xmt_signal_frequency.units);
3866
3867 /* If the FPort does not support signals, set FPIN only */
3868 if (rsp_sig_cap == EDC_CG_SIG_NOTSUPPORTED)
3869 goto out_no_support;
3870
3871 /* Apply the xmt scale to the xmt cycle to get the correct frequency.
3872 * The adapter default is 100 milliseconds. Convert all xmt cycle values
3873 * to milliseconds.
3874 */
3875 switch (rsp_sig_freq_scale) {
3876 case EDC_CG_SIGFREQ_SEC:
3877 rsp_sig_freq_cyc *= MSEC_PER_SEC;
3878 break;
3879 case EDC_CG_SIGFREQ_MSEC:
3880 rsp_sig_freq_cyc = 1;
3881 break;
3882 default:
3883 goto out_no_support;
3884 }
3885
3886 /* Convenient shorthand. */
3887 drv_sig_cap = phba->cgn_reg_signal;
3888
3889 /* Choose the least capable frequency. */
3890 if (rsp_sig_freq_cyc > phba->cgn_sig_freq)
3891 phba->cgn_sig_freq = rsp_sig_freq_cyc;
3892
3893 /* There should be some common signal support. Settle on the least
3894 * capable signal and adjust FPIN values. Initialize defaults to ease
3895 * the decision.
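 * For example, if the FPort grants WARN_ALARM but the driver registered
 * WARN_ONLY, the negotiated signal is WARN_ONLY and alarm notifications
 * continue to be delivered as FPINs.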
3896 */ 3897 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM; 3898 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 3899 if (rsp_sig_cap == EDC_CG_SIG_WARN_ONLY && 3900 (drv_sig_cap == EDC_CG_SIG_WARN_ONLY || 3901 drv_sig_cap == EDC_CG_SIG_WARN_ALARM)) { 3902 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY; 3903 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN; 3904 } 3905 if (rsp_sig_cap == EDC_CG_SIG_WARN_ALARM) { 3906 if (drv_sig_cap == EDC_CG_SIG_WARN_ALARM) { 3907 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ALARM; 3908 phba->cgn_reg_fpin = LPFC_CGN_FPIN_NONE; 3909 } 3910 if (drv_sig_cap == EDC_CG_SIG_WARN_ONLY) { 3911 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY; 3912 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN; 3913 } 3914 } 3915 3916 if (!phba->cgn_i) 3917 return; 3918 3919 /* Update signal frequency in congestion info buffer */ 3920 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; 3921 3922 /* Frequency (in ms) Signal Warning/Signal Congestion Notifications 3923 * are received by the HBA 3924 */ 3925 sig_freq = phba->cgn_sig_freq; 3926 3927 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY) 3928 cp->cgn_warn_freq = cpu_to_le16(sig_freq); 3929 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) { 3930 cp->cgn_alarm_freq = cpu_to_le16(sig_freq); 3931 cp->cgn_warn_freq = cpu_to_le16(sig_freq); 3932 } 3933 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED); 3934 cp->cgn_info_crc = cpu_to_le32(crc); 3935 return; 3936 3937 out_no_support: 3938 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 3939 phba->cgn_sig_freq = 0; 3940 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; 3941 } 3942 3943 DECLARE_ENUM2STR_LOOKUP(lpfc_get_tlv_dtag_nm, fc_ls_tlv_dtag, 3944 FC_LS_TLV_DTAG_INIT); 3945 3946 /** 3947 * lpfc_cmpl_els_edc - Completion callback function for EDC 3948 * @phba: pointer to lpfc hba data structure. 3949 * @cmdiocb: pointer to lpfc command iocb data structure. 3950 * @rspiocb: pointer to lpfc response iocb data structure. 3951 * 3952 * This routine is the completion callback function for issuing the Exchange 3953 * Diagnostic Capabilities (EDC) command. The driver issues an EDC to 3954 * notify the FPort of its Congestion and Link Fault capabilities. This 3955 * routine parses the FPort's response and decides on the least common 3956 * values applicable to both FPort and NPort for Warnings and Alarms that 3957 * are communicated via hardware signals. 
3958 **/ 3959 static void 3960 lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3961 struct lpfc_iocbq *rspiocb) 3962 { 3963 IOCB_t *irsp_iocb; 3964 struct fc_els_edc_resp *edc_rsp; 3965 struct fc_tlv_desc *tlv; 3966 struct fc_diag_cg_sig_desc *pcgd; 3967 struct fc_diag_lnkflt_desc *plnkflt; 3968 struct lpfc_dmabuf *pcmd, *prsp; 3969 const char *dtag_nm; 3970 u32 *pdata, dtag; 3971 int desc_cnt = 0, bytes_remain; 3972 bool rcv_cap_desc = false; 3973 struct lpfc_nodelist *ndlp; 3974 u32 ulp_status, ulp_word4, tmo, did, iotag; 3975 3976 ndlp = cmdiocb->context1; 3977 3978 ulp_status = get_job_ulpstatus(phba, rspiocb); 3979 ulp_word4 = get_job_word4(phba, rspiocb); 3980 did = get_job_els_rsp64_did(phba, rspiocb); 3981 3982 if (phba->sli_rev == LPFC_SLI_REV4) { 3983 tmo = get_wqe_tmo(rspiocb); 3984 iotag = get_wqe_reqtag(rspiocb); 3985 } else { 3986 irsp_iocb = &rspiocb->iocb; 3987 tmo = irsp_iocb->ulpTimeout; 3988 iotag = irsp_iocb->ulpIoTag; 3989 } 3990 3991 lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD, 3992 "EDC cmpl: status:x%x/x%x did:x%x", 3993 ulp_status, ulp_word4, did); 3994 3995 /* ELS cmd tag <ulpIoTag> completes */ 3996 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3997 "4201 EDC cmd tag x%x completes Data: x%x x%x x%x\n", 3998 iotag, ulp_status, ulp_word4, tmo); 3999 4000 pcmd = (struct lpfc_dmabuf *)cmdiocb->context2; 4001 if (!pcmd) 4002 goto out; 4003 4004 pdata = (u32 *)pcmd->virt; 4005 if (!pdata) 4006 goto out; 4007 4008 /* Need to clear signal values, send features MB and RDF with FPIN. */ 4009 if (ulp_status) 4010 goto out; 4011 4012 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 4013 if (!prsp) 4014 goto out; 4015 4016 edc_rsp = prsp->virt; 4017 if (!edc_rsp) 4018 goto out; 4019 4020 /* ELS cmd tag <ulpIoTag> completes */ 4021 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 4022 "4676 Fabric EDC Rsp: " 4023 "0x%02x, 0x%08x\n", 4024 edc_rsp->acc_hdr.la_cmd, 4025 be32_to_cpu(edc_rsp->desc_list_len)); 4026 4027 /* 4028 * Payload length in bytes is the response descriptor list 4029 * length minus the 12 bytes of Link Service Request 4030 * Information descriptor in the reply. 
4031 */ 4032 bytes_remain = be32_to_cpu(edc_rsp->desc_list_len) - 4033 sizeof(struct fc_els_lsri_desc); 4034 if (bytes_remain <= 0) 4035 goto out; 4036 4037 tlv = edc_rsp->desc; 4038 4039 /* 4040 * cycle through EDC diagnostic descriptors to find the 4041 * congestion signaling capability descriptor 4042 */ 4043 while (bytes_remain) { 4044 if (bytes_remain < FC_TLV_DESC_HDR_SZ) { 4045 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 4046 "6461 Truncated TLV hdr on " 4047 "Diagnostic descriptor[%d]\n", 4048 desc_cnt); 4049 goto out; 4050 } 4051 4052 dtag = be32_to_cpu(tlv->desc_tag); 4053 switch (dtag) { 4054 case ELS_DTAG_LNK_FAULT_CAP: 4055 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 4056 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 4057 sizeof(struct fc_diag_lnkflt_desc)) { 4058 lpfc_printf_log( 4059 phba, KERN_WARNING, LOG_CGN_MGMT, 4060 "6462 Truncated Link Fault Diagnostic " 4061 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 4062 desc_cnt, bytes_remain, 4063 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 4064 sizeof(struct fc_diag_cg_sig_desc)); 4065 goto out; 4066 } 4067 plnkflt = (struct fc_diag_lnkflt_desc *)tlv; 4068 lpfc_printf_log( 4069 phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 4070 "4617 Link Fault Desc Data: 0x%08x 0x%08x " 4071 "0x%08x 0x%08x 0x%08x\n", 4072 be32_to_cpu(plnkflt->desc_tag), 4073 be32_to_cpu(plnkflt->desc_len), 4074 be32_to_cpu( 4075 plnkflt->degrade_activate_threshold), 4076 be32_to_cpu( 4077 plnkflt->degrade_deactivate_threshold), 4078 be32_to_cpu(plnkflt->fec_degrade_interval)); 4079 break; 4080 case ELS_DTAG_CG_SIGNAL_CAP: 4081 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 4082 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 4083 sizeof(struct fc_diag_cg_sig_desc)) { 4084 lpfc_printf_log( 4085 phba, KERN_WARNING, LOG_CGN_MGMT, 4086 "6463 Truncated Cgn Signal Diagnostic " 4087 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 4088 desc_cnt, bytes_remain, 4089 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 4090 sizeof(struct fc_diag_cg_sig_desc)); 4091 goto out; 4092 } 4093 4094 pcgd = (struct fc_diag_cg_sig_desc *)tlv; 4095 lpfc_printf_log( 4096 phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 4097 "4616 CGN Desc Data: 0x%08x 0x%08x " 4098 "0x%08x 0x%04x 0x%04x 0x%08x 0x%04x 0x%04x\n", 4099 be32_to_cpu(pcgd->desc_tag), 4100 be32_to_cpu(pcgd->desc_len), 4101 be32_to_cpu(pcgd->xmt_signal_capability), 4102 be16_to_cpu(pcgd->xmt_signal_frequency.count), 4103 be16_to_cpu(pcgd->xmt_signal_frequency.units), 4104 be32_to_cpu(pcgd->rcv_signal_capability), 4105 be16_to_cpu(pcgd->rcv_signal_frequency.count), 4106 be16_to_cpu(pcgd->rcv_signal_frequency.units)); 4107 4108 /* Compare driver and Fport capabilities and choose 4109 * least common. 
4110 */ 4111 lpfc_least_capable_settings(phba, pcgd); 4112 rcv_cap_desc = true; 4113 break; 4114 default: 4115 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 4116 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 4117 "4919 unknown Diagnostic " 4118 "Descriptor[%d]: tag x%x (%s)\n", 4119 desc_cnt, dtag, dtag_nm); 4120 } 4121 4122 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); 4123 tlv = fc_tlv_next_desc(tlv); 4124 desc_cnt++; 4125 } 4126 4127 out: 4128 if (!rcv_cap_desc) { 4129 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; 4130 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 4131 phba->cgn_sig_freq = 0; 4132 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT, 4133 "4202 EDC rsp error - sending RDF " 4134 "for FPIN only.\n"); 4135 } 4136 4137 lpfc_config_cgn_signal(phba); 4138 4139 /* Check to see if link went down during discovery */ 4140 lpfc_els_chk_latt(phba->pport); 4141 lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD, 4142 "EDC Cmpl: did:x%x refcnt %d", 4143 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 4144 lpfc_els_free_iocb(phba, cmdiocb); 4145 lpfc_nlp_put(ndlp); 4146 } 4147 4148 static void 4149 lpfc_format_edc_cgn_desc(struct lpfc_hba *phba, struct fc_diag_cg_sig_desc *cgd) 4150 { 4151 /* We are assuming cgd was zero'ed before calling this routine */ 4152 4153 /* Configure the congestion detection capability */ 4154 cgd->desc_tag = cpu_to_be32(ELS_DTAG_CG_SIGNAL_CAP); 4155 4156 /* Descriptor len doesn't include the tag or len fields. */ 4157 cgd->desc_len = cpu_to_be32( 4158 FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_diag_cg_sig_desc)); 4159 4160 /* xmt_signal_capability already set to EDC_CG_SIG_NOTSUPPORTED. 4161 * xmt_signal_frequency.count already set to 0. 4162 * xmt_signal_frequency.units already set to 0. 4163 */ 4164 4165 if (phba->cmf_active_mode == LPFC_CFG_OFF) { 4166 /* rcv_signal_capability already set to EDC_CG_SIG_NOTSUPPORTED. 4167 * rcv_signal_frequency.count already set to 0. 4168 * rcv_signal_frequency.units already set to 0. 4169 */ 4170 phba->cgn_sig_freq = 0; 4171 return; 4172 } 4173 switch (phba->cgn_reg_signal) { 4174 case EDC_CG_SIG_WARN_ONLY: 4175 cgd->rcv_signal_capability = cpu_to_be32(EDC_CG_SIG_WARN_ONLY); 4176 break; 4177 case EDC_CG_SIG_WARN_ALARM: 4178 cgd->rcv_signal_capability = cpu_to_be32(EDC_CG_SIG_WARN_ALARM); 4179 break; 4180 default: 4181 /* rcv_signal_capability left 0 thus no support */ 4182 break; 4183 } 4184 4185 /* We start negotiation with lpfc_fabric_cgn_frequency, after 4186 * the completion we settle on the higher frequency. 4187 */ 4188 cgd->rcv_signal_frequency.count = 4189 cpu_to_be16(lpfc_fabric_cgn_frequency); 4190 cgd->rcv_signal_frequency.units = 4191 cpu_to_be16(EDC_CG_SIGFREQ_MSEC); 4192 } 4193 4194 /** 4195 * lpfc_issue_els_edc - Exchange Diagnostic Capabilities with the fabric. 4196 * @vport: pointer to a host virtual N_Port data structure. 4197 * @retry: retry counter for the command iocb. 4198 * 4199 * This routine issues an ELS EDC to the F-Port Controller to communicate 4200 * this N_Port's support of hardware signals in its Congestion 4201 * Capabilities Descriptor. 4202 * 4203 * Note: This routine does not check if one or more signals are 4204 * set in the cgn_reg_signal parameter. The caller makes the 4205 * decision to enforce cgn_reg_signal as nonzero or zero depending 4206 * on the conditions. During Fabric requests, the driver 4207 * requires cgn_reg_signals to be nonzero. 
But a dynamic request 4208 * to set the congestion mode to OFF from Monitor or Manage 4209 * would correctly issue an EDC with no signals enabled to 4210 * turn off switch functionality and then update the FW. 4211 * 4212 * Return code 4213 * 0 - Successfully issued edc command 4214 * 1 - Failed to issue edc command 4215 **/ 4216 int 4217 lpfc_issue_els_edc(struct lpfc_vport *vport, uint8_t retry) 4218 { 4219 struct lpfc_hba *phba = vport->phba; 4220 struct lpfc_iocbq *elsiocb; 4221 struct lpfc_els_edc_req *edc_req; 4222 struct fc_diag_cg_sig_desc *cgn_desc; 4223 u16 cmdsize; 4224 struct lpfc_nodelist *ndlp; 4225 u8 *pcmd = NULL; 4226 u32 edc_req_size, cgn_desc_size; 4227 int rc; 4228 4229 if (vport->port_type == LPFC_NPIV_PORT) 4230 return -EACCES; 4231 4232 ndlp = lpfc_findnode_did(vport, Fabric_DID); 4233 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 4234 return -ENODEV; 4235 4236 /* If HBA doesn't support signals, drop into RDF */ 4237 if (!phba->cgn_init_reg_signal) 4238 goto try_rdf; 4239 4240 edc_req_size = sizeof(struct fc_els_edc); 4241 cgn_desc_size = sizeof(struct fc_diag_cg_sig_desc); 4242 cmdsize = edc_req_size + cgn_desc_size; 4243 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 4244 ndlp->nlp_DID, ELS_CMD_EDC); 4245 if (!elsiocb) 4246 goto try_rdf; 4247 4248 /* Configure the payload for the supported Diagnostics capabilities. */ 4249 pcmd = (u8 *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt); 4250 memset(pcmd, 0, cmdsize); 4251 edc_req = (struct lpfc_els_edc_req *)pcmd; 4252 edc_req->edc.desc_len = cpu_to_be32(cgn_desc_size); 4253 edc_req->edc.edc_cmd = ELS_EDC; 4254 4255 cgn_desc = &edc_req->cgn_desc; 4256 4257 lpfc_format_edc_cgn_desc(phba, cgn_desc); 4258 4259 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency; 4260 4261 lpfc_printf_vlog(vport, KERN_INFO, LOG_CGN_MGMT, 4262 "4623 Xmit EDC to remote " 4263 "NPORT x%x reg_sig x%x reg_fpin:x%x\n", 4264 ndlp->nlp_DID, phba->cgn_reg_signal, 4265 phba->cgn_reg_fpin); 4266 4267 elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd; 4268 elsiocb->context1 = lpfc_nlp_get(ndlp); 4269 if (!elsiocb->context1) { 4270 lpfc_els_free_iocb(phba, elsiocb); 4271 return -EIO; 4272 } 4273 4274 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 4275 "Issue EDC: did:x%x refcnt %d", 4276 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 4277 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 4278 if (rc == IOCB_ERROR) { 4279 /* The additional lpfc_nlp_put will cause the following 4280 * lpfc_els_free_iocb routine to trigger the rlease of 4281 * the node. 4282 */ 4283 lpfc_els_free_iocb(phba, elsiocb); 4284 lpfc_nlp_put(ndlp); 4285 goto try_rdf; 4286 } 4287 return 0; 4288 try_rdf: 4289 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM; 4290 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 4291 rc = lpfc_issue_els_rdf(vport, 0); 4292 return rc; 4293 } 4294 4295 /** 4296 * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry 4297 * @vport: pointer to a host virtual N_Port data structure. 4298 * @nlp: pointer to a node-list data structure. 4299 * 4300 * This routine cancels the timer with a delayed IOCB-command retry for 4301 * a @vport's @ndlp. It stops the timer for the delayed function retrial and 4302 * removes the ELS retry event if it presents. In addition, if the 4303 * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB 4304 * commands are sent for the @vport's nodes that require issuing discovery 4305 * ADISC. 
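 * When no discovery nodes remain after the port reaches ready state, the
 * FC_NDISC_ACTIVE flag is cleared and discovery is wound down via
 * lpfc_can_disctmo() and lpfc_end_rscn().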
4306 **/ 4307 void 4308 lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) 4309 { 4310 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4311 struct lpfc_work_evt *evtp; 4312 4313 if (!(nlp->nlp_flag & NLP_DELAY_TMO)) 4314 return; 4315 spin_lock_irq(&nlp->lock); 4316 nlp->nlp_flag &= ~NLP_DELAY_TMO; 4317 spin_unlock_irq(&nlp->lock); 4318 del_timer_sync(&nlp->nlp_delayfunc); 4319 nlp->nlp_last_elscmd = 0; 4320 if (!list_empty(&nlp->els_retry_evt.evt_listp)) { 4321 list_del_init(&nlp->els_retry_evt.evt_listp); 4322 /* Decrement nlp reference count held for the delayed retry */ 4323 evtp = &nlp->els_retry_evt; 4324 lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1); 4325 } 4326 if (nlp->nlp_flag & NLP_NPR_2B_DISC) { 4327 spin_lock_irq(&nlp->lock); 4328 nlp->nlp_flag &= ~NLP_NPR_2B_DISC; 4329 spin_unlock_irq(&nlp->lock); 4330 if (vport->num_disc_nodes) { 4331 if (vport->port_state < LPFC_VPORT_READY) { 4332 /* Check if there are more ADISCs to be sent */ 4333 lpfc_more_adisc(vport); 4334 } else { 4335 /* Check if there are more PLOGIs to be sent */ 4336 lpfc_more_plogi(vport); 4337 if (vport->num_disc_nodes == 0) { 4338 spin_lock_irq(shost->host_lock); 4339 vport->fc_flag &= ~FC_NDISC_ACTIVE; 4340 spin_unlock_irq(shost->host_lock); 4341 lpfc_can_disctmo(vport); 4342 lpfc_end_rscn(vport); 4343 } 4344 } 4345 } 4346 } 4347 return; 4348 } 4349 4350 /** 4351 * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer 4352 * @t: pointer to the timer function associated data (ndlp). 4353 * 4354 * This routine is invoked by the ndlp delayed-function timer to check 4355 * whether there is any pending ELS retry event(s) with the node. If not, it 4356 * simply returns. Otherwise, if there is at least one ELS delayed event, it 4357 * adds the delayed events to the HBA work list and invokes the 4358 * lpfc_worker_wake_up() routine to wake up worker thread to process the 4359 * event. Note that lpfc_nlp_get() is called before posting the event to 4360 * the work list to hold reference count of ndlp so that it guarantees the 4361 * reference to ndlp will still be available when the worker thread gets 4362 * to the event associated with the ndlp. 4363 **/ 4364 void 4365 lpfc_els_retry_delay(struct timer_list *t) 4366 { 4367 struct lpfc_nodelist *ndlp = from_timer(ndlp, t, nlp_delayfunc); 4368 struct lpfc_vport *vport = ndlp->vport; 4369 struct lpfc_hba *phba = vport->phba; 4370 unsigned long flags; 4371 struct lpfc_work_evt *evtp = &ndlp->els_retry_evt; 4372 4373 spin_lock_irqsave(&phba->hbalock, flags); 4374 if (!list_empty(&evtp->evt_listp)) { 4375 spin_unlock_irqrestore(&phba->hbalock, flags); 4376 return; 4377 } 4378 4379 /* We need to hold the node by incrementing the reference 4380 * count until the queued work is done 4381 */ 4382 evtp->evt_arg1 = lpfc_nlp_get(ndlp); 4383 if (evtp->evt_arg1) { 4384 evtp->evt = LPFC_EVT_ELS_RETRY; 4385 list_add_tail(&evtp->evt_listp, &phba->work_list); 4386 lpfc_worker_wake_up(phba); 4387 } 4388 spin_unlock_irqrestore(&phba->hbalock, flags); 4389 return; 4390 } 4391 4392 /** 4393 * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function 4394 * @ndlp: pointer to a node-list data structure. 4395 * 4396 * This routine is the worker-thread handler for processing the @ndlp delayed 4397 * event(s), posted by the lpfc_els_retry_delay() routine. 
It simply retrieves 4398 * the last ELS command from the associated ndlp and invokes the proper ELS 4399 * function according to the delayed ELS command to retry the command. 4400 **/ 4401 void 4402 lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) 4403 { 4404 struct lpfc_vport *vport = ndlp->vport; 4405 uint32_t cmd, retry; 4406 4407 spin_lock_irq(&ndlp->lock); 4408 cmd = ndlp->nlp_last_elscmd; 4409 ndlp->nlp_last_elscmd = 0; 4410 4411 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { 4412 spin_unlock_irq(&ndlp->lock); 4413 return; 4414 } 4415 4416 ndlp->nlp_flag &= ~NLP_DELAY_TMO; 4417 spin_unlock_irq(&ndlp->lock); 4418 /* 4419 * If a discovery event readded nlp_delayfunc after timer 4420 * firing and before processing the timer, cancel the 4421 * nlp_delayfunc. 4422 */ 4423 del_timer_sync(&ndlp->nlp_delayfunc); 4424 retry = ndlp->nlp_retry; 4425 ndlp->nlp_retry = 0; 4426 4427 switch (cmd) { 4428 case ELS_CMD_FLOGI: 4429 lpfc_issue_els_flogi(vport, ndlp, retry); 4430 break; 4431 case ELS_CMD_PLOGI: 4432 if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) { 4433 ndlp->nlp_prev_state = ndlp->nlp_state; 4434 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 4435 } 4436 break; 4437 case ELS_CMD_ADISC: 4438 if (!lpfc_issue_els_adisc(vport, ndlp, retry)) { 4439 ndlp->nlp_prev_state = ndlp->nlp_state; 4440 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 4441 } 4442 break; 4443 case ELS_CMD_PRLI: 4444 case ELS_CMD_NVMEPRLI: 4445 if (!lpfc_issue_els_prli(vport, ndlp, retry)) { 4446 ndlp->nlp_prev_state = ndlp->nlp_state; 4447 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 4448 } 4449 break; 4450 case ELS_CMD_LOGO: 4451 if (!lpfc_issue_els_logo(vport, ndlp, retry)) { 4452 ndlp->nlp_prev_state = ndlp->nlp_state; 4453 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 4454 } 4455 break; 4456 case ELS_CMD_FDISC: 4457 if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)) 4458 lpfc_issue_els_fdisc(vport, ndlp, retry); 4459 break; 4460 } 4461 return; 4462 } 4463 4464 /** 4465 * lpfc_link_reset - Issue link reset 4466 * @vport: pointer to a virtual N_Port data structure. 4467 * 4468 * This routine performs link reset by sending INIT_LINK mailbox command. 4469 * For SLI-3 adapter, link attention interrupt is enabled before issuing 4470 * INIT_LINK mailbox command. 
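 * The INIT_LINK mailbox command is issued with MBX_NOWAIT and completes
 * through lpfc_sli_def_mbox_cmpl().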
4471 * 4472 * Return code 4473 * 0 - Link reset initiated successfully 4474 * 1 - Failed to initiate link reset 4475 **/ 4476 int 4477 lpfc_link_reset(struct lpfc_vport *vport) 4478 { 4479 struct lpfc_hba *phba = vport->phba; 4480 LPFC_MBOXQ_t *mbox; 4481 uint32_t control; 4482 int rc; 4483 4484 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 4485 "2851 Attempt link reset\n"); 4486 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4487 if (!mbox) { 4488 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4489 "2852 Failed to allocate mbox memory"); 4490 return 1; 4491 } 4492 4493 /* Enable Link attention interrupts */ 4494 if (phba->sli_rev <= LPFC_SLI_REV3) { 4495 spin_lock_irq(&phba->hbalock); 4496 phba->sli.sli_flag |= LPFC_PROCESS_LA; 4497 control = readl(phba->HCregaddr); 4498 control |= HC_LAINT_ENA; 4499 writel(control, phba->HCregaddr); 4500 readl(phba->HCregaddr); /* flush */ 4501 spin_unlock_irq(&phba->hbalock); 4502 } 4503 4504 lpfc_init_link(phba, mbox, phba->cfg_topology, 4505 phba->cfg_link_speed); 4506 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4507 mbox->vport = vport; 4508 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 4509 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 4510 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4511 "2853 Failed to issue INIT_LINK " 4512 "mbox command, rc:x%x\n", rc); 4513 mempool_free(mbox, phba->mbox_mem_pool); 4514 return 1; 4515 } 4516 4517 return 0; 4518 } 4519 4520 /** 4521 * lpfc_els_retry - Make retry decision on an els command iocb 4522 * @phba: pointer to lpfc hba data structure. 4523 * @cmdiocb: pointer to lpfc command iocb data structure. 4524 * @rspiocb: pointer to lpfc response iocb data structure. 4525 * 4526 * This routine makes a retry decision on an ELS command IOCB, which has 4527 * failed. The following ELS IOCBs use this function for retrying the command 4528 * when previously issued command responsed with error status: FLOGI, PLOGI, 4529 * PRLI, ADISC and FDISC. Based on the ELS command type and the 4530 * returned error status, it makes the decision whether a retry shall be 4531 * issued for the command, and whether a retry shall be made immediately or 4532 * delayed. In the former case, the corresponding ELS command issuing-function 4533 * is called to retry the command. In the later case, the ELS command shall 4534 * be posted to the ndlp delayed event and delayed function timer set to the 4535 * ndlp for the delayed command issusing. 4536 * 4537 * Return code 4538 * 0 - No retry of els command is made 4539 * 1 - Immediate or delayed retry of els command is made 4540 **/ 4541 static int 4542 lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 4543 struct lpfc_iocbq *rspiocb) 4544 { 4545 struct lpfc_vport *vport = cmdiocb->vport; 4546 union lpfc_wqe128 *irsp = &rspiocb->wqe; 4547 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 4548 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 4549 uint32_t *elscmd; 4550 struct ls_rjt stat; 4551 int retry = 0, maxretry = lpfc_max_els_tries, delay = 0; 4552 int logerr = 0; 4553 uint32_t cmd = 0; 4554 uint32_t did; 4555 int link_reset = 0, rc; 4556 u32 ulp_status = get_job_ulpstatus(phba, rspiocb); 4557 u32 ulp_word4 = get_job_word4(phba, rspiocb); 4558 4559 4560 /* Note: context2 may be 0 for internal driver abort 4561 * of delays ELS command. 
4562 */ 4563 4564 if (pcmd && pcmd->virt) { 4565 elscmd = (uint32_t *) (pcmd->virt); 4566 cmd = *elscmd++; 4567 } 4568 4569 if (ndlp) 4570 did = ndlp->nlp_DID; 4571 else { 4572 /* We should only hit this case for retrying PLOGI */ 4573 did = get_job_els_rsp64_did(phba, rspiocb); 4574 ndlp = lpfc_findnode_did(vport, did); 4575 if (!ndlp && (cmd != ELS_CMD_PLOGI)) 4576 return 0; 4577 } 4578 4579 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 4580 "Retry ELS: wd7:x%x wd4:x%x did:x%x", 4581 *(((uint32_t *)irsp) + 7), ulp_word4, did); 4582 4583 switch (ulp_status) { 4584 case IOSTAT_FCP_RSP_ERROR: 4585 break; 4586 case IOSTAT_REMOTE_STOP: 4587 if (phba->sli_rev == LPFC_SLI_REV4) { 4588 /* This IO was aborted by the target, we don't 4589 * know the rxid and because we did not send the 4590 * ABTS we cannot generate and RRQ. 4591 */ 4592 lpfc_set_rrq_active(phba, ndlp, 4593 cmdiocb->sli4_lxritag, 0, 0); 4594 } 4595 break; 4596 case IOSTAT_LOCAL_REJECT: 4597 switch ((ulp_word4 & IOERR_PARAM_MASK)) { 4598 case IOERR_LOOP_OPEN_FAILURE: 4599 if (cmd == ELS_CMD_FLOGI) { 4600 if (PCI_DEVICE_ID_HORNET == 4601 phba->pcidev->device) { 4602 phba->fc_topology = LPFC_TOPOLOGY_LOOP; 4603 phba->pport->fc_myDID = 0; 4604 phba->alpa_map[0] = 0; 4605 phba->alpa_map[1] = 0; 4606 } 4607 } 4608 if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0) 4609 delay = 1000; 4610 retry = 1; 4611 break; 4612 4613 case IOERR_ILLEGAL_COMMAND: 4614 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4615 "0124 Retry illegal cmd x%x " 4616 "retry:x%x delay:x%x\n", 4617 cmd, cmdiocb->retry, delay); 4618 retry = 1; 4619 /* All command's retry policy */ 4620 maxretry = 8; 4621 if (cmdiocb->retry > 2) 4622 delay = 1000; 4623 break; 4624 4625 case IOERR_NO_RESOURCES: 4626 logerr = 1; /* HBA out of resources */ 4627 retry = 1; 4628 if (cmdiocb->retry > 100) 4629 delay = 100; 4630 maxretry = 250; 4631 break; 4632 4633 case IOERR_ILLEGAL_FRAME: 4634 delay = 100; 4635 retry = 1; 4636 break; 4637 4638 case IOERR_INVALID_RPI: 4639 if (cmd == ELS_CMD_PLOGI && 4640 did == NameServer_DID) { 4641 /* Continue forever if plogi to */ 4642 /* the nameserver fails */ 4643 maxretry = 0; 4644 delay = 100; 4645 } 4646 retry = 1; 4647 break; 4648 4649 case IOERR_SEQUENCE_TIMEOUT: 4650 if (cmd == ELS_CMD_PLOGI && 4651 did == NameServer_DID && 4652 (cmdiocb->retry + 1) == maxretry) { 4653 /* Reset the Link */ 4654 link_reset = 1; 4655 break; 4656 } 4657 retry = 1; 4658 delay = 100; 4659 break; 4660 case IOERR_SLI_ABORTED: 4661 /* Retry ELS PLOGI command? 4662 * Possibly the rport just wasn't ready. 
4663 */ 4664 if (cmd == ELS_CMD_PLOGI) { 4665 /* No retry if state change */ 4666 if (ndlp && 4667 ndlp->nlp_state != NLP_STE_PLOGI_ISSUE) 4668 goto out_retry; 4669 retry = 1; 4670 maxretry = 2; 4671 } 4672 break; 4673 } 4674 break; 4675 4676 case IOSTAT_NPORT_RJT: 4677 case IOSTAT_FABRIC_RJT: 4678 if (ulp_word4 & RJT_UNAVAIL_TEMP) { 4679 retry = 1; 4680 break; 4681 } 4682 break; 4683 4684 case IOSTAT_NPORT_BSY: 4685 case IOSTAT_FABRIC_BSY: 4686 logerr = 1; /* Fabric / Remote NPort out of resources */ 4687 retry = 1; 4688 break; 4689 4690 case IOSTAT_LS_RJT: 4691 stat.un.ls_rjt_error_be = cpu_to_be32(ulp_word4); 4692 /* Added for Vendor specifc support 4693 * Just keep retrying for these Rsn / Exp codes 4694 */ 4695 if ((vport->fc_flag & FC_PT2PT) && 4696 cmd == ELS_CMD_NVMEPRLI) { 4697 switch (stat.un.b.lsRjtRsnCode) { 4698 case LSRJT_UNABLE_TPC: 4699 case LSRJT_INVALID_CMD: 4700 case LSRJT_LOGICAL_ERR: 4701 case LSRJT_CMD_UNSUPPORTED: 4702 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 4703 "0168 NVME PRLI LS_RJT " 4704 "reason %x port doesn't " 4705 "support NVME, disabling NVME\n", 4706 stat.un.b.lsRjtRsnCode); 4707 retry = 0; 4708 vport->fc_flag |= FC_PT2PT_NO_NVME; 4709 goto out_retry; 4710 } 4711 } 4712 switch (stat.un.b.lsRjtRsnCode) { 4713 case LSRJT_UNABLE_TPC: 4714 /* The driver has a VALID PLOGI but the rport has 4715 * rejected the PRLI - can't do it now. Delay 4716 * for 1 second and try again. 4717 * 4718 * However, if explanation is REQ_UNSUPPORTED there's 4719 * no point to retry PRLI. 4720 */ 4721 if ((cmd == ELS_CMD_PRLI || cmd == ELS_CMD_NVMEPRLI) && 4722 stat.un.b.lsRjtRsnCodeExp != 4723 LSEXP_REQ_UNSUPPORTED) { 4724 delay = 1000; 4725 maxretry = lpfc_max_els_tries + 1; 4726 retry = 1; 4727 break; 4728 } 4729 4730 /* Legacy bug fix code for targets with PLOGI delays. */ 4731 if (stat.un.b.lsRjtRsnCodeExp == 4732 LSEXP_CMD_IN_PROGRESS) { 4733 if (cmd == ELS_CMD_PLOGI) { 4734 delay = 1000; 4735 maxretry = 48; 4736 } 4737 retry = 1; 4738 break; 4739 } 4740 if (stat.un.b.lsRjtRsnCodeExp == 4741 LSEXP_CANT_GIVE_DATA) { 4742 if (cmd == ELS_CMD_PLOGI) { 4743 delay = 1000; 4744 maxretry = 48; 4745 } 4746 retry = 1; 4747 break; 4748 } 4749 if (cmd == ELS_CMD_PLOGI) { 4750 delay = 1000; 4751 maxretry = lpfc_max_els_tries + 1; 4752 retry = 1; 4753 break; 4754 } 4755 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 4756 (cmd == ELS_CMD_FDISC) && 4757 (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){ 4758 lpfc_printf_vlog(vport, KERN_ERR, 4759 LOG_TRACE_EVENT, 4760 "0125 FDISC Failed (x%x). " 4761 "Fabric out of resources\n", 4762 stat.un.lsRjtError); 4763 lpfc_vport_set_state(vport, 4764 FC_VPORT_NO_FABRIC_RSCS); 4765 } 4766 break; 4767 4768 case LSRJT_LOGICAL_BSY: 4769 if ((cmd == ELS_CMD_PLOGI) || 4770 (cmd == ELS_CMD_PRLI) || 4771 (cmd == ELS_CMD_NVMEPRLI)) { 4772 delay = 1000; 4773 maxretry = 48; 4774 } else if (cmd == ELS_CMD_FDISC) { 4775 /* FDISC retry policy */ 4776 maxretry = 48; 4777 if (cmdiocb->retry >= 32) 4778 delay = 1000; 4779 } 4780 retry = 1; 4781 break; 4782 4783 case LSRJT_LOGICAL_ERR: 4784 /* There are some cases where switches return this 4785 * error when they are not ready and should be returning 4786 * Logical Busy. We should delay every time. 
4787 */ 4788 if (cmd == ELS_CMD_FDISC && 4789 stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) { 4790 maxretry = 3; 4791 delay = 1000; 4792 retry = 1; 4793 } else if (cmd == ELS_CMD_FLOGI && 4794 stat.un.b.lsRjtRsnCodeExp == 4795 LSEXP_NOTHING_MORE) { 4796 vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf; 4797 retry = 1; 4798 lpfc_printf_vlog(vport, KERN_ERR, 4799 LOG_TRACE_EVENT, 4800 "0820 FLOGI Failed (x%x). " 4801 "BBCredit Not Supported\n", 4802 stat.un.lsRjtError); 4803 } 4804 break; 4805 4806 case LSRJT_PROTOCOL_ERR: 4807 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 4808 (cmd == ELS_CMD_FDISC) && 4809 ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) || 4810 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID)) 4811 ) { 4812 lpfc_printf_vlog(vport, KERN_ERR, 4813 LOG_TRACE_EVENT, 4814 "0122 FDISC Failed (x%x). " 4815 "Fabric Detected Bad WWN\n", 4816 stat.un.lsRjtError); 4817 lpfc_vport_set_state(vport, 4818 FC_VPORT_FABRIC_REJ_WWN); 4819 } 4820 break; 4821 case LSRJT_VENDOR_UNIQUE: 4822 if ((stat.un.b.vendorUnique == 0x45) && 4823 (cmd == ELS_CMD_FLOGI)) { 4824 goto out_retry; 4825 } 4826 break; 4827 case LSRJT_CMD_UNSUPPORTED: 4828 /* lpfc nvmet returns this type of LS_RJT when it 4829 * receives an FCP PRLI because lpfc nvmet only 4830 * support NVME. ELS request is terminated for FCP4 4831 * on this rport. 4832 */ 4833 if (stat.un.b.lsRjtRsnCodeExp == 4834 LSEXP_REQ_UNSUPPORTED) { 4835 if (cmd == ELS_CMD_PRLI) { 4836 spin_lock_irq(&ndlp->lock); 4837 ndlp->nlp_flag |= NLP_FCP_PRLI_RJT; 4838 spin_unlock_irq(&ndlp->lock); 4839 retry = 0; 4840 goto out_retry; 4841 } 4842 } 4843 break; 4844 } 4845 break; 4846 4847 case IOSTAT_INTERMED_RSP: 4848 case IOSTAT_BA_RJT: 4849 break; 4850 4851 default: 4852 break; 4853 } 4854 4855 if (link_reset) { 4856 rc = lpfc_link_reset(vport); 4857 if (rc) { 4858 /* Do not give up. Retry PLOGI one more time and attempt 4859 * link reset if PLOGI fails again. 
4860 */ 4861 retry = 1; 4862 delay = 100; 4863 goto out_retry; 4864 } 4865 return 1; 4866 } 4867 4868 if (did == FDMI_DID) 4869 retry = 1; 4870 4871 if ((cmd == ELS_CMD_FLOGI) && 4872 (phba->fc_topology != LPFC_TOPOLOGY_LOOP) && 4873 !lpfc_error_lost_link(ulp_status, ulp_word4)) { 4874 /* FLOGI retry policy */ 4875 retry = 1; 4876 /* retry FLOGI forever */ 4877 if (phba->link_flag != LS_LOOPBACK_MODE) 4878 maxretry = 0; 4879 else 4880 maxretry = 2; 4881 4882 if (cmdiocb->retry >= 100) 4883 delay = 5000; 4884 else if (cmdiocb->retry >= 32) 4885 delay = 1000; 4886 } else if ((cmd == ELS_CMD_FDISC) && 4887 !lpfc_error_lost_link(ulp_status, ulp_word4)) { 4888 /* retry FDISCs every second up to devloss */ 4889 retry = 1; 4890 maxretry = vport->cfg_devloss_tmo; 4891 delay = 1000; 4892 } 4893 4894 cmdiocb->retry++; 4895 if (maxretry && (cmdiocb->retry >= maxretry)) { 4896 phba->fc_stat.elsRetryExceeded++; 4897 retry = 0; 4898 } 4899 4900 if ((vport->load_flag & FC_UNLOADING) != 0) 4901 retry = 0; 4902 4903 out_retry: 4904 if (retry) { 4905 if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) { 4906 /* Stop retrying PLOGI and FDISC if in FCF discovery */ 4907 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 4908 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4909 "2849 Stop retry ELS command " 4910 "x%x to remote NPORT x%x, " 4911 "Data: x%x x%x\n", cmd, did, 4912 cmdiocb->retry, delay); 4913 return 0; 4914 } 4915 } 4916 4917 /* Retry ELS command <elsCmd> to remote NPORT <did> */ 4918 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4919 "0107 Retry ELS command x%x to remote " 4920 "NPORT x%x Data: x%x x%x\n", 4921 cmd, did, cmdiocb->retry, delay); 4922 4923 if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) && 4924 ((ulp_status != IOSTAT_LOCAL_REJECT) || 4925 ((ulp_word4 & IOERR_PARAM_MASK) != 4926 IOERR_NO_RESOURCES))) { 4927 /* Don't reset timer for no resources */ 4928 4929 /* If discovery / RSCN timer is running, reset it */ 4930 if (timer_pending(&vport->fc_disctmo) || 4931 (vport->fc_flag & FC_RSCN_MODE)) 4932 lpfc_set_disctmo(vport); 4933 } 4934 4935 phba->fc_stat.elsXmitRetry++; 4936 if (ndlp && delay) { 4937 phba->fc_stat.elsDelayRetry++; 4938 ndlp->nlp_retry = cmdiocb->retry; 4939 4940 /* delay is specified in milliseconds */ 4941 mod_timer(&ndlp->nlp_delayfunc, 4942 jiffies + msecs_to_jiffies(delay)); 4943 spin_lock_irq(&ndlp->lock); 4944 ndlp->nlp_flag |= NLP_DELAY_TMO; 4945 spin_unlock_irq(&ndlp->lock); 4946 4947 ndlp->nlp_prev_state = ndlp->nlp_state; 4948 if ((cmd == ELS_CMD_PRLI) || 4949 (cmd == ELS_CMD_NVMEPRLI)) 4950 lpfc_nlp_set_state(vport, ndlp, 4951 NLP_STE_PRLI_ISSUE); 4952 else if (cmd != ELS_CMD_ADISC) 4953 lpfc_nlp_set_state(vport, ndlp, 4954 NLP_STE_NPR_NODE); 4955 ndlp->nlp_last_elscmd = cmd; 4956 4957 return 1; 4958 } 4959 switch (cmd) { 4960 case ELS_CMD_FLOGI: 4961 lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry); 4962 return 1; 4963 case ELS_CMD_FDISC: 4964 lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry); 4965 return 1; 4966 case ELS_CMD_PLOGI: 4967 if (ndlp) { 4968 ndlp->nlp_prev_state = ndlp->nlp_state; 4969 lpfc_nlp_set_state(vport, ndlp, 4970 NLP_STE_PLOGI_ISSUE); 4971 } 4972 lpfc_issue_els_plogi(vport, did, cmdiocb->retry); 4973 return 1; 4974 case ELS_CMD_ADISC: 4975 ndlp->nlp_prev_state = ndlp->nlp_state; 4976 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 4977 lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry); 4978 return 1; 4979 case ELS_CMD_PRLI: 4980 case ELS_CMD_NVMEPRLI: 4981 ndlp->nlp_prev_state = ndlp->nlp_state; 4982 lpfc_nlp_set_state(vport, ndlp, 
NLP_STE_PRLI_ISSUE); 4983 lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry); 4984 return 1; 4985 case ELS_CMD_LOGO: 4986 ndlp->nlp_prev_state = ndlp->nlp_state; 4987 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 4988 lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry); 4989 return 1; 4990 } 4991 } 4992 /* No retry ELS command <elsCmd> to remote NPORT <did> */ 4993 if (logerr) { 4994 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4995 "0137 No retry ELS command x%x to remote " 4996 "NPORT x%x: Out of Resources: Error:x%x/%x\n", 4997 cmd, did, ulp_status, 4998 ulp_word4); 4999 } 5000 else { 5001 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5002 "0108 No retry ELS command x%x to remote " 5003 "NPORT x%x Retried:%d Error:x%x/%x\n", 5004 cmd, did, cmdiocb->retry, ulp_status, 5005 ulp_word4); 5006 } 5007 return 0; 5008 } 5009 5010 /** 5011 * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb 5012 * @phba: pointer to lpfc hba data structure. 5013 * @buf_ptr1: pointer to the lpfc DMA buffer data structure. 5014 * 5015 * This routine releases the lpfc DMA (Direct Memory Access) buffer(s) 5016 * associated with a command IOCB back to the lpfc DMA buffer pool. It first 5017 * checks to see whether there is a lpfc DMA buffer associated with the 5018 * response of the command IOCB. If so, it will be released before releasing 5019 * the lpfc DMA buffer associated with the IOCB itself. 5020 * 5021 * Return code 5022 * 0 - Successfully released lpfc DMA buffer (currently, always return 0) 5023 **/ 5024 static int 5025 lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1) 5026 { 5027 struct lpfc_dmabuf *buf_ptr; 5028 5029 /* Free the response before processing the command. */ 5030 if (!list_empty(&buf_ptr1->list)) { 5031 list_remove_head(&buf_ptr1->list, buf_ptr, 5032 struct lpfc_dmabuf, 5033 list); 5034 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 5035 kfree(buf_ptr); 5036 } 5037 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys); 5038 kfree(buf_ptr1); 5039 return 0; 5040 } 5041 5042 /** 5043 * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl 5044 * @phba: pointer to lpfc hba data structure. 5045 * @buf_ptr: pointer to the lpfc dma buffer data structure. 5046 * 5047 * This routine releases the lpfc Direct Memory Access (DMA) buffer 5048 * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer 5049 * pool. 5050 * 5051 * Return code 5052 * 0 - Successfully released lpfc DMA buffer (currently, always return 0) 5053 **/ 5054 static int 5055 lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr) 5056 { 5057 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 5058 kfree(buf_ptr); 5059 return 0; 5060 } 5061 5062 /** 5063 * lpfc_els_free_iocb - Free a command iocb and its associated resources 5064 * @phba: pointer to lpfc hba data structure. 5065 * @elsiocb: pointer to lpfc els command iocb data structure. 5066 * 5067 * This routine frees a command IOCB and its associated resources. The 5068 * command IOCB data structure contains the reference to various associated 5069 * resources, these fields must be set to NULL if the associated reference 5070 * not present: 5071 * context1 - reference to ndlp 5072 * context2 - reference to cmd 5073 * context2->next - reference to rsp 5074 * context3 - reference to bpl 5075 * 5076 * It first properly decrements the reference count held on ndlp for the 5077 * IOCB completion callback function. 
If LPFC_DELAY_MEM_FREE flag is not 5078 * set, it invokes the lpfc_els_free_data() routine to release the Direct 5079 * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it 5080 * adds the DMA buffer the @phba data structure for the delayed release. 5081 * If reference to the Buffer Pointer List (BPL) is present, the 5082 * lpfc_els_free_bpl() routine is invoked to release the DMA memory 5083 * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is 5084 * invoked to release the IOCB data structure back to @phba IOCBQ list. 5085 * 5086 * Return code 5087 * 0 - Success (currently, always return 0) 5088 **/ 5089 int 5090 lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb) 5091 { 5092 struct lpfc_dmabuf *buf_ptr, *buf_ptr1; 5093 5094 /* The I/O iocb is complete. Clear the context1 data. */ 5095 elsiocb->context1 = NULL; 5096 5097 /* context2 = cmd, context2->next = rsp, context3 = bpl */ 5098 if (elsiocb->context2) { 5099 if (elsiocb->cmd_flag & LPFC_DELAY_MEM_FREE) { 5100 /* Firmware could still be in progress of DMAing 5101 * payload, so don't free data buffer till after 5102 * a hbeat. 5103 */ 5104 elsiocb->cmd_flag &= ~LPFC_DELAY_MEM_FREE; 5105 buf_ptr = elsiocb->context2; 5106 elsiocb->context2 = NULL; 5107 if (buf_ptr) { 5108 buf_ptr1 = NULL; 5109 spin_lock_irq(&phba->hbalock); 5110 if (!list_empty(&buf_ptr->list)) { 5111 list_remove_head(&buf_ptr->list, 5112 buf_ptr1, struct lpfc_dmabuf, 5113 list); 5114 INIT_LIST_HEAD(&buf_ptr1->list); 5115 list_add_tail(&buf_ptr1->list, 5116 &phba->elsbuf); 5117 phba->elsbuf_cnt++; 5118 } 5119 INIT_LIST_HEAD(&buf_ptr->list); 5120 list_add_tail(&buf_ptr->list, &phba->elsbuf); 5121 phba->elsbuf_cnt++; 5122 spin_unlock_irq(&phba->hbalock); 5123 } 5124 } else { 5125 buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2; 5126 lpfc_els_free_data(phba, buf_ptr1); 5127 elsiocb->context2 = NULL; 5128 } 5129 } 5130 5131 if (elsiocb->context3) { 5132 buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3; 5133 lpfc_els_free_bpl(phba, buf_ptr); 5134 elsiocb->context3 = NULL; 5135 } 5136 lpfc_sli_release_iocbq(phba, elsiocb); 5137 return 0; 5138 } 5139 5140 /** 5141 * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response 5142 * @phba: pointer to lpfc hba data structure. 5143 * @cmdiocb: pointer to lpfc command iocb data structure. 5144 * @rspiocb: pointer to lpfc response iocb data structure. 5145 * 5146 * This routine is the completion callback function to the Logout (LOGO) 5147 * Accept (ACC) Response ELS command. This routine is invoked to indicate 5148 * the completion of the LOGO process. It invokes the lpfc_nlp_not_used() to 5149 * release the ndlp if it has the last reference remaining (reference count 5150 * is 1). If succeeded (meaning ndlp released), it sets the IOCB context1 5151 * field to NULL to inform the following lpfc_els_free_iocb() routine no 5152 * ndlp reference count needs to be decremented. Otherwise, the ndlp 5153 * reference use-count shall be decremented by the lpfc_els_free_iocb() 5154 * routine. Finally, the lpfc_els_free_iocb() is invoked to release the 5155 * IOCB data structure. 
5156 **/ 5157 static void 5158 lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 5159 struct lpfc_iocbq *rspiocb) 5160 { 5161 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 5162 struct lpfc_vport *vport = cmdiocb->vport; 5163 u32 ulp_status, ulp_word4; 5164 5165 ulp_status = get_job_ulpstatus(phba, rspiocb); 5166 ulp_word4 = get_job_word4(phba, rspiocb); 5167 5168 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5169 "ACC LOGO cmpl: status:x%x/x%x did:x%x", 5170 ulp_status, ulp_word4, ndlp->nlp_DID); 5171 /* ACC to LOGO completes to NPort <nlp_DID> */ 5172 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5173 "0109 ACC to LOGO completes to NPort x%x refcnt %d " 5174 "Data: x%x x%x x%x\n", 5175 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag, 5176 ndlp->nlp_state, ndlp->nlp_rpi); 5177 5178 /* This clause allows the LOGO ACC to complete and free resources 5179 * for the Fabric Domain Controller. It does deliberately skip 5180 * the unreg_rpi and release rpi because some fabrics send RDP 5181 * requests after logging out from the initiator. 5182 */ 5183 if (ndlp->nlp_type & NLP_FABRIC && 5184 ((ndlp->nlp_DID & WELL_KNOWN_DID_MASK) != WELL_KNOWN_DID_MASK)) 5185 goto out; 5186 5187 if (ndlp->nlp_state == NLP_STE_NPR_NODE) { 5188 /* If PLOGI is being retried, PLOGI completion will cleanup the 5189 * node. The NLP_NPR_2B_DISC flag needs to be retained to make 5190 * progress on nodes discovered from last RSCN. 5191 */ 5192 if ((ndlp->nlp_flag & NLP_DELAY_TMO) && 5193 (ndlp->nlp_last_elscmd == ELS_CMD_PLOGI)) 5194 goto out; 5195 5196 /* NPort Recovery mode or node is just allocated */ 5197 if (!lpfc_nlp_not_used(ndlp)) { 5198 /* A LOGO is completing and the node is in NPR state. 5199 * Just unregister the RPI because the node is still 5200 * required. 5201 */ 5202 lpfc_unreg_rpi(vport, ndlp); 5203 } else { 5204 /* Indicate the node has already released, should 5205 * not reference to it from within lpfc_els_free_iocb. 5206 */ 5207 cmdiocb->context1 = NULL; 5208 } 5209 } 5210 out: 5211 /* 5212 * The driver received a LOGO from the rport and has ACK'd it. 5213 * At this point, the driver is done so release the IOCB 5214 */ 5215 lpfc_els_free_iocb(phba, cmdiocb); 5216 lpfc_nlp_put(ndlp); 5217 } 5218 5219 /** 5220 * lpfc_mbx_cmpl_dflt_rpi - Completion callbk func for unreg dflt rpi mbox cmd 5221 * @phba: pointer to lpfc hba data structure. 5222 * @pmb: pointer to the driver internal queue element for mailbox command. 5223 * 5224 * This routine is the completion callback function for unregister default 5225 * RPI (Remote Port Index) mailbox command to the @phba. It simply releases 5226 * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and 5227 * decrements the ndlp reference count held for this completion callback 5228 * function. After that, it invokes the lpfc_nlp_not_used() to check 5229 * whether there is only one reference left on the ndlp. If so, it will 5230 * perform one more decrement and trigger the release of the ndlp. 
5231 **/ 5232 void 5233 lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 5234 { 5235 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf); 5236 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; 5237 u32 mbx_flag = pmb->mbox_flag; 5238 u32 mbx_cmd = pmb->u.mb.mbxCommand; 5239 5240 pmb->ctx_buf = NULL; 5241 pmb->ctx_ndlp = NULL; 5242 5243 if (ndlp) { 5244 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, 5245 "0006 rpi x%x DID:%x flg:%x %d x%px " 5246 "mbx_cmd x%x mbx_flag x%x x%px\n", 5247 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, 5248 kref_read(&ndlp->kref), ndlp, mbx_cmd, 5249 mbx_flag, pmb); 5250 5251 /* This ends the default/temporary RPI cleanup logic for this 5252 * ndlp and the node and rpi needs to be released. Free the rpi 5253 * first on an UNREG_LOGIN and then release the final 5254 * references. 5255 */ 5256 spin_lock_irq(&ndlp->lock); 5257 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 5258 if (mbx_cmd == MBX_UNREG_LOGIN) 5259 ndlp->nlp_flag &= ~NLP_UNREG_INP; 5260 spin_unlock_irq(&ndlp->lock); 5261 lpfc_nlp_put(ndlp); 5262 lpfc_drop_node(ndlp->vport, ndlp); 5263 } 5264 5265 lpfc_mbuf_free(phba, mp->virt, mp->phys); 5266 kfree(mp); 5267 mempool_free(pmb, phba->mbox_mem_pool); 5268 return; 5269 } 5270 5271 /** 5272 * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd 5273 * @phba: pointer to lpfc hba data structure. 5274 * @cmdiocb: pointer to lpfc command iocb data structure. 5275 * @rspiocb: pointer to lpfc response iocb data structure. 5276 * 5277 * This routine is the completion callback function for ELS Response IOCB 5278 * command. In normal case, this callback function just properly sets the 5279 * nlp_flag bitmap in the ndlp data structure, if the mbox command reference 5280 * field in the command IOCB is not NULL, the referred mailbox command will 5281 * be send out, and then invokes the lpfc_els_free_iocb() routine to release 5282 * the IOCB. 5283 **/ 5284 static void 5285 lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 5286 struct lpfc_iocbq *rspiocb) 5287 { 5288 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 5289 struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL; 5290 struct Scsi_Host *shost = vport ? 
lpfc_shost_from_vport(vport) : NULL; 5291 IOCB_t *irsp; 5292 LPFC_MBOXQ_t *mbox = NULL; 5293 struct lpfc_dmabuf *mp = NULL; 5294 u32 ulp_status, ulp_word4, tmo, did, iotag; 5295 5296 if (!vport) { 5297 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5298 "3177 ELS response failed\n"); 5299 goto out; 5300 } 5301 if (cmdiocb->context_un.mbox) 5302 mbox = cmdiocb->context_un.mbox; 5303 5304 ulp_status = get_job_ulpstatus(phba, rspiocb); 5305 ulp_word4 = get_job_word4(phba, rspiocb); 5306 did = get_job_els_rsp64_did(phba, cmdiocb); 5307 5308 if (phba->sli_rev == LPFC_SLI_REV4) { 5309 tmo = get_wqe_tmo(cmdiocb); 5310 iotag = get_wqe_reqtag(cmdiocb); 5311 } else { 5312 irsp = &rspiocb->iocb; 5313 tmo = irsp->ulpTimeout; 5314 iotag = irsp->ulpIoTag; 5315 } 5316 5317 /* Check to see if link went down during discovery */ 5318 if (!ndlp || lpfc_els_chk_latt(vport)) { 5319 if (mbox) { 5320 mp = (struct lpfc_dmabuf *)mbox->ctx_buf; 5321 if (mp) { 5322 lpfc_mbuf_free(phba, mp->virt, mp->phys); 5323 kfree(mp); 5324 } 5325 mempool_free(mbox, phba->mbox_mem_pool); 5326 } 5327 goto out; 5328 } 5329 5330 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5331 "ELS rsp cmpl: status:x%x/x%x did:x%x", 5332 ulp_status, ulp_word4, did); 5333 /* ELS response tag <ulpIoTag> completes */ 5334 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5335 "0110 ELS response tag x%x completes " 5336 "Data: x%x x%x x%x x%x x%x x%x x%x x%x %p %p\n", 5337 iotag, ulp_status, ulp_word4, tmo, 5338 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5339 ndlp->nlp_rpi, kref_read(&ndlp->kref), mbox, ndlp); 5340 if (mbox) { 5341 if (ulp_status == 0 5342 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { 5343 if (!lpfc_unreg_rpi(vport, ndlp) && 5344 (!(vport->fc_flag & FC_PT2PT))) { 5345 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || 5346 ndlp->nlp_state == 5347 NLP_STE_REG_LOGIN_ISSUE) { 5348 lpfc_printf_vlog(vport, KERN_INFO, 5349 LOG_DISCOVERY, 5350 "0314 PLOGI recov " 5351 "DID x%x " 5352 "Data: x%x x%x x%x\n", 5353 ndlp->nlp_DID, 5354 ndlp->nlp_state, 5355 ndlp->nlp_rpi, 5356 ndlp->nlp_flag); 5357 mp = mbox->ctx_buf; 5358 if (mp) { 5359 lpfc_mbuf_free(phba, mp->virt, 5360 mp->phys); 5361 kfree(mp); 5362 } 5363 mempool_free(mbox, phba->mbox_mem_pool); 5364 goto out; 5365 } 5366 } 5367 5368 /* Increment reference count to ndlp to hold the 5369 * reference to ndlp for the callback function. 5370 */ 5371 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 5372 if (!mbox->ctx_ndlp) 5373 goto out; 5374 5375 mbox->vport = vport; 5376 if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) { 5377 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG; 5378 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 5379 } 5380 else { 5381 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; 5382 ndlp->nlp_prev_state = ndlp->nlp_state; 5383 lpfc_nlp_set_state(vport, ndlp, 5384 NLP_STE_REG_LOGIN_ISSUE); 5385 } 5386 5387 ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; 5388 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 5389 != MBX_NOT_FINISHED) 5390 goto out; 5391 5392 /* Decrement the ndlp reference count we 5393 * set for this failed mailbox command. 
5394 */ 5395 lpfc_nlp_put(ndlp); 5396 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 5397 5398 /* ELS rsp: Cannot issue reg_login for <NPortid> */ 5399 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5400 "0138 ELS rsp: Cannot issue reg_login for x%x " 5401 "Data: x%x x%x x%x\n", 5402 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5403 ndlp->nlp_rpi); 5404 } 5405 mp = (struct lpfc_dmabuf *)mbox->ctx_buf; 5406 if (mp) { 5407 lpfc_mbuf_free(phba, mp->virt, mp->phys); 5408 kfree(mp); 5409 } 5410 mempool_free(mbox, phba->mbox_mem_pool); 5411 } 5412 out: 5413 if (ndlp && shost) { 5414 spin_lock_irq(&ndlp->lock); 5415 if (mbox) 5416 ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN; 5417 ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI; 5418 spin_unlock_irq(&ndlp->lock); 5419 } 5420 5421 /* An SLI4 NPIV instance wants to drop the node at this point under 5422 * these conditions and release the RPI. 5423 */ 5424 if (phba->sli_rev == LPFC_SLI_REV4 && 5425 (vport && vport->port_type == LPFC_NPIV_PORT) && 5426 !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD) && 5427 ndlp->nlp_flag & NLP_RELEASE_RPI) { 5428 if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE && 5429 ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) { 5430 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); 5431 spin_lock_irq(&ndlp->lock); 5432 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; 5433 ndlp->nlp_flag &= ~NLP_RELEASE_RPI; 5434 spin_unlock_irq(&ndlp->lock); 5435 lpfc_drop_node(vport, ndlp); 5436 } 5437 } 5438 5439 /* Release the originating I/O reference. */ 5440 lpfc_els_free_iocb(phba, cmdiocb); 5441 lpfc_nlp_put(ndlp); 5442 return; 5443 } 5444 5445 /** 5446 * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command 5447 * @vport: pointer to a host virtual N_Port data structure. 5448 * @flag: the els command code to be accepted. 5449 * @oldiocb: pointer to the original lpfc command iocb data structure. 5450 * @ndlp: pointer to a node-list data structure. 5451 * @mbox: pointer to the driver internal queue element for mailbox command. 5452 * 5453 * This routine prepares and issues an Accept (ACC) response IOCB 5454 * command. It uses the @flag to properly set up the IOCB field for the 5455 * specific ACC response command to be issued and invokes the 5456 * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a 5457 * @mbox pointer is passed in, it will be put into the context_un.mbox 5458 * field of the IOCB for the completion callback function to issue the 5459 * mailbox command to the HBA later when callback is invoked. 5460 * 5461 * Note that the ndlp reference count will be incremented by 1 for holding the 5462 * ndlp and the reference to ndlp will be stored into the context1 field of 5463 * the IOCB for the completion callback function to the corresponding 5464 * response ELS IOCB command. 
5465 * 5466 * Return code 5467 * 0 - Successfully issued acc response 5468 * 1 - Failed to issue acc response 5469 **/ 5470 int 5471 lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, 5472 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 5473 LPFC_MBOXQ_t *mbox) 5474 { 5475 struct lpfc_hba *phba = vport->phba; 5476 IOCB_t *icmd; 5477 IOCB_t *oldcmd; 5478 union lpfc_wqe128 *wqe; 5479 union lpfc_wqe128 *oldwqe = &oldiocb->wqe; 5480 struct lpfc_iocbq *elsiocb; 5481 uint8_t *pcmd; 5482 struct serv_parm *sp; 5483 uint16_t cmdsize; 5484 int rc; 5485 ELS_PKT *els_pkt_ptr; 5486 struct fc_els_rdf_resp *rdf_resp; 5487 5488 switch (flag) { 5489 case ELS_CMD_ACC: 5490 cmdsize = sizeof(uint32_t); 5491 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5492 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5493 if (!elsiocb) { 5494 spin_lock_irq(&ndlp->lock); 5495 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 5496 spin_unlock_irq(&ndlp->lock); 5497 return 1; 5498 } 5499 5500 if (phba->sli_rev == LPFC_SLI_REV4) { 5501 wqe = &elsiocb->wqe; 5502 /* XRI / rx_id */ 5503 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 5504 bf_get(wqe_ctxt_tag, 5505 &oldwqe->xmit_els_rsp.wqe_com)); 5506 5507 /* oxid */ 5508 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5509 bf_get(wqe_rcvoxid, 5510 &oldwqe->xmit_els_rsp.wqe_com)); 5511 } else { 5512 icmd = &elsiocb->iocb; 5513 oldcmd = &oldiocb->iocb; 5514 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5515 icmd->unsli3.rcvsli3.ox_id = 5516 oldcmd->unsli3.rcvsli3.ox_id; 5517 } 5518 5519 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5520 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5521 pcmd += sizeof(uint32_t); 5522 5523 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5524 "Issue ACC: did:x%x flg:x%x", 5525 ndlp->nlp_DID, ndlp->nlp_flag, 0); 5526 break; 5527 case ELS_CMD_FLOGI: 5528 case ELS_CMD_PLOGI: 5529 cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t)); 5530 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5531 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5532 if (!elsiocb) 5533 return 1; 5534 5535 if (phba->sli_rev == LPFC_SLI_REV4) { 5536 wqe = &elsiocb->wqe; 5537 /* XRI / rx_id */ 5538 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 5539 bf_get(wqe_ctxt_tag, 5540 &oldwqe->xmit_els_rsp.wqe_com)); 5541 5542 /* oxid */ 5543 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5544 bf_get(wqe_rcvoxid, 5545 &oldwqe->xmit_els_rsp.wqe_com)); 5546 } else { 5547 icmd = &elsiocb->iocb; 5548 oldcmd = &oldiocb->iocb; 5549 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5550 icmd->unsli3.rcvsli3.ox_id = 5551 oldcmd->unsli3.rcvsli3.ox_id; 5552 } 5553 5554 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5555 5556 if (mbox) 5557 elsiocb->context_un.mbox = mbox; 5558 5559 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5560 pcmd += sizeof(uint32_t); 5561 sp = (struct serv_parm *)pcmd; 5562 5563 if (flag == ELS_CMD_FLOGI) { 5564 /* Copy the received service parameters back */ 5565 memcpy(sp, &phba->fc_fabparam, 5566 sizeof(struct serv_parm)); 5567 5568 /* Clear the F_Port bit */ 5569 sp->cmn.fPort = 0; 5570 5571 /* Mark all class service parameters as invalid */ 5572 sp->cls1.classValid = 0; 5573 sp->cls2.classValid = 0; 5574 sp->cls3.classValid = 0; 5575 sp->cls4.classValid = 0; 5576 5577 /* Copy our worldwide names */ 5578 memcpy(&sp->portName, &vport->fc_sparam.portName, 5579 sizeof(struct lpfc_name)); 5580 memcpy(&sp->nodeName, &vport->fc_sparam.nodeName, 5581 sizeof(struct lpfc_name)); 5582 } else { 5583 memcpy(pcmd, &vport->fc_sparam, 5584 
sizeof(struct serv_parm)); 5585 5586 sp->cmn.valid_vendor_ver_level = 0; 5587 memset(sp->un.vendorVersion, 0, 5588 sizeof(sp->un.vendorVersion)); 5589 sp->cmn.bbRcvSizeMsb &= 0xF; 5590 5591 /* If our firmware supports this feature, convey that 5592 * info to the target using the vendor specific field. 5593 */ 5594 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) { 5595 sp->cmn.valid_vendor_ver_level = 1; 5596 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID); 5597 sp->un.vv.flags = 5598 cpu_to_be32(LPFC_VV_SUPPRESS_RSP); 5599 } 5600 } 5601 5602 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5603 "Issue ACC FLOGI/PLOGI: did:x%x flg:x%x", 5604 ndlp->nlp_DID, ndlp->nlp_flag, 0); 5605 break; 5606 case ELS_CMD_PRLO: 5607 cmdsize = sizeof(uint32_t) + sizeof(PRLO); 5608 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5609 ndlp, ndlp->nlp_DID, ELS_CMD_PRLO); 5610 if (!elsiocb) 5611 return 1; 5612 5613 if (phba->sli_rev == LPFC_SLI_REV4) { 5614 wqe = &elsiocb->wqe; 5615 /* XRI / rx_id */ 5616 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 5617 bf_get(wqe_ctxt_tag, 5618 &oldwqe->xmit_els_rsp.wqe_com)); 5619 5620 /* oxid */ 5621 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5622 bf_get(wqe_rcvoxid, 5623 &oldwqe->xmit_els_rsp.wqe_com)); 5624 } else { 5625 icmd = &elsiocb->iocb; 5626 oldcmd = &oldiocb->iocb; 5627 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5628 icmd->unsli3.rcvsli3.ox_id = 5629 oldcmd->unsli3.rcvsli3.ox_id; 5630 } 5631 5632 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5633 5634 memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt, 5635 sizeof(uint32_t) + sizeof(PRLO)); 5636 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC; 5637 els_pkt_ptr = (ELS_PKT *) pcmd; 5638 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED; 5639 5640 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5641 "Issue ACC PRLO: did:x%x flg:x%x", 5642 ndlp->nlp_DID, ndlp->nlp_flag, 0); 5643 break; 5644 case ELS_CMD_RDF: 5645 cmdsize = sizeof(*rdf_resp); 5646 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5647 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5648 if (!elsiocb) 5649 return 1; 5650 5651 if (phba->sli_rev == LPFC_SLI_REV4) { 5652 wqe = &elsiocb->wqe; 5653 /* XRI / rx_id */ 5654 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 5655 bf_get(wqe_ctxt_tag, 5656 &oldwqe->xmit_els_rsp.wqe_com)); 5657 5658 /* oxid */ 5659 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5660 bf_get(wqe_rcvoxid, 5661 &oldwqe->xmit_els_rsp.wqe_com)); 5662 } else { 5663 icmd = &elsiocb->iocb; 5664 oldcmd = &oldiocb->iocb; 5665 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5666 icmd->unsli3.rcvsli3.ox_id = 5667 oldcmd->unsli3.rcvsli3.ox_id; 5668 } 5669 5670 pcmd = (((struct lpfc_dmabuf *)elsiocb->context2)->virt); 5671 rdf_resp = (struct fc_els_rdf_resp *)pcmd; 5672 memset(rdf_resp, 0, sizeof(*rdf_resp)); 5673 rdf_resp->acc_hdr.la_cmd = ELS_LS_ACC; 5674 5675 /* FC-LS-5 specifies desc_list_len shall be set to 12 */ 5676 rdf_resp->desc_list_len = cpu_to_be32(12); 5677 5678 /* FC-LS-5 specifies LS REQ Information descriptor */ 5679 rdf_resp->lsri.desc_tag = cpu_to_be32(1); 5680 rdf_resp->lsri.desc_len = cpu_to_be32(sizeof(u32)); 5681 rdf_resp->lsri.rqst_w0.cmd = ELS_RDF; 5682 break; 5683 default: 5684 return 1; 5685 } 5686 if (ndlp->nlp_flag & NLP_LOGO_ACC) { 5687 spin_lock_irq(&ndlp->lock); 5688 if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED || 5689 ndlp->nlp_flag & NLP_REG_LOGIN_SEND)) 5690 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 5691 spin_unlock_irq(&ndlp->lock); 5692 
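/* NLP_LOGO_ACC is set, so this ACC acknowledges a LOGO; complete it
 * through the LOGO-specific lpfc_cmpl_els_logo_acc() handler. All other
 * responses use the generic lpfc_cmpl_els_rsp() completion below.
 */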
elsiocb->cmd_cmpl = lpfc_cmpl_els_logo_acc; 5693 } else { 5694 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 5695 } 5696 5697 phba->fc_stat.elsXmitACC++; 5698 elsiocb->context1 = lpfc_nlp_get(ndlp); 5699 if (!elsiocb->context1) { 5700 lpfc_els_free_iocb(phba, elsiocb); 5701 return 1; 5702 } 5703 5704 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5705 if (rc == IOCB_ERROR) { 5706 lpfc_els_free_iocb(phba, elsiocb); 5707 lpfc_nlp_put(ndlp); 5708 return 1; 5709 } 5710 5711 /* Xmit ELS ACC response tag <ulpIoTag> */ 5712 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5713 "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, " 5714 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " 5715 "RPI: x%x, fc_flag x%x refcnt %d\n", 5716 rc, elsiocb->iotag, elsiocb->sli4_xritag, 5717 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5718 ndlp->nlp_rpi, vport->fc_flag, kref_read(&ndlp->kref)); 5719 return 0; 5720 } 5721 5722 /** 5723 * lpfc_els_rsp_reject - Prepare and issue a rjt response iocb command 5724 * @vport: pointer to a virtual N_Port data structure. 5725 * @rejectError: reject response to issue 5726 * @oldiocb: pointer to the original lpfc command iocb data structure. 5727 * @ndlp: pointer to a node-list data structure. 5728 * @mbox: pointer to the driver internal queue element for mailbox command. 5729 * 5730 * This routine prepares and issues a Reject (RJT) response IOCB 5731 * command. If a @mbox pointer is passed in, it will be put into the 5732 * context_un.mbox field of the IOCB for the completion callback function 5733 * to issue to the HBA later. 5734 * 5735 * Note that the ndlp reference count will be incremented by 1 for holding the 5736 * ndlp and the reference to ndlp will be stored into the context1 field of 5737 * the IOCB for the completion callback function to the reject response 5738 * ELS IOCB command.
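 *
 * Illustrative caller sketch (hedged; the reason and explanation codes are
 * the LSRJT_*/LSEXP_* values from lpfc_hw.h, and callers typically build
 * them in a zeroed struct ls_rjt):
 *
 *   memset(&stat, 0, sizeof(struct ls_rjt));
 *   stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
 *   stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
 *   lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);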
5739 * 5740 * Return code 5741 * 0 - Successfully issued reject response 5742 * 1 - Failed to issue reject response 5743 **/ 5744 int 5745 lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, 5746 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 5747 LPFC_MBOXQ_t *mbox) 5748 { 5749 int rc; 5750 struct lpfc_hba *phba = vport->phba; 5751 IOCB_t *icmd; 5752 IOCB_t *oldcmd; 5753 union lpfc_wqe128 *wqe; 5754 struct lpfc_iocbq *elsiocb; 5755 uint8_t *pcmd; 5756 uint16_t cmdsize; 5757 5758 cmdsize = 2 * sizeof(uint32_t); 5759 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 5760 ndlp->nlp_DID, ELS_CMD_LS_RJT); 5761 if (!elsiocb) 5762 return 1; 5763 5764 if (phba->sli_rev == LPFC_SLI_REV4) { 5765 wqe = &elsiocb->wqe; 5766 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 5767 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */ 5768 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5769 get_job_rcvoxid(phba, oldiocb)); 5770 } else { 5771 icmd = &elsiocb->iocb; 5772 oldcmd = &oldiocb->iocb; 5773 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5774 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 5775 } 5776 5777 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5778 5779 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; 5780 pcmd += sizeof(uint32_t); 5781 *((uint32_t *) (pcmd)) = rejectError; 5782 5783 if (mbox) 5784 elsiocb->context_un.mbox = mbox; 5785 5786 /* Xmit ELS RJT <err> response tag <ulpIoTag> */ 5787 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5788 "0129 Xmit ELS RJT x%x response tag x%x " 5789 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " 5790 "rpi x%x\n", 5791 rejectError, elsiocb->iotag, 5792 get_job_ulpcontext(phba, elsiocb), ndlp->nlp_DID, 5793 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); 5794 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5795 "Issue LS_RJT: did:x%x flg:x%x err:x%x", 5796 ndlp->nlp_DID, ndlp->nlp_flag, rejectError); 5797 5798 phba->fc_stat.elsXmitLSRJT++; 5799 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 5800 elsiocb->context1 = lpfc_nlp_get(ndlp); 5801 if (!elsiocb->context1) { 5802 lpfc_els_free_iocb(phba, elsiocb); 5803 return 1; 5804 } 5805 5806 /* The NPIV instance is rejecting this unsolicited ELS. Make sure the 5807 * node's assigned RPI gets released provided this node is not already 5808 * registered with the transport. 5809 */ 5810 if (phba->sli_rev == LPFC_SLI_REV4 && 5811 vport->port_type == LPFC_NPIV_PORT && 5812 !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) { 5813 spin_lock_irq(&ndlp->lock); 5814 ndlp->nlp_flag |= NLP_RELEASE_RPI; 5815 spin_unlock_irq(&ndlp->lock); 5816 } 5817 5818 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5819 if (rc == IOCB_ERROR) { 5820 lpfc_els_free_iocb(phba, elsiocb); 5821 lpfc_nlp_put(ndlp); 5822 return 1; 5823 } 5824 5825 return 0; 5826 } 5827 5828 /** 5829 * lpfc_issue_els_edc_rsp - Exchange Diagnostic Capabilities with the fabric. 5830 * @vport: pointer to a host virtual N_Port data structure. 5831 * @cmdiocb: pointer to the original lpfc command iocb data structure. 5832 * @ndlp: NPort to where rsp is directed 5833 * 5834 * This routine issues an EDC ACC RSP to the F-Port Controller to communicate 5835 * this N_Port's support of hardware signals in its Congestion 5836 * Capabilities Descriptor. 
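 *
 * The ACC payload built below consists of an LS_ACC header, an LS Request
 * Information descriptor that echoes ELS_EDC, and a congestion signaling
 * capabilities descriptor filled in by lpfc_format_edc_cgn_desc().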
5837 * 5838 * Return code 5839 * 0 - Successfully issued edc rsp command 5840 * 1 - Failed to issue edc rsp command 5841 **/ 5842 static int 5843 lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5844 struct lpfc_nodelist *ndlp) 5845 { 5846 struct lpfc_hba *phba = vport->phba; 5847 struct lpfc_els_edc_rsp *edc_rsp; 5848 struct lpfc_iocbq *elsiocb; 5849 IOCB_t *icmd, *cmd; 5850 union lpfc_wqe128 *wqe; 5851 uint8_t *pcmd; 5852 int cmdsize, rc; 5853 5854 cmdsize = sizeof(struct lpfc_els_edc_rsp); 5855 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, cmdiocb->retry, 5856 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5857 if (!elsiocb) 5858 return 1; 5859 5860 if (phba->sli_rev == LPFC_SLI_REV4) { 5861 wqe = &elsiocb->wqe; 5862 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 5863 get_job_ulpcontext(phba, cmdiocb)); /* Xri / rx_id */ 5864 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5865 get_job_rcvoxid(phba, cmdiocb)); 5866 } else { 5867 icmd = &elsiocb->iocb; 5868 cmd = &cmdiocb->iocb; 5869 icmd->ulpContext = cmd->ulpContext; /* Xri / rx_id */ 5870 icmd->unsli3.rcvsli3.ox_id = cmd->unsli3.rcvsli3.ox_id; 5871 } 5872 5873 pcmd = (((struct lpfc_dmabuf *)elsiocb->context2)->virt); 5874 5875 memset(pcmd, 0, cmdsize); 5876 5877 edc_rsp = (struct lpfc_els_edc_rsp *)pcmd; 5878 edc_rsp->edc_rsp.acc_hdr.la_cmd = ELS_LS_ACC; 5879 edc_rsp->edc_rsp.desc_list_len = cpu_to_be32( 5880 FC_TLV_DESC_LENGTH_FROM_SZ(struct lpfc_els_edc_rsp)); 5881 edc_rsp->edc_rsp.lsri.desc_tag = cpu_to_be32(ELS_DTAG_LS_REQ_INFO); 5882 edc_rsp->edc_rsp.lsri.desc_len = cpu_to_be32( 5883 FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_els_lsri_desc)); 5884 edc_rsp->edc_rsp.lsri.rqst_w0.cmd = ELS_EDC; 5885 lpfc_format_edc_cgn_desc(phba, &edc_rsp->cgn_desc); 5886 5887 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5888 "Issue EDC ACC: did:x%x flg:x%x refcnt %d", 5889 ndlp->nlp_DID, ndlp->nlp_flag, 5890 kref_read(&ndlp->kref)); 5891 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 5892 5893 phba->fc_stat.elsXmitACC++; 5894 elsiocb->context1 = lpfc_nlp_get(ndlp); 5895 if (!elsiocb->context1) { 5896 lpfc_els_free_iocb(phba, elsiocb); 5897 return 1; 5898 } 5899 5900 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5901 if (rc == IOCB_ERROR) { 5902 lpfc_els_free_iocb(phba, elsiocb); 5903 lpfc_nlp_put(ndlp); 5904 return 1; 5905 } 5906 5907 /* Xmit ELS ACC response tag <ulpIoTag> */ 5908 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5909 "0152 Xmit EDC ACC response Status: x%x, IoTag: x%x, " 5910 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " 5911 "RPI: x%x, fc_flag x%x\n", 5912 rc, elsiocb->iotag, elsiocb->sli4_xritag, 5913 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5914 ndlp->nlp_rpi, vport->fc_flag); 5915 5916 return 0; 5917 } 5918 5919 /** 5920 * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd 5921 * @vport: pointer to a virtual N_Port data structure. 5922 * @oldiocb: pointer to the original lpfc command iocb data structure. 5923 * @ndlp: pointer to a node-list data structure. 5924 * 5925 * This routine prepares and issues an Accept (ACC) response to Address 5926 * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB 5927 * and invokes the lpfc_sli_issue_iocb() routine to send out the command. 5928 * 5929 * Note that the ndlp reference count will be incremented by 1 for holding the 5930 * ndlp and the reference to ndlp will be stored into the context1 field of 5931 * the IOCB for the completion callback function to the ADISC Accept response 5932 * ELS IOCB command. 
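 *
 * The ACC payload carries this port's hard AL_PA, its world wide port and
 * node names, and its N_Port ID, copied from the @vport and its HBA below.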
5933 * 5934 * Return code 5935 * 0 - Successfully issued acc adisc response 5936 * 1 - Failed to issue adisc acc response 5937 **/ 5938 int 5939 lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 5940 struct lpfc_nodelist *ndlp) 5941 { 5942 struct lpfc_hba *phba = vport->phba; 5943 ADISC *ap; 5944 IOCB_t *icmd, *oldcmd; 5945 union lpfc_wqe128 *wqe; 5946 struct lpfc_iocbq *elsiocb; 5947 uint8_t *pcmd; 5948 uint16_t cmdsize; 5949 int rc; 5950 u32 ulp_context; 5951 5952 cmdsize = sizeof(uint32_t) + sizeof(ADISC); 5953 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 5954 ndlp->nlp_DID, ELS_CMD_ACC); 5955 if (!elsiocb) 5956 return 1; 5957 5958 if (phba->sli_rev == LPFC_SLI_REV4) { 5959 wqe = &elsiocb->wqe; 5960 /* XRI / rx_id */ 5961 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 5962 get_job_ulpcontext(phba, oldiocb)); 5963 ulp_context = get_job_ulpcontext(phba, elsiocb); 5964 /* oxid */ 5965 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5966 get_job_rcvoxid(phba, oldiocb)); 5967 } else { 5968 icmd = &elsiocb->iocb; 5969 oldcmd = &oldiocb->iocb; 5970 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5971 ulp_context = elsiocb->iocb.ulpContext; 5972 icmd->unsli3.rcvsli3.ox_id = 5973 oldcmd->unsli3.rcvsli3.ox_id; 5974 } 5975 5976 /* Xmit ADISC ACC response tag <ulpIoTag> */ 5977 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5978 "0130 Xmit ADISC ACC response iotag x%x xri: " 5979 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n", 5980 elsiocb->iotag, ulp_context, 5981 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5982 ndlp->nlp_rpi); 5983 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5984 5985 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5986 pcmd += sizeof(uint32_t); 5987 5988 ap = (ADISC *) (pcmd); 5989 ap->hardAL_PA = phba->fc_pref_ALPA; 5990 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 5991 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 5992 ap->DID = be32_to_cpu(vport->fc_myDID); 5993 5994 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5995 "Issue ACC ADISC: did:x%x flg:x%x refcnt %d", 5996 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 5997 5998 phba->fc_stat.elsXmitACC++; 5999 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 6000 elsiocb->context1 = lpfc_nlp_get(ndlp); 6001 if (!elsiocb->context1) { 6002 lpfc_els_free_iocb(phba, elsiocb); 6003 return 1; 6004 } 6005 6006 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6007 if (rc == IOCB_ERROR) { 6008 lpfc_els_free_iocb(phba, elsiocb); 6009 lpfc_nlp_put(ndlp); 6010 return 1; 6011 } 6012 6013 return 0; 6014 } 6015 6016 /** 6017 * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd 6018 * @vport: pointer to a virtual N_Port data structure. 6019 * @oldiocb: pointer to the original lpfc command iocb data structure. 6020 * @ndlp: pointer to a node-list data structure. 6021 * 6022 * This routine prepares and issues an Accept (ACC) response to Process 6023 * Login (PRLI) ELS command. It simply prepares the payload of the IOCB 6024 * and invokes the lpfc_sli_issue_iocb() routine to send out the command. 6025 * 6026 * Note that the ndlp reference count will be incremented by 1 for holding the 6027 * ndlp and the reference to ndlp will be stored into the context1 field of 6028 * the IOCB for the completion callback function to the PRLI Accept response 6029 * ELS IOCB command. 
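 *
 * Word 1 of the received PRLI payload is examined to decide whether an FCP
 * or an NVMe format accept is returned; any other FC4 type is not accepted
 * and the routine returns 1 without building a response.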
6030 * 6031 * Return code 6032 * 0 - Successfully issued acc prli response 6033 * 1 - Failed to issue acc prli response 6034 **/ 6035 int 6036 lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 6037 struct lpfc_nodelist *ndlp) 6038 { 6039 struct lpfc_hba *phba = vport->phba; 6040 PRLI *npr; 6041 struct lpfc_nvme_prli *npr_nvme; 6042 lpfc_vpd_t *vpd; 6043 IOCB_t *icmd; 6044 IOCB_t *oldcmd; 6045 union lpfc_wqe128 *wqe; 6046 struct lpfc_iocbq *elsiocb; 6047 uint8_t *pcmd; 6048 uint16_t cmdsize; 6049 uint32_t prli_fc4_req, *req_payload; 6050 struct lpfc_dmabuf *req_buf; 6051 int rc; 6052 u32 elsrspcmd, ulp_context; 6053 6054 /* Need the incoming PRLI payload to determine if the ACC is for an 6055 * FC4 or NVME PRLI type. The PRLI type is at word 1. 6056 */ 6057 req_buf = (struct lpfc_dmabuf *)oldiocb->context2; 6058 req_payload = (((uint32_t *)req_buf->virt) + 1); 6059 6060 /* PRLI type payload is at byte 3 for FCP or NVME. */ 6061 prli_fc4_req = be32_to_cpu(*req_payload); 6062 prli_fc4_req = (prli_fc4_req >> 24) & 0xff; 6063 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6064 "6127 PRLI_ACC: Req Type x%x, Word1 x%08x\n", 6065 prli_fc4_req, *((uint32_t *)req_payload)); 6066 6067 if (prli_fc4_req == PRLI_FCP_TYPE) { 6068 cmdsize = sizeof(uint32_t) + sizeof(PRLI); 6069 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)); 6070 } else if (prli_fc4_req & PRLI_NVME_TYPE) { 6071 cmdsize = sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli); 6072 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_NVMEPRLI & ~ELS_RSP_MASK)); 6073 } else { 6074 return 1; 6075 } 6076 6077 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 6078 ndlp->nlp_DID, elsrspcmd); 6079 if (!elsiocb) 6080 return 1; 6081 6082 if (phba->sli_rev == LPFC_SLI_REV4) { 6083 wqe = &elsiocb->wqe; 6084 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 6085 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */ 6086 ulp_context = get_job_ulpcontext(phba, elsiocb); 6087 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 6088 get_job_rcvoxid(phba, oldiocb)); 6089 } else { 6090 icmd = &elsiocb->iocb; 6091 oldcmd = &oldiocb->iocb; 6092 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 6093 ulp_context = elsiocb->iocb.ulpContext; 6094 icmd->unsli3.rcvsli3.ox_id = 6095 oldcmd->unsli3.rcvsli3.ox_id; 6096 } 6097 6098 /* Xmit PRLI ACC response tag <ulpIoTag> */ 6099 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6100 "0131 Xmit PRLI ACC response tag x%x xri x%x, " 6101 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 6102 elsiocb->iotag, ulp_context, 6103 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 6104 ndlp->nlp_rpi); 6105 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 6106 memset(pcmd, 0, cmdsize); 6107 6108 *((uint32_t *)(pcmd)) = elsrspcmd; 6109 pcmd += sizeof(uint32_t); 6110 6111 /* For PRLI, remainder of payload is PRLI parameter page */ 6112 vpd = &phba->vpd; 6113 6114 if (prli_fc4_req == PRLI_FCP_TYPE) { 6115 /* 6116 * If the remote port is a target and our firmware version 6117 * is 3.20 or later, set the following bits for FC-TAPE 6118 * support. 
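 * The vpd->rev.feaLevelHigh test below is how that firmware level is
 * detected; it enables confirmed completion, retry and task retry
 * identification in the accept.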
6119 */ 6120 npr = (PRLI *) pcmd; 6121 if ((ndlp->nlp_type & NLP_FCP_TARGET) && 6122 (vpd->rev.feaLevelHigh >= 0x02)) { 6123 npr->ConfmComplAllowed = 1; 6124 npr->Retry = 1; 6125 npr->TaskRetryIdReq = 1; 6126 } 6127 npr->acceptRspCode = PRLI_REQ_EXECUTED; 6128 npr->estabImagePair = 1; 6129 npr->readXferRdyDis = 1; 6130 npr->ConfmComplAllowed = 1; 6131 npr->prliType = PRLI_FCP_TYPE; 6132 npr->initiatorFunc = 1; 6133 } else if (prli_fc4_req & PRLI_NVME_TYPE) { 6134 /* Respond with an NVME PRLI Type */ 6135 npr_nvme = (struct lpfc_nvme_prli *) pcmd; 6136 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); 6137 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ 6138 bf_set(prli_acc_rsp_code, npr_nvme, PRLI_REQ_EXECUTED); 6139 if (phba->nvmet_support) { 6140 bf_set(prli_tgt, npr_nvme, 1); 6141 bf_set(prli_disc, npr_nvme, 1); 6142 if (phba->cfg_nvme_enable_fb) { 6143 bf_set(prli_fba, npr_nvme, 1); 6144 6145 /* TBD. Target mode needs to post buffers 6146 * that support the configured first burst 6147 * byte size. 6148 */ 6149 bf_set(prli_fb_sz, npr_nvme, 6150 phba->cfg_nvmet_fb_size); 6151 } 6152 } else { 6153 bf_set(prli_init, npr_nvme, 1); 6154 } 6155 6156 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 6157 "6015 NVME issue PRLI ACC word1 x%08x " 6158 "word4 x%08x word5 x%08x flag x%x, " 6159 "fcp_info x%x nlp_type x%x\n", 6160 npr_nvme->word1, npr_nvme->word4, 6161 npr_nvme->word5, ndlp->nlp_flag, 6162 ndlp->nlp_fcp_info, ndlp->nlp_type); 6163 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); 6164 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); 6165 npr_nvme->word5 = cpu_to_be32(npr_nvme->word5); 6166 } else 6167 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6168 "6128 Unknown FC_TYPE x%x x%x ndlp x%06x\n", 6169 prli_fc4_req, ndlp->nlp_fc4_type, 6170 ndlp->nlp_DID); 6171 6172 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6173 "Issue ACC PRLI: did:x%x flg:x%x", 6174 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 6175 6176 phba->fc_stat.elsXmitACC++; 6177 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 6178 elsiocb->context1 = lpfc_nlp_get(ndlp); 6179 if (!elsiocb->context1) { 6180 lpfc_els_free_iocb(phba, elsiocb); 6181 return 1; 6182 } 6183 6184 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6185 if (rc == IOCB_ERROR) { 6186 lpfc_els_free_iocb(phba, elsiocb); 6187 lpfc_nlp_put(ndlp); 6188 return 1; 6189 } 6190 6191 return 0; 6192 } 6193 6194 /** 6195 * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command 6196 * @vport: pointer to a virtual N_Port data structure. 6197 * @format: rnid command format. 6198 * @oldiocb: pointer to the original lpfc command iocb data structure. 6199 * @ndlp: pointer to a node-list data structure. 6200 * 6201 * This routine issues a Request Node Identification Data (RNID) Accept 6202 * (ACC) response. It constructs the RNID ACC response command according to 6203 * the proper @format and then calls the lpfc_sli_issue_iocb() routine to 6204 * issue the response. 6205 * 6206 * Note that the ndlp reference count will be incremented by 1 for holding the 6207 * ndlp and the reference to ndlp will be stored into the context1 field of 6208 * the IOCB for the completion callback function. 
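 *
 * A @format of zero returns only the common identification data (the world
 * wide port and node names); RNID_TOPOLOGY_DISC additionally returns the
 * topology discovery block. Any other format is answered with zero-length
 * common and specific data.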
6209 * 6210 * Return code 6211 * 0 - Successfully issued acc rnid response 6212 * 1 - Failed to issue acc rnid response 6213 **/ 6214 static int 6215 lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, 6216 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 6217 { 6218 struct lpfc_hba *phba = vport->phba; 6219 RNID *rn; 6220 IOCB_t *icmd, *oldcmd; 6221 union lpfc_wqe128 *wqe; 6222 struct lpfc_iocbq *elsiocb; 6223 uint8_t *pcmd; 6224 uint16_t cmdsize; 6225 int rc; 6226 u32 ulp_context; 6227 6228 cmdsize = sizeof(uint32_t) + sizeof(uint32_t) 6229 + (2 * sizeof(struct lpfc_name)); 6230 if (format) 6231 cmdsize += sizeof(RNID_TOP_DISC); 6232 6233 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 6234 ndlp->nlp_DID, ELS_CMD_ACC); 6235 if (!elsiocb) 6236 return 1; 6237 6238 if (phba->sli_rev == LPFC_SLI_REV4) { 6239 wqe = &elsiocb->wqe; 6240 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 6241 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */ 6242 ulp_context = get_job_ulpcontext(phba, elsiocb); 6243 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 6244 get_job_rcvoxid(phba, oldiocb)); 6245 } else { 6246 icmd = &elsiocb->iocb; 6247 oldcmd = &oldiocb->iocb; 6248 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 6249 ulp_context = elsiocb->iocb.ulpContext; 6250 icmd->unsli3.rcvsli3.ox_id = 6251 oldcmd->unsli3.rcvsli3.ox_id; 6252 } 6253 6254 /* Xmit RNID ACC response tag <ulpIoTag> */ 6255 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6256 "0132 Xmit RNID ACC response tag x%x xri x%x\n", 6257 elsiocb->iotag, ulp_context); 6258 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 6259 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 6260 pcmd += sizeof(uint32_t); 6261 6262 memset(pcmd, 0, sizeof(RNID)); 6263 rn = (RNID *) (pcmd); 6264 rn->Format = format; 6265 rn->CommonLen = (2 * sizeof(struct lpfc_name)); 6266 memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 6267 memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 6268 switch (format) { 6269 case 0: 6270 rn->SpecificLen = 0; 6271 break; 6272 case RNID_TOPOLOGY_DISC: 6273 rn->SpecificLen = sizeof(RNID_TOP_DISC); 6274 memcpy(&rn->un.topologyDisc.portName, 6275 &vport->fc_portname, sizeof(struct lpfc_name)); 6276 rn->un.topologyDisc.unitType = RNID_HBA; 6277 rn->un.topologyDisc.physPort = 0; 6278 rn->un.topologyDisc.attachedNodes = 0; 6279 break; 6280 default: 6281 rn->CommonLen = 0; 6282 rn->SpecificLen = 0; 6283 break; 6284 } 6285 6286 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6287 "Issue ACC RNID: did:x%x flg:x%x refcnt %d", 6288 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 6289 6290 phba->fc_stat.elsXmitACC++; 6291 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 6292 elsiocb->context1 = lpfc_nlp_get(ndlp); 6293 if (!elsiocb->context1) { 6294 lpfc_els_free_iocb(phba, elsiocb); 6295 return 1; 6296 } 6297 6298 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6299 if (rc == IOCB_ERROR) { 6300 lpfc_els_free_iocb(phba, elsiocb); 6301 lpfc_nlp_put(ndlp); 6302 return 1; 6303 } 6304 6305 return 0; 6306 } 6307 6308 /** 6309 * lpfc_els_clear_rrq - Clear the rq that this rrq describes. 6310 * @vport: pointer to a virtual N_Port data structure. 6311 * @iocb: pointer to the lpfc command iocb data structure. 6312 * @ndlp: pointer to a node-list data structure. 
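 *
 * This routine parses the RRQ payload carried in @iocb to work out which
 * exchange the originator wants released: the OX_ID is used when this
 * @vport originated the exchange, otherwise the RX_ID. The matching active
 * RRQ for @ndlp, if any, is then cleared via lpfc_clr_rrq_active().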
6313 * 6314 * Return 6315 **/ 6316 static void 6317 lpfc_els_clear_rrq(struct lpfc_vport *vport, 6318 struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp) 6319 { 6320 struct lpfc_hba *phba = vport->phba; 6321 uint8_t *pcmd; 6322 struct RRQ *rrq; 6323 uint16_t rxid; 6324 uint16_t xri; 6325 struct lpfc_node_rrq *prrq; 6326 6327 6328 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) iocb->context2)->virt); 6329 pcmd += sizeof(uint32_t); 6330 rrq = (struct RRQ *)pcmd; 6331 rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg); 6332 rxid = bf_get(rrq_rxid, rrq); 6333 6334 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6335 "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x" 6336 " x%x x%x\n", 6337 be32_to_cpu(bf_get(rrq_did, rrq)), 6338 bf_get(rrq_oxid, rrq), 6339 rxid, 6340 get_wqe_reqtag(iocb), 6341 get_job_ulpcontext(phba, iocb)); 6342 6343 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6344 "Clear RRQ: did:x%x flg:x%x exchg:x%.08x", 6345 ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg); 6346 if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq))) 6347 xri = bf_get(rrq_oxid, rrq); 6348 else 6349 xri = rxid; 6350 prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID); 6351 if (prrq) 6352 lpfc_clr_rrq_active(phba, xri, prrq); 6353 return; 6354 } 6355 6356 /** 6357 * lpfc_els_rsp_echo_acc - Issue echo acc response 6358 * @vport: pointer to a virtual N_Port data structure. 6359 * @data: pointer to echo data to return in the accept. 6360 * @oldiocb: pointer to the original lpfc command iocb data structure. 6361 * @ndlp: pointer to a node-list data structure. 6362 * 6363 * Return code 6364 * 0 - Successfully issued acc echo response 6365 * 1 - Failed to issue acc echo response 6366 **/ 6367 static int 6368 lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data, 6369 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 6370 { 6371 struct lpfc_hba *phba = vport->phba; 6372 IOCB_t *icmd, *oldcmd; 6373 union lpfc_wqe128 *wqe; 6374 struct lpfc_iocbq *elsiocb; 6375 uint8_t *pcmd; 6376 uint16_t cmdsize; 6377 int rc; 6378 u32 ulp_context; 6379 6380 if (phba->sli_rev == LPFC_SLI_REV4) 6381 cmdsize = oldiocb->wcqe_cmpl.total_data_placed; 6382 else 6383 cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len; 6384 6385 /* The accumulated length can exceed the BPL_SIZE. 
For 6386 * now, use this as the limit 6387 */ 6388 if (cmdsize > LPFC_BPL_SIZE) 6389 cmdsize = LPFC_BPL_SIZE; 6390 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 6391 ndlp->nlp_DID, ELS_CMD_ACC); 6392 if (!elsiocb) 6393 return 1; 6394 6395 if (phba->sli_rev == LPFC_SLI_REV4) { 6396 wqe = &elsiocb->wqe; 6397 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 6398 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */ 6399 ulp_context = get_job_ulpcontext(phba, elsiocb); 6400 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 6401 get_job_rcvoxid(phba, oldiocb)); 6402 } else { 6403 icmd = &elsiocb->iocb; 6404 oldcmd = &oldiocb->iocb; 6405 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 6406 ulp_context = elsiocb->iocb.ulpContext; 6407 icmd->unsli3.rcvsli3.ox_id = 6408 oldcmd->unsli3.rcvsli3.ox_id; 6409 } 6410 6411 /* Xmit ECHO ACC response tag <ulpIoTag> */ 6412 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6413 "2876 Xmit ECHO ACC response tag x%x xri x%x\n", 6414 elsiocb->iotag, ulp_context); 6415 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 6416 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 6417 pcmd += sizeof(uint32_t); 6418 memcpy(pcmd, data, cmdsize - sizeof(uint32_t)); 6419 6420 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6421 "Issue ACC ECHO: did:x%x flg:x%x refcnt %d", 6422 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 6423 6424 phba->fc_stat.elsXmitACC++; 6425 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 6426 elsiocb->context1 = lpfc_nlp_get(ndlp); 6427 if (!elsiocb->context1) { 6428 lpfc_els_free_iocb(phba, elsiocb); 6429 return 1; 6430 } 6431 6432 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6433 if (rc == IOCB_ERROR) { 6434 lpfc_els_free_iocb(phba, elsiocb); 6435 lpfc_nlp_put(ndlp); 6436 return 1; 6437 } 6438 6439 return 0; 6440 } 6441 6442 /** 6443 * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport 6444 * @vport: pointer to a host virtual N_Port data structure. 6445 * 6446 * This routine issues Address Discover (ADISC) ELS commands to those 6447 * N_Ports which are in node port recovery state and ADISC has not been issued 6448 * for the @vport. Each time an ELS ADISC IOCB is issued by invoking the 6449 * lpfc_issue_els_adisc() routine, the per @vport number of discover count 6450 * (num_disc_nodes) shall be incremented. If the num_disc_nodes reaches a 6451 * pre-configured threshold (cfg_discovery_threads), the @vport fc_flag will 6452 * be marked with FC_NLP_MORE bit and the process of issuing remaining ADISC 6453 * IOCBs quit for later pick up. On the other hand, after walking through 6454 * all the ndlps with the @vport and there is none ADISC IOCB issued, the 6455 * @vport fc_flag shall be cleared with FC_NLP_MORE bit indicating there is 6456 * no more ADISC need to be sent. 6457 * 6458 * Return code 6459 * The number of N_Ports with adisc issued. 
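 *
 * Note: a node that still has NLP_NPR_ADISC set but was not selected for
 * discovery (NLP_NPR_2B_DISC clear) is not sent an ADISC; it is instead
 * unregistered from the backend and its RPI released, since it was missing
 * from the GID_FT response.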
6460 **/ 6461 int 6462 lpfc_els_disc_adisc(struct lpfc_vport *vport) 6463 { 6464 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6465 struct lpfc_nodelist *ndlp, *next_ndlp; 6466 int sentadisc = 0; 6467 6468 /* go thru NPR nodes and issue any remaining ELS ADISCs */ 6469 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 6470 6471 if (ndlp->nlp_state != NLP_STE_NPR_NODE || 6472 !(ndlp->nlp_flag & NLP_NPR_ADISC)) 6473 continue; 6474 6475 spin_lock_irq(&ndlp->lock); 6476 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 6477 spin_unlock_irq(&ndlp->lock); 6478 6479 if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) { 6480 /* This node was marked for ADISC but was not picked 6481 * for discovery. This is possible if the node was 6482 * missing in gidft response. 6483 * 6484 * At time of marking node for ADISC, we skipped unreg 6485 * from backend 6486 */ 6487 lpfc_nlp_unreg_node(vport, ndlp); 6488 lpfc_unreg_rpi(vport, ndlp); 6489 continue; 6490 } 6491 6492 ndlp->nlp_prev_state = ndlp->nlp_state; 6493 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 6494 lpfc_issue_els_adisc(vport, ndlp, 0); 6495 sentadisc++; 6496 vport->num_disc_nodes++; 6497 if (vport->num_disc_nodes >= 6498 vport->cfg_discovery_threads) { 6499 spin_lock_irq(shost->host_lock); 6500 vport->fc_flag |= FC_NLP_MORE; 6501 spin_unlock_irq(shost->host_lock); 6502 break; 6503 } 6504 6505 } 6506 if (sentadisc == 0) { 6507 spin_lock_irq(shost->host_lock); 6508 vport->fc_flag &= ~FC_NLP_MORE; 6509 spin_unlock_irq(shost->host_lock); 6510 } 6511 return sentadisc; 6512 } 6513 6514 /** 6515 * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc 6516 * @vport: pointer to a host virtual N_Port data structure. 6517 * 6518 * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports 6519 * which are in node port recovery state, with a @vport. Each time an ELS 6520 * ADISC PLOGI IOCB is issued by invoking the lpfc_issue_els_plogi() routine, 6521 * the per @vport number of discover count (num_disc_nodes) shall be 6522 * incremented. If the num_disc_nodes reaches a pre-configured threshold 6523 * (cfg_discovery_threads), the @vport fc_flag will be marked with FC_NLP_MORE 6524 * bit set and quit the process of issuing remaining ADISC PLOGIN IOCBs for 6525 * later pick up. On the other hand, after walking through all the ndlps with 6526 * the @vport and there is none ADISC PLOGI IOCB issued, the @vport fc_flag 6527 * shall be cleared with the FC_NLP_MORE bit indicating there is no more ADISC 6528 * PLOGI need to be sent. 6529 * 6530 * Return code 6531 * The number of N_Ports with plogi issued. 
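 *
 * Note: if at least one PLOGI was issued, the discovery timeout is (re)armed
 * via lpfc_set_disctmo(); otherwise FC_NLP_MORE is cleared on the @vport.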
6532 **/ 6533 int 6534 lpfc_els_disc_plogi(struct lpfc_vport *vport) 6535 { 6536 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6537 struct lpfc_nodelist *ndlp, *next_ndlp; 6538 int sentplogi = 0; 6539 6540 /* go thru NPR nodes and issue any remaining ELS PLOGIs */ 6541 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 6542 if (ndlp->nlp_state == NLP_STE_NPR_NODE && 6543 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && 6544 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 && 6545 (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) { 6546 ndlp->nlp_prev_state = ndlp->nlp_state; 6547 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 6548 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 6549 sentplogi++; 6550 vport->num_disc_nodes++; 6551 if (vport->num_disc_nodes >= 6552 vport->cfg_discovery_threads) { 6553 spin_lock_irq(shost->host_lock); 6554 vport->fc_flag |= FC_NLP_MORE; 6555 spin_unlock_irq(shost->host_lock); 6556 break; 6557 } 6558 } 6559 } 6560 6561 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6562 "6452 Discover PLOGI %d flag x%x\n", 6563 sentplogi, vport->fc_flag); 6564 6565 if (sentplogi) { 6566 lpfc_set_disctmo(vport); 6567 } 6568 else { 6569 spin_lock_irq(shost->host_lock); 6570 vport->fc_flag &= ~FC_NLP_MORE; 6571 spin_unlock_irq(shost->host_lock); 6572 } 6573 return sentplogi; 6574 } 6575 6576 static uint32_t 6577 lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc, 6578 uint32_t word0) 6579 { 6580 6581 desc->tag = cpu_to_be32(RDP_LINK_SERVICE_DESC_TAG); 6582 desc->payload.els_req = word0; 6583 desc->length = cpu_to_be32(sizeof(desc->payload)); 6584 6585 return sizeof(struct fc_rdp_link_service_desc); 6586 } 6587 6588 static uint32_t 6589 lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc, 6590 uint8_t *page_a0, uint8_t *page_a2) 6591 { 6592 uint16_t wavelength; 6593 uint16_t temperature; 6594 uint16_t rx_power; 6595 uint16_t tx_bias; 6596 uint16_t tx_power; 6597 uint16_t vcc; 6598 uint16_t flag = 0; 6599 struct sff_trasnceiver_codes_byte4 *trasn_code_byte4; 6600 struct sff_trasnceiver_codes_byte5 *trasn_code_byte5; 6601 6602 desc->tag = cpu_to_be32(RDP_SFP_DESC_TAG); 6603 6604 trasn_code_byte4 = (struct sff_trasnceiver_codes_byte4 *) 6605 &page_a0[SSF_TRANSCEIVER_CODE_B4]; 6606 trasn_code_byte5 = (struct sff_trasnceiver_codes_byte5 *) 6607 &page_a0[SSF_TRANSCEIVER_CODE_B5]; 6608 6609 if ((trasn_code_byte4->fc_sw_laser) || 6610 (trasn_code_byte5->fc_sw_laser_sl) || 6611 (trasn_code_byte5->fc_sw_laser_sn)) { /* check if its short WL */ 6612 flag |= (SFP_FLAG_PT_SWLASER << SFP_FLAG_PT_SHIFT); 6613 } else if (trasn_code_byte4->fc_lw_laser) { 6614 wavelength = (page_a0[SSF_WAVELENGTH_B1] << 8) | 6615 page_a0[SSF_WAVELENGTH_B0]; 6616 if (wavelength == SFP_WAVELENGTH_LC1310) 6617 flag |= SFP_FLAG_PT_LWLASER_LC1310 << SFP_FLAG_PT_SHIFT; 6618 if (wavelength == SFP_WAVELENGTH_LL1550) 6619 flag |= SFP_FLAG_PT_LWLASER_LL1550 << SFP_FLAG_PT_SHIFT; 6620 } 6621 /* check if its SFP+ */ 6622 flag |= ((page_a0[SSF_IDENTIFIER] == SFF_PG0_IDENT_SFP) ? 6623 SFP_FLAG_CT_SFP_PLUS : SFP_FLAG_CT_UNKNOWN) 6624 << SFP_FLAG_CT_SHIFT; 6625 6626 /* check if its OPTICAL */ 6627 flag |= ((page_a0[SSF_CONNECTOR] == SFF_PG0_CONNECTOR_LC) ? 
6628 SFP_FLAG_IS_OPTICAL_PORT : 0) 6629 << SFP_FLAG_IS_OPTICAL_SHIFT; 6630 6631 temperature = (page_a2[SFF_TEMPERATURE_B1] << 8 | 6632 page_a2[SFF_TEMPERATURE_B0]); 6633 vcc = (page_a2[SFF_VCC_B1] << 8 | 6634 page_a2[SFF_VCC_B0]); 6635 tx_power = (page_a2[SFF_TXPOWER_B1] << 8 | 6636 page_a2[SFF_TXPOWER_B0]); 6637 tx_bias = (page_a2[SFF_TX_BIAS_CURRENT_B1] << 8 | 6638 page_a2[SFF_TX_BIAS_CURRENT_B0]); 6639 rx_power = (page_a2[SFF_RXPOWER_B1] << 8 | 6640 page_a2[SFF_RXPOWER_B0]); 6641 desc->sfp_info.temperature = cpu_to_be16(temperature); 6642 desc->sfp_info.rx_power = cpu_to_be16(rx_power); 6643 desc->sfp_info.tx_bias = cpu_to_be16(tx_bias); 6644 desc->sfp_info.tx_power = cpu_to_be16(tx_power); 6645 desc->sfp_info.vcc = cpu_to_be16(vcc); 6646 6647 desc->sfp_info.flags = cpu_to_be16(flag); 6648 desc->length = cpu_to_be32(sizeof(desc->sfp_info)); 6649 6650 return sizeof(struct fc_rdp_sfp_desc); 6651 } 6652 6653 static uint32_t 6654 lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc, 6655 READ_LNK_VAR *stat) 6656 { 6657 uint32_t type; 6658 6659 desc->tag = cpu_to_be32(RDP_LINK_ERROR_STATUS_DESC_TAG); 6660 6661 type = VN_PT_PHY_PF_PORT << VN_PT_PHY_SHIFT; 6662 6663 desc->info.port_type = cpu_to_be32(type); 6664 6665 desc->info.link_status.link_failure_cnt = 6666 cpu_to_be32(stat->linkFailureCnt); 6667 desc->info.link_status.loss_of_synch_cnt = 6668 cpu_to_be32(stat->lossSyncCnt); 6669 desc->info.link_status.loss_of_signal_cnt = 6670 cpu_to_be32(stat->lossSignalCnt); 6671 desc->info.link_status.primitive_seq_proto_err = 6672 cpu_to_be32(stat->primSeqErrCnt); 6673 desc->info.link_status.invalid_trans_word = 6674 cpu_to_be32(stat->invalidXmitWord); 6675 desc->info.link_status.invalid_crc_cnt = cpu_to_be32(stat->crcCnt); 6676 6677 desc->length = cpu_to_be32(sizeof(desc->info)); 6678 6679 return sizeof(struct fc_rdp_link_error_status_desc); 6680 } 6681 6682 static uint32_t 6683 lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat, 6684 struct lpfc_vport *vport) 6685 { 6686 uint32_t bbCredit; 6687 6688 desc->tag = cpu_to_be32(RDP_BBC_DESC_TAG); 6689 6690 bbCredit = vport->fc_sparam.cmn.bbCreditLsb | 6691 (vport->fc_sparam.cmn.bbCreditMsb << 8); 6692 desc->bbc_info.port_bbc = cpu_to_be32(bbCredit); 6693 if (vport->phba->fc_topology != LPFC_TOPOLOGY_LOOP) { 6694 bbCredit = vport->phba->fc_fabparam.cmn.bbCreditLsb | 6695 (vport->phba->fc_fabparam.cmn.bbCreditMsb << 8); 6696 desc->bbc_info.attached_port_bbc = cpu_to_be32(bbCredit); 6697 } else { 6698 desc->bbc_info.attached_port_bbc = 0; 6699 } 6700 6701 desc->bbc_info.rtt = 0; 6702 desc->length = cpu_to_be32(sizeof(desc->bbc_info)); 6703 6704 return sizeof(struct fc_rdp_bbc_desc); 6705 } 6706 6707 static uint32_t 6708 lpfc_rdp_res_oed_temp_desc(struct lpfc_hba *phba, 6709 struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2) 6710 { 6711 uint32_t flags = 0; 6712 6713 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6714 6715 desc->oed_info.hi_alarm = page_a2[SSF_TEMP_HIGH_ALARM]; 6716 desc->oed_info.lo_alarm = page_a2[SSF_TEMP_LOW_ALARM]; 6717 desc->oed_info.hi_warning = page_a2[SSF_TEMP_HIGH_WARNING]; 6718 desc->oed_info.lo_warning = page_a2[SSF_TEMP_LOW_WARNING]; 6719 6720 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TEMPERATURE) 6721 flags |= RDP_OET_HIGH_ALARM; 6722 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TEMPERATURE) 6723 flags |= RDP_OET_LOW_ALARM; 6724 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TEMPERATURE) 6725 flags |= RDP_OET_HIGH_WARNING; 6726 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TEMPERATURE) 6727 
flags |= RDP_OET_LOW_WARNING; 6728 6729 flags |= ((0xf & RDP_OED_TEMPERATURE) << RDP_OED_TYPE_SHIFT); 6730 desc->oed_info.function_flags = cpu_to_be32(flags); 6731 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6732 return sizeof(struct fc_rdp_oed_sfp_desc); 6733 } 6734 6735 static uint32_t 6736 lpfc_rdp_res_oed_voltage_desc(struct lpfc_hba *phba, 6737 struct fc_rdp_oed_sfp_desc *desc, 6738 uint8_t *page_a2) 6739 { 6740 uint32_t flags = 0; 6741 6742 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6743 6744 desc->oed_info.hi_alarm = page_a2[SSF_VOLTAGE_HIGH_ALARM]; 6745 desc->oed_info.lo_alarm = page_a2[SSF_VOLTAGE_LOW_ALARM]; 6746 desc->oed_info.hi_warning = page_a2[SSF_VOLTAGE_HIGH_WARNING]; 6747 desc->oed_info.lo_warning = page_a2[SSF_VOLTAGE_LOW_WARNING]; 6748 6749 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_VOLTAGE) 6750 flags |= RDP_OET_HIGH_ALARM; 6751 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_VOLTAGE) 6752 flags |= RDP_OET_LOW_ALARM; 6753 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_VOLTAGE) 6754 flags |= RDP_OET_HIGH_WARNING; 6755 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_VOLTAGE) 6756 flags |= RDP_OET_LOW_WARNING; 6757 6758 flags |= ((0xf & RDP_OED_VOLTAGE) << RDP_OED_TYPE_SHIFT); 6759 desc->oed_info.function_flags = cpu_to_be32(flags); 6760 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6761 return sizeof(struct fc_rdp_oed_sfp_desc); 6762 } 6763 6764 static uint32_t 6765 lpfc_rdp_res_oed_txbias_desc(struct lpfc_hba *phba, 6766 struct fc_rdp_oed_sfp_desc *desc, 6767 uint8_t *page_a2) 6768 { 6769 uint32_t flags = 0; 6770 6771 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6772 6773 desc->oed_info.hi_alarm = page_a2[SSF_BIAS_HIGH_ALARM]; 6774 desc->oed_info.lo_alarm = page_a2[SSF_BIAS_LOW_ALARM]; 6775 desc->oed_info.hi_warning = page_a2[SSF_BIAS_HIGH_WARNING]; 6776 desc->oed_info.lo_warning = page_a2[SSF_BIAS_LOW_WARNING]; 6777 6778 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXBIAS) 6779 flags |= RDP_OET_HIGH_ALARM; 6780 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXBIAS) 6781 flags |= RDP_OET_LOW_ALARM; 6782 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXBIAS) 6783 flags |= RDP_OET_HIGH_WARNING; 6784 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXBIAS) 6785 flags |= RDP_OET_LOW_WARNING; 6786 6787 flags |= ((0xf & RDP_OED_TXBIAS) << RDP_OED_TYPE_SHIFT); 6788 desc->oed_info.function_flags = cpu_to_be32(flags); 6789 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6790 return sizeof(struct fc_rdp_oed_sfp_desc); 6791 } 6792 6793 static uint32_t 6794 lpfc_rdp_res_oed_txpower_desc(struct lpfc_hba *phba, 6795 struct fc_rdp_oed_sfp_desc *desc, 6796 uint8_t *page_a2) 6797 { 6798 uint32_t flags = 0; 6799 6800 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6801 6802 desc->oed_info.hi_alarm = page_a2[SSF_TXPOWER_HIGH_ALARM]; 6803 desc->oed_info.lo_alarm = page_a2[SSF_TXPOWER_LOW_ALARM]; 6804 desc->oed_info.hi_warning = page_a2[SSF_TXPOWER_HIGH_WARNING]; 6805 desc->oed_info.lo_warning = page_a2[SSF_TXPOWER_LOW_WARNING]; 6806 6807 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXPOWER) 6808 flags |= RDP_OET_HIGH_ALARM; 6809 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXPOWER) 6810 flags |= RDP_OET_LOW_ALARM; 6811 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXPOWER) 6812 flags |= RDP_OET_HIGH_WARNING; 6813 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXPOWER) 6814 flags |= RDP_OET_LOW_WARNING; 6815 6816 flags |= ((0xf & RDP_OED_TXPOWER) << RDP_OED_TYPE_SHIFT); 6817 desc->oed_info.function_flags = cpu_to_be32(flags); 6818 desc->length = 
cpu_to_be32(sizeof(desc->oed_info)); 6819 return sizeof(struct fc_rdp_oed_sfp_desc); 6820 } 6821 6822 6823 static uint32_t 6824 lpfc_rdp_res_oed_rxpower_desc(struct lpfc_hba *phba, 6825 struct fc_rdp_oed_sfp_desc *desc, 6826 uint8_t *page_a2) 6827 { 6828 uint32_t flags = 0; 6829 6830 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6831 6832 desc->oed_info.hi_alarm = page_a2[SSF_RXPOWER_HIGH_ALARM]; 6833 desc->oed_info.lo_alarm = page_a2[SSF_RXPOWER_LOW_ALARM]; 6834 desc->oed_info.hi_warning = page_a2[SSF_RXPOWER_HIGH_WARNING]; 6835 desc->oed_info.lo_warning = page_a2[SSF_RXPOWER_LOW_WARNING]; 6836 6837 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_RXPOWER) 6838 flags |= RDP_OET_HIGH_ALARM; 6839 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_RXPOWER) 6840 flags |= RDP_OET_LOW_ALARM; 6841 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_RXPOWER) 6842 flags |= RDP_OET_HIGH_WARNING; 6843 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_RXPOWER) 6844 flags |= RDP_OET_LOW_WARNING; 6845 6846 flags |= ((0xf & RDP_OED_RXPOWER) << RDP_OED_TYPE_SHIFT); 6847 desc->oed_info.function_flags = cpu_to_be32(flags); 6848 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6849 return sizeof(struct fc_rdp_oed_sfp_desc); 6850 } 6851 6852 static uint32_t 6853 lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc, 6854 uint8_t *page_a0, struct lpfc_vport *vport) 6855 { 6856 desc->tag = cpu_to_be32(RDP_OPD_DESC_TAG); 6857 memcpy(desc->opd_info.vendor_name, &page_a0[SSF_VENDOR_NAME], 16); 6858 memcpy(desc->opd_info.model_number, &page_a0[SSF_VENDOR_PN], 16); 6859 memcpy(desc->opd_info.serial_number, &page_a0[SSF_VENDOR_SN], 16); 6860 memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 4); 6861 memcpy(desc->opd_info.date, &page_a0[SSF_DATE_CODE], 8); 6862 desc->length = cpu_to_be32(sizeof(desc->opd_info)); 6863 return sizeof(struct fc_rdp_opd_sfp_desc); 6864 } 6865 6866 static uint32_t 6867 lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat) 6868 { 6869 if (bf_get(lpfc_read_link_stat_gec2, stat) == 0) 6870 return 0; 6871 desc->tag = cpu_to_be32(RDP_FEC_DESC_TAG); 6872 6873 desc->info.CorrectedBlocks = 6874 cpu_to_be32(stat->fecCorrBlkCount); 6875 desc->info.UncorrectableBlocks = 6876 cpu_to_be32(stat->fecUncorrBlkCount); 6877 6878 desc->length = cpu_to_be32(sizeof(desc->info)); 6879 6880 return sizeof(struct fc_fec_rdp_desc); 6881 } 6882 6883 static uint32_t 6884 lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba) 6885 { 6886 uint16_t rdp_cap = 0; 6887 uint16_t rdp_speed; 6888 6889 desc->tag = cpu_to_be32(RDP_PORT_SPEED_DESC_TAG); 6890 6891 switch (phba->fc_linkspeed) { 6892 case LPFC_LINK_SPEED_1GHZ: 6893 rdp_speed = RDP_PS_1GB; 6894 break; 6895 case LPFC_LINK_SPEED_2GHZ: 6896 rdp_speed = RDP_PS_2GB; 6897 break; 6898 case LPFC_LINK_SPEED_4GHZ: 6899 rdp_speed = RDP_PS_4GB; 6900 break; 6901 case LPFC_LINK_SPEED_8GHZ: 6902 rdp_speed = RDP_PS_8GB; 6903 break; 6904 case LPFC_LINK_SPEED_10GHZ: 6905 rdp_speed = RDP_PS_10GB; 6906 break; 6907 case LPFC_LINK_SPEED_16GHZ: 6908 rdp_speed = RDP_PS_16GB; 6909 break; 6910 case LPFC_LINK_SPEED_32GHZ: 6911 rdp_speed = RDP_PS_32GB; 6912 break; 6913 case LPFC_LINK_SPEED_64GHZ: 6914 rdp_speed = RDP_PS_64GB; 6915 break; 6916 case LPFC_LINK_SPEED_128GHZ: 6917 rdp_speed = RDP_PS_128GB; 6918 break; 6919 case LPFC_LINK_SPEED_256GHZ: 6920 rdp_speed = RDP_PS_256GB; 6921 break; 6922 default: 6923 rdp_speed = RDP_PS_UNKNOWN; 6924 break; 6925 } 6926 6927 desc->info.port_speed.speed = cpu_to_be16(rdp_speed); 6928 6929 if (phba->lmt & LMT_256Gb) 6930 
rdp_cap |= RDP_PS_256GB; 6931 if (phba->lmt & LMT_128Gb) 6932 rdp_cap |= RDP_PS_128GB; 6933 if (phba->lmt & LMT_64Gb) 6934 rdp_cap |= RDP_PS_64GB; 6935 if (phba->lmt & LMT_32Gb) 6936 rdp_cap |= RDP_PS_32GB; 6937 if (phba->lmt & LMT_16Gb) 6938 rdp_cap |= RDP_PS_16GB; 6939 if (phba->lmt & LMT_10Gb) 6940 rdp_cap |= RDP_PS_10GB; 6941 if (phba->lmt & LMT_8Gb) 6942 rdp_cap |= RDP_PS_8GB; 6943 if (phba->lmt & LMT_4Gb) 6944 rdp_cap |= RDP_PS_4GB; 6945 if (phba->lmt & LMT_2Gb) 6946 rdp_cap |= RDP_PS_2GB; 6947 if (phba->lmt & LMT_1Gb) 6948 rdp_cap |= RDP_PS_1GB; 6949 6950 if (rdp_cap == 0) 6951 rdp_cap = RDP_CAP_UNKNOWN; 6952 if (phba->cfg_link_speed != LPFC_USER_LINK_SPEED_AUTO) 6953 rdp_cap |= RDP_CAP_USER_CONFIGURED; 6954 6955 desc->info.port_speed.capabilities = cpu_to_be16(rdp_cap); 6956 desc->length = cpu_to_be32(sizeof(desc->info)); 6957 return sizeof(struct fc_rdp_port_speed_desc); 6958 } 6959 6960 static uint32_t 6961 lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc, 6962 struct lpfc_vport *vport) 6963 { 6964 6965 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG); 6966 6967 memcpy(desc->port_names.wwnn, &vport->fc_nodename, 6968 sizeof(desc->port_names.wwnn)); 6969 6970 memcpy(desc->port_names.wwpn, &vport->fc_portname, 6971 sizeof(desc->port_names.wwpn)); 6972 6973 desc->length = cpu_to_be32(sizeof(desc->port_names)); 6974 return sizeof(struct fc_rdp_port_name_desc); 6975 } 6976 6977 static uint32_t 6978 lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc, 6979 struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 6980 { 6981 6982 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG); 6983 if (vport->fc_flag & FC_FABRIC) { 6984 memcpy(desc->port_names.wwnn, &vport->fabric_nodename, 6985 sizeof(desc->port_names.wwnn)); 6986 6987 memcpy(desc->port_names.wwpn, &vport->fabric_portname, 6988 sizeof(desc->port_names.wwpn)); 6989 } else { /* Point to Point */ 6990 memcpy(desc->port_names.wwnn, &ndlp->nlp_nodename, 6991 sizeof(desc->port_names.wwnn)); 6992 6993 memcpy(desc->port_names.wwpn, &ndlp->nlp_portname, 6994 sizeof(desc->port_names.wwpn)); 6995 } 6996 6997 desc->length = cpu_to_be32(sizeof(desc->port_names)); 6998 return sizeof(struct fc_rdp_port_name_desc); 6999 } 7000 7001 static void 7002 lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context, 7003 int status) 7004 { 7005 struct lpfc_nodelist *ndlp = rdp_context->ndlp; 7006 struct lpfc_vport *vport = ndlp->vport; 7007 struct lpfc_iocbq *elsiocb; 7008 struct ulp_bde64 *bpl; 7009 IOCB_t *icmd; 7010 union lpfc_wqe128 *wqe; 7011 uint8_t *pcmd; 7012 struct ls_rjt *stat; 7013 struct fc_rdp_res_frame *rdp_res; 7014 uint32_t cmdsize, len; 7015 uint16_t *flag_ptr; 7016 int rc; 7017 u32 ulp_context; 7018 7019 if (status != SUCCESS) 7020 goto error; 7021 7022 /* This will change once we know the true size of the RDP payload */ 7023 cmdsize = sizeof(struct fc_rdp_res_frame); 7024 7025 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, 7026 lpfc_max_els_tries, rdp_context->ndlp, 7027 rdp_context->ndlp->nlp_DID, ELS_CMD_ACC); 7028 if (!elsiocb) 7029 goto free_rdp_context; 7030 7031 ulp_context = get_job_ulpcontext(phba, elsiocb); 7032 if (phba->sli_rev == LPFC_SLI_REV4) { 7033 wqe = &elsiocb->wqe; 7034 /* ox-id of the frame */ 7035 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 7036 rdp_context->ox_id); 7037 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 7038 rdp_context->rx_id); 7039 } else { 7040 icmd = &elsiocb->iocb; 7041 icmd->ulpContext = rdp_context->rx_id; 7042 icmd->unsli3.rcvsli3.ox_id = 
rdp_context->ox_id; 7043 } 7044 7045 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7046 "2171 Xmit RDP response tag x%x xri x%x, " 7047 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x", 7048 elsiocb->iotag, ulp_context, 7049 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 7050 ndlp->nlp_rpi); 7051 rdp_res = (struct fc_rdp_res_frame *) 7052 (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 7053 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 7054 memset(pcmd, 0, sizeof(struct fc_rdp_res_frame)); 7055 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 7056 7057 /* Update Alarm and Warning */ 7058 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_ALARM_FLAGS); 7059 phba->sfp_alarm |= *flag_ptr; 7060 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_WARNING_FLAGS); 7061 phba->sfp_warning |= *flag_ptr; 7062 7063 /* For RDP payload */ 7064 len = 8; 7065 len += lpfc_rdp_res_link_service((struct fc_rdp_link_service_desc *) 7066 (len + pcmd), ELS_CMD_RDP); 7067 7068 len += lpfc_rdp_res_sfp_desc((struct fc_rdp_sfp_desc *)(len + pcmd), 7069 rdp_context->page_a0, rdp_context->page_a2); 7070 len += lpfc_rdp_res_speed((struct fc_rdp_port_speed_desc *)(len + pcmd), 7071 phba); 7072 len += lpfc_rdp_res_link_error((struct fc_rdp_link_error_status_desc *) 7073 (len + pcmd), &rdp_context->link_stat); 7074 len += lpfc_rdp_res_diag_port_names((struct fc_rdp_port_name_desc *) 7075 (len + pcmd), vport); 7076 len += lpfc_rdp_res_attach_port_names((struct fc_rdp_port_name_desc *) 7077 (len + pcmd), vport, ndlp); 7078 len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd), 7079 &rdp_context->link_stat); 7080 len += lpfc_rdp_res_bbc_desc((struct fc_rdp_bbc_desc *)(len + pcmd), 7081 &rdp_context->link_stat, vport); 7082 len += lpfc_rdp_res_oed_temp_desc(phba, 7083 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7084 rdp_context->page_a2); 7085 len += lpfc_rdp_res_oed_voltage_desc(phba, 7086 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7087 rdp_context->page_a2); 7088 len += lpfc_rdp_res_oed_txbias_desc(phba, 7089 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7090 rdp_context->page_a2); 7091 len += lpfc_rdp_res_oed_txpower_desc(phba, 7092 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7093 rdp_context->page_a2); 7094 len += lpfc_rdp_res_oed_rxpower_desc(phba, 7095 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7096 rdp_context->page_a2); 7097 len += lpfc_rdp_res_opd_desc((struct fc_rdp_opd_sfp_desc *)(len + pcmd), 7098 rdp_context->page_a0, vport); 7099 7100 rdp_res->length = cpu_to_be32(len - 8); 7101 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 7102 7103 /* Now that we know the true size of the payload, update the BPL */ 7104 bpl = (struct ulp_bde64 *) 7105 (((struct lpfc_dmabuf *)(elsiocb->context3))->virt); 7106 bpl->tus.f.bdeSize = len; 7107 bpl->tus.f.bdeFlags = 0; 7108 bpl->tus.w = le32_to_cpu(bpl->tus.w); 7109 7110 phba->fc_stat.elsXmitACC++; 7111 elsiocb->context1 = lpfc_nlp_get(ndlp); 7112 if (!elsiocb->context1) { 7113 lpfc_els_free_iocb(phba, elsiocb); 7114 goto free_rdp_context; 7115 } 7116 7117 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7118 if (rc == IOCB_ERROR) { 7119 lpfc_els_free_iocb(phba, elsiocb); 7120 lpfc_nlp_put(ndlp); 7121 } 7122 7123 goto free_rdp_context; 7124 7125 error: 7126 cmdsize = 2 * sizeof(uint32_t); 7127 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, lpfc_max_els_tries, 7128 ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT); 7129 if (!elsiocb) 7130 goto free_rdp_context; 7131 7132 if (phba->sli_rev == LPFC_SLI_REV4) { 7133 wqe = &elsiocb->wqe; 7134 /* ox-id of the frame */ 
7135 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 7136 rdp_context->ox_id); 7137 bf_set(wqe_ctxt_tag, 7138 &wqe->xmit_els_rsp.wqe_com, 7139 rdp_context->rx_id); 7140 } else { 7141 icmd = &elsiocb->iocb; 7142 icmd->ulpContext = rdp_context->rx_id; 7143 icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id; 7144 } 7145 7146 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 7147 7148 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; 7149 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t)); 7150 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 7151 7152 phba->fc_stat.elsXmitLSRJT++; 7153 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 7154 elsiocb->context1 = lpfc_nlp_get(ndlp); 7155 if (!elsiocb->context1) { 7156 lpfc_els_free_iocb(phba, elsiocb); 7157 goto free_rdp_context; 7158 } 7159 7160 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7161 if (rc == IOCB_ERROR) { 7162 lpfc_els_free_iocb(phba, elsiocb); 7163 lpfc_nlp_put(ndlp); 7164 } 7165 7166 free_rdp_context: 7167 /* This reference put is for the original unsolicited RDP. If the 7168 * prep failed, there is no reference to remove. 7169 */ 7170 lpfc_nlp_put(ndlp); 7171 kfree(rdp_context); 7172 } 7173 7174 static int 7175 lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context) 7176 { 7177 LPFC_MBOXQ_t *mbox = NULL; 7178 struct lpfc_dmabuf *mp; 7179 int rc; 7180 7181 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7182 if (!mbox) { 7183 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS, 7184 "7105 failed to allocate mailbox memory"); 7185 return 1; 7186 } 7187 7188 if (lpfc_sli4_dump_page_a0(phba, mbox)) 7189 goto prep_mbox_fail; 7190 mbox->vport = rdp_context->ndlp->vport; 7191 mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0; 7192 mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context; 7193 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 7194 if (rc == MBX_NOT_FINISHED) { 7195 mp = (struct lpfc_dmabuf *)mbox->ctx_buf; 7196 lpfc_mbuf_free(phba, mp->virt, mp->phys); 7197 goto issue_mbox_fail; 7198 } 7199 7200 return 0; 7201 7202 prep_mbox_fail: 7203 issue_mbox_fail: 7204 mempool_free(mbox, phba->mbox_mem_pool); 7205 return 1; 7206 } 7207 7208 /* 7209 * lpfc_els_rcv_rdp - Process an unsolicited RDP ELS. 7210 * @vport: pointer to a host virtual N_Port data structure. 7211 * @cmdiocb: pointer to lpfc command iocb data structure. 7212 * @ndlp: pointer to a node-list data structure. 7213 * 7214 * This routine processes an unsolicited RDP(Read Diagnostic Parameters) 7215 * IOCB. First, the payload of the unsolicited RDP is checked. 7216 * Then it will (1) send MBX_DUMP_MEMORY, Embedded DMP_LMSD sub command TYPE-3 7217 * for Page A0, (2) send MBX_DUMP_MEMORY, DMP_LMSD for Page A2, 7218 * (3) send MBX_READ_LNK_STAT to get link stat, (4) Call lpfc_els_rdp_cmpl 7219 * gather all data and send RDP response. 7220 * 7221 * Return code 7222 * 0 - Sent the acc response 7223 * 1 - Sent the reject response. 
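 *
 * Informal summary of the checks performed below, for reference: the
 * request is only honored on SLI-4, IF_TYPE_2 or later, non-FCoE ports,
 * and the single N_Port ID descriptor in the payload must carry the
 * RDP_N_PORT_DESC_TAG tag with a length of RDP_NPORT_ID_SIZE; anything
 * else is answered with an LS_RJT. The ox_id and rx_id of the unsolicited
 * exchange are saved in the rdp_context so that lpfc_els_rdp_cmpl() can
 * address the eventual ACC (or LS_RJT) to the same exchange.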
7224 */ 7225 static int 7226 lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7227 struct lpfc_nodelist *ndlp) 7228 { 7229 struct lpfc_hba *phba = vport->phba; 7230 struct lpfc_dmabuf *pcmd; 7231 uint8_t rjt_err, rjt_expl = LSEXP_NOTHING_MORE; 7232 struct fc_rdp_req_frame *rdp_req; 7233 struct lpfc_rdp_context *rdp_context; 7234 union lpfc_wqe128 *cmd = NULL; 7235 struct ls_rjt stat; 7236 7237 if (phba->sli_rev < LPFC_SLI_REV4 || 7238 bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 7239 LPFC_SLI_INTF_IF_TYPE_2) { 7240 rjt_err = LSRJT_UNABLE_TPC; 7241 rjt_expl = LSEXP_REQ_UNSUPPORTED; 7242 goto error; 7243 } 7244 7245 if (phba->sli_rev < LPFC_SLI_REV4 || (phba->hba_flag & HBA_FCOE_MODE)) { 7246 rjt_err = LSRJT_UNABLE_TPC; 7247 rjt_expl = LSEXP_REQ_UNSUPPORTED; 7248 goto error; 7249 } 7250 7251 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 7252 rdp_req = (struct fc_rdp_req_frame *) pcmd->virt; 7253 7254 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7255 "2422 ELS RDP Request " 7256 "dec len %d tag x%x port_id %d len %d\n", 7257 be32_to_cpu(rdp_req->rdp_des_length), 7258 be32_to_cpu(rdp_req->nport_id_desc.tag), 7259 be32_to_cpu(rdp_req->nport_id_desc.nport_id), 7260 be32_to_cpu(rdp_req->nport_id_desc.length)); 7261 7262 if (sizeof(struct fc_rdp_nport_desc) != 7263 be32_to_cpu(rdp_req->rdp_des_length)) 7264 goto rjt_logerr; 7265 if (RDP_N_PORT_DESC_TAG != be32_to_cpu(rdp_req->nport_id_desc.tag)) 7266 goto rjt_logerr; 7267 if (RDP_NPORT_ID_SIZE != 7268 be32_to_cpu(rdp_req->nport_id_desc.length)) 7269 goto rjt_logerr; 7270 rdp_context = kzalloc(sizeof(struct lpfc_rdp_context), GFP_KERNEL); 7271 if (!rdp_context) { 7272 rjt_err = LSRJT_UNABLE_TPC; 7273 goto error; 7274 } 7275 7276 cmd = &cmdiocb->wqe; 7277 rdp_context->ndlp = lpfc_nlp_get(ndlp); 7278 if (!rdp_context->ndlp) { 7279 kfree(rdp_context); 7280 rjt_err = LSRJT_UNABLE_TPC; 7281 goto error; 7282 } 7283 rdp_context->ox_id = bf_get(wqe_rcvoxid, 7284 &cmd->xmit_els_rsp.wqe_com); 7285 rdp_context->rx_id = bf_get(wqe_ctxt_tag, 7286 &cmd->xmit_els_rsp.wqe_com); 7287 rdp_context->cmpl = lpfc_els_rdp_cmpl; 7288 if (lpfc_get_rdp_info(phba, rdp_context)) { 7289 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_ELS, 7290 "2423 Unable to send mailbox"); 7291 kfree(rdp_context); 7292 rjt_err = LSRJT_UNABLE_TPC; 7293 lpfc_nlp_put(ndlp); 7294 goto error; 7295 } 7296 7297 return 0; 7298 7299 rjt_logerr: 7300 rjt_err = LSRJT_LOGICAL_ERR; 7301 7302 error: 7303 memset(&stat, 0, sizeof(stat)); 7304 stat.un.b.lsRjtRsnCode = rjt_err; 7305 stat.un.b.lsRjtRsnCodeExp = rjt_expl; 7306 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 7307 return 1; 7308 } 7309 7310 7311 static void 7312 lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 7313 { 7314 MAILBOX_t *mb; 7315 IOCB_t *icmd; 7316 union lpfc_wqe128 *wqe; 7317 uint8_t *pcmd; 7318 struct lpfc_iocbq *elsiocb; 7319 struct lpfc_nodelist *ndlp; 7320 struct ls_rjt *stat; 7321 union lpfc_sli4_cfg_shdr *shdr; 7322 struct lpfc_lcb_context *lcb_context; 7323 struct fc_lcb_res_frame *lcb_res; 7324 uint32_t cmdsize, shdr_status, shdr_add_status; 7325 int rc; 7326 7327 mb = &pmb->u.mb; 7328 lcb_context = (struct lpfc_lcb_context *)pmb->ctx_ndlp; 7329 ndlp = lcb_context->ndlp; 7330 pmb->ctx_ndlp = NULL; 7331 pmb->ctx_buf = NULL; 7332 7333 shdr = (union lpfc_sli4_cfg_shdr *) 7334 &pmb->u.mqe.un.beacon_config.header.cfg_shdr; 7335 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 7336 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 
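	/*
	 * Informal note on the flow below: any mailbox error, a non-zero
	 * SLI config header status, or an add_status of "operation already
	 * active" / "invalid request" routes to the error: label, where an
	 * LS_RJT is returned to the LCB originator; otherwise an LCB ACC is
	 * built that echoes the sub-command, type, capability, frequency
	 * and duration saved in lcb_context.
	 */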
7337 7338 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX, 7339 "0194 SET_BEACON_CONFIG mailbox " 7340 "completed with status x%x add_status x%x," 7341 " mbx status x%x\n", 7342 shdr_status, shdr_add_status, mb->mbxStatus); 7343 7344 if ((mb->mbxStatus != MBX_SUCCESS) || shdr_status || 7345 (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) || 7346 (shdr_add_status == ADD_STATUS_INVALID_REQUEST)) { 7347 mempool_free(pmb, phba->mbox_mem_pool); 7348 goto error; 7349 } 7350 7351 mempool_free(pmb, phba->mbox_mem_pool); 7352 cmdsize = sizeof(struct fc_lcb_res_frame); 7353 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 7354 lpfc_max_els_tries, ndlp, 7355 ndlp->nlp_DID, ELS_CMD_ACC); 7356 7357 /* Decrement the ndlp reference count from previous mbox command */ 7358 lpfc_nlp_put(ndlp); 7359 7360 if (!elsiocb) 7361 goto free_lcb_context; 7362 7363 lcb_res = (struct fc_lcb_res_frame *) 7364 (((struct lpfc_dmabuf *)elsiocb->context2)->virt); 7365 7366 memset(lcb_res, 0, sizeof(struct fc_lcb_res_frame)); 7367 7368 if (phba->sli_rev == LPFC_SLI_REV4) { 7369 wqe = &elsiocb->wqe; 7370 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, lcb_context->rx_id); 7371 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 7372 lcb_context->ox_id); 7373 } else { 7374 icmd = &elsiocb->iocb; 7375 icmd->ulpContext = lcb_context->rx_id; 7376 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; 7377 } 7378 7379 pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt); 7380 *((uint32_t *)(pcmd)) = ELS_CMD_ACC; 7381 lcb_res->lcb_sub_command = lcb_context->sub_command; 7382 lcb_res->lcb_type = lcb_context->type; 7383 lcb_res->capability = lcb_context->capability; 7384 lcb_res->lcb_frequency = lcb_context->frequency; 7385 lcb_res->lcb_duration = lcb_context->duration; 7386 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 7387 phba->fc_stat.elsXmitACC++; 7388 7389 elsiocb->context1 = lpfc_nlp_get(ndlp); 7390 if (!elsiocb->context1) { 7391 lpfc_els_free_iocb(phba, elsiocb); 7392 goto out; 7393 } 7394 7395 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7396 if (rc == IOCB_ERROR) { 7397 lpfc_els_free_iocb(phba, elsiocb); 7398 lpfc_nlp_put(ndlp); 7399 } 7400 out: 7401 kfree(lcb_context); 7402 return; 7403 7404 error: 7405 cmdsize = sizeof(struct fc_lcb_res_frame); 7406 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 7407 lpfc_max_els_tries, ndlp, 7408 ndlp->nlp_DID, ELS_CMD_LS_RJT); 7409 lpfc_nlp_put(ndlp); 7410 if (!elsiocb) 7411 goto free_lcb_context; 7412 7413 if (phba->sli_rev == LPFC_SLI_REV4) { 7414 wqe = &elsiocb->wqe; 7415 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, lcb_context->rx_id); 7416 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 7417 lcb_context->ox_id); 7418 } else { 7419 icmd = &elsiocb->iocb; 7420 icmd->ulpContext = lcb_context->rx_id; 7421 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; 7422 } 7423 7424 pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt); 7425 7426 *((uint32_t *)(pcmd)) = ELS_CMD_LS_RJT; 7427 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t)); 7428 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 7429 7430 if (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) 7431 stat->un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS; 7432 7433 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 7434 phba->fc_stat.elsXmitLSRJT++; 7435 elsiocb->context1 = lpfc_nlp_get(ndlp); 7436 if (!elsiocb->context1) { 7437 lpfc_els_free_iocb(phba, elsiocb); 7438 goto free_lcb_context; 7439 } 7440 7441 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7442 if (rc == IOCB_ERROR) { 7443 
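		/* Issue failed: release the LS_RJT iocb and drop the node
		 * reference that was just taken for it.
		 */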
lpfc_els_free_iocb(phba, elsiocb); 7444 lpfc_nlp_put(ndlp); 7445 } 7446 free_lcb_context: 7447 kfree(lcb_context); 7448 } 7449 7450 static int 7451 lpfc_sli4_set_beacon(struct lpfc_vport *vport, 7452 struct lpfc_lcb_context *lcb_context, 7453 uint32_t beacon_state) 7454 { 7455 struct lpfc_hba *phba = vport->phba; 7456 union lpfc_sli4_cfg_shdr *cfg_shdr; 7457 LPFC_MBOXQ_t *mbox = NULL; 7458 uint32_t len; 7459 int rc; 7460 7461 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7462 if (!mbox) 7463 return 1; 7464 7465 cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr; 7466 len = sizeof(struct lpfc_mbx_set_beacon_config) - 7467 sizeof(struct lpfc_sli4_cfg_mhdr); 7468 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 7469 LPFC_MBOX_OPCODE_SET_BEACON_CONFIG, len, 7470 LPFC_SLI4_MBX_EMBED); 7471 mbox->ctx_ndlp = (void *)lcb_context; 7472 mbox->vport = phba->pport; 7473 mbox->mbox_cmpl = lpfc_els_lcb_rsp; 7474 bf_set(lpfc_mbx_set_beacon_port_num, &mbox->u.mqe.un.beacon_config, 7475 phba->sli4_hba.physical_port); 7476 bf_set(lpfc_mbx_set_beacon_state, &mbox->u.mqe.un.beacon_config, 7477 beacon_state); 7478 mbox->u.mqe.un.beacon_config.word5 = 0; /* Reserved */ 7479 7480 /* 7481 * Check bv1s bit before issuing the mailbox 7482 * if bv1s == 1, LCB V1 supported 7483 * else, LCB V0 supported 7484 */ 7485 7486 if (phba->sli4_hba.pc_sli4_params.bv1s) { 7487 /* COMMON_SET_BEACON_CONFIG_V1 */ 7488 cfg_shdr->request.word9 = BEACON_VERSION_V1; 7489 lcb_context->capability |= LCB_CAPABILITY_DURATION; 7490 bf_set(lpfc_mbx_set_beacon_port_type, 7491 &mbox->u.mqe.un.beacon_config, 0); 7492 bf_set(lpfc_mbx_set_beacon_duration_v1, 7493 &mbox->u.mqe.un.beacon_config, 7494 be16_to_cpu(lcb_context->duration)); 7495 } else { 7496 /* COMMON_SET_BEACON_CONFIG_V0 */ 7497 if (be16_to_cpu(lcb_context->duration) != 0) { 7498 mempool_free(mbox, phba->mbox_mem_pool); 7499 return 1; 7500 } 7501 cfg_shdr->request.word9 = BEACON_VERSION_V0; 7502 lcb_context->capability &= ~(LCB_CAPABILITY_DURATION); 7503 bf_set(lpfc_mbx_set_beacon_state, 7504 &mbox->u.mqe.un.beacon_config, beacon_state); 7505 bf_set(lpfc_mbx_set_beacon_port_type, 7506 &mbox->u.mqe.un.beacon_config, 1); 7507 bf_set(lpfc_mbx_set_beacon_duration, 7508 &mbox->u.mqe.un.beacon_config, 7509 be16_to_cpu(lcb_context->duration)); 7510 } 7511 7512 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 7513 if (rc == MBX_NOT_FINISHED) { 7514 mempool_free(mbox, phba->mbox_mem_pool); 7515 return 1; 7516 } 7517 7518 return 0; 7519 } 7520 7521 7522 /** 7523 * lpfc_els_rcv_lcb - Process an unsolicited LCB 7524 * @vport: pointer to a host virtual N_Port data structure. 7525 * @cmdiocb: pointer to lpfc command iocb data structure. 7526 * @ndlp: pointer to a node-list data structure. 7527 * 7528 * This routine processes an unsolicited LCB(LINK CABLE BEACON) IOCB. 7529 * First, the payload of the unsolicited LCB is checked. 7530 * Then based on Subcommand beacon will either turn on or off. 7531 * 7532 * Return code 7533 * 0 - Sent the acc response 7534 * 1 - Sent the reject response. 
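 *
 * Note (summary of the handling below): the request is only honored on
 * SLI-4, IF_TYPE_2 or later, non-FCoE ports and only for the LPFC_LCB_ON /
 * LPFC_LCB_OFF sub-commands; lpfc_sli4_set_beacon() then translates it into
 * a SET_BEACON_CONFIG mailbox command (V1 when the adapter reports bv1s,
 * otherwise V0, which rejects any non-zero duration), and the ACC or LS_RJT
 * is finally sent from the mailbox completion handler, lpfc_els_lcb_rsp().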
7535 **/ 7536 static int 7537 lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7538 struct lpfc_nodelist *ndlp) 7539 { 7540 struct lpfc_hba *phba = vport->phba; 7541 struct lpfc_dmabuf *pcmd; 7542 uint8_t *lp; 7543 struct fc_lcb_request_frame *beacon; 7544 struct lpfc_lcb_context *lcb_context; 7545 u8 state, rjt_err = 0; 7546 struct ls_rjt stat; 7547 7548 pcmd = (struct lpfc_dmabuf *)cmdiocb->context2; 7549 lp = (uint8_t *)pcmd->virt; 7550 beacon = (struct fc_lcb_request_frame *)pcmd->virt; 7551 7552 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7553 "0192 ELS LCB Data x%x x%x x%x x%x sub x%x " 7554 "type x%x frequency %x duration x%x\n", 7555 lp[0], lp[1], lp[2], 7556 beacon->lcb_command, 7557 beacon->lcb_sub_command, 7558 beacon->lcb_type, 7559 beacon->lcb_frequency, 7560 be16_to_cpu(beacon->lcb_duration)); 7561 7562 if (beacon->lcb_sub_command != LPFC_LCB_ON && 7563 beacon->lcb_sub_command != LPFC_LCB_OFF) { 7564 rjt_err = LSRJT_CMD_UNSUPPORTED; 7565 goto rjt; 7566 } 7567 7568 if (phba->sli_rev < LPFC_SLI_REV4 || 7569 phba->hba_flag & HBA_FCOE_MODE || 7570 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 7571 LPFC_SLI_INTF_IF_TYPE_2)) { 7572 rjt_err = LSRJT_CMD_UNSUPPORTED; 7573 goto rjt; 7574 } 7575 7576 lcb_context = kmalloc(sizeof(*lcb_context), GFP_KERNEL); 7577 if (!lcb_context) { 7578 rjt_err = LSRJT_UNABLE_TPC; 7579 goto rjt; 7580 } 7581 7582 state = (beacon->lcb_sub_command == LPFC_LCB_ON) ? 1 : 0; 7583 lcb_context->sub_command = beacon->lcb_sub_command; 7584 lcb_context->capability = 0; 7585 lcb_context->type = beacon->lcb_type; 7586 lcb_context->frequency = beacon->lcb_frequency; 7587 lcb_context->duration = beacon->lcb_duration; 7588 lcb_context->ox_id = get_job_rcvoxid(phba, cmdiocb); 7589 lcb_context->rx_id = get_job_ulpcontext(phba, cmdiocb); 7590 lcb_context->ndlp = lpfc_nlp_get(ndlp); 7591 if (!lcb_context->ndlp) { 7592 rjt_err = LSRJT_UNABLE_TPC; 7593 goto rjt_free; 7594 } 7595 7596 if (lpfc_sli4_set_beacon(vport, lcb_context, state)) { 7597 lpfc_printf_vlog(ndlp->vport, KERN_ERR, LOG_TRACE_EVENT, 7598 "0193 failed to send mail box"); 7599 lpfc_nlp_put(ndlp); 7600 rjt_err = LSRJT_UNABLE_TPC; 7601 goto rjt_free; 7602 } 7603 return 0; 7604 7605 rjt_free: 7606 kfree(lcb_context); 7607 rjt: 7608 memset(&stat, 0, sizeof(stat)); 7609 stat.un.b.lsRjtRsnCode = rjt_err; 7610 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 7611 return 1; 7612 } 7613 7614 7615 /** 7616 * lpfc_els_flush_rscn - Clean up any rscn activities with a vport 7617 * @vport: pointer to a host virtual N_Port data structure. 7618 * 7619 * This routine cleans up any Registration State Change Notification 7620 * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the 7621 * @vport together with the host_lock is used to prevent multiple thread 7622 * trying to access the RSCN array on a same @vport at the same time. 
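 *
 * Each buffer saved on fc_rscn_id_list is returned with lpfc_in_buf_free(),
 * fc_rscn_id_cnt is reset to zero, the FC_RSCN_MODE and FC_RSCN_DISCOVERY
 * flags are cleared, and lpfc_can_disctmo() is given a chance to stop the
 * discovery timer, so a later RSCN can start discovery from a clean state.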
7623 **/ 7624 void 7625 lpfc_els_flush_rscn(struct lpfc_vport *vport) 7626 { 7627 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7628 struct lpfc_hba *phba = vport->phba; 7629 int i; 7630 7631 spin_lock_irq(shost->host_lock); 7632 if (vport->fc_rscn_flush) { 7633 /* Another thread is walking fc_rscn_id_list on this vport */ 7634 spin_unlock_irq(shost->host_lock); 7635 return; 7636 } 7637 /* Indicate we are walking lpfc_els_flush_rscn on this vport */ 7638 vport->fc_rscn_flush = 1; 7639 spin_unlock_irq(shost->host_lock); 7640 7641 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 7642 lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]); 7643 vport->fc_rscn_id_list[i] = NULL; 7644 } 7645 spin_lock_irq(shost->host_lock); 7646 vport->fc_rscn_id_cnt = 0; 7647 vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY); 7648 spin_unlock_irq(shost->host_lock); 7649 lpfc_can_disctmo(vport); 7650 /* Indicate we are done walking this fc_rscn_id_list */ 7651 vport->fc_rscn_flush = 0; 7652 } 7653 7654 /** 7655 * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did 7656 * @vport: pointer to a host virtual N_Port data structure. 7657 * @did: remote destination port identifier. 7658 * 7659 * This routine checks whether there is any pending Registration State 7660 * Configuration Notification (RSCN) to a @did on @vport. 7661 * 7662 * Return code 7663 * None zero - The @did matched with a pending rscn 7664 * 0 - not able to match @did with a pending rscn 7665 **/ 7666 int 7667 lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did) 7668 { 7669 D_ID ns_did; 7670 D_ID rscn_did; 7671 uint32_t *lp; 7672 uint32_t payload_len, i; 7673 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7674 7675 ns_did.un.word = did; 7676 7677 /* Never match fabric nodes for RSCNs */ 7678 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 7679 return 0; 7680 7681 /* If we are doing a FULL RSCN rediscovery, match everything */ 7682 if (vport->fc_flag & FC_RSCN_DISCOVERY) 7683 return did; 7684 7685 spin_lock_irq(shost->host_lock); 7686 if (vport->fc_rscn_flush) { 7687 /* Another thread is walking fc_rscn_id_list on this vport */ 7688 spin_unlock_irq(shost->host_lock); 7689 return 0; 7690 } 7691 /* Indicate we are walking fc_rscn_id_list on this vport */ 7692 vport->fc_rscn_flush = 1; 7693 spin_unlock_irq(shost->host_lock); 7694 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 7695 lp = vport->fc_rscn_id_list[i]->virt; 7696 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); 7697 payload_len -= sizeof(uint32_t); /* take off word 0 */ 7698 while (payload_len) { 7699 rscn_did.un.word = be32_to_cpu(*lp++); 7700 payload_len -= sizeof(uint32_t); 7701 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) { 7702 case RSCN_ADDRESS_FORMAT_PORT: 7703 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 7704 && (ns_did.un.b.area == rscn_did.un.b.area) 7705 && (ns_did.un.b.id == rscn_did.un.b.id)) 7706 goto return_did_out; 7707 break; 7708 case RSCN_ADDRESS_FORMAT_AREA: 7709 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 7710 && (ns_did.un.b.area == rscn_did.un.b.area)) 7711 goto return_did_out; 7712 break; 7713 case RSCN_ADDRESS_FORMAT_DOMAIN: 7714 if (ns_did.un.b.domain == rscn_did.un.b.domain) 7715 goto return_did_out; 7716 break; 7717 case RSCN_ADDRESS_FORMAT_FABRIC: 7718 goto return_did_out; 7719 } 7720 } 7721 } 7722 /* Indicate we are done with walking fc_rscn_id_list on this vport */ 7723 vport->fc_rscn_flush = 0; 7724 return 0; 7725 return_did_out: 7726 /* Indicate we are done with walking fc_rscn_id_list on this 
vport */ 7727 vport->fc_rscn_flush = 0; 7728 return did; 7729 } 7730 7731 /** 7732 * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn 7733 * @vport: pointer to a host virtual N_Port data structure. 7734 * 7735 * This routine sends recovery (NLP_EVT_DEVICE_RECOVERY) event to the 7736 * state machine for a @vport's nodes that are with pending RSCN (Registration 7737 * State Change Notification). 7738 * 7739 * Return code 7740 * 0 - Successful (currently alway return 0) 7741 **/ 7742 static int 7743 lpfc_rscn_recovery_check(struct lpfc_vport *vport) 7744 { 7745 struct lpfc_nodelist *ndlp = NULL; 7746 7747 /* Move all affected nodes by pending RSCNs to NPR state. */ 7748 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 7749 if ((ndlp->nlp_state == NLP_STE_UNUSED_NODE) || 7750 !lpfc_rscn_payload_check(vport, ndlp->nlp_DID)) 7751 continue; 7752 7753 /* NVME Target mode does not do RSCN Recovery. */ 7754 if (vport->phba->nvmet_support) 7755 continue; 7756 7757 /* If we are in the process of doing discovery on this 7758 * NPort, let it continue on its own. 7759 */ 7760 switch (ndlp->nlp_state) { 7761 case NLP_STE_PLOGI_ISSUE: 7762 case NLP_STE_ADISC_ISSUE: 7763 case NLP_STE_REG_LOGIN_ISSUE: 7764 case NLP_STE_PRLI_ISSUE: 7765 case NLP_STE_LOGO_ISSUE: 7766 continue; 7767 } 7768 7769 lpfc_disc_state_machine(vport, ndlp, NULL, 7770 NLP_EVT_DEVICE_RECOVERY); 7771 lpfc_cancel_retry_delay_tmo(vport, ndlp); 7772 } 7773 return 0; 7774 } 7775 7776 /** 7777 * lpfc_send_rscn_event - Send an RSCN event to management application 7778 * @vport: pointer to a host virtual N_Port data structure. 7779 * @cmdiocb: pointer to lpfc command iocb data structure. 7780 * 7781 * lpfc_send_rscn_event sends an RSCN netlink event to management 7782 * applications. 7783 */ 7784 static void 7785 lpfc_send_rscn_event(struct lpfc_vport *vport, 7786 struct lpfc_iocbq *cmdiocb) 7787 { 7788 struct lpfc_dmabuf *pcmd; 7789 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7790 uint32_t *payload_ptr; 7791 uint32_t payload_len; 7792 struct lpfc_rscn_event_header *rscn_event_data; 7793 7794 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 7795 payload_ptr = (uint32_t *) pcmd->virt; 7796 payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK); 7797 7798 rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) + 7799 payload_len, GFP_KERNEL); 7800 if (!rscn_event_data) { 7801 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 7802 "0147 Failed to allocate memory for RSCN event\n"); 7803 return; 7804 } 7805 rscn_event_data->event_type = FC_REG_RSCN_EVENT; 7806 rscn_event_data->payload_length = payload_len; 7807 memcpy(rscn_event_data->rscn_payload, payload_ptr, 7808 payload_len); 7809 7810 fc_host_post_vendor_event(shost, 7811 fc_get_event_number(), 7812 sizeof(struct lpfc_rscn_event_header) + payload_len, 7813 (char *)rscn_event_data, 7814 LPFC_NL_VENDOR_ID); 7815 7816 kfree(rscn_event_data); 7817 } 7818 7819 /** 7820 * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb 7821 * @vport: pointer to a host virtual N_Port data structure. 7822 * @cmdiocb: pointer to lpfc command iocb data structure. 7823 * @ndlp: pointer to a node-list data structure. 7824 * 7825 * This routine processes an unsolicited RSCN (Registration State Change 7826 * Notification) IOCB. First, the payload of the unsolicited RSCN is walked 7827 * to invoke fc_host_post_event() routine to the FC transport layer. 
If the 7828 * discover state machine is about to begin discovery, it just accepts the 7829 * RSCN and the discovery process will satisfy the RSCN. If this RSCN only 7830 * contains N_Port IDs for other vports on this HBA, it just accepts the 7831 * RSCN and ignore processing it. If the state machine is in the recovery 7832 * state, the fc_rscn_id_list of this @vport is walked and the 7833 * lpfc_rscn_recovery_check() routine is invoked to send recovery event for 7834 * all nodes that match RSCN payload. Otherwise, the lpfc_els_handle_rscn() 7835 * routine is invoked to handle the RSCN event. 7836 * 7837 * Return code 7838 * 0 - Just sent the acc response 7839 * 1 - Sent the acc response and waited for name server completion 7840 **/ 7841 static int 7842 lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7843 struct lpfc_nodelist *ndlp) 7844 { 7845 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7846 struct lpfc_hba *phba = vport->phba; 7847 struct lpfc_dmabuf *pcmd; 7848 uint32_t *lp, *datap; 7849 uint32_t payload_len, length, nportid, *cmd; 7850 int rscn_cnt; 7851 int rscn_id = 0, hba_id = 0; 7852 int i, tmo; 7853 7854 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 7855 lp = (uint32_t *) pcmd->virt; 7856 7857 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); 7858 payload_len -= sizeof(uint32_t); /* take off word 0 */ 7859 /* RSCN received */ 7860 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 7861 "0214 RSCN received Data: x%x x%x x%x x%x\n", 7862 vport->fc_flag, payload_len, *lp, 7863 vport->fc_rscn_id_cnt); 7864 7865 /* Send an RSCN event to the management application */ 7866 lpfc_send_rscn_event(vport, cmdiocb); 7867 7868 for (i = 0; i < payload_len/sizeof(uint32_t); i++) 7869 fc_host_post_event(shost, fc_get_event_number(), 7870 FCH_EVT_RSCN, lp[i]); 7871 7872 /* Check if RSCN is coming from a direct-connected remote NPort */ 7873 if (vport->fc_flag & FC_PT2PT) { 7874 /* If so, just ACC it, no other action needed for now */ 7875 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7876 "2024 pt2pt RSCN %08x Data: x%x x%x\n", 7877 *lp, vport->fc_flag, payload_len); 7878 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 7879 7880 /* Check to see if we need to NVME rescan this target 7881 * remoteport. 7882 */ 7883 if (ndlp->nlp_fc4_type & NLP_FC4_NVME && 7884 ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY)) 7885 lpfc_nvme_rescan_port(vport, ndlp); 7886 return 0; 7887 } 7888 7889 /* If we are about to begin discovery, just ACC the RSCN. 7890 * Discovery processing will satisfy it. 7891 */ 7892 if (vport->port_state <= LPFC_NS_QRY) { 7893 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 7894 "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x", 7895 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 7896 7897 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 7898 return 0; 7899 } 7900 7901 /* If this RSCN just contains NPortIDs for other vports on this HBA, 7902 * just ACC and ignore it. 
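 * (Each N_Port ID in the payload is masked with Mask_DID and looked up
 * with lpfc_find_vport_by_did(); only when every entry resolves to one of
 * this HBA's vports is the RSCN ACC'd here, with the discovery timer
 * restarted if it is already running.)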
7903 */ 7904 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 7905 !(vport->cfg_peer_port_login)) { 7906 i = payload_len; 7907 datap = lp; 7908 while (i > 0) { 7909 nportid = *datap++; 7910 nportid = ((be32_to_cpu(nportid)) & Mask_DID); 7911 i -= sizeof(uint32_t); 7912 rscn_id++; 7913 if (lpfc_find_vport_by_did(phba, nportid)) 7914 hba_id++; 7915 } 7916 if (rscn_id == hba_id) { 7917 /* ALL NPortIDs in RSCN are on HBA */ 7918 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 7919 "0219 Ignore RSCN " 7920 "Data: x%x x%x x%x x%x\n", 7921 vport->fc_flag, payload_len, 7922 *lp, vport->fc_rscn_id_cnt); 7923 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 7924 "RCV RSCN vport: did:x%x/ste:x%x flg:x%x", 7925 ndlp->nlp_DID, vport->port_state, 7926 ndlp->nlp_flag); 7927 7928 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, 7929 ndlp, NULL); 7930 /* Restart disctmo if its already running */ 7931 if (vport->fc_flag & FC_DISC_TMO) { 7932 tmo = ((phba->fc_ratov * 3) + 3); 7933 mod_timer(&vport->fc_disctmo, 7934 jiffies + 7935 msecs_to_jiffies(1000 * tmo)); 7936 } 7937 return 0; 7938 } 7939 } 7940 7941 spin_lock_irq(shost->host_lock); 7942 if (vport->fc_rscn_flush) { 7943 /* Another thread is walking fc_rscn_id_list on this vport */ 7944 vport->fc_flag |= FC_RSCN_DISCOVERY; 7945 spin_unlock_irq(shost->host_lock); 7946 /* Send back ACC */ 7947 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 7948 return 0; 7949 } 7950 /* Indicate we are walking fc_rscn_id_list on this vport */ 7951 vport->fc_rscn_flush = 1; 7952 spin_unlock_irq(shost->host_lock); 7953 /* Get the array count after successfully have the token */ 7954 rscn_cnt = vport->fc_rscn_id_cnt; 7955 /* If we are already processing an RSCN, save the received 7956 * RSCN payload buffer, cmdiocb->context2 to process later. 7957 */ 7958 if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) { 7959 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 7960 "RCV RSCN defer: did:x%x/ste:x%x flg:x%x", 7961 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 7962 7963 spin_lock_irq(shost->host_lock); 7964 vport->fc_flag |= FC_RSCN_DEFERRED; 7965 7966 /* Restart disctmo if its already running */ 7967 if (vport->fc_flag & FC_DISC_TMO) { 7968 tmo = ((phba->fc_ratov * 3) + 3); 7969 mod_timer(&vport->fc_disctmo, 7970 jiffies + msecs_to_jiffies(1000 * tmo)); 7971 } 7972 if ((rscn_cnt < FC_MAX_HOLD_RSCN) && 7973 !(vport->fc_flag & FC_RSCN_DISCOVERY)) { 7974 vport->fc_flag |= FC_RSCN_MODE; 7975 spin_unlock_irq(shost->host_lock); 7976 if (rscn_cnt) { 7977 cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt; 7978 length = be32_to_cpu(*cmd & ~ELS_CMD_MASK); 7979 } 7980 if ((rscn_cnt) && 7981 (payload_len + length <= LPFC_BPL_SIZE)) { 7982 *cmd &= ELS_CMD_MASK; 7983 *cmd |= cpu_to_be32(payload_len + length); 7984 memcpy(((uint8_t *)cmd) + length, lp, 7985 payload_len); 7986 } else { 7987 vport->fc_rscn_id_list[rscn_cnt] = pcmd; 7988 vport->fc_rscn_id_cnt++; 7989 /* If we zero, cmdiocb->context2, the calling 7990 * routine will not try to free it. 
7991 */ 7992 cmdiocb->context2 = NULL; 7993 } 7994 /* Deferred RSCN */ 7995 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 7996 "0235 Deferred RSCN " 7997 "Data: x%x x%x x%x\n", 7998 vport->fc_rscn_id_cnt, vport->fc_flag, 7999 vport->port_state); 8000 } else { 8001 vport->fc_flag |= FC_RSCN_DISCOVERY; 8002 spin_unlock_irq(shost->host_lock); 8003 /* ReDiscovery RSCN */ 8004 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 8005 "0234 ReDiscovery RSCN " 8006 "Data: x%x x%x x%x\n", 8007 vport->fc_rscn_id_cnt, vport->fc_flag, 8008 vport->port_state); 8009 } 8010 /* Indicate we are done walking fc_rscn_id_list on this vport */ 8011 vport->fc_rscn_flush = 0; 8012 /* Send back ACC */ 8013 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8014 /* send RECOVERY event for ALL nodes that match RSCN payload */ 8015 lpfc_rscn_recovery_check(vport); 8016 return 0; 8017 } 8018 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8019 "RCV RSCN: did:x%x/ste:x%x flg:x%x", 8020 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 8021 8022 spin_lock_irq(shost->host_lock); 8023 vport->fc_flag |= FC_RSCN_MODE; 8024 spin_unlock_irq(shost->host_lock); 8025 vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd; 8026 /* Indicate we are done walking fc_rscn_id_list on this vport */ 8027 vport->fc_rscn_flush = 0; 8028 /* 8029 * If we zero, cmdiocb->context2, the calling routine will 8030 * not try to free it. 8031 */ 8032 cmdiocb->context2 = NULL; 8033 lpfc_set_disctmo(vport); 8034 /* Send back ACC */ 8035 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8036 /* send RECOVERY event for ALL nodes that match RSCN payload */ 8037 lpfc_rscn_recovery_check(vport); 8038 return lpfc_els_handle_rscn(vport); 8039 } 8040 8041 /** 8042 * lpfc_els_handle_rscn - Handle rscn for a vport 8043 * @vport: pointer to a host virtual N_Port data structure. 8044 * 8045 * This routine handles the Registration State Configuration Notification 8046 * (RSCN) for a @vport. If login to NameServer does not exist, a new ndlp shall 8047 * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise, 8048 * if the ndlp to NameServer exists, a Common Transport (CT) command to the 8049 * NameServer shall be issued. If CT command to the NameServer fails to be 8050 * issued, the lpfc_els_flush_rscn() routine shall be invoked to clean up any 8051 * RSCN activities with the @vport. 8052 * 8053 * Return code 8054 * 0 - Cleaned up rscn on the @vport 8055 * 1 - Wait for plogi to name server before proceed 8056 **/ 8057 int 8058 lpfc_els_handle_rscn(struct lpfc_vport *vport) 8059 { 8060 struct lpfc_nodelist *ndlp; 8061 struct lpfc_hba *phba = vport->phba; 8062 8063 /* Ignore RSCN if the port is being torn down. */ 8064 if (vport->load_flag & FC_UNLOADING) { 8065 lpfc_els_flush_rscn(vport); 8066 return 0; 8067 } 8068 8069 /* Start timer for RSCN processing */ 8070 lpfc_set_disctmo(vport); 8071 8072 /* RSCN processed */ 8073 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 8074 "0215 RSCN processed Data: x%x x%x x%x x%x x%x x%x\n", 8075 vport->fc_flag, 0, vport->fc_rscn_id_cnt, 8076 vport->port_state, vport->num_disc_nodes, 8077 vport->gidft_inp); 8078 8079 /* To process RSCN, first compare RSCN data with NameServer */ 8080 vport->fc_ns_retry = 0; 8081 vport->num_disc_nodes = 0; 8082 8083 ndlp = lpfc_findnode_did(vport, NameServer_DID); 8084 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { 8085 /* Good ndlp, issue CT Request to NameServer. Need to 8086 * know how many gidfts were issued. 
If none, then just 8087 * flush the RSCN. Otherwise, the outstanding requests 8088 * need to complete. 8089 */ 8090 if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_FT) { 8091 if (lpfc_issue_gidft(vport) > 0) 8092 return 1; 8093 } else if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_PT) { 8094 if (lpfc_issue_gidpt(vport) > 0) 8095 return 1; 8096 } else { 8097 return 1; 8098 } 8099 } else { 8100 /* Nameserver login in question. Revalidate. */ 8101 if (ndlp) { 8102 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE; 8103 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 8104 } else { 8105 ndlp = lpfc_nlp_init(vport, NameServer_DID); 8106 if (!ndlp) { 8107 lpfc_els_flush_rscn(vport); 8108 return 0; 8109 } 8110 ndlp->nlp_prev_state = ndlp->nlp_state; 8111 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 8112 } 8113 ndlp->nlp_type |= NLP_FABRIC; 8114 lpfc_issue_els_plogi(vport, NameServer_DID, 0); 8115 /* Wait for NameServer login cmpl before we can 8116 * continue 8117 */ 8118 return 1; 8119 } 8120 8121 lpfc_els_flush_rscn(vport); 8122 return 0; 8123 } 8124 8125 /** 8126 * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb 8127 * @vport: pointer to a host virtual N_Port data structure. 8128 * @cmdiocb: pointer to lpfc command iocb data structure. 8129 * @ndlp: pointer to a node-list data structure. 8130 * 8131 * This routine processes Fabric Login (FLOGI) IOCB received as an ELS 8132 * unsolicited event. An unsolicited FLOGI can be received in a point-to- 8133 * point topology. As an unsolicited FLOGI should not be received in a loop 8134 * mode, any unsolicited FLOGI received in loop mode shall be ignored. The 8135 * lpfc_check_sparm() routine is invoked to check the parameters in the 8136 * unsolicited FLOGI. If parameters validation failed, the routine 8137 * lpfc_els_rsp_reject() shall be called with reject reason code set to 8138 * LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the 8139 * FLOGI shall be compared with the Port WWN of the @vport to determine who 8140 * will initiate PLOGI. The higher lexicographical value party shall has 8141 * higher priority (as the winning port) and will initiate PLOGI and 8142 * communicate Port_IDs (Addresses) for both nodes in PLOGI. The result 8143 * of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI 8144 * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI. 
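 *
 * In practice (see the WWPN comparison below): when the local Port WWN is
 * the higher of the two, the port claims PT2PT_LocalID as its own ID and
 * sets FC_PT2PT_PLOGI so it will originate the PLOGI; when it is lower it
 * adopts PT2PT_RemoteID and waits for the peer's PLOGI. An exact match
 * means the port is seeing its own FLOGI (external loopback), in which
 * case the driver aborts its own outstanding FLOGI on SLI-4, or
 * re-initializes the link on SLI-3.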
8145 * 8146 * Return code 8147 * 0 - Successfully processed the unsolicited flogi 8148 * 1 - Failed to process the unsolicited flogi 8149 **/ 8150 static int 8151 lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8152 struct lpfc_nodelist *ndlp) 8153 { 8154 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 8155 struct lpfc_hba *phba = vport->phba; 8156 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 8157 uint32_t *lp = (uint32_t *) pcmd->virt; 8158 union lpfc_wqe128 *wqe = &cmdiocb->wqe; 8159 struct serv_parm *sp; 8160 LPFC_MBOXQ_t *mbox; 8161 uint32_t cmd, did; 8162 int rc; 8163 uint32_t fc_flag = 0; 8164 uint32_t port_state = 0; 8165 8166 cmd = *lp++; 8167 sp = (struct serv_parm *) lp; 8168 8169 /* FLOGI received */ 8170 8171 lpfc_set_disctmo(vport); 8172 8173 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 8174 /* We should never receive a FLOGI in loop mode, ignore it */ 8175 did = bf_get(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest); 8176 8177 /* An FLOGI ELS command <elsCmd> was received from DID <did> in 8178 Loop Mode */ 8179 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 8180 "0113 An FLOGI ELS command x%x was " 8181 "received from DID x%x in Loop Mode\n", 8182 cmd, did); 8183 return 1; 8184 } 8185 8186 (void) lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1); 8187 8188 /* 8189 * If our portname is greater than the remote portname, 8190 * then we initiate Nport login. 8191 */ 8192 8193 rc = memcmp(&vport->fc_portname, &sp->portName, 8194 sizeof(struct lpfc_name)); 8195 8196 if (!rc) { 8197 if (phba->sli_rev < LPFC_SLI_REV4) { 8198 mbox = mempool_alloc(phba->mbox_mem_pool, 8199 GFP_KERNEL); 8200 if (!mbox) 8201 return 1; 8202 lpfc_linkdown(phba); 8203 lpfc_init_link(phba, mbox, 8204 phba->cfg_topology, 8205 phba->cfg_link_speed); 8206 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; 8207 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 8208 mbox->vport = vport; 8209 rc = lpfc_sli_issue_mbox(phba, mbox, 8210 MBX_NOWAIT); 8211 lpfc_set_loopback_flag(phba); 8212 if (rc == MBX_NOT_FINISHED) 8213 mempool_free(mbox, phba->mbox_mem_pool); 8214 return 1; 8215 } 8216 8217 /* abort the flogi coming back to ourselves 8218 * due to external loopback on the port. 8219 */ 8220 lpfc_els_abort_flogi(phba); 8221 return 0; 8222 8223 } else if (rc > 0) { /* greater than */ 8224 spin_lock_irq(shost->host_lock); 8225 vport->fc_flag |= FC_PT2PT_PLOGI; 8226 spin_unlock_irq(shost->host_lock); 8227 8228 /* If we have the high WWPN we can assign our own 8229 * myDID; otherwise, we have to WAIT for a PLOGI 8230 * from the remote NPort to find out what it 8231 * will be. 8232 */ 8233 vport->fc_myDID = PT2PT_LocalID; 8234 } else { 8235 vport->fc_myDID = PT2PT_RemoteID; 8236 } 8237 8238 /* 8239 * The vport state should go to LPFC_FLOGI only 8240 * AFTER we issue a FLOGI, not receive one. 8241 */ 8242 spin_lock_irq(shost->host_lock); 8243 fc_flag = vport->fc_flag; 8244 port_state = vport->port_state; 8245 vport->fc_flag |= FC_PT2PT; 8246 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 8247 8248 /* Acking an unsol FLOGI. Count 1 for link bounce 8249 * work-around. 8250 */ 8251 vport->rcv_flogi_cnt++; 8252 spin_unlock_irq(shost->host_lock); 8253 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8254 "3311 Rcv Flogi PS x%x new PS x%x " 8255 "fc_flag x%x new fc_flag x%x\n", 8256 port_state, vport->port_state, 8257 fc_flag, vport->fc_flag); 8258 8259 /* 8260 * We temporarily set fc_myDID to make it look like we are 8261 * a Fabric. 
This is done just so we end up with the right 8262 * did / sid on the FLOGI ACC rsp. 8263 */ 8264 did = vport->fc_myDID; 8265 vport->fc_myDID = Fabric_DID; 8266 8267 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); 8268 8269 /* Defer ACC response until AFTER we issue a FLOGI */ 8270 if (!(phba->hba_flag & HBA_FLOGI_ISSUED)) { 8271 phba->defer_flogi_acc_rx_id = bf_get(wqe_ctxt_tag, 8272 &wqe->xmit_els_rsp.wqe_com); 8273 phba->defer_flogi_acc_ox_id = bf_get(wqe_rcvoxid, 8274 &wqe->xmit_els_rsp.wqe_com); 8275 8276 vport->fc_myDID = did; 8277 8278 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8279 "3344 Deferring FLOGI ACC: rx_id: x%x," 8280 " ox_id: x%x, hba_flag x%x\n", 8281 phba->defer_flogi_acc_rx_id, 8282 phba->defer_flogi_acc_ox_id, phba->hba_flag); 8283 8284 phba->defer_flogi_acc_flag = true; 8285 8286 return 0; 8287 } 8288 8289 /* Send back ACC */ 8290 lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, cmdiocb, ndlp, NULL); 8291 8292 /* Now lets put fc_myDID back to what its supposed to be */ 8293 vport->fc_myDID = did; 8294 8295 return 0; 8296 } 8297 8298 /** 8299 * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb 8300 * @vport: pointer to a host virtual N_Port data structure. 8301 * @cmdiocb: pointer to lpfc command iocb data structure. 8302 * @ndlp: pointer to a node-list data structure. 8303 * 8304 * This routine processes Request Node Identification Data (RNID) IOCB 8305 * received as an ELS unsolicited event. Only when the RNID specified format 8306 * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data) 8307 * present, this routine will invoke the lpfc_els_rsp_rnid_acc() routine to 8308 * Accept (ACC) the RNID ELS command. All the other RNID formats are 8309 * rejected by invoking the lpfc_els_rsp_reject() routine. 8310 * 8311 * Return code 8312 * 0 - Successfully processed rnid iocb (currently always return 0) 8313 **/ 8314 static int 8315 lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8316 struct lpfc_nodelist *ndlp) 8317 { 8318 struct lpfc_dmabuf *pcmd; 8319 uint32_t *lp; 8320 RNID *rn; 8321 struct ls_rjt stat; 8322 8323 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 8324 lp = (uint32_t *) pcmd->virt; 8325 8326 lp++; 8327 rn = (RNID *) lp; 8328 8329 /* RNID received */ 8330 8331 switch (rn->Format) { 8332 case 0: 8333 case RNID_TOPOLOGY_DISC: 8334 /* Send back ACC */ 8335 lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp); 8336 break; 8337 default: 8338 /* Reject this request because format not supported */ 8339 stat.un.b.lsRjtRsvd0 = 0; 8340 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8341 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8342 stat.un.b.vendorUnique = 0; 8343 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 8344 NULL); 8345 } 8346 return 0; 8347 } 8348 8349 /** 8350 * lpfc_els_rcv_echo - Process an unsolicited echo iocb 8351 * @vport: pointer to a host virtual N_Port data structure. 8352 * @cmdiocb: pointer to lpfc command iocb data structure. 8353 * @ndlp: pointer to a node-list data structure. 
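 *
 * This routine processes an ECHO IOCB received as an ELS unsolicited
 * event: it simply skips the 4-byte ECHO command word and hands the
 * remaining payload to lpfc_els_rsp_echo_acc(), which echoes the data
 * back to the sender in the ECHO Accept (ACC).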
8354 * 8355 * Return code 8356 * 0 - Successfully processed echo iocb (currently always return 0) 8357 **/ 8358 static int 8359 lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8360 struct lpfc_nodelist *ndlp) 8361 { 8362 uint8_t *pcmd; 8363 8364 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt); 8365 8366 /* skip over first word of echo command to find echo data */ 8367 pcmd += sizeof(uint32_t); 8368 8369 lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp); 8370 return 0; 8371 } 8372 8373 /** 8374 * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb 8375 * @vport: pointer to a host virtual N_Port data structure. 8376 * @cmdiocb: pointer to lpfc command iocb data structure. 8377 * @ndlp: pointer to a node-list data structure. 8378 * 8379 * This routine processes a Link Incident Report Registration(LIRR) IOCB 8380 * received as an ELS unsolicited event. Currently, this function just invokes 8381 * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally. 8382 * 8383 * Return code 8384 * 0 - Successfully processed lirr iocb (currently always return 0) 8385 **/ 8386 static int 8387 lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8388 struct lpfc_nodelist *ndlp) 8389 { 8390 struct ls_rjt stat; 8391 8392 /* For now, unconditionally reject this command */ 8393 stat.un.b.lsRjtRsvd0 = 0; 8394 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8395 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8396 stat.un.b.vendorUnique = 0; 8397 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 8398 return 0; 8399 } 8400 8401 /** 8402 * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb 8403 * @vport: pointer to a host virtual N_Port data structure. 8404 * @cmdiocb: pointer to lpfc command iocb data structure. 8405 * @ndlp: pointer to a node-list data structure. 8406 * 8407 * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB 8408 * received as an ELS unsolicited event. A request to RRQ shall only 8409 * be accepted if the Originator Nx_Port N_Port_ID or the Responder 8410 * Nx_Port N_Port_ID of the target Exchange is the same as the 8411 * N_Port_ID of the Nx_Port that makes the request. If the RRQ is 8412 * not accepted, an LS_RJT with reason code "Unable to perform 8413 * command request" and reason code explanation "Invalid Originator 8414 * S_ID" shall be returned. For now, we just unconditionally accept 8415 * RRQ from the target. 8416 **/ 8417 static void 8418 lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8419 struct lpfc_nodelist *ndlp) 8420 { 8421 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8422 if (vport->phba->sli_rev == LPFC_SLI_REV4) 8423 lpfc_els_clear_rrq(vport, cmdiocb, ndlp); 8424 } 8425 8426 /** 8427 * lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd 8428 * @phba: pointer to lpfc hba data structure. 8429 * @pmb: pointer to the driver internal queue element for mailbox command. 8430 * 8431 * This routine is the completion callback function for the MBX_READ_LNK_STAT 8432 * mailbox command. This callback function is to actually send the Accept 8433 * (ACC) response to a Read Link Status (RLS) unsolicited IOCB event. It 8434 * collects the link statistics from the completion of the MBX_READ_LNK_STAT 8435 * mailbox command, constructs the RLS response with the link statistics 8436 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC 8437 * response to the RLS. 
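 *
 * A small implementation detail worth calling out: rather than allocating
 * a separate context structure, lpfc_els_rcv_rls() packs the exchange
 * identifiers of the original RLS into the mailbox ctx_buf pointer itself,
 * roughly as
 *   mbox->ctx_buf = (void *)((unsigned long)(ox_id << 16 | ctx));
 * and this routine unpacks rx_id from the low 16 bits and ox_id from the
 * next 16 bits before addressing the ACC to that exchange.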
8438 * 8439 * Note that the ndlp reference count will be incremented by 1 for holding the 8440 * ndlp and the reference to ndlp will be stored into the context1 field of 8441 * the IOCB for the completion callback function to the RLS Accept Response 8442 * ELS IOCB command. 8443 * 8444 **/ 8445 static void 8446 lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 8447 { 8448 int rc = 0; 8449 MAILBOX_t *mb; 8450 IOCB_t *icmd; 8451 union lpfc_wqe128 *wqe; 8452 struct RLS_RSP *rls_rsp; 8453 uint8_t *pcmd; 8454 struct lpfc_iocbq *elsiocb; 8455 struct lpfc_nodelist *ndlp; 8456 uint16_t oxid; 8457 uint16_t rxid; 8458 uint32_t cmdsize; 8459 u32 ulp_context; 8460 8461 mb = &pmb->u.mb; 8462 8463 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; 8464 rxid = (uint16_t)((unsigned long)(pmb->ctx_buf) & 0xffff); 8465 oxid = (uint16_t)(((unsigned long)(pmb->ctx_buf) >> 16) & 0xffff); 8466 pmb->ctx_buf = NULL; 8467 pmb->ctx_ndlp = NULL; 8468 8469 if (mb->mbxStatus) { 8470 mempool_free(pmb, phba->mbox_mem_pool); 8471 return; 8472 } 8473 8474 cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t); 8475 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 8476 lpfc_max_els_tries, ndlp, 8477 ndlp->nlp_DID, ELS_CMD_ACC); 8478 8479 /* Decrement the ndlp reference count from previous mbox command */ 8480 lpfc_nlp_put(ndlp); 8481 8482 if (!elsiocb) { 8483 mempool_free(pmb, phba->mbox_mem_pool); 8484 return; 8485 } 8486 8487 ulp_context = get_job_ulpcontext(phba, elsiocb); 8488 if (phba->sli_rev == LPFC_SLI_REV4) { 8489 wqe = &elsiocb->wqe; 8490 /* Xri / rx_id */ 8491 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, rxid); 8492 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, oxid); 8493 } else { 8494 icmd = &elsiocb->iocb; 8495 icmd->ulpContext = rxid; 8496 icmd->unsli3.rcvsli3.ox_id = oxid; 8497 } 8498 8499 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 8500 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 8501 pcmd += sizeof(uint32_t); /* Skip past command */ 8502 rls_rsp = (struct RLS_RSP *)pcmd; 8503 8504 rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt); 8505 rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt); 8506 rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt); 8507 rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt); 8508 rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord); 8509 rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt); 8510 mempool_free(pmb, phba->mbox_mem_pool); 8511 /* Xmit ELS RLS ACC response tag <ulpIoTag> */ 8512 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 8513 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, " 8514 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 8515 elsiocb->iotag, ulp_context, 8516 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 8517 ndlp->nlp_rpi); 8518 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 8519 phba->fc_stat.elsXmitACC++; 8520 elsiocb->context1 = lpfc_nlp_get(ndlp); 8521 if (!elsiocb->context1) { 8522 lpfc_els_free_iocb(phba, elsiocb); 8523 return; 8524 } 8525 8526 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 8527 if (rc == IOCB_ERROR) { 8528 lpfc_els_free_iocb(phba, elsiocb); 8529 lpfc_nlp_put(ndlp); 8530 } 8531 return; 8532 } 8533 8534 /** 8535 * lpfc_els_rcv_rls - Process an unsolicited rls iocb 8536 * @vport: pointer to a host virtual N_Port data structure. 8537 * @cmdiocb: pointer to lpfc command iocb data structure. 8538 * @ndlp: pointer to a node-list data structure. 
8539 * 8540 * This routine processes Read Link Status (RLS) IOCB received as an 8541 * ELS unsolicited event. It first checks the remote port state. If the 8542 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE 8543 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject 8544 * response. Otherwise, it issues the MBX_READ_LNK_STAT mailbox command 8545 * for reading the HBA link statistics. The callback function, 8546 * lpfc_els_rsp_rls_acc(), set on the MBX_READ_LNK_STAT mailbox command, 8547 * then actually sends out the RLS Accept (ACC) response. 8548 * 8549 * Return codes 8550 * 0 - Successfully processed rls iocb (currently always return 0) 8551 **/ 8552 static int 8553 lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8554 struct lpfc_nodelist *ndlp) 8555 { 8556 struct lpfc_hba *phba = vport->phba; 8557 LPFC_MBOXQ_t *mbox; 8558 struct ls_rjt stat; 8559 u32 ctx = get_job_ulpcontext(phba, cmdiocb); 8560 u32 ox_id = get_job_rcvoxid(phba, cmdiocb); 8561 8562 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 8563 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) 8564 /* reject the unsolicited RLS request and done with it */ 8565 goto reject_out; 8566 8567 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC); 8568 if (mbox) { 8569 lpfc_read_lnk_stat(phba, mbox); 8570 mbox->ctx_buf = (void *)((unsigned long) 8571 (ox_id << 16 | ctx)); 8572 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 8573 if (!mbox->ctx_ndlp) 8574 goto node_err; 8575 mbox->vport = vport; 8576 mbox->mbox_cmpl = lpfc_els_rsp_rls_acc; 8577 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 8578 != MBX_NOT_FINISHED) 8579 /* Mbox completion will send ELS Response */ 8580 return 0; 8581 /* Decrement reference count used for the failed mbox 8582 * command. 8583 */ 8584 lpfc_nlp_put(ndlp); 8585 node_err: 8586 mempool_free(mbox, phba->mbox_mem_pool); 8587 } 8588 reject_out: 8589 /* issue rejection response */ 8590 stat.un.b.lsRjtRsvd0 = 0; 8591 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8592 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8593 stat.un.b.vendorUnique = 0; 8594 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 8595 return 0; 8596 } 8597 8598 /** 8599 * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb 8600 * @vport: pointer to a host virtual N_Port data structure. 8601 * @cmdiocb: pointer to lpfc command iocb data structure. 8602 * @ndlp: pointer to a node-list data structure. 8603 * 8604 * This routine processes Read Timeout Value (RTV) IOCB received as an 8605 * ELS unsolicited event. It first checks the remote port state. If the 8606 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE 8607 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject 8608 * response. Otherwise, it sends the Accept (ACC) response to a Read Timeout 8609 * Value (RTV) unsolicited IOCB event. 8610 * 8611 * Note that the ndlp reference count will be incremented by 1 for holding the 8612 * ndlp and the reference to ndlp will be stored into the context1 field of 8613 * the IOCB for the completion callback function to the RTV Accept Response 8614 * ELS IOCB command.
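 *
 * As a rough sketch, the RTV ACC payload built below carries:
 *
 *   rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000);   R_A_TOV in ms
 *   rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov);           E_D_TOV
 *   qtov word: E_D_TOV resolution flag taken from phba->fc_edtovResol
 *
 * i.e. the driver's fc_ratov (kept in seconds) is scaled to the
 * milliseconds expected in the RTV reply.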
8615 * 8616 * Return codes 8617 * 0 - Successfully processed rtv iocb (currently always return 0) 8618 **/ 8619 static int 8620 lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8621 struct lpfc_nodelist *ndlp) 8622 { 8623 int rc = 0; 8624 IOCB_t *icmd; 8625 union lpfc_wqe128 *wqe; 8626 struct lpfc_hba *phba = vport->phba; 8627 struct ls_rjt stat; 8628 struct RTV_RSP *rtv_rsp; 8629 uint8_t *pcmd; 8630 struct lpfc_iocbq *elsiocb; 8631 uint32_t cmdsize; 8632 u32 ulp_context; 8633 8634 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 8635 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) 8636 /* reject the unsolicited RTV request and done with it */ 8637 goto reject_out; 8638 8639 cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t); 8640 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 8641 lpfc_max_els_tries, ndlp, 8642 ndlp->nlp_DID, ELS_CMD_ACC); 8643 8644 if (!elsiocb) 8645 return 1; 8646 8647 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 8648 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 8649 pcmd += sizeof(uint32_t); /* Skip past command */ 8650 8651 ulp_context = get_job_ulpcontext(phba, elsiocb); 8652 /* use the command's xri in the response */ 8653 if (phba->sli_rev == LPFC_SLI_REV4) { 8654 wqe = &elsiocb->wqe; 8655 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 8656 get_job_ulpcontext(phba, cmdiocb)); 8657 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 8658 get_job_rcvoxid(phba, cmdiocb)); 8659 } else { 8660 icmd = &elsiocb->iocb; 8661 icmd->ulpContext = get_job_ulpcontext(phba, cmdiocb); 8662 icmd->unsli3.rcvsli3.ox_id = get_job_rcvoxid(phba, cmdiocb); 8663 } 8664 8665 rtv_rsp = (struct RTV_RSP *)pcmd; 8666 8667 /* populate RTV payload */ 8668 rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */ 8669 rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov); 8670 bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0); 8671 bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */ 8672 rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov); 8673 8674 /* Xmit ELS RTV ACC response tag <ulpIoTag> */ 8675 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 8676 "2875 Xmit ELS RTV ACC response tag x%x xri x%x, " 8677 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, " 8678 "Data: x%x x%x x%x\n", 8679 elsiocb->iotag, ulp_context, 8680 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 8681 ndlp->nlp_rpi, 8682 rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov); 8683 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 8684 phba->fc_stat.elsXmitACC++; 8685 elsiocb->context1 = lpfc_nlp_get(ndlp); 8686 if (!elsiocb->context1) { 8687 lpfc_els_free_iocb(phba, elsiocb); 8688 return 0; 8689 } 8690 8691 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 8692 if (rc == IOCB_ERROR) { 8693 lpfc_els_free_iocb(phba, elsiocb); 8694 lpfc_nlp_put(ndlp); 8695 } 8696 return 0; 8697 8698 reject_out: 8699 /* issue rejection response */ 8700 stat.un.b.lsRjtRsvd0 = 0; 8701 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8702 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8703 stat.un.b.vendorUnique = 0; 8704 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 8705 return 0; 8706 } 8707 8708 /* lpfc_issue_els_rrq - Issue an els rrq command iocb 8709 * @vport: pointer to a host virtual N_Port data structure. 8710 * @ndlp: pointer to a node-list data structure. 8711 * @did: DID of the target. 8712 * @rrq: Pointer to the rrq struct. 8713 * 8714 * Build an ELS RRQ command and send it to the target. If the issue_iocb is 8715 * successful, the completion handler will clear the RRQ.
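 *
 * The RRQ payload identifies the exchange being recovered: the originator
 * S_ID is taken from vport->fc_myDID, the OX_ID is the driver xritag
 * translated through phba->sli4_hba.xri_ids[], and the RX_ID comes from
 * the @rrq itself.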
8716 * 8717 * Return codes 8718 * 0 - Successfully sent rrq els iocb. 8719 * 1 - Failed to send rrq els iocb. 8720 **/ 8721 static int 8722 lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 8723 uint32_t did, struct lpfc_node_rrq *rrq) 8724 { 8725 struct lpfc_hba *phba = vport->phba; 8726 struct RRQ *els_rrq; 8727 struct lpfc_iocbq *elsiocb; 8728 uint8_t *pcmd; 8729 uint16_t cmdsize; 8730 int ret; 8731 8732 if (!ndlp) 8733 return 1; 8734 8735 /* If ndlp is not NULL, we will bump the reference count on it */ 8736 cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ)); 8737 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did, 8738 ELS_CMD_RRQ); 8739 if (!elsiocb) 8740 return 1; 8741 8742 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 8743 8744 /* For RRQ request, remainder of payload is Exchange IDs */ 8745 *((uint32_t *) (pcmd)) = ELS_CMD_RRQ; 8746 pcmd += sizeof(uint32_t); 8747 els_rrq = (struct RRQ *) pcmd; 8748 8749 bf_set(rrq_oxid, els_rrq, phba->sli4_hba.xri_ids[rrq->xritag]); 8750 bf_set(rrq_rxid, els_rrq, rrq->rxid); 8751 bf_set(rrq_did, els_rrq, vport->fc_myDID); 8752 els_rrq->rrq = cpu_to_be32(els_rrq->rrq); 8753 els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg); 8754 8755 8756 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 8757 "Issue RRQ: did:x%x", 8758 did, rrq->xritag, rrq->rxid); 8759 elsiocb->context_un.rrq = rrq; 8760 elsiocb->cmd_cmpl = lpfc_cmpl_els_rrq; 8761 8762 lpfc_nlp_get(ndlp); 8763 elsiocb->context1 = ndlp; 8764 8765 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 8766 if (ret == IOCB_ERROR) 8767 goto io_err; 8768 return 0; 8769 8770 io_err: 8771 lpfc_els_free_iocb(phba, elsiocb); 8772 lpfc_nlp_put(ndlp); 8773 return 1; 8774 } 8775 8776 /** 8777 * lpfc_send_rrq - Sends ELS RRQ if needed. 8778 * @phba: pointer to lpfc hba data structure. 8779 * @rrq: pointer to the active rrq. 8780 * 8781 * This routine will call the lpfc_issue_els_rrq if the rrq is 8782 * still active for the xri. If this function returns a failure then 8783 * the caller needs to clean up the RRQ by calling lpfc_clr_active_rrq. 8784 * 8785 * Returns 0 Success. 8786 * 1 Failure. 8787 **/ 8788 int 8789 lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq) 8790 { 8791 struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport, 8792 rrq->nlp_DID); 8793 if (!ndlp) 8794 return 1; 8795 8796 if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag)) 8797 return lpfc_issue_els_rrq(rrq->vport, ndlp, 8798 rrq->nlp_DID, rrq); 8799 else 8800 return 1; 8801 } 8802 8803 /** 8804 * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command 8805 * @vport: pointer to a host virtual N_Port data structure. 8806 * @cmdsize: size of the ELS command. 8807 * @oldiocb: pointer to the original lpfc command iocb data structure. 8808 * @ndlp: pointer to a node-list data structure. 8809 * 8810 * This routine issuees an Accept (ACC) Read Port List (RPL) ELS command. 8811 * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL. 8812 * 8813 * Note that the ndlp reference count will be incremented by 1 for holding the 8814 * ndlp and the reference to ndlp will be stored into the context1 field of 8815 * the IOCB for the completion callback function to the RPL Accept Response 8816 * ELS command. 
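 *
 * The ACC payload built below is, in order: the ELS_CMD_ACC command word
 * (with the 16-bit payload length written at offset 2 of that word),
 * followed by an RPL_RSP describing a single port entry: list length 1,
 * index 0, port number 0, and this vport's D_ID and WWPN.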
8817 * 8818 * Return code 8819 * 0 - Successfully issued ACC RPL ELS command 8820 * 1 - Failed to issue ACC RPL ELS command 8821 **/ 8822 static int 8823 lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize, 8824 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 8825 { 8826 int rc = 0; 8827 struct lpfc_hba *phba = vport->phba; 8828 IOCB_t *icmd; 8829 union lpfc_wqe128 *wqe; 8830 RPL_RSP rpl_rsp; 8831 struct lpfc_iocbq *elsiocb; 8832 uint8_t *pcmd; 8833 u32 ulp_context; 8834 8835 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 8836 ndlp->nlp_DID, ELS_CMD_ACC); 8837 8838 if (!elsiocb) 8839 return 1; 8840 8841 ulp_context = get_job_ulpcontext(phba, elsiocb); 8842 if (phba->sli_rev == LPFC_SLI_REV4) { 8843 wqe = &elsiocb->wqe; 8844 /* Xri / rx_id */ 8845 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 8846 get_job_ulpcontext(phba, oldiocb)); 8847 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 8848 get_job_rcvoxid(phba, oldiocb)); 8849 } else { 8850 icmd = &elsiocb->iocb; 8851 icmd->ulpContext = get_job_ulpcontext(phba, oldiocb); 8852 icmd->unsli3.rcvsli3.ox_id = get_job_rcvoxid(phba, oldiocb); 8853 } 8854 8855 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 8856 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 8857 pcmd += sizeof(uint16_t); 8858 *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize); 8859 pcmd += sizeof(uint16_t); 8860 8861 /* Setup the RPL ACC payload */ 8862 rpl_rsp.listLen = be32_to_cpu(1); 8863 rpl_rsp.index = 0; 8864 rpl_rsp.port_num_blk.portNum = 0; 8865 rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID); 8866 memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname, 8867 sizeof(struct lpfc_name)); 8868 memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t)); 8869 /* Xmit ELS RPL ACC response tag <ulpIoTag> */ 8870 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8871 "0120 Xmit ELS RPL ACC response tag x%x " 8872 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " 8873 "rpi x%x\n", 8874 elsiocb->iotag, ulp_context, 8875 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 8876 ndlp->nlp_rpi); 8877 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 8878 phba->fc_stat.elsXmitACC++; 8879 elsiocb->context1 = lpfc_nlp_get(ndlp); 8880 if (!elsiocb->context1) { 8881 lpfc_els_free_iocb(phba, elsiocb); 8882 return 1; 8883 } 8884 8885 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 8886 if (rc == IOCB_ERROR) { 8887 lpfc_els_free_iocb(phba, elsiocb); 8888 lpfc_nlp_put(ndlp); 8889 return 1; 8890 } 8891 8892 return 0; 8893 } 8894 8895 /** 8896 * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb 8897 * @vport: pointer to a host virtual N_Port data structure. 8898 * @cmdiocb: pointer to lpfc command iocb data structure. 8899 * @ndlp: pointer to a node-list data structure. 8900 * 8901 * This routine processes Read Port List (RPL) IOCB received as an ELS 8902 * unsolicited event. It first checks the remote port state. If the remote 8903 * port is not in NLP_STE_UNMAPPED_NODE and NLP_STE_MAPPED_NODE states, it 8904 * invokes the lpfc_els_rsp_reject() routine to send reject response. 8905 * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine 8906 * to accept the RPL. 
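 *
 * The size of the accept is derived from the request: when the requester
 * starts at index 0 and either advertises no size limit (maxsize 0) or a
 * limit large enough for a full RPL_RSP, the complete RPL_RSP is returned;
 * otherwise the response is limited to maxsize words.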
8907 * 8908 * Return code 8909 * 0 - Successfully processed rpl iocb (currently always return 0) 8910 **/ 8911 static int 8912 lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8913 struct lpfc_nodelist *ndlp) 8914 { 8915 struct lpfc_dmabuf *pcmd; 8916 uint32_t *lp; 8917 uint32_t maxsize; 8918 uint16_t cmdsize; 8919 RPL *rpl; 8920 struct ls_rjt stat; 8921 8922 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 8923 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { 8924 /* issue rejection response */ 8925 stat.un.b.lsRjtRsvd0 = 0; 8926 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8927 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8928 stat.un.b.vendorUnique = 0; 8929 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 8930 NULL); 8931 /* rejected the unsolicited RPL request and done with it */ 8932 return 0; 8933 } 8934 8935 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 8936 lp = (uint32_t *) pcmd->virt; 8937 rpl = (RPL *) (lp + 1); 8938 maxsize = be32_to_cpu(rpl->maxsize); 8939 8940 /* We support only one port */ 8941 if ((rpl->index == 0) && 8942 ((maxsize == 0) || 8943 ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) { 8944 cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP); 8945 } else { 8946 cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t); 8947 } 8948 lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp); 8949 8950 return 0; 8951 } 8952 8953 /** 8954 * lpfc_els_rcv_farp - Process an unsolicited farp request els command 8955 * @vport: pointer to a virtual N_Port data structure. 8956 * @cmdiocb: pointer to lpfc command iocb data structure. 8957 * @ndlp: pointer to a node-list data structure. 8958 * 8959 * This routine processes Fibre Channel Address Resolution Protocol 8960 * (FARP) Request IOCB received as an ELS unsolicited event. Currently, 8961 * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such, 8962 * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the 8963 * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the 8964 * remote PortName is compared against the FC PortName stored in the @vport 8965 * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is 8966 * compared against the FC NodeName stored in the @vport data structure. 8967 * If any of these matches and the FARP_REQUEST_FARPR flag is set in the 8968 * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is 8969 * invoked to send out FARP Response to the remote node. Before sending the 8970 * FARP Response, however, the FARP_REQUEST_PLOGI flag is check in the FARP 8971 * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi() 8972 * routine is invoked to log into the remote port first. 
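 *
 * A FARP request whose Match Flags select anything other than WWPN or
 * WWNN matching is silently ignored; no reject is sent and the routine
 * simply returns.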
8973 * 8974 * Return code 8975 * 0 - Either the FARP Match Mode not supported or successfully processed 8976 **/ 8977 static int 8978 lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8979 struct lpfc_nodelist *ndlp) 8980 { 8981 struct lpfc_dmabuf *pcmd; 8982 uint32_t *lp; 8983 FARP *fp; 8984 uint32_t cnt, did; 8985 8986 did = get_job_els_rsp64_did(vport->phba, cmdiocb); 8987 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 8988 lp = (uint32_t *) pcmd->virt; 8989 8990 lp++; 8991 fp = (FARP *) lp; 8992 /* FARP-REQ received from DID <did> */ 8993 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8994 "0601 FARP-REQ received from DID x%x\n", did); 8995 /* We will only support match on WWPN or WWNN */ 8996 if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) { 8997 return 0; 8998 } 8999 9000 cnt = 0; 9001 /* If this FARP command is searching for my portname */ 9002 if (fp->Mflags & FARP_MATCH_PORT) { 9003 if (memcmp(&fp->RportName, &vport->fc_portname, 9004 sizeof(struct lpfc_name)) == 0) 9005 cnt = 1; 9006 } 9007 9008 /* If this FARP command is searching for my nodename */ 9009 if (fp->Mflags & FARP_MATCH_NODE) { 9010 if (memcmp(&fp->RnodeName, &vport->fc_nodename, 9011 sizeof(struct lpfc_name)) == 0) 9012 cnt = 1; 9013 } 9014 9015 if (cnt) { 9016 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || 9017 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) { 9018 /* Log back into the node before sending the FARP. */ 9019 if (fp->Rflags & FARP_REQUEST_PLOGI) { 9020 ndlp->nlp_prev_state = ndlp->nlp_state; 9021 lpfc_nlp_set_state(vport, ndlp, 9022 NLP_STE_PLOGI_ISSUE); 9023 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 9024 } 9025 9026 /* Send a FARP response to that node */ 9027 if (fp->Rflags & FARP_REQUEST_FARPR) 9028 lpfc_issue_els_farpr(vport, did, 0); 9029 } 9030 } 9031 return 0; 9032 } 9033 9034 /** 9035 * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb 9036 * @vport: pointer to a host virtual N_Port data structure. 9037 * @cmdiocb: pointer to lpfc command iocb data structure. 9038 * @ndlp: pointer to a node-list data structure. 9039 * 9040 * This routine processes Fibre Channel Address Resolution Protocol 9041 * Response (FARPR) IOCB received as an ELS unsolicited event. It simply 9042 * invokes the lpfc_els_rsp_acc() routine to the remote node to accept 9043 * the FARP response request. 9044 * 9045 * Return code 9046 * 0 - Successfully processed FARPR IOCB (currently always return 0) 9047 **/ 9048 static int 9049 lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 9050 struct lpfc_nodelist *ndlp) 9051 { 9052 struct lpfc_dmabuf *pcmd; 9053 uint32_t *lp; 9054 uint32_t did; 9055 9056 did = get_job_els_rsp64_did(vport->phba, cmdiocb); 9057 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 9058 lp = (uint32_t *) pcmd->virt; 9059 9060 lp++; 9061 /* FARP-RSP received from DID <did> */ 9062 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 9063 "0600 FARP-RSP received from DID x%x\n", did); 9064 /* ACCEPT the Farp resp request */ 9065 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 9066 9067 return 0; 9068 } 9069 9070 /** 9071 * lpfc_els_rcv_fan - Process an unsolicited fan iocb command 9072 * @vport: pointer to a host virtual N_Port data structure. 9073 * @cmdiocb: pointer to lpfc command iocb data structure. 9074 * @fan_ndlp: pointer to a node-list data structure. 9075 * 9076 * This routine processes a Fabric Address Notification (FAN) IOCB 9077 * command received as an ELS unsolicited event. 
The FAN ELS command will 9078 * only be processed on a physical port (i.e., the @vport represents the 9079 * physical port). The fabric NodeName and PortName from the FAN IOCB are 9080 * compared against those in the phba data structure. If any of those is 9081 * different, the lpfc_initial_flogi() routine is invoked to initialize 9082 * Fabric Login (FLOGI) to the fabric to start the discover over. Otherwise, 9083 * if both of those are identical, the lpfc_issue_fabric_reglogin() routine 9084 * is invoked to register login to the fabric. 9085 * 9086 * Return code 9087 * 0 - Successfully processed fan iocb (currently always return 0). 9088 **/ 9089 static int 9090 lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 9091 struct lpfc_nodelist *fan_ndlp) 9092 { 9093 struct lpfc_hba *phba = vport->phba; 9094 uint32_t *lp; 9095 FAN *fp; 9096 9097 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n"); 9098 lp = (uint32_t *)((struct lpfc_dmabuf *)cmdiocb->context2)->virt; 9099 fp = (FAN *) ++lp; 9100 /* FAN received; Fan does not have a reply sequence */ 9101 if ((vport == phba->pport) && 9102 (vport->port_state == LPFC_LOCAL_CFG_LINK)) { 9103 if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName, 9104 sizeof(struct lpfc_name))) || 9105 (memcmp(&phba->fc_fabparam.portName, &fp->FportName, 9106 sizeof(struct lpfc_name)))) { 9107 /* This port has switched fabrics. FLOGI is required */ 9108 lpfc_issue_init_vfi(vport); 9109 } else { 9110 /* FAN verified - skip FLOGI */ 9111 vport->fc_myDID = vport->fc_prevDID; 9112 if (phba->sli_rev < LPFC_SLI_REV4) 9113 lpfc_issue_fabric_reglogin(vport); 9114 else { 9115 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 9116 "3138 Need register VFI: (x%x/%x)\n", 9117 vport->fc_prevDID, vport->fc_myDID); 9118 lpfc_issue_reg_vfi(vport); 9119 } 9120 } 9121 } 9122 return 0; 9123 } 9124 9125 /** 9126 * lpfc_els_rcv_edc - Process an unsolicited EDC iocb 9127 * @vport: pointer to a host virtual N_Port data structure. 9128 * @cmdiocb: pointer to lpfc command iocb data structure. 9129 * @ndlp: pointer to a node-list data structure. 
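 *
 * This routine walks the diagnostic descriptor TLVs carried in an
 * unsolicited EDC request. Congestion signaling support defaults to
 * "not supported" and is only enabled when a well-formed congestion
 * signaling capability descriptor is found, in which case the least
 * capable settings between host and fabric are adopted. An EDC ACC is
 * then returned via lpfc_issue_els_edc_rsp() and the resulting signal
 * configuration is applied with lpfc_config_cgn_signal().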
9130 * 9131 * Return code 9132 * 0 - Successfully processed echo iocb (currently always return 0) 9133 **/ 9134 static int 9135 lpfc_els_rcv_edc(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 9136 struct lpfc_nodelist *ndlp) 9137 { 9138 struct lpfc_hba *phba = vport->phba; 9139 struct fc_els_edc *edc_req; 9140 struct fc_tlv_desc *tlv; 9141 uint8_t *payload; 9142 uint32_t *ptr, dtag; 9143 const char *dtag_nm; 9144 int desc_cnt = 0, bytes_remain; 9145 bool rcv_cap_desc = false; 9146 9147 payload = ((struct lpfc_dmabuf *)cmdiocb->context2)->virt; 9148 9149 edc_req = (struct fc_els_edc *)payload; 9150 bytes_remain = be32_to_cpu(edc_req->desc_len); 9151 9152 ptr = (uint32_t *)payload; 9153 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 9154 "3319 Rcv EDC payload len %d: x%x x%x x%x\n", 9155 bytes_remain, be32_to_cpu(*ptr), 9156 be32_to_cpu(*(ptr + 1)), be32_to_cpu(*(ptr + 2))); 9157 9158 /* No signal support unless there is a congestion descriptor */ 9159 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 9160 phba->cgn_sig_freq = 0; 9161 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; 9162 9163 if (bytes_remain <= 0) 9164 goto out; 9165 9166 tlv = edc_req->desc; 9167 9168 /* 9169 * cycle through EDC diagnostic descriptors to find the 9170 * congestion signaling capability descriptor 9171 */ 9172 while (bytes_remain && !rcv_cap_desc) { 9173 if (bytes_remain < FC_TLV_DESC_HDR_SZ) { 9174 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 9175 "6464 Truncated TLV hdr on " 9176 "Diagnostic descriptor[%d]\n", 9177 desc_cnt); 9178 goto out; 9179 } 9180 9181 dtag = be32_to_cpu(tlv->desc_tag); 9182 switch (dtag) { 9183 case ELS_DTAG_LNK_FAULT_CAP: 9184 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 9185 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 9186 sizeof(struct fc_diag_lnkflt_desc)) { 9187 lpfc_printf_log( 9188 phba, KERN_WARNING, LOG_CGN_MGMT, 9189 "6465 Truncated Link Fault Diagnostic " 9190 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 9191 desc_cnt, bytes_remain, 9192 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 9193 sizeof(struct fc_diag_cg_sig_desc)); 9194 goto out; 9195 } 9196 /* No action for Link Fault descriptor for now */ 9197 break; 9198 case ELS_DTAG_CG_SIGNAL_CAP: 9199 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 9200 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 9201 sizeof(struct fc_diag_cg_sig_desc)) { 9202 lpfc_printf_log( 9203 phba, KERN_WARNING, LOG_CGN_MGMT, 9204 "6466 Truncated cgn signal Diagnostic " 9205 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 9206 desc_cnt, bytes_remain, 9207 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 9208 sizeof(struct fc_diag_cg_sig_desc)); 9209 goto out; 9210 } 9211 9212 phba->cgn_reg_fpin = phba->cgn_init_reg_fpin; 9213 phba->cgn_reg_signal = phba->cgn_init_reg_signal; 9214 9215 /* We start negotiation with lpfc_fabric_cgn_frequency. 9216 * When we process the EDC, we will settle on the 9217 * higher frequency. 
9218 */ 9219 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency; 9220 9221 lpfc_least_capable_settings( 9222 phba, (struct fc_diag_cg_sig_desc *)tlv); 9223 rcv_cap_desc = true; 9224 break; 9225 default: 9226 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 9227 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 9228 "6467 unknown Diagnostic " 9229 "Descriptor[%d]: tag x%x (%s)\n", 9230 desc_cnt, dtag, dtag_nm); 9231 } 9232 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); 9233 tlv = fc_tlv_next_desc(tlv); 9234 desc_cnt++; 9235 } 9236 out: 9237 /* Need to send back an ACC */ 9238 lpfc_issue_els_edc_rsp(vport, cmdiocb, ndlp); 9239 9240 lpfc_config_cgn_signal(phba); 9241 return 0; 9242 } 9243 9244 /** 9245 * lpfc_els_timeout - Handler funciton to the els timer 9246 * @t: timer context used to obtain the vport. 9247 * 9248 * This routine is invoked by the ELS timer after timeout. It posts the ELS 9249 * timer timeout event by setting the WORKER_ELS_TMO bit to the work port 9250 * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake 9251 * up the worker thread. It is for the worker thread to invoke the routine 9252 * lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO. 9253 **/ 9254 void 9255 lpfc_els_timeout(struct timer_list *t) 9256 { 9257 struct lpfc_vport *vport = from_timer(vport, t, els_tmofunc); 9258 struct lpfc_hba *phba = vport->phba; 9259 uint32_t tmo_posted; 9260 unsigned long iflag; 9261 9262 spin_lock_irqsave(&vport->work_port_lock, iflag); 9263 tmo_posted = vport->work_port_events & WORKER_ELS_TMO; 9264 if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING))) 9265 vport->work_port_events |= WORKER_ELS_TMO; 9266 spin_unlock_irqrestore(&vport->work_port_lock, iflag); 9267 9268 if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING))) 9269 lpfc_worker_wake_up(phba); 9270 return; 9271 } 9272 9273 9274 /** 9275 * lpfc_els_timeout_handler - Process an els timeout event 9276 * @vport: pointer to a virtual N_Port data structure. 9277 * 9278 * This routine is the actual handler function that processes an ELS timeout 9279 * event. It walks the ELS ring to get and abort all the IOCBs (except the 9280 * ABORT/CLOSE/FARP/FARPR/FDISC), which are associated with the @vport by 9281 * invoking the lpfc_sli_issue_abort_iotag() routine. 
9282 **/ 9283 void 9284 lpfc_els_timeout_handler(struct lpfc_vport *vport) 9285 { 9286 struct lpfc_hba *phba = vport->phba; 9287 struct lpfc_sli_ring *pring; 9288 struct lpfc_iocbq *tmp_iocb, *piocb; 9289 IOCB_t *cmd = NULL; 9290 struct lpfc_dmabuf *pcmd; 9291 uint32_t els_command = 0; 9292 uint32_t timeout; 9293 uint32_t remote_ID = 0xffffffff; 9294 LIST_HEAD(abort_list); 9295 u32 ulp_command = 0, ulp_context = 0, did = 0, iotag = 0; 9296 9297 9298 timeout = (uint32_t)(phba->fc_ratov << 1); 9299 9300 pring = lpfc_phba_elsring(phba); 9301 if (unlikely(!pring)) 9302 return; 9303 9304 if (phba->pport->load_flag & FC_UNLOADING) 9305 return; 9306 9307 spin_lock_irq(&phba->hbalock); 9308 if (phba->sli_rev == LPFC_SLI_REV4) 9309 spin_lock(&pring->ring_lock); 9310 9311 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 9312 ulp_command = get_job_cmnd(phba, piocb); 9313 ulp_context = get_job_ulpcontext(phba, piocb); 9314 did = get_job_els_rsp64_did(phba, piocb); 9315 9316 if (phba->sli_rev == LPFC_SLI_REV4) { 9317 iotag = get_wqe_reqtag(piocb); 9318 } else { 9319 cmd = &piocb->iocb; 9320 iotag = cmd->ulpIoTag; 9321 } 9322 9323 if ((piocb->cmd_flag & LPFC_IO_LIBDFC) != 0 || 9324 ulp_command == CMD_ABORT_XRI_CX || 9325 ulp_command == CMD_ABORT_XRI_CN || 9326 ulp_command == CMD_CLOSE_XRI_CN) 9327 continue; 9328 9329 if (piocb->vport != vport) 9330 continue; 9331 9332 pcmd = (struct lpfc_dmabuf *) piocb->context2; 9333 if (pcmd) 9334 els_command = *(uint32_t *) (pcmd->virt); 9335 9336 if (els_command == ELS_CMD_FARP || 9337 els_command == ELS_CMD_FARPR || 9338 els_command == ELS_CMD_FDISC) 9339 continue; 9340 9341 if (piocb->drvrTimeout > 0) { 9342 if (piocb->drvrTimeout >= timeout) 9343 piocb->drvrTimeout -= timeout; 9344 else 9345 piocb->drvrTimeout = 0; 9346 continue; 9347 } 9348 9349 remote_ID = 0xffffffff; 9350 if (ulp_command != CMD_GEN_REQUEST64_CR) { 9351 remote_ID = did; 9352 } else { 9353 struct lpfc_nodelist *ndlp; 9354 ndlp = __lpfc_findnode_rpi(vport, ulp_context); 9355 if (ndlp) 9356 remote_ID = ndlp->nlp_DID; 9357 } 9358 list_add_tail(&piocb->dlist, &abort_list); 9359 } 9360 if (phba->sli_rev == LPFC_SLI_REV4) 9361 spin_unlock(&pring->ring_lock); 9362 spin_unlock_irq(&phba->hbalock); 9363 9364 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { 9365 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9366 "0127 ELS timeout Data: x%x x%x x%x " 9367 "x%x\n", els_command, 9368 remote_ID, ulp_command, iotag); 9369 9370 spin_lock_irq(&phba->hbalock); 9371 list_del_init(&piocb->dlist); 9372 lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL); 9373 spin_unlock_irq(&phba->hbalock); 9374 } 9375 9376 /* Make sure HBA is alive */ 9377 lpfc_issue_hb_tmo(phba); 9378 9379 if (!list_empty(&pring->txcmplq)) 9380 if (!(phba->pport->load_flag & FC_UNLOADING)) 9381 mod_timer(&vport->els_tmofunc, 9382 jiffies + msecs_to_jiffies(1000 * timeout)); 9383 } 9384 9385 /** 9386 * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport 9387 * @vport: pointer to a host virtual N_Port data structure. 9388 * 9389 * This routine is used to clean up all the outstanding ELS commands on a 9390 * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport() 9391 * routine. After that, it walks the ELS transmit queue to remove all the 9392 * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. 
For 9393 * the IOCBs with a non-NULL completion callback function, the callback 9394 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and 9395 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion 9396 * callback function, the IOCB will simply be released. Finally, it walks 9397 * the ELS transmit completion queue to issue an abort IOCB to any transmit 9398 * completion queue IOCB that is associated with the @vport and is not 9399 * an IOCB from libdfc (i.e., the management plane IOCBs that are not 9400 * part of the discovery state machine) out to HBA by invoking the 9401 * lpfc_sli_issue_abort_iotag() routine. Note that this function issues the 9402 * abort IOCB to any transmit completion queueed IOCB, it does not guarantee 9403 * the IOCBs are aborted when this function returns. 9404 **/ 9405 void 9406 lpfc_els_flush_cmd(struct lpfc_vport *vport) 9407 { 9408 LIST_HEAD(abort_list); 9409 struct lpfc_hba *phba = vport->phba; 9410 struct lpfc_sli_ring *pring; 9411 struct lpfc_iocbq *tmp_iocb, *piocb; 9412 u32 ulp_command; 9413 unsigned long iflags = 0; 9414 9415 lpfc_fabric_abort_vport(vport); 9416 9417 /* 9418 * For SLI3, only the hbalock is required. But SLI4 needs to coordinate 9419 * with the ring insert operation. Because lpfc_sli_issue_abort_iotag 9420 * ultimately grabs the ring_lock, the driver must splice the list into 9421 * a working list and release the locks before calling the abort. 9422 */ 9423 spin_lock_irqsave(&phba->hbalock, iflags); 9424 pring = lpfc_phba_elsring(phba); 9425 9426 /* Bail out if we've no ELS wq, like in PCI error recovery case. */ 9427 if (unlikely(!pring)) { 9428 spin_unlock_irqrestore(&phba->hbalock, iflags); 9429 return; 9430 } 9431 9432 if (phba->sli_rev == LPFC_SLI_REV4) 9433 spin_lock(&pring->ring_lock); 9434 9435 /* First we need to issue aborts to outstanding cmds on txcmpl */ 9436 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 9437 if (piocb->cmd_flag & LPFC_IO_LIBDFC) 9438 continue; 9439 9440 if (piocb->vport != vport) 9441 continue; 9442 9443 if (piocb->cmd_flag & LPFC_DRIVER_ABORTED) 9444 continue; 9445 9446 /* On the ELS ring we can have ELS_REQUESTs or 9447 * GEN_REQUESTs waiting for a response. 9448 */ 9449 ulp_command = get_job_cmnd(phba, piocb); 9450 if (ulp_command == CMD_ELS_REQUEST64_CR) { 9451 list_add_tail(&piocb->dlist, &abort_list); 9452 9453 /* If the link is down when flushing ELS commands 9454 * the firmware will not complete them till after 9455 * the link comes back up. This may confuse 9456 * discovery for the new link up, so we need to 9457 * change the compl routine to just clean up the iocb 9458 * and avoid any retry logic. 9459 */ 9460 if (phba->link_state == LPFC_LINK_DOWN) 9461 piocb->cmd_cmpl = lpfc_cmpl_els_link_down; 9462 } 9463 if (ulp_command == CMD_GEN_REQUEST64_CR) 9464 list_add_tail(&piocb->dlist, &abort_list); 9465 } 9466 9467 if (phba->sli_rev == LPFC_SLI_REV4) 9468 spin_unlock(&pring->ring_lock); 9469 spin_unlock_irqrestore(&phba->hbalock, iflags); 9470 9471 /* Abort each txcmpl iocb on aborted list and remove the dlist links. 
*/ 9472 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { 9473 spin_lock_irqsave(&phba->hbalock, iflags); 9474 list_del_init(&piocb->dlist); 9475 lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL); 9476 spin_unlock_irqrestore(&phba->hbalock, iflags); 9477 } 9478 /* Make sure HBA is alive */ 9479 lpfc_issue_hb_tmo(phba); 9480 9481 if (!list_empty(&abort_list)) 9482 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9483 "3387 abort list for txq not empty\n"); 9484 INIT_LIST_HEAD(&abort_list); 9485 9486 spin_lock_irqsave(&phba->hbalock, iflags); 9487 if (phba->sli_rev == LPFC_SLI_REV4) 9488 spin_lock(&pring->ring_lock); 9489 9490 /* No need to abort the txq list, 9491 * just queue them up for lpfc_sli_cancel_iocbs 9492 */ 9493 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) { 9494 ulp_command = get_job_cmnd(phba, piocb); 9495 9496 if (piocb->cmd_flag & LPFC_IO_LIBDFC) 9497 continue; 9498 9499 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */ 9500 if (ulp_command == CMD_QUE_RING_BUF_CN || 9501 ulp_command == CMD_QUE_RING_BUF64_CN || 9502 ulp_command == CMD_CLOSE_XRI_CN || 9503 ulp_command == CMD_ABORT_XRI_CN || 9504 ulp_command == CMD_ABORT_XRI_CX) 9505 continue; 9506 9507 if (piocb->vport != vport) 9508 continue; 9509 9510 list_del_init(&piocb->list); 9511 list_add_tail(&piocb->list, &abort_list); 9512 } 9513 9514 /* The same holds true for any FLOGI/FDISC on the fabric_iocb_list */ 9515 if (vport == phba->pport) { 9516 list_for_each_entry_safe(piocb, tmp_iocb, 9517 &phba->fabric_iocb_list, list) { 9518 list_del_init(&piocb->list); 9519 list_add_tail(&piocb->list, &abort_list); 9520 } 9521 } 9522 9523 if (phba->sli_rev == LPFC_SLI_REV4) 9524 spin_unlock(&pring->ring_lock); 9525 spin_unlock_irqrestore(&phba->hbalock, iflags); 9526 9527 /* Cancel all the IOCBs from the completions list */ 9528 lpfc_sli_cancel_iocbs(phba, &abort_list, 9529 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); 9530 9531 return; 9532 } 9533 9534 /** 9535 * lpfc_els_flush_all_cmd - Clean up all the outstanding els commands to a HBA 9536 * @phba: pointer to lpfc hba data structure. 9537 * 9538 * This routine is used to clean up all the outstanding ELS commands on a 9539 * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba() 9540 * routine. After that, it walks the ELS transmit queue to remove all the 9541 * IOCBs to the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For 9542 * the IOCBs with the completion callback function associated, the callback 9543 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and 9544 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without the completion 9545 * callback function associated, the IOCB will simply be released. Finally, 9546 * it walks the ELS transmit completion queue to issue an abort IOCB to any 9547 * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the 9548 * management plane IOCBs that are not part of the discovery state machine) 9549 * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine. 9550 **/ 9551 void 9552 lpfc_els_flush_all_cmd(struct lpfc_hba *phba) 9553 { 9554 struct lpfc_vport *vport; 9555 9556 spin_lock_irq(&phba->port_list_lock); 9557 list_for_each_entry(vport, &phba->port_list, listentry) 9558 lpfc_els_flush_cmd(vport); 9559 spin_unlock_irq(&phba->port_list_lock); 9560 9561 return; 9562 } 9563 9564 /** 9565 * lpfc_send_els_failure_event - Posts an ELS command failure event 9566 * @phba: Pointer to hba context object. 
9567 * @cmdiocbp: Pointer to command iocb which reported error. 9568 * @rspiocbp: Pointer to response iocb which reported error. 9569 * 9570 * This function sends an event when there is an ELS command 9571 * failure. 9572 **/ 9573 void 9574 lpfc_send_els_failure_event(struct lpfc_hba *phba, 9575 struct lpfc_iocbq *cmdiocbp, 9576 struct lpfc_iocbq *rspiocbp) 9577 { 9578 struct lpfc_vport *vport = cmdiocbp->vport; 9579 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 9580 struct lpfc_lsrjt_event lsrjt_event; 9581 struct lpfc_fabric_event_header fabric_event; 9582 struct ls_rjt stat; 9583 struct lpfc_nodelist *ndlp; 9584 uint32_t *pcmd; 9585 u32 ulp_status, ulp_word4; 9586 9587 ndlp = cmdiocbp->context1; 9588 if (!ndlp) 9589 return; 9590 9591 ulp_status = get_job_ulpstatus(phba, rspiocbp); 9592 ulp_word4 = get_job_word4(phba, rspiocbp); 9593 9594 if (ulp_status == IOSTAT_LS_RJT) { 9595 lsrjt_event.header.event_type = FC_REG_ELS_EVENT; 9596 lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV; 9597 memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname, 9598 sizeof(struct lpfc_name)); 9599 memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename, 9600 sizeof(struct lpfc_name)); 9601 pcmd = (uint32_t *) (((struct lpfc_dmabuf *) 9602 cmdiocbp->context2)->virt); 9603 lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0; 9604 stat.un.ls_rjt_error_be = cpu_to_be32(ulp_word4); 9605 lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode; 9606 lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp; 9607 fc_host_post_vendor_event(shost, 9608 fc_get_event_number(), 9609 sizeof(lsrjt_event), 9610 (char *)&lsrjt_event, 9611 LPFC_NL_VENDOR_ID); 9612 return; 9613 } 9614 if (ulp_status == IOSTAT_NPORT_BSY || 9615 ulp_status == IOSTAT_FABRIC_BSY) { 9616 fabric_event.event_type = FC_REG_FABRIC_EVENT; 9617 if (ulp_status == IOSTAT_NPORT_BSY) 9618 fabric_event.subcategory = LPFC_EVENT_PORT_BUSY; 9619 else 9620 fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY; 9621 memcpy(fabric_event.wwpn, &ndlp->nlp_portname, 9622 sizeof(struct lpfc_name)); 9623 memcpy(fabric_event.wwnn, &ndlp->nlp_nodename, 9624 sizeof(struct lpfc_name)); 9625 fc_host_post_vendor_event(shost, 9626 fc_get_event_number(), 9627 sizeof(fabric_event), 9628 (char *)&fabric_event, 9629 LPFC_NL_VENDOR_ID); 9630 return; 9631 } 9632 9633 } 9634 9635 /** 9636 * lpfc_send_els_event - Posts unsolicited els event 9637 * @vport: Pointer to vport object. 9638 * @ndlp: Pointer FC node object. 9639 * @payload: ELS command code type. 9640 * 9641 * This function posts an event when there is an incoming 9642 * unsolicited ELS command. 
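 *
 * Events are posted only for PLOGI, PRLO, ADISC and LOGO; a LOGO event
 * additionally carries the WWPN taken from the LOGO payload. Any other
 * ELS command is ignored here.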
9643 **/ 9644 static void 9645 lpfc_send_els_event(struct lpfc_vport *vport, 9646 struct lpfc_nodelist *ndlp, 9647 uint32_t *payload) 9648 { 9649 struct lpfc_els_event_header *els_data = NULL; 9650 struct lpfc_logo_event *logo_data = NULL; 9651 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 9652 9653 if (*payload == ELS_CMD_LOGO) { 9654 logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL); 9655 if (!logo_data) { 9656 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9657 "0148 Failed to allocate memory " 9658 "for LOGO event\n"); 9659 return; 9660 } 9661 els_data = &logo_data->header; 9662 } else { 9663 els_data = kmalloc(sizeof(struct lpfc_els_event_header), 9664 GFP_KERNEL); 9665 if (!els_data) { 9666 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9667 "0149 Failed to allocate memory " 9668 "for ELS event\n"); 9669 return; 9670 } 9671 } 9672 els_data->event_type = FC_REG_ELS_EVENT; 9673 switch (*payload) { 9674 case ELS_CMD_PLOGI: 9675 els_data->subcategory = LPFC_EVENT_PLOGI_RCV; 9676 break; 9677 case ELS_CMD_PRLO: 9678 els_data->subcategory = LPFC_EVENT_PRLO_RCV; 9679 break; 9680 case ELS_CMD_ADISC: 9681 els_data->subcategory = LPFC_EVENT_ADISC_RCV; 9682 break; 9683 case ELS_CMD_LOGO: 9684 els_data->subcategory = LPFC_EVENT_LOGO_RCV; 9685 /* Copy the WWPN in the LOGO payload */ 9686 memcpy(logo_data->logo_wwpn, &payload[2], 9687 sizeof(struct lpfc_name)); 9688 break; 9689 default: 9690 kfree(els_data); 9691 return; 9692 } 9693 memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name)); 9694 memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name)); 9695 if (*payload == ELS_CMD_LOGO) { 9696 fc_host_post_vendor_event(shost, 9697 fc_get_event_number(), 9698 sizeof(struct lpfc_logo_event), 9699 (char *)logo_data, 9700 LPFC_NL_VENDOR_ID); 9701 kfree(logo_data); 9702 } else { 9703 fc_host_post_vendor_event(shost, 9704 fc_get_event_number(), 9705 sizeof(struct lpfc_els_event_header), 9706 (char *)els_data, 9707 LPFC_NL_VENDOR_ID); 9708 kfree(els_data); 9709 } 9710 9711 return; 9712 } 9713 9714 9715 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_li_event_nm, fc_fpin_li_event_types, 9716 FC_FPIN_LI_EVT_TYPES_INIT); 9717 9718 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_deli_event_nm, fc_fpin_deli_event_types, 9719 FC_FPIN_DELI_EVT_TYPES_INIT); 9720 9721 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_event_nm, fc_fpin_congn_event_types, 9722 FC_FPIN_CONGN_EVT_TYPES_INIT); 9723 9724 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_severity_nm, 9725 fc_fpin_congn_severity_types, 9726 FC_FPIN_CONGN_SEVERITY_INIT); 9727 9728 9729 /** 9730 * lpfc_display_fpin_wwpn - Display WWPNs accessible by the attached port 9731 * @phba: Pointer to phba object. 9732 * @wwnlist: Pointer to list of WWPNs in FPIN payload 9733 * @cnt: count of WWPNs in FPIN payload 9734 * 9735 * This routine is called by LI and PC descriptors. 
9736 * Limit the number of WWPNs displayed to 6 log messages, 6 per log message 9737 */ 9738 static void 9739 lpfc_display_fpin_wwpn(struct lpfc_hba *phba, __be64 *wwnlist, u32 cnt) 9740 { 9741 char buf[LPFC_FPIN_WWPN_LINE_SZ]; 9742 __be64 wwn; 9743 u64 wwpn; 9744 int i, len; 9745 int line = 0; 9746 int wcnt = 0; 9747 bool endit = false; 9748 9749 len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ, "Accessible WWPNs:"); 9750 for (i = 0; i < cnt; i++) { 9751 /* Are we on the last WWPN */ 9752 if (i == (cnt - 1)) 9753 endit = true; 9754 9755 /* Extract the next WWPN from the payload */ 9756 wwn = *wwnlist++; 9757 wwpn = be64_to_cpu(wwn); 9758 len += scnprintf(buf + len, LPFC_FPIN_WWPN_LINE_SZ - len, 9759 " %016llx", wwpn); 9760 9761 /* Log a message if we are on the last WWPN 9762 * or if we hit the max allowed per message. 9763 */ 9764 wcnt++; 9765 if (wcnt == LPFC_FPIN_WWPN_LINE_CNT || endit) { 9766 buf[len] = 0; 9767 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9768 "4686 %s\n", buf); 9769 9770 /* Check if we reached the last WWPN */ 9771 if (endit) 9772 return; 9773 9774 /* Limit the number of log message displayed per FPIN */ 9775 line++; 9776 if (line == LPFC_FPIN_WWPN_NUM_LINE) { 9777 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9778 "4687 %d WWPNs Truncated\n", 9779 cnt - i - 1); 9780 return; 9781 } 9782 9783 /* Start over with next log message */ 9784 wcnt = 0; 9785 len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ, 9786 "Additional WWPNs:"); 9787 } 9788 } 9789 } 9790 9791 /** 9792 * lpfc_els_rcv_fpin_li - Process an FPIN Link Integrity Event. 9793 * @phba: Pointer to phba object. 9794 * @tlv: Pointer to the Link Integrity Notification Descriptor. 9795 * 9796 * This function processes a Link Integrity FPIN event by logging a message. 9797 **/ 9798 static void 9799 lpfc_els_rcv_fpin_li(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 9800 { 9801 struct fc_fn_li_desc *li = (struct fc_fn_li_desc *)tlv; 9802 const char *li_evt_str; 9803 u32 li_evt, cnt; 9804 9805 li_evt = be16_to_cpu(li->event_type); 9806 li_evt_str = lpfc_get_fpin_li_event_nm(li_evt); 9807 cnt = be32_to_cpu(li->pname_count); 9808 9809 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9810 "4680 FPIN Link Integrity %s (x%x) " 9811 "Detecting PN x%016llx Attached PN x%016llx " 9812 "Duration %d mSecs Count %d Port Cnt %d\n", 9813 li_evt_str, li_evt, 9814 be64_to_cpu(li->detecting_wwpn), 9815 be64_to_cpu(li->attached_wwpn), 9816 be32_to_cpu(li->event_threshold), 9817 be32_to_cpu(li->event_count), cnt); 9818 9819 lpfc_display_fpin_wwpn(phba, (__be64 *)&li->pname_list, cnt); 9820 } 9821 9822 /** 9823 * lpfc_els_rcv_fpin_del - Process an FPIN Delivery Event. 9824 * @phba: Pointer to hba object. 9825 * @tlv: Pointer to the Delivery Notification Descriptor TLV 9826 * 9827 * This function processes a Delivery FPIN event by logging a message. 
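 *
 * The log line also includes the first six words of the dropped frame
 * header that follow the descriptor (DiscHdr0 through DiscHdr5).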
9828 **/ 9829 static void 9830 lpfc_els_rcv_fpin_del(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 9831 { 9832 struct fc_fn_deli_desc *del = (struct fc_fn_deli_desc *)tlv; 9833 const char *del_rsn_str; 9834 u32 del_rsn; 9835 __be32 *frame; 9836 9837 del_rsn = be16_to_cpu(del->deli_reason_code); 9838 del_rsn_str = lpfc_get_fpin_deli_event_nm(del_rsn); 9839 9840 /* Skip over desc_tag/desc_len header to payload */ 9841 frame = (__be32 *)(del + 1); 9842 9843 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9844 "4681 FPIN Delivery %s (x%x) " 9845 "Detecting PN x%016llx Attached PN x%016llx " 9846 "DiscHdr0 x%08x " 9847 "DiscHdr1 x%08x DiscHdr2 x%08x DiscHdr3 x%08x " 9848 "DiscHdr4 x%08x DiscHdr5 x%08x\n", 9849 del_rsn_str, del_rsn, 9850 be64_to_cpu(del->detecting_wwpn), 9851 be64_to_cpu(del->attached_wwpn), 9852 be32_to_cpu(frame[0]), 9853 be32_to_cpu(frame[1]), 9854 be32_to_cpu(frame[2]), 9855 be32_to_cpu(frame[3]), 9856 be32_to_cpu(frame[4]), 9857 be32_to_cpu(frame[5])); 9858 } 9859 9860 /** 9861 * lpfc_els_rcv_fpin_peer_cgn - Process a FPIN Peer Congestion Event. 9862 * @phba: Pointer to hba object. 9863 * @tlv: Pointer to the Peer Congestion Notification Descriptor TLV 9864 * 9865 * This function processes a Peer Congestion FPIN event by logging a message. 9866 **/ 9867 static void 9868 lpfc_els_rcv_fpin_peer_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 9869 { 9870 struct fc_fn_peer_congn_desc *pc = (struct fc_fn_peer_congn_desc *)tlv; 9871 const char *pc_evt_str; 9872 u32 pc_evt, cnt; 9873 9874 pc_evt = be16_to_cpu(pc->event_type); 9875 pc_evt_str = lpfc_get_fpin_congn_event_nm(pc_evt); 9876 cnt = be32_to_cpu(pc->pname_count); 9877 9878 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_ELS, 9879 "4684 FPIN Peer Congestion %s (x%x) " 9880 "Duration %d mSecs " 9881 "Detecting PN x%016llx Attached PN x%016llx " 9882 "Impacted Port Cnt %d\n", 9883 pc_evt_str, pc_evt, 9884 be32_to_cpu(pc->event_period), 9885 be64_to_cpu(pc->detecting_wwpn), 9886 be64_to_cpu(pc->attached_wwpn), 9887 cnt); 9888 9889 lpfc_display_fpin_wwpn(phba, (__be64 *)&pc->pname_list, cnt); 9890 } 9891 9892 /** 9893 * lpfc_els_rcv_fpin_cgn - Process an FPIN Congestion notification 9894 * @phba: Pointer to hba object. 9895 * @tlv: Pointer to the Congestion Notification Descriptor TLV 9896 * 9897 * This function processes an FPIN Congestion Notifiction. The notification 9898 * could be an Alarm or Warning. This routine feeds that data into driver's 9899 * running congestion algorithm. It also processes the FPIN by 9900 * logging a message. It returns 1 to indicate deliver this message 9901 * to the upper layer or 0 to indicate don't deliver it. 9902 **/ 9903 static int 9904 lpfc_els_rcv_fpin_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 9905 { 9906 struct lpfc_cgn_info *cp; 9907 struct fc_fn_congn_desc *cgn = (struct fc_fn_congn_desc *)tlv; 9908 const char *cgn_evt_str; 9909 u32 cgn_evt; 9910 const char *cgn_sev_str; 9911 u32 cgn_sev; 9912 uint16_t value; 9913 u32 crc; 9914 bool nm_log = false; 9915 int rc = 1; 9916 9917 cgn_evt = be16_to_cpu(cgn->event_type); 9918 cgn_evt_str = lpfc_get_fpin_congn_event_nm(cgn_evt); 9919 cgn_sev = cgn->severity; 9920 cgn_sev_str = lpfc_get_fpin_congn_severity_nm(cgn_sev); 9921 9922 /* The driver only takes action on a Credit Stall or Oversubscription 9923 * event type to engage the IO algorithm. The driver prints an 9924 * unmaskable message only for Lost Credit and Credit Stall. 
9925 * TODO: Still need to have definition of host action on clear, 9926 * lost credit and device specific event types. 9927 */ 9928 switch (cgn_evt) { 9929 case FPIN_CONGN_LOST_CREDIT: 9930 nm_log = true; 9931 break; 9932 case FPIN_CONGN_CREDIT_STALL: 9933 nm_log = true; 9934 fallthrough; 9935 case FPIN_CONGN_OVERSUBSCRIPTION: 9936 if (cgn_evt == FPIN_CONGN_OVERSUBSCRIPTION) 9937 nm_log = false; 9938 switch (cgn_sev) { 9939 case FPIN_CONGN_SEVERITY_ERROR: 9940 /* Take action here for an Alarm event */ 9941 if (phba->cmf_active_mode != LPFC_CFG_OFF) { 9942 if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) { 9943 /* Track of alarm cnt for cgn_info */ 9944 atomic_inc(&phba->cgn_fabric_alarm_cnt); 9945 /* Track of alarm cnt for SYNC_WQE */ 9946 atomic_inc(&phba->cgn_sync_alarm_cnt); 9947 } 9948 goto cleanup; 9949 } 9950 break; 9951 case FPIN_CONGN_SEVERITY_WARNING: 9952 /* Take action here for a Warning event */ 9953 if (phba->cmf_active_mode != LPFC_CFG_OFF) { 9954 if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) { 9955 /* Track of warning cnt for cgn_info */ 9956 atomic_inc(&phba->cgn_fabric_warn_cnt); 9957 /* Track of warning cnt for SYNC_WQE */ 9958 atomic_inc(&phba->cgn_sync_warn_cnt); 9959 } 9960 cleanup: 9961 /* Save frequency in ms */ 9962 phba->cgn_fpin_frequency = 9963 be32_to_cpu(cgn->event_period); 9964 value = phba->cgn_fpin_frequency; 9965 if (phba->cgn_i) { 9966 cp = (struct lpfc_cgn_info *) 9967 phba->cgn_i->virt; 9968 if (phba->cgn_reg_fpin & 9969 LPFC_CGN_FPIN_ALARM) 9970 cp->cgn_alarm_freq = 9971 cpu_to_le16(value); 9972 if (phba->cgn_reg_fpin & 9973 LPFC_CGN_FPIN_WARN) 9974 cp->cgn_warn_freq = 9975 cpu_to_le16(value); 9976 crc = lpfc_cgn_calc_crc32 9977 (cp, 9978 LPFC_CGN_INFO_SZ, 9979 LPFC_CGN_CRC32_SEED); 9980 cp->cgn_info_crc = cpu_to_le32(crc); 9981 } 9982 9983 /* Don't deliver to upper layer since 9984 * driver took action on this tlv. 9985 */ 9986 rc = 0; 9987 } 9988 break; 9989 } 9990 break; 9991 } 9992 9993 /* Change the log level to unmaskable for the following event types. */ 9994 lpfc_printf_log(phba, (nm_log ? KERN_WARNING : KERN_INFO), 9995 LOG_CGN_MGMT | LOG_ELS, 9996 "4683 FPIN CONGESTION %s type %s (x%x) Event " 9997 "Duration %d mSecs\n", 9998 cgn_sev_str, cgn_evt_str, cgn_evt, 9999 be32_to_cpu(cgn->event_period)); 10000 return rc; 10001 } 10002 10003 void 10004 lpfc_els_rcv_fpin(struct lpfc_vport *vport, void *p, u32 fpin_length) 10005 { 10006 struct lpfc_hba *phba = vport->phba; 10007 struct fc_els_fpin *fpin = (struct fc_els_fpin *)p; 10008 struct fc_tlv_desc *tlv, *first_tlv, *current_tlv; 10009 const char *dtag_nm; 10010 int desc_cnt = 0, bytes_remain, cnt; 10011 u32 dtag, deliver = 0; 10012 int len; 10013 10014 /* FPINs handled only if we are in the right discovery state */ 10015 if (vport->port_state < LPFC_DISC_AUTH) 10016 return; 10017 10018 /* make sure there is the full fpin header */ 10019 if (fpin_length < sizeof(struct fc_els_fpin)) 10020 return; 10021 10022 /* Sanity check descriptor length. The desc_len value does not 10023 * include space for the ELS command and the desc_len fields. 
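 * For example, a received frame is only processed further when
 *   fpin_length >= sizeof(struct fc_els_fpin) + be32_to_cpu(fpin->desc_len)
 * holds; anything shorter is logged ("4671 Bad ELS FPIN length") and
 * dropped.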
10024 */ 10025 len = be32_to_cpu(fpin->desc_len); 10026 if (fpin_length < len + sizeof(struct fc_els_fpin)) { 10027 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 10028 "4671 Bad ELS FPIN length %d: %d\n", 10029 len, fpin_length); 10030 return; 10031 } 10032 10033 tlv = (struct fc_tlv_desc *)&fpin->fpin_desc[0]; 10034 first_tlv = tlv; 10035 bytes_remain = fpin_length - offsetof(struct fc_els_fpin, fpin_desc); 10036 bytes_remain = min_t(u32, bytes_remain, be32_to_cpu(fpin->desc_len)); 10037 10038 /* process each descriptor separately */ 10039 while (bytes_remain >= FC_TLV_DESC_HDR_SZ && 10040 bytes_remain >= FC_TLV_DESC_SZ_FROM_LENGTH(tlv)) { 10041 dtag = be32_to_cpu(tlv->desc_tag); 10042 switch (dtag) { 10043 case ELS_DTAG_LNK_INTEGRITY: 10044 lpfc_els_rcv_fpin_li(phba, tlv); 10045 deliver = 1; 10046 break; 10047 case ELS_DTAG_DELIVERY: 10048 lpfc_els_rcv_fpin_del(phba, tlv); 10049 deliver = 1; 10050 break; 10051 case ELS_DTAG_PEER_CONGEST: 10052 lpfc_els_rcv_fpin_peer_cgn(phba, tlv); 10053 deliver = 1; 10054 break; 10055 case ELS_DTAG_CONGESTION: 10056 deliver = lpfc_els_rcv_fpin_cgn(phba, tlv); 10057 break; 10058 default: 10059 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 10060 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 10061 "4678 unknown FPIN descriptor[%d]: " 10062 "tag x%x (%s)\n", 10063 desc_cnt, dtag, dtag_nm); 10064 10065 /* If descriptor is bad, drop the rest of the data */ 10066 return; 10067 } 10068 lpfc_cgn_update_stat(phba, dtag); 10069 cnt = be32_to_cpu(tlv->desc_len); 10070 10071 /* Sanity check descriptor length. The desc_len value does not 10072 * include space for the desc_tag and the desc_len fields. 10073 */ 10074 len -= (cnt + sizeof(struct fc_tlv_desc)); 10075 if (len < 0) { 10076 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 10077 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 10078 "4672 Bad FPIN descriptor TLV length " 10079 "%d: %d %d %s\n", 10080 cnt, len, fpin_length, dtag_nm); 10081 return; 10082 } 10083 10084 current_tlv = tlv; 10085 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); 10086 tlv = fc_tlv_next_desc(tlv); 10087 10088 /* Format payload such that the FPIN delivered to the 10089 * upper layer is a single descriptor FPIN. 10090 */ 10091 if (desc_cnt) 10092 memcpy(first_tlv, current_tlv, 10093 (cnt + sizeof(struct fc_els_fpin))); 10094 10095 /* Adjust the length so that it only reflects a 10096 * single descriptor FPIN. 10097 */ 10098 fpin_length = cnt + sizeof(struct fc_els_fpin); 10099 fpin->desc_len = cpu_to_be32(fpin_length); 10100 fpin_length += sizeof(struct fc_els_fpin); /* the entire FPIN */ 10101 10102 /* Send every descriptor individually to the upper layer */ 10103 if (deliver) 10104 fc_host_fpin_rcv(lpfc_shost_from_vport(vport), 10105 fpin_length, (char *)fpin); 10106 desc_cnt++; 10107 } 10108 } 10109 10110 /** 10111 * lpfc_els_unsol_buffer - Process an unsolicited event data buffer 10112 * @phba: pointer to lpfc hba data structure. 10113 * @pring: pointer to a SLI ring. 10114 * @vport: pointer to a host virtual N_Port data structure. 10115 * @elsiocb: pointer to lpfc els command iocb data structure. 10116 * 10117 * This routine is used for processing the IOCB associated with a unsolicited 10118 * event. It first determines whether there is an existing ndlp that matches 10119 * the DID from the unsolicited IOCB. If not, it will create a new one with 10120 * the DID from the unsolicited IOCB. 
The ELS command from the unsolicited 10121 * IOCB is then used to invoke the proper routine and to set up proper state 10122 * of the discovery state machine. 10123 **/ 10124 static void 10125 lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10126 struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb) 10127 { 10128 struct lpfc_nodelist *ndlp; 10129 struct ls_rjt stat; 10130 u32 *payload, payload_len; 10131 u32 cmd = 0, did = 0, newnode, status = 0; 10132 uint8_t rjt_exp, rjt_err = 0, init_link = 0; 10133 struct lpfc_wcqe_complete *wcqe_cmpl = NULL; 10134 LPFC_MBOXQ_t *mbox; 10135 10136 if (!vport || !(elsiocb->context2)) 10137 goto dropit; 10138 10139 newnode = 0; 10140 wcqe_cmpl = &elsiocb->wcqe_cmpl; 10141 payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt; 10142 if (phba->sli_rev == LPFC_SLI_REV4) 10143 payload_len = wcqe_cmpl->total_data_placed; 10144 else 10145 payload_len = elsiocb->iocb.unsli3.rcvsli3.acc_len; 10146 status = get_job_ulpstatus(phba, elsiocb); 10147 cmd = *payload; 10148 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0) 10149 lpfc_sli3_post_buffer(phba, pring, 1); 10150 10151 did = get_job_els_rsp64_did(phba, elsiocb); 10152 if (status) { 10153 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10154 "RCV Unsol ELS: status:x%x/x%x did:x%x", 10155 status, get_job_word4(phba, elsiocb), did); 10156 goto dropit; 10157 } 10158 10159 /* Check to see if link went down during discovery */ 10160 if (lpfc_els_chk_latt(vport)) 10161 goto dropit; 10162 10163 /* Ignore traffic received during vport shutdown. */ 10164 if (vport->load_flag & FC_UNLOADING) 10165 goto dropit; 10166 10167 /* If NPort discovery is delayed drop incoming ELS */ 10168 if ((vport->fc_flag & FC_DISC_DELAYED) && 10169 (cmd != ELS_CMD_PLOGI)) 10170 goto dropit; 10171 10172 ndlp = lpfc_findnode_did(vport, did); 10173 if (!ndlp) { 10174 /* Cannot find existing Fabric ndlp, so allocate a new one */ 10175 ndlp = lpfc_nlp_init(vport, did); 10176 if (!ndlp) 10177 goto dropit; 10178 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 10179 newnode = 1; 10180 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 10181 ndlp->nlp_type |= NLP_FABRIC; 10182 } else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 10183 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 10184 newnode = 1; 10185 } 10186 10187 phba->fc_stat.elsRcvFrame++; 10188 10189 /* 10190 * Do not process any unsolicited ELS commands 10191 * if the ndlp is in DEV_LOSS 10192 */ 10193 spin_lock_irq(&ndlp->lock); 10194 if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) { 10195 spin_unlock_irq(&ndlp->lock); 10196 if (newnode) 10197 lpfc_nlp_put(ndlp); 10198 goto dropit; 10199 } 10200 spin_unlock_irq(&ndlp->lock); 10201 10202 elsiocb->context1 = lpfc_nlp_get(ndlp); 10203 if (!elsiocb->context1) 10204 goto dropit; 10205 elsiocb->vport = vport; 10206 10207 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) { 10208 cmd &= ELS_CMD_MASK; 10209 } 10210 /* ELS command <elsCmd> received from NPORT <did> */ 10211 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 10212 "0112 ELS command x%x received from NPORT x%x " 10213 "refcnt %d Data: x%x x%x x%x x%x\n", 10214 cmd, did, kref_read(&ndlp->kref), vport->port_state, 10215 vport->fc_flag, vport->fc_myDID, vport->fc_prevDID); 10216 10217 /* reject till our FLOGI completes or PLOGI assigned DID via PT2PT */ 10218 if ((vport->port_state < LPFC_FABRIC_CFG_LINK) && 10219 (cmd != ELS_CMD_FLOGI) && 10220 !((cmd == ELS_CMD_PLOGI) && (vport->fc_flag & FC_PT2PT))) { 10221 rjt_err = LSRJT_LOGICAL_BSY; 10222 rjt_exp = 
LSEXP_NOTHING_MORE; 10223 goto lsrjt; 10224 } 10225 10226 switch (cmd) { 10227 case ELS_CMD_PLOGI: 10228 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10229 "RCV PLOGI: did:x%x/ste:x%x flg:x%x", 10230 did, vport->port_state, ndlp->nlp_flag); 10231 10232 phba->fc_stat.elsRcvPLOGI++; 10233 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); 10234 if (phba->sli_rev == LPFC_SLI_REV4 && 10235 (phba->pport->fc_flag & FC_PT2PT)) { 10236 vport->fc_prevDID = vport->fc_myDID; 10237 /* Our DID needs to be updated before registering 10238 * the vfi. This is done in lpfc_rcv_plogi but 10239 * that is called after the reg_vfi. 10240 */ 10241 vport->fc_myDID = 10242 bf_get(els_rsp64_sid, 10243 &elsiocb->wqe.xmit_els_rsp); 10244 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 10245 "3312 Remote port assigned DID x%x " 10246 "%x\n", vport->fc_myDID, 10247 vport->fc_prevDID); 10248 } 10249 10250 lpfc_send_els_event(vport, ndlp, payload); 10251 10252 /* If NPort discovery is delayed, reject PLOGIs */ 10253 if (vport->fc_flag & FC_DISC_DELAYED) { 10254 rjt_err = LSRJT_UNABLE_TPC; 10255 rjt_exp = LSEXP_NOTHING_MORE; 10256 break; 10257 } 10258 10259 if (vport->port_state < LPFC_DISC_AUTH) { 10260 if (!(phba->pport->fc_flag & FC_PT2PT) || 10261 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { 10262 rjt_err = LSRJT_UNABLE_TPC; 10263 rjt_exp = LSEXP_NOTHING_MORE; 10264 break; 10265 } 10266 } 10267 10268 spin_lock_irq(&ndlp->lock); 10269 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE; 10270 spin_unlock_irq(&ndlp->lock); 10271 10272 lpfc_disc_state_machine(vport, ndlp, elsiocb, 10273 NLP_EVT_RCV_PLOGI); 10274 10275 break; 10276 case ELS_CMD_FLOGI: 10277 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10278 "RCV FLOGI: did:x%x/ste:x%x flg:x%x", 10279 did, vport->port_state, ndlp->nlp_flag); 10280 10281 phba->fc_stat.elsRcvFLOGI++; 10282 10283 /* If the driver believes fabric discovery is done and is ready, 10284 * bounce the link. There is some discrepancy.
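* A FLOGI has already been received on this point-to-point link, so the local and remote views of the login no longer agree; the code below rejects the new FLOGI with LOGICAL_BSY and sets init_link so the reject path bounces the link to resynchronize both ends.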
10285 */ 10286 if (vport->port_state >= LPFC_LOCAL_CFG_LINK && 10287 vport->fc_flag & FC_PT2PT && 10288 vport->rcv_flogi_cnt >= 1) { 10289 rjt_err = LSRJT_LOGICAL_BSY; 10290 rjt_exp = LSEXP_NOTHING_MORE; 10291 init_link++; 10292 goto lsrjt; 10293 } 10294 10295 lpfc_els_rcv_flogi(vport, elsiocb, ndlp); 10296 /* retain node if our response is deferred */ 10297 if (phba->defer_flogi_acc_flag) 10298 break; 10299 if (newnode) 10300 lpfc_disc_state_machine(vport, ndlp, NULL, 10301 NLP_EVT_DEVICE_RM); 10302 break; 10303 case ELS_CMD_LOGO: 10304 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10305 "RCV LOGO: did:x%x/ste:x%x flg:x%x", 10306 did, vport->port_state, ndlp->nlp_flag); 10307 10308 phba->fc_stat.elsRcvLOGO++; 10309 lpfc_send_els_event(vport, ndlp, payload); 10310 if (vport->port_state < LPFC_DISC_AUTH) { 10311 rjt_err = LSRJT_UNABLE_TPC; 10312 rjt_exp = LSEXP_NOTHING_MORE; 10313 break; 10314 } 10315 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO); 10316 if (newnode) 10317 lpfc_disc_state_machine(vport, ndlp, NULL, 10318 NLP_EVT_DEVICE_RM); 10319 break; 10320 case ELS_CMD_PRLO: 10321 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10322 "RCV PRLO: did:x%x/ste:x%x flg:x%x", 10323 did, vport->port_state, ndlp->nlp_flag); 10324 10325 phba->fc_stat.elsRcvPRLO++; 10326 lpfc_send_els_event(vport, ndlp, payload); 10327 if (vport->port_state < LPFC_DISC_AUTH) { 10328 rjt_err = LSRJT_UNABLE_TPC; 10329 rjt_exp = LSEXP_NOTHING_MORE; 10330 break; 10331 } 10332 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO); 10333 break; 10334 case ELS_CMD_LCB: 10335 phba->fc_stat.elsRcvLCB++; 10336 lpfc_els_rcv_lcb(vport, elsiocb, ndlp); 10337 break; 10338 case ELS_CMD_RDP: 10339 phba->fc_stat.elsRcvRDP++; 10340 lpfc_els_rcv_rdp(vport, elsiocb, ndlp); 10341 break; 10342 case ELS_CMD_RSCN: 10343 phba->fc_stat.elsRcvRSCN++; 10344 lpfc_els_rcv_rscn(vport, elsiocb, ndlp); 10345 if (newnode) 10346 lpfc_disc_state_machine(vport, ndlp, NULL, 10347 NLP_EVT_DEVICE_RM); 10348 break; 10349 case ELS_CMD_ADISC: 10350 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10351 "RCV ADISC: did:x%x/ste:x%x flg:x%x", 10352 did, vport->port_state, ndlp->nlp_flag); 10353 10354 lpfc_send_els_event(vport, ndlp, payload); 10355 phba->fc_stat.elsRcvADISC++; 10356 if (vport->port_state < LPFC_DISC_AUTH) { 10357 rjt_err = LSRJT_UNABLE_TPC; 10358 rjt_exp = LSEXP_NOTHING_MORE; 10359 break; 10360 } 10361 lpfc_disc_state_machine(vport, ndlp, elsiocb, 10362 NLP_EVT_RCV_ADISC); 10363 break; 10364 case ELS_CMD_PDISC: 10365 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10366 "RCV PDISC: did:x%x/ste:x%x flg:x%x", 10367 did, vport->port_state, ndlp->nlp_flag); 10368 10369 phba->fc_stat.elsRcvPDISC++; 10370 if (vport->port_state < LPFC_DISC_AUTH) { 10371 rjt_err = LSRJT_UNABLE_TPC; 10372 rjt_exp = LSEXP_NOTHING_MORE; 10373 break; 10374 } 10375 lpfc_disc_state_machine(vport, ndlp, elsiocb, 10376 NLP_EVT_RCV_PDISC); 10377 break; 10378 case ELS_CMD_FARPR: 10379 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10380 "RCV FARPR: did:x%x/ste:x%x flg:x%x", 10381 did, vport->port_state, ndlp->nlp_flag); 10382 10383 phba->fc_stat.elsRcvFARPR++; 10384 lpfc_els_rcv_farpr(vport, elsiocb, ndlp); 10385 break; 10386 case ELS_CMD_FARP: 10387 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10388 "RCV FARP: did:x%x/ste:x%x flg:x%x", 10389 did, vport->port_state, ndlp->nlp_flag); 10390 10391 phba->fc_stat.elsRcvFARP++; 10392 lpfc_els_rcv_farp(vport, elsiocb, ndlp); 10393 break; 10394 case ELS_CMD_FAN: 10395 
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10396 "RCV FAN: did:x%x/ste:x%x flg:x%x", 10397 did, vport->port_state, ndlp->nlp_flag); 10398 10399 phba->fc_stat.elsRcvFAN++; 10400 lpfc_els_rcv_fan(vport, elsiocb, ndlp); 10401 break; 10402 case ELS_CMD_PRLI: 10403 case ELS_CMD_NVMEPRLI: 10404 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10405 "RCV PRLI: did:x%x/ste:x%x flg:x%x", 10406 did, vport->port_state, ndlp->nlp_flag); 10407 10408 phba->fc_stat.elsRcvPRLI++; 10409 if ((vport->port_state < LPFC_DISC_AUTH) && 10410 (vport->fc_flag & FC_FABRIC)) { 10411 rjt_err = LSRJT_UNABLE_TPC; 10412 rjt_exp = LSEXP_NOTHING_MORE; 10413 break; 10414 } 10415 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI); 10416 break; 10417 case ELS_CMD_LIRR: 10418 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10419 "RCV LIRR: did:x%x/ste:x%x flg:x%x", 10420 did, vport->port_state, ndlp->nlp_flag); 10421 10422 phba->fc_stat.elsRcvLIRR++; 10423 lpfc_els_rcv_lirr(vport, elsiocb, ndlp); 10424 if (newnode) 10425 lpfc_disc_state_machine(vport, ndlp, NULL, 10426 NLP_EVT_DEVICE_RM); 10427 break; 10428 case ELS_CMD_RLS: 10429 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10430 "RCV RLS: did:x%x/ste:x%x flg:x%x", 10431 did, vport->port_state, ndlp->nlp_flag); 10432 10433 phba->fc_stat.elsRcvRLS++; 10434 lpfc_els_rcv_rls(vport, elsiocb, ndlp); 10435 if (newnode) 10436 lpfc_disc_state_machine(vport, ndlp, NULL, 10437 NLP_EVT_DEVICE_RM); 10438 break; 10439 case ELS_CMD_RPL: 10440 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10441 "RCV RPL: did:x%x/ste:x%x flg:x%x", 10442 did, vport->port_state, ndlp->nlp_flag); 10443 10444 phba->fc_stat.elsRcvRPL++; 10445 lpfc_els_rcv_rpl(vport, elsiocb, ndlp); 10446 if (newnode) 10447 lpfc_disc_state_machine(vport, ndlp, NULL, 10448 NLP_EVT_DEVICE_RM); 10449 break; 10450 case ELS_CMD_RNID: 10451 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10452 "RCV RNID: did:x%x/ste:x%x flg:x%x", 10453 did, vport->port_state, ndlp->nlp_flag); 10454 10455 phba->fc_stat.elsRcvRNID++; 10456 lpfc_els_rcv_rnid(vport, elsiocb, ndlp); 10457 if (newnode) 10458 lpfc_disc_state_machine(vport, ndlp, NULL, 10459 NLP_EVT_DEVICE_RM); 10460 break; 10461 case ELS_CMD_RTV: 10462 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10463 "RCV RTV: did:x%x/ste:x%x flg:x%x", 10464 did, vport->port_state, ndlp->nlp_flag); 10465 phba->fc_stat.elsRcvRTV++; 10466 lpfc_els_rcv_rtv(vport, elsiocb, ndlp); 10467 if (newnode) 10468 lpfc_disc_state_machine(vport, ndlp, NULL, 10469 NLP_EVT_DEVICE_RM); 10470 break; 10471 case ELS_CMD_RRQ: 10472 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10473 "RCV RRQ: did:x%x/ste:x%x flg:x%x", 10474 did, vport->port_state, ndlp->nlp_flag); 10475 10476 phba->fc_stat.elsRcvRRQ++; 10477 lpfc_els_rcv_rrq(vport, elsiocb, ndlp); 10478 if (newnode) 10479 lpfc_disc_state_machine(vport, ndlp, NULL, 10480 NLP_EVT_DEVICE_RM); 10481 break; 10482 case ELS_CMD_ECHO: 10483 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10484 "RCV ECHO: did:x%x/ste:x%x flg:x%x", 10485 did, vport->port_state, ndlp->nlp_flag); 10486 10487 phba->fc_stat.elsRcvECHO++; 10488 lpfc_els_rcv_echo(vport, elsiocb, ndlp); 10489 if (newnode) 10490 lpfc_disc_state_machine(vport, ndlp, NULL, 10491 NLP_EVT_DEVICE_RM); 10492 break; 10493 case ELS_CMD_REC: 10494 /* receive this due to exchange closed */ 10495 rjt_err = LSRJT_UNABLE_TPC; 10496 rjt_exp = LSEXP_INVALID_OX_RX; 10497 break; 10498 case ELS_CMD_FPIN: 10499 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 
10500 "RCV FPIN: did:x%x/ste:x%x flg:x%x", 10501 did, vport->port_state, ndlp->nlp_flag); 10502 10503 lpfc_els_rcv_fpin(vport, (struct fc_els_fpin *)payload, 10504 payload_len); 10505 10506 /* There are no replies, so no rjt codes */ 10507 break; 10508 case ELS_CMD_EDC: 10509 lpfc_els_rcv_edc(vport, elsiocb, ndlp); 10510 break; 10511 case ELS_CMD_RDF: 10512 phba->fc_stat.elsRcvRDF++; 10513 /* Accept RDF only from fabric controller */ 10514 if (did != Fabric_Cntl_DID) { 10515 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 10516 "1115 Received RDF from invalid DID " 10517 "x%x\n", did); 10518 rjt_err = LSRJT_PROTOCOL_ERR; 10519 rjt_exp = LSEXP_NOTHING_MORE; 10520 goto lsrjt; 10521 } 10522 10523 lpfc_els_rcv_rdf(vport, elsiocb, ndlp); 10524 break; 10525 default: 10526 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10527 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x", 10528 cmd, did, vport->port_state); 10529 10530 /* Unsupported ELS command, reject */ 10531 rjt_err = LSRJT_CMD_UNSUPPORTED; 10532 rjt_exp = LSEXP_NOTHING_MORE; 10533 10534 /* Unknown ELS command <elsCmd> received from NPORT <did> */ 10535 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10536 "0115 Unknown ELS command x%x " 10537 "received from NPORT x%x\n", cmd, did); 10538 if (newnode) 10539 lpfc_disc_state_machine(vport, ndlp, NULL, 10540 NLP_EVT_DEVICE_RM); 10541 break; 10542 } 10543 10544 lsrjt: 10545 /* check if need to LS_RJT received ELS cmd */ 10546 if (rjt_err) { 10547 memset(&stat, 0, sizeof(stat)); 10548 stat.un.b.lsRjtRsnCode = rjt_err; 10549 stat.un.b.lsRjtRsnCodeExp = rjt_exp; 10550 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp, 10551 NULL); 10552 /* Remove the reference from above for new nodes. */ 10553 if (newnode) 10554 lpfc_disc_state_machine(vport, ndlp, NULL, 10555 NLP_EVT_DEVICE_RM); 10556 } 10557 10558 /* Release the reference on this elsiocb, not the ndlp. */ 10559 lpfc_nlp_put(elsiocb->context1); 10560 elsiocb->context1 = NULL; 10561 10562 /* Special case. Driver received an unsolicited command that 10563 * unsupportable given the driver's current state. Reset the 10564 * link and start over. 10565 */ 10566 if (init_link) { 10567 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10568 if (!mbox) 10569 return; 10570 lpfc_linkdown(phba); 10571 lpfc_init_link(phba, mbox, 10572 phba->cfg_topology, 10573 phba->cfg_link_speed); 10574 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; 10575 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 10576 mbox->vport = vport; 10577 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == 10578 MBX_NOT_FINISHED) 10579 mempool_free(mbox, phba->mbox_mem_pool); 10580 } 10581 10582 return; 10583 10584 dropit: 10585 if (vport && !(vport->load_flag & FC_UNLOADING)) 10586 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10587 "0111 Dropping received ELS cmd " 10588 "Data: x%x x%x x%x x%x\n", 10589 cmd, status, get_job_word4(phba, elsiocb), did); 10590 10591 phba->fc_stat.elsRcvDrop++; 10592 } 10593 10594 /** 10595 * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring 10596 * @phba: pointer to lpfc hba data structure. 10597 * @pring: pointer to a SLI ring. 10598 * @elsiocb: pointer to lpfc els iocb data structure. 10599 * 10600 * This routine is used to process an unsolicited event received from a SLI 10601 * (Service Level Interface) ring. 
The actual processing of the data buffer 10602 * associated with the unsolicited event is done by invoking the routine 10603 * lpfc_els_unsol_buffer() after properly set up the iocb buffer from the 10604 * SLI ring on which the unsolicited event was received. 10605 **/ 10606 void 10607 lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10608 struct lpfc_iocbq *elsiocb) 10609 { 10610 struct lpfc_vport *vport = elsiocb->vport; 10611 u32 ulp_command, status, parameter, bde_count = 0; 10612 IOCB_t *icmd; 10613 struct lpfc_wcqe_complete *wcqe_cmpl = NULL; 10614 struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2; 10615 struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3; 10616 dma_addr_t paddr; 10617 10618 elsiocb->context1 = NULL; 10619 elsiocb->context2 = NULL; 10620 elsiocb->context3 = NULL; 10621 10622 wcqe_cmpl = &elsiocb->wcqe_cmpl; 10623 ulp_command = get_job_cmnd(phba, elsiocb); 10624 status = get_job_ulpstatus(phba, elsiocb); 10625 parameter = get_job_word4(phba, elsiocb); 10626 if (phba->sli_rev == LPFC_SLI_REV4) 10627 bde_count = wcqe_cmpl->word3; 10628 else 10629 bde_count = elsiocb->iocb.ulpBdeCount; 10630 10631 if (status == IOSTAT_NEED_BUFFER) { 10632 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); 10633 } else if (status == IOSTAT_LOCAL_REJECT && 10634 (parameter & IOERR_PARAM_MASK) == 10635 IOERR_RCV_BUFFER_WAITING) { 10636 phba->fc_stat.NoRcvBuf++; 10637 /* Not enough posted buffers; Try posting more buffers */ 10638 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) 10639 lpfc_sli3_post_buffer(phba, pring, 0); 10640 return; 10641 } 10642 10643 if (phba->sli_rev == LPFC_SLI_REV3) { 10644 icmd = &elsiocb->iocb; 10645 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 10646 (ulp_command == CMD_IOCB_RCV_ELS64_CX || 10647 ulp_command == CMD_IOCB_RCV_SEQ64_CX)) { 10648 if (icmd->unsli3.rcvsli3.vpi == 0xffff) 10649 vport = phba->pport; 10650 else 10651 vport = lpfc_find_vport_by_vpid(phba, 10652 icmd->unsli3.rcvsli3.vpi); 10653 } 10654 } 10655 10656 /* If there are no BDEs associated 10657 * with this IOCB, there is nothing to do. 10658 */ 10659 if (bde_count == 0) 10660 return; 10661 10662 /* Account for SLI2 or SLI3 and later unsolicited buffering */ 10663 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 10664 elsiocb->context2 = bdeBuf1; 10665 if (bde_count == 2) 10666 elsiocb->context3 = bdeBuf2; 10667 } else { 10668 icmd = &elsiocb->iocb; 10669 paddr = getPaddr(icmd->un.cont64[0].addrHigh, 10670 icmd->un.cont64[0].addrLow); 10671 elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring, 10672 paddr); 10673 if (bde_count == 2) { 10674 paddr = getPaddr(icmd->un.cont64[1].addrHigh, 10675 icmd->un.cont64[1].addrLow); 10676 elsiocb->context3 = lpfc_sli_ringpostbuf_get(phba, 10677 pring, 10678 paddr); 10679 } 10680 } 10681 10682 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb); 10683 /* 10684 * The different unsolicited event handlers would tell us 10685 * if they are done with "mp" by setting context2 to NULL. 10686 */ 10687 if (elsiocb->context2) { 10688 lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2); 10689 elsiocb->context2 = NULL; 10690 } 10691 10692 if (elsiocb->context3) { 10693 lpfc_in_buf_free(phba, elsiocb->context3); 10694 elsiocb->context3 = NULL; 10695 } 10696 } 10697 10698 static void 10699 lpfc_start_fdmi(struct lpfc_vport *vport) 10700 { 10701 struct lpfc_nodelist *ndlp; 10702 10703 /* If this is the first time, allocate an ndlp and initialize 10704 * it. Otherwise, make sure the node is enabled and then do the 10705 * login. 
10706 */ 10707 ndlp = lpfc_findnode_did(vport, FDMI_DID); 10708 if (!ndlp) { 10709 ndlp = lpfc_nlp_init(vport, FDMI_DID); 10710 if (ndlp) { 10711 ndlp->nlp_type |= NLP_FABRIC; 10712 } else { 10713 return; 10714 } 10715 } 10716 10717 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 10718 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 10719 } 10720 10721 /** 10722 * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr 10723 * @phba: pointer to lpfc hba data structure. 10724 * @vport: pointer to a virtual N_Port data structure. 10725 * 10726 * This routine issues a Port Login (PLOGI) to the Name Server with 10727 * State Change Request (SCR) for a @vport. This routine will create an 10728 * ndlp for the Name Server associated to the @vport if such node does 10729 * not already exist. The PLOGI to Name Server is issued by invoking the 10730 * lpfc_issue_els_plogi() routine. If Fabric-Device Management Interface 10731 * (FDMI) is configured to the @vport, a FDMI node will be created and 10732 * the PLOGI to FDMI is issued by invoking lpfc_issue_els_plogi() routine. 10733 **/ 10734 void 10735 lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) 10736 { 10737 struct lpfc_nodelist *ndlp; 10738 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 10739 10740 /* 10741 * If lpfc_delay_discovery parameter is set and the clean address 10742 * bit is cleared and fc fabric parameters chenged, delay FC NPort 10743 * discovery. 10744 */ 10745 spin_lock_irq(shost->host_lock); 10746 if (vport->fc_flag & FC_DISC_DELAYED) { 10747 spin_unlock_irq(shost->host_lock); 10748 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10749 "3334 Delay fc port discovery for %d secs\n", 10750 phba->fc_ratov); 10751 mod_timer(&vport->delayed_disc_tmo, 10752 jiffies + msecs_to_jiffies(1000 * phba->fc_ratov)); 10753 return; 10754 } 10755 spin_unlock_irq(shost->host_lock); 10756 10757 ndlp = lpfc_findnode_did(vport, NameServer_DID); 10758 if (!ndlp) { 10759 ndlp = lpfc_nlp_init(vport, NameServer_DID); 10760 if (!ndlp) { 10761 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 10762 lpfc_disc_start(vport); 10763 return; 10764 } 10765 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10766 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10767 "0251 NameServer login: no memory\n"); 10768 return; 10769 } 10770 } 10771 10772 ndlp->nlp_type |= NLP_FABRIC; 10773 10774 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 10775 10776 if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) { 10777 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10778 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10779 "0252 Cannot issue NameServer login\n"); 10780 return; 10781 } 10782 10783 if ((phba->cfg_enable_SmartSAN || 10784 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) && 10785 (vport->load_flag & FC_ALLOW_FDMI)) 10786 lpfc_start_fdmi(vport); 10787 } 10788 10789 /** 10790 * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport 10791 * @phba: pointer to lpfc hba data structure. 10792 * @pmb: pointer to the driver internal queue element for mailbox command. 10793 * 10794 * This routine is the completion callback function to register new vport 10795 * mailbox command. 
If the new vport mailbox command completes successfully, 10796 * the fabric registration login shall be performed on physical port (the 10797 * new vport created is actually a physical port, with VPI 0) or the port 10798 * login to Name Server for State Change Request (SCR) will be performed 10799 * on virtual port (real virtual port, with VPI greater than 0). 10800 **/ 10801 static void 10802 lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 10803 { 10804 struct lpfc_vport *vport = pmb->vport; 10805 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 10806 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; 10807 MAILBOX_t *mb = &pmb->u.mb; 10808 int rc; 10809 10810 spin_lock_irq(shost->host_lock); 10811 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 10812 spin_unlock_irq(shost->host_lock); 10813 10814 if (mb->mbxStatus) { 10815 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10816 "0915 Register VPI failed : Status: x%x" 10817 " upd bit: x%x \n", mb->mbxStatus, 10818 mb->un.varRegVpi.upd); 10819 if (phba->sli_rev == LPFC_SLI_REV4 && 10820 mb->un.varRegVpi.upd) 10821 goto mbox_err_exit ; 10822 10823 switch (mb->mbxStatus) { 10824 case 0x11: /* unsupported feature */ 10825 case 0x9603: /* max_vpi exceeded */ 10826 case 0x9602: /* Link event since CLEAR_LA */ 10827 /* giving up on vport registration */ 10828 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10829 spin_lock_irq(shost->host_lock); 10830 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 10831 spin_unlock_irq(shost->host_lock); 10832 lpfc_can_disctmo(vport); 10833 break; 10834 /* If reg_vpi fail with invalid VPI status, re-init VPI */ 10835 case 0x20: 10836 spin_lock_irq(shost->host_lock); 10837 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 10838 spin_unlock_irq(shost->host_lock); 10839 lpfc_init_vpi(phba, pmb, vport->vpi); 10840 pmb->vport = vport; 10841 pmb->mbox_cmpl = lpfc_init_vpi_cmpl; 10842 rc = lpfc_sli_issue_mbox(phba, pmb, 10843 MBX_NOWAIT); 10844 if (rc == MBX_NOT_FINISHED) { 10845 lpfc_printf_vlog(vport, KERN_ERR, 10846 LOG_TRACE_EVENT, 10847 "2732 Failed to issue INIT_VPI" 10848 " mailbox command\n"); 10849 } else { 10850 lpfc_nlp_put(ndlp); 10851 return; 10852 } 10853 fallthrough; 10854 default: 10855 /* Try to recover from this error */ 10856 if (phba->sli_rev == LPFC_SLI_REV4) 10857 lpfc_sli4_unreg_all_rpis(vport); 10858 lpfc_mbx_unreg_vpi(vport); 10859 spin_lock_irq(shost->host_lock); 10860 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 10861 spin_unlock_irq(shost->host_lock); 10862 if (mb->mbxStatus == MBX_NOT_FINISHED) 10863 break; 10864 if ((vport->port_type == LPFC_PHYSICAL_PORT) && 10865 !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) { 10866 if (phba->sli_rev == LPFC_SLI_REV4) 10867 lpfc_issue_init_vfi(vport); 10868 else 10869 lpfc_initial_flogi(vport); 10870 } else { 10871 lpfc_initial_fdisc(vport); 10872 } 10873 break; 10874 } 10875 } else { 10876 spin_lock_irq(shost->host_lock); 10877 vport->vpi_state |= LPFC_VPI_REGISTERED; 10878 spin_unlock_irq(shost->host_lock); 10879 if (vport == phba->pport) { 10880 if (phba->sli_rev < LPFC_SLI_REV4) 10881 lpfc_issue_fabric_reglogin(vport); 10882 else { 10883 /* 10884 * If the physical port is instantiated using 10885 * FDISC, do not start vport discovery. 
10886 */ 10887 if (vport->port_state != LPFC_FDISC) 10888 lpfc_start_fdiscs(phba); 10889 lpfc_do_scr_ns_plogi(phba, vport); 10890 } 10891 } else { 10892 lpfc_do_scr_ns_plogi(phba, vport); 10893 } 10894 } 10895 mbox_err_exit: 10896 /* Now, we decrement the ndlp reference count held for this 10897 * callback function 10898 */ 10899 lpfc_nlp_put(ndlp); 10900 10901 mempool_free(pmb, phba->mbox_mem_pool); 10902 return; 10903 } 10904 10905 /** 10906 * lpfc_register_new_vport - Register a new vport with a HBA 10907 * @phba: pointer to lpfc hba data structure. 10908 * @vport: pointer to a host virtual N_Port data structure. 10909 * @ndlp: pointer to a node-list data structure. 10910 * 10911 * This routine registers the @vport as a new virtual port with a HBA. 10912 * It is done through a registering vpi mailbox command. 10913 **/ 10914 void 10915 lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport, 10916 struct lpfc_nodelist *ndlp) 10917 { 10918 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 10919 LPFC_MBOXQ_t *mbox; 10920 10921 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10922 if (mbox) { 10923 lpfc_reg_vpi(vport, mbox); 10924 mbox->vport = vport; 10925 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 10926 if (!mbox->ctx_ndlp) { 10927 mempool_free(mbox, phba->mbox_mem_pool); 10928 goto mbox_err_exit; 10929 } 10930 10931 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport; 10932 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 10933 == MBX_NOT_FINISHED) { 10934 /* mailbox command not success, decrement ndlp 10935 * reference count for this command 10936 */ 10937 lpfc_nlp_put(ndlp); 10938 mempool_free(mbox, phba->mbox_mem_pool); 10939 10940 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10941 "0253 Register VPI: Can't send mbox\n"); 10942 goto mbox_err_exit; 10943 } 10944 } else { 10945 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10946 "0254 Register VPI: no memory\n"); 10947 goto mbox_err_exit; 10948 } 10949 return; 10950 10951 mbox_err_exit: 10952 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10953 spin_lock_irq(shost->host_lock); 10954 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 10955 spin_unlock_irq(shost->host_lock); 10956 return; 10957 } 10958 10959 /** 10960 * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer 10961 * @phba: pointer to lpfc hba data structure. 10962 * 10963 * This routine cancels the retry delay timers to all the vports. 10964 **/ 10965 void 10966 lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba) 10967 { 10968 struct lpfc_vport **vports; 10969 struct lpfc_nodelist *ndlp; 10970 uint32_t link_state; 10971 int i; 10972 10973 /* Treat this failure as linkdown for all vports */ 10974 link_state = phba->link_state; 10975 lpfc_linkdown(phba); 10976 phba->link_state = link_state; 10977 10978 vports = lpfc_create_vport_work_array(phba); 10979 10980 if (vports) { 10981 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 10982 ndlp = lpfc_findnode_did(vports[i], Fabric_DID); 10983 if (ndlp) 10984 lpfc_cancel_retry_delay_tmo(vports[i], ndlp); 10985 lpfc_els_flush_cmd(vports[i]); 10986 } 10987 lpfc_destroy_vport_work_array(phba, vports); 10988 } 10989 } 10990 10991 /** 10992 * lpfc_retry_pport_discovery - Start timer to retry FLOGI. 10993 * @phba: pointer to lpfc hba data structure. 10994 * 10995 * This routine abort all pending discovery commands and 10996 * start a timer to retry FLOGI for the physical port 10997 * discovery. 
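* The retry is scheduled by arming the fabric node's nlp_delayfunc timer for one second with nlp_last_elscmd set to ELS_CMD_FLOGI, so the delayed ELS handling reissues the FLOGI once the timer fires.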
10998 **/ 10999 void 11000 lpfc_retry_pport_discovery(struct lpfc_hba *phba) 11001 { 11002 struct lpfc_nodelist *ndlp; 11003 11004 /* Cancel all vport retry delay timers */ 11005 lpfc_cancel_all_vport_retry_delay_timer(phba); 11006 11007 /* If the fabric requires FLOGI, re-instantiate the physical login */ 11008 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); 11009 if (!ndlp) 11010 return; 11011 11012 mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); 11013 spin_lock_irq(&ndlp->lock); 11014 ndlp->nlp_flag |= NLP_DELAY_TMO; 11015 spin_unlock_irq(&ndlp->lock); 11016 ndlp->nlp_last_elscmd = ELS_CMD_FLOGI; 11017 phba->pport->port_state = LPFC_FLOGI; 11018 return; 11019 } 11020 11021 /** 11022 * lpfc_fabric_login_reqd - Check if FLOGI required. 11023 * @phba: pointer to lpfc hba data structure. 11024 * @cmdiocb: pointer to FDISC command iocb. 11025 * @rspiocb: pointer to FDISC response iocb. 11026 * 11027 * This routine checks if a FLOGI is required for FDISC 11028 * to succeed. 11029 **/ 11030 static int 11031 lpfc_fabric_login_reqd(struct lpfc_hba *phba, 11032 struct lpfc_iocbq *cmdiocb, 11033 struct lpfc_iocbq *rspiocb) 11034 { 11035 u32 ulp_status = get_job_ulpstatus(phba, rspiocb); 11036 u32 ulp_word4 = get_job_word4(phba, rspiocb); 11037 11038 if (ulp_status != IOSTAT_FABRIC_RJT || 11039 ulp_word4 != RJT_LOGIN_REQUIRED) 11040 return 0; 11041 else 11042 return 1; 11043 } 11044 11045 /** 11046 * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command 11047 * @phba: pointer to lpfc hba data structure. 11048 * @cmdiocb: pointer to lpfc command iocb data structure. 11049 * @rspiocb: pointer to lpfc response iocb data structure. 11050 * 11051 * This routine is the completion callback function to a Fabric Discover 11052 * (FDISC) ELS command. Since all the FDISC ELS commands are issued 11053 * single threaded, each FDISC completion callback function will reset 11054 * the discovery timer for all vports so that the timers do not 11055 * time out unnecessarily. The function checks the FDISC IOCB status. If an 11056 * error is detected, the vport will be set to the FC_VPORT_FAILED state; otherwise, the 11057 * vport will be set to the FC_VPORT_ACTIVE state. It then checks whether the DID 11058 * assigned to the vport has been changed with the completion of the FDISC 11059 * command. If so, both RPI (Remote Port Index) and VPI (Virtual Port Index) 11060 * are unregistered from the HBA, and then the lpfc_register_new_vport() 11061 * routine is invoked to register the new vport with the HBA. Otherwise, the 11062 * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name 11063 * Server for State Change Request (SCR). 11064 **/ 11065 static void 11066 lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11067 struct lpfc_iocbq *rspiocb) 11068 { 11069 struct lpfc_vport *vport = cmdiocb->vport; 11070 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 11071 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 11072 struct lpfc_nodelist *np; 11073 struct lpfc_nodelist *next_np; 11074 struct lpfc_iocbq *piocb; 11075 struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp; 11076 struct serv_parm *sp; 11077 uint8_t fabric_param_changed; 11078 u32 ulp_status, ulp_word4; 11079 11080 ulp_status = get_job_ulpstatus(phba, rspiocb); 11081 ulp_word4 = get_job_word4(phba, rspiocb); 11082 11083 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 11084 "0123 FDISC completes.
x%x/x%x prevDID: x%x\n", 11085 ulp_status, ulp_word4, 11086 vport->fc_prevDID); 11087 /* Since all FDISCs are being single threaded, we 11088 * must reset the discovery timer for ALL vports 11089 * waiting to send FDISC when one completes. 11090 */ 11091 list_for_each_entry(piocb, &phba->fabric_iocb_list, list) { 11092 lpfc_set_disctmo(piocb->vport); 11093 } 11094 11095 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 11096 "FDISC cmpl: status:x%x/x%x prevdid:x%x", 11097 ulp_status, ulp_word4, vport->fc_prevDID); 11098 11099 if (ulp_status) { 11100 11101 if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) { 11102 lpfc_retry_pport_discovery(phba); 11103 goto out; 11104 } 11105 11106 /* Check for retry */ 11107 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) 11108 goto out; 11109 /* FDISC failed */ 11110 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 11111 "0126 FDISC failed. (x%x/x%x)\n", 11112 ulp_status, ulp_word4); 11113 goto fdisc_failed; 11114 } 11115 11116 lpfc_check_nlp_post_devloss(vport, ndlp); 11117 11118 spin_lock_irq(shost->host_lock); 11119 vport->fc_flag &= ~FC_VPORT_CVL_RCVD; 11120 vport->fc_flag &= ~FC_VPORT_LOGO_RCVD; 11121 vport->fc_flag |= FC_FABRIC; 11122 if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP) 11123 vport->fc_flag |= FC_PUBLIC_LOOP; 11124 spin_unlock_irq(shost->host_lock); 11125 11126 vport->fc_myDID = ulp_word4 & Mask_DID; 11127 lpfc_vport_set_state(vport, FC_VPORT_ACTIVE); 11128 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 11129 if (!prsp) 11130 goto out; 11131 sp = prsp->virt + sizeof(uint32_t); 11132 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp); 11133 memcpy(&vport->fabric_portname, &sp->portName, 11134 sizeof(struct lpfc_name)); 11135 memcpy(&vport->fabric_nodename, &sp->nodeName, 11136 sizeof(struct lpfc_name)); 11137 if (fabric_param_changed && 11138 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 11139 /* If our NportID changed, we need to ensure all 11140 * remaining NPORTs get unreg_login'ed so we can 11141 * issue unreg_vpi. 11142 */ 11143 list_for_each_entry_safe(np, next_np, 11144 &vport->fc_nodes, nlp_listp) { 11145 if ((np->nlp_state != NLP_STE_NPR_NODE) || 11146 !(np->nlp_flag & NLP_NPR_ADISC)) 11147 continue; 11148 spin_lock_irq(&ndlp->lock); 11149 np->nlp_flag &= ~NLP_NPR_ADISC; 11150 spin_unlock_irq(&ndlp->lock); 11151 lpfc_unreg_rpi(vport, np); 11152 } 11153 lpfc_cleanup_pending_mbox(vport); 11154 11155 if (phba->sli_rev == LPFC_SLI_REV4) 11156 lpfc_sli4_unreg_all_rpis(vport); 11157 11158 lpfc_mbx_unreg_vpi(vport); 11159 spin_lock_irq(shost->host_lock); 11160 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 11161 if (phba->sli_rev == LPFC_SLI_REV4) 11162 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 11163 else 11164 vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG; 11165 spin_unlock_irq(shost->host_lock); 11166 } else if ((phba->sli_rev == LPFC_SLI_REV4) && 11167 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 11168 /* 11169 * Driver needs to re-reg VPI in order for f/w 11170 * to update the MAC address. 11171 */ 11172 lpfc_register_new_vport(phba, vport, ndlp); 11173 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 11174 goto out; 11175 } 11176 11177 if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI) 11178 lpfc_issue_init_vpi(vport); 11179 else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) 11180 lpfc_register_new_vport(phba, vport, ndlp); 11181 else 11182 lpfc_do_scr_ns_plogi(phba, vport); 11183 11184 /* The FDISC completed successfully. Move the fabric ndlp to 11185 * UNMAPPED state and register with the transport. 
11186 */ 11187 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 11188 goto out; 11189 11190 fdisc_failed: 11191 if (vport->fc_vport && 11192 (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS)) 11193 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 11194 /* Cancel discovery timer */ 11195 lpfc_can_disctmo(vport); 11196 out: 11197 lpfc_els_free_iocb(phba, cmdiocb); 11198 lpfc_nlp_put(ndlp); 11199 } 11200 11201 /** 11202 * lpfc_issue_els_fdisc - Issue a fdisc iocb command 11203 * @vport: pointer to a virtual N_Port data structure. 11204 * @ndlp: pointer to a node-list data structure. 11205 * @retry: number of retries to the command IOCB. 11206 * 11207 * This routine prepares and issues a Fabric Discover (FDISC) IOCB to 11208 * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb() 11209 * routine to issue the IOCB, which makes sure only one outstanding fabric 11210 * IOCB will be sent off HBA at any given time. 11211 * 11212 * Note that the ndlp reference count will be incremented by 1 for holding the 11213 * ndlp and the reference to ndlp will be stored into the context1 field of 11214 * the IOCB for the completion callback function to the FDISC ELS command. 11215 * 11216 * Return code 11217 * 0 - Successfully issued fdisc iocb command 11218 * 1 - Failed to issue fdisc iocb command 11219 **/ 11220 static int 11221 lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 11222 uint8_t retry) 11223 { 11224 struct lpfc_hba *phba = vport->phba; 11225 IOCB_t *icmd; 11226 union lpfc_wqe128 *wqe = NULL; 11227 struct lpfc_iocbq *elsiocb; 11228 struct serv_parm *sp; 11229 uint8_t *pcmd; 11230 uint16_t cmdsize; 11231 int did = ndlp->nlp_DID; 11232 int rc; 11233 11234 vport->port_state = LPFC_FDISC; 11235 vport->fc_myDID = 0; 11236 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 11237 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 11238 ELS_CMD_FDISC); 11239 if (!elsiocb) { 11240 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 11241 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 11242 "0255 Issue FDISC: no IOCB\n"); 11243 return 1; 11244 } 11245 11246 if (phba->sli_rev == LPFC_SLI_REV4) { 11247 wqe = &elsiocb->wqe; 11248 bf_set(els_req64_sid, &wqe->els_req, 0); 11249 bf_set(els_req64_sp, &wqe->els_req, 1); 11250 } else { 11251 icmd = &elsiocb->iocb; 11252 icmd->un.elsreq64.myID = 0; 11253 icmd->un.elsreq64.fl = 1; 11254 icmd->ulpCt_h = 1; 11255 icmd->ulpCt_l = 0; 11256 } 11257 11258 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 11259 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC; 11260 pcmd += sizeof(uint32_t); /* CSP Word 1 */ 11261 memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm)); 11262 sp = (struct serv_parm *) pcmd; 11263 /* Setup CSPs accordingly for Fabric */ 11264 sp->cmn.e_d_tov = 0; 11265 sp->cmn.w2.r_a_tov = 0; 11266 sp->cmn.virtual_fabric_support = 0; 11267 sp->cls1.classValid = 0; 11268 sp->cls2.seqDelivery = 1; 11269 sp->cls3.seqDelivery = 1; 11270 11271 pcmd += sizeof(uint32_t); /* CSP Word 2 */ 11272 pcmd += sizeof(uint32_t); /* CSP Word 3 */ 11273 pcmd += sizeof(uint32_t); /* CSP Word 4 */ 11274 pcmd += sizeof(uint32_t); /* Port Name */ 11275 memcpy(pcmd, &vport->fc_portname, 8); 11276 pcmd += sizeof(uint32_t); /* Node Name */ 11277 pcmd += sizeof(uint32_t); /* Node Name */ 11278 memcpy(pcmd, &vport->fc_nodename, 8); 11279 sp->cmn.valid_vendor_ver_level = 0; 11280 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); 11281 lpfc_set_disctmo(vport); 11282 11283 
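/* Wire up the FDISC completion handler and take an ndlp reference for context1, then hand the IOCB to the fabric issue path so only one fabric ELS is outstanding at a time. */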
phba->fc_stat.elsXmitFDISC++; 11284 elsiocb->cmd_cmpl = lpfc_cmpl_els_fdisc; 11285 11286 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 11287 "Issue FDISC: did:x%x", 11288 did, 0, 0); 11289 11290 elsiocb->context1 = lpfc_nlp_get(ndlp); 11291 if (!elsiocb->context1) 11292 goto err_out; 11293 11294 rc = lpfc_issue_fabric_iocb(phba, elsiocb); 11295 if (rc == IOCB_ERROR) { 11296 lpfc_nlp_put(ndlp); 11297 goto err_out; 11298 } 11299 11300 lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING); 11301 return 0; 11302 11303 err_out: 11304 lpfc_els_free_iocb(phba, elsiocb); 11305 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 11306 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 11307 "0256 Issue FDISC: Cannot send IOCB\n"); 11308 return 1; 11309 } 11310 11311 /** 11312 * lpfc_cmpl_els_npiv_logo - Completion function with vport logo 11313 * @phba: pointer to lpfc hba data structure. 11314 * @cmdiocb: pointer to lpfc command iocb data structure. 11315 * @rspiocb: pointer to lpfc response iocb data structure. 11316 * 11317 * This routine is the completion callback function to the issuing of a LOGO 11318 * ELS command off a vport. It frees the command IOCB and then decrement the 11319 * reference count held on ndlp for this completion function, indicating that 11320 * the reference to the ndlp is no long needed. Note that the 11321 * lpfc_els_free_iocb() routine decrements the ndlp reference held for this 11322 * callback function and an additional explicit ndlp reference decrementation 11323 * will trigger the actual release of the ndlp. 11324 **/ 11325 static void 11326 lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11327 struct lpfc_iocbq *rspiocb) 11328 { 11329 struct lpfc_vport *vport = cmdiocb->vport; 11330 IOCB_t *irsp; 11331 struct lpfc_nodelist *ndlp; 11332 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 11333 u32 ulp_status, ulp_word4, did, tmo; 11334 11335 ndlp = (struct lpfc_nodelist *)cmdiocb->context1; 11336 11337 ulp_status = get_job_ulpstatus(phba, rspiocb); 11338 ulp_word4 = get_job_word4(phba, rspiocb); 11339 11340 if (phba->sli_rev == LPFC_SLI_REV4) { 11341 did = get_job_els_rsp64_did(phba, cmdiocb); 11342 tmo = get_wqe_tmo(cmdiocb); 11343 } else { 11344 irsp = &rspiocb->iocb; 11345 did = get_job_els_rsp64_did(phba, rspiocb); 11346 tmo = irsp->ulpTimeout; 11347 } 11348 11349 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 11350 "LOGO npiv cmpl: status:x%x/x%x did:x%x", 11351 ulp_status, ulp_word4, did); 11352 11353 /* NPIV LOGO completes to NPort <nlp_DID> */ 11354 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 11355 "2928 NPIV LOGO completes to NPort x%x " 11356 "Data: x%x x%x x%x x%x x%x x%x x%x\n", 11357 ndlp->nlp_DID, ulp_status, ulp_word4, 11358 tmo, vport->num_disc_nodes, 11359 kref_read(&ndlp->kref), ndlp->nlp_flag, 11360 ndlp->fc4_xpt_flags); 11361 11362 if (ulp_status == IOSTAT_SUCCESS) { 11363 spin_lock_irq(shost->host_lock); 11364 vport->fc_flag &= ~FC_NDISC_ACTIVE; 11365 vport->fc_flag &= ~FC_FABRIC; 11366 spin_unlock_irq(shost->host_lock); 11367 lpfc_can_disctmo(vport); 11368 } 11369 11370 if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) { 11371 /* Wake up lpfc_vport_delete if waiting...*/ 11372 if (ndlp->logo_waitq) 11373 wake_up(ndlp->logo_waitq); 11374 spin_lock_irq(&ndlp->lock); 11375 ndlp->nlp_flag &= ~(NLP_ISSUE_LOGO | NLP_LOGO_SND); 11376 ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO; 11377 spin_unlock_irq(&ndlp->lock); 11378 } 11379 11380 /* Safe to release resources now. 
*/ 11381 lpfc_els_free_iocb(phba, cmdiocb); 11382 lpfc_nlp_put(ndlp); 11383 } 11384 11385 /** 11386 * lpfc_issue_els_npiv_logo - Issue a logo off a vport 11387 * @vport: pointer to a virtual N_Port data structure. 11388 * @ndlp: pointer to a node-list data structure. 11389 * 11390 * This routine issues a LOGO ELS command to an @ndlp off a @vport. 11391 * 11392 * Note that the ndlp reference count will be incremented by 1 for holding the 11393 * ndlp and the reference to ndlp will be stored into the context1 field of 11394 * the IOCB for the completion callback function to the LOGO ELS command. 11395 * 11396 * Return codes 11397 * 0 - Successfully issued logo off the @vport 11398 * 1 - Failed to issue logo off the @vport 11399 **/ 11400 int 11401 lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 11402 { 11403 int rc = 0; 11404 struct lpfc_hba *phba = vport->phba; 11405 struct lpfc_iocbq *elsiocb; 11406 uint8_t *pcmd; 11407 uint16_t cmdsize; 11408 11409 cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name); 11410 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID, 11411 ELS_CMD_LOGO); 11412 if (!elsiocb) 11413 return 1; 11414 11415 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 11416 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 11417 pcmd += sizeof(uint32_t); 11418 11419 /* Fill in LOGO payload */ 11420 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 11421 pcmd += sizeof(uint32_t); 11422 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 11423 11424 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 11425 "Issue LOGO npiv did:x%x flg:x%x", 11426 ndlp->nlp_DID, ndlp->nlp_flag, 0); 11427 11428 elsiocb->cmd_cmpl = lpfc_cmpl_els_npiv_logo; 11429 spin_lock_irq(&ndlp->lock); 11430 ndlp->nlp_flag |= NLP_LOGO_SND; 11431 spin_unlock_irq(&ndlp->lock); 11432 elsiocb->context1 = lpfc_nlp_get(ndlp); 11433 if (!elsiocb->context1) { 11434 lpfc_els_free_iocb(phba, elsiocb); 11435 goto err; 11436 } 11437 11438 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 11439 if (rc == IOCB_ERROR) { 11440 lpfc_els_free_iocb(phba, elsiocb); 11441 lpfc_nlp_put(ndlp); 11442 goto err; 11443 } 11444 return 0; 11445 11446 err: 11447 spin_lock_irq(&ndlp->lock); 11448 ndlp->nlp_flag &= ~NLP_LOGO_SND; 11449 spin_unlock_irq(&ndlp->lock); 11450 return 1; 11451 } 11452 11453 /** 11454 * lpfc_fabric_block_timeout - Handler function to the fabric block timer 11455 * @t: timer context used to obtain the lpfc hba. 11456 * 11457 * This routine is invoked by the fabric iocb block timer after 11458 * timeout. It posts the fabric iocb block timeout event by setting the 11459 * WORKER_FABRIC_BLOCK_TMO bit to work port event bitmap and then invokes 11460 * lpfc_worker_wake_up() routine to wake up the worker thread. It is for 11461 * the worker thread to invoke the lpfc_unblock_fabric_iocbs() on the 11462 * posted event WORKER_FABRIC_BLOCK_TMO. 
11463 **/ 11464 void 11465 lpfc_fabric_block_timeout(struct timer_list *t) 11466 { 11467 struct lpfc_hba *phba = from_timer(phba, t, fabric_block_timer); 11468 unsigned long iflags; 11469 uint32_t tmo_posted; 11470 11471 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 11472 tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO; 11473 if (!tmo_posted) 11474 phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO; 11475 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); 11476 11477 if (!tmo_posted) 11478 lpfc_worker_wake_up(phba); 11479 return; 11480 } 11481 11482 /** 11483 * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list 11484 * @phba: pointer to lpfc hba data structure. 11485 * 11486 * This routine issues one fabric iocb from the driver internal list to 11487 * the HBA. It first checks whether it's ready to issue one fabric iocb to 11488 * the HBA (whether there is no outstanding fabric iocb). If so, it shall 11489 * remove one pending fabric iocb from the driver internal list and invokes 11490 * lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA. 11491 **/ 11492 static void 11493 lpfc_resume_fabric_iocbs(struct lpfc_hba *phba) 11494 { 11495 struct lpfc_iocbq *iocb; 11496 unsigned long iflags; 11497 int ret; 11498 11499 repeat: 11500 iocb = NULL; 11501 spin_lock_irqsave(&phba->hbalock, iflags); 11502 /* Post any pending iocb to the SLI layer */ 11503 if (atomic_read(&phba->fabric_iocb_count) == 0) { 11504 list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb), 11505 list); 11506 if (iocb) 11507 /* Increment fabric iocb count to hold the position */ 11508 atomic_inc(&phba->fabric_iocb_count); 11509 } 11510 spin_unlock_irqrestore(&phba->hbalock, iflags); 11511 if (iocb) { 11512 iocb->fabric_cmd_cmpl = iocb->cmd_cmpl; 11513 iocb->cmd_cmpl = lpfc_cmpl_fabric_iocb; 11514 iocb->cmd_flag |= LPFC_IO_FABRIC; 11515 11516 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD, 11517 "Fabric sched1: ste:x%x", 11518 iocb->vport->port_state, 0, 0); 11519 11520 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); 11521 11522 if (ret == IOCB_ERROR) { 11523 iocb->cmd_cmpl = iocb->fabric_cmd_cmpl; 11524 iocb->fabric_cmd_cmpl = NULL; 11525 iocb->cmd_flag &= ~LPFC_IO_FABRIC; 11526 set_job_ulpstatus(iocb, IOSTAT_LOCAL_REJECT); 11527 iocb->wcqe_cmpl.parameter = IOERR_SLI_ABORTED; 11528 iocb->cmd_cmpl(phba, iocb, iocb); 11529 11530 atomic_dec(&phba->fabric_iocb_count); 11531 goto repeat; 11532 } 11533 } 11534 } 11535 11536 /** 11537 * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command 11538 * @phba: pointer to lpfc hba data structure. 11539 * 11540 * This routine unblocks the issuing fabric iocb command. The function 11541 * will clear the fabric iocb block bit and then invoke the routine 11542 * lpfc_resume_fabric_iocbs() to issue one of the pending fabric iocb 11543 * from the driver internal fabric iocb list. 11544 **/ 11545 void 11546 lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba) 11547 { 11548 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 11549 11550 lpfc_resume_fabric_iocbs(phba); 11551 return; 11552 } 11553 11554 /** 11555 * lpfc_block_fabric_iocbs - Block issuing fabric iocb command 11556 * @phba: pointer to lpfc hba data structure. 11557 * 11558 * This routine blocks the issuing fabric iocb for a specified amount of 11559 * time (currently 100 ms). This is done by set the fabric iocb block bit 11560 * and set up a timeout timer for 100ms. 
When the block bit is set, no more 11561 * fabric iocb will be issued out of the HBA. 11562 **/ 11563 static void 11564 lpfc_block_fabric_iocbs(struct lpfc_hba *phba) 11565 { 11566 int blocked; 11567 11568 blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 11569 /* Start a timer to unblock fabric iocbs after 100ms */ 11570 if (!blocked) 11571 mod_timer(&phba->fabric_block_timer, 11572 jiffies + msecs_to_jiffies(100)); 11573 11574 return; 11575 } 11576 11577 /** 11578 * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb 11579 * @phba: pointer to lpfc hba data structure. 11580 * @cmdiocb: pointer to lpfc command iocb data structure. 11581 * @rspiocb: pointer to lpfc response iocb data structure. 11582 * 11583 * This routine is the callback function that is put to the fabric iocb's 11584 * callback function pointer (iocb->cmd_cmpl). The original iocb's callback 11585 * function pointer has been stored in iocb->fabric_cmd_cmpl. This callback 11586 * function first restores and invokes the original iocb's callback function 11587 * and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next 11588 * fabric bound iocb from the driver internal fabric iocb list onto the wire. 11589 **/ 11590 static void 11591 lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11592 struct lpfc_iocbq *rspiocb) 11593 { 11594 struct ls_rjt stat; 11595 u32 ulp_status = get_job_ulpstatus(phba, rspiocb); 11596 u32 ulp_word4 = get_job_word4(phba, rspiocb); 11597 11598 WARN_ON((cmdiocb->cmd_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC); 11599 11600 switch (ulp_status) { 11601 case IOSTAT_NPORT_RJT: 11602 case IOSTAT_FABRIC_RJT: 11603 if (ulp_word4 & RJT_UNAVAIL_TEMP) 11604 lpfc_block_fabric_iocbs(phba); 11605 break; 11606 11607 case IOSTAT_NPORT_BSY: 11608 case IOSTAT_FABRIC_BSY: 11609 lpfc_block_fabric_iocbs(phba); 11610 break; 11611 11612 case IOSTAT_LS_RJT: 11613 stat.un.ls_rjt_error_be = 11614 cpu_to_be32(ulp_word4); 11615 if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) || 11616 (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY)) 11617 lpfc_block_fabric_iocbs(phba); 11618 break; 11619 } 11620 11621 BUG_ON(atomic_read(&phba->fabric_iocb_count) == 0); 11622 11623 cmdiocb->cmd_cmpl = cmdiocb->fabric_cmd_cmpl; 11624 cmdiocb->fabric_cmd_cmpl = NULL; 11625 cmdiocb->cmd_flag &= ~LPFC_IO_FABRIC; 11626 cmdiocb->cmd_cmpl(phba, cmdiocb, rspiocb); 11627 11628 atomic_dec(&phba->fabric_iocb_count); 11629 if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) { 11630 /* Post any pending iocbs to HBA */ 11631 lpfc_resume_fabric_iocbs(phba); 11632 } 11633 } 11634 11635 /** 11636 * lpfc_issue_fabric_iocb - Issue a fabric iocb command 11637 * @phba: pointer to lpfc hba data structure. 11638 * @iocb: pointer to lpfc command iocb data structure. 11639 * 11640 * This routine is used as the top-level API for issuing a fabric iocb command 11641 * such as FLOGI and FDISC. To accommodate certain switch fabric, this driver 11642 * function makes sure that only one fabric bound iocb will be outstanding at 11643 * any given time. As such, this function will first check to see whether there 11644 * is already an outstanding fabric iocb on the wire. If so, it will put the 11645 * newly issued iocb onto the driver internal fabric iocb list, waiting to be 11646 * issued later. Otherwise, it will issue the iocb on the wire and update the 11647 * fabric iocb count it indicate that there is one fabric iocb on the wire. 
11648 * 11649 * Note, this implementation can potentially send fabric IOCBs out of 11650 * order: the "ready" check does not include the condition that the 11651 * internal fabric IOCB list is empty. As such, an IOCB issued by this 11652 * routine might "jump" ahead of the fabric IOCBs already waiting on 11653 * the internal list. 11654 * 11655 * Return code 11656 * IOCB_SUCCESS - either fabric iocb put on the list or issued successfully 11657 * IOCB_ERROR - failed to issue fabric iocb 11658 **/ 11659 static int 11660 lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb) 11661 { 11662 unsigned long iflags; 11663 int ready; 11664 int ret; 11665 11666 BUG_ON(atomic_read(&phba->fabric_iocb_count) > 1); 11667 11668 spin_lock_irqsave(&phba->hbalock, iflags); 11669 ready = atomic_read(&phba->fabric_iocb_count) == 0 && 11670 !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 11671 11672 if (ready) 11673 /* Increment fabric iocb count to hold the position */ 11674 atomic_inc(&phba->fabric_iocb_count); 11675 spin_unlock_irqrestore(&phba->hbalock, iflags); 11676 if (ready) { 11677 iocb->fabric_cmd_cmpl = iocb->cmd_cmpl; 11678 iocb->cmd_cmpl = lpfc_cmpl_fabric_iocb; 11679 iocb->cmd_flag |= LPFC_IO_FABRIC; 11680 11681 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD, 11682 "Fabric sched2: ste:x%x", 11683 iocb->vport->port_state, 0, 0); 11684 11685 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); 11686 11687 if (ret == IOCB_ERROR) { 11688 iocb->cmd_cmpl = iocb->fabric_cmd_cmpl; 11689 iocb->fabric_cmd_cmpl = NULL; 11690 iocb->cmd_flag &= ~LPFC_IO_FABRIC; 11691 atomic_dec(&phba->fabric_iocb_count); 11692 } 11693 } else { 11694 spin_lock_irqsave(&phba->hbalock, iflags); 11695 list_add_tail(&iocb->list, &phba->fabric_iocb_list); 11696 spin_unlock_irqrestore(&phba->hbalock, iflags); 11697 ret = IOCB_SUCCESS; 11698 } 11699 return ret; 11700 } 11701 11702 /** 11703 * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list 11704 * @vport: pointer to a virtual N_Port data structure. 11705 * 11706 * This routine aborts all the IOCBs associated with a @vport from the 11707 * driver internal fabric IOCB list. The list contains fabric IOCBs to be 11708 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB 11709 * list, removes each IOCB associated with the @vport off the list, sets the 11710 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function 11711 * associated with the IOCB. 11712 **/ 11713 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport) 11714 { 11715 LIST_HEAD(completions); 11716 struct lpfc_hba *phba = vport->phba; 11717 struct lpfc_iocbq *tmp_iocb, *piocb; 11718 11719 spin_lock_irq(&phba->hbalock); 11720 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, 11721 list) { 11722 11723 if (piocb->vport != vport) 11724 continue; 11725 11726 list_move_tail(&piocb->list, &completions); 11727 } 11728 spin_unlock_irq(&phba->hbalock); 11729 11730 /* Cancel all the IOCBs from the completions list */ 11731 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 11732 IOERR_SLI_ABORTED); 11733 } 11734 11735 /** 11736 * lpfc_fabric_abort_nport - Abort an ndlp's iocbs from driver fabric iocb list 11737 * @ndlp: pointer to a node-list data structure. 11738 * 11739 * This routine aborts all the IOCBs associated with an @ndlp from the 11740 * driver internal fabric IOCB list.
The list contains fabric IOCBs to be 11741 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB 11742 * list, removes each IOCB associated with the @ndlp off the list, set the 11743 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function 11744 * associated with the IOCB. 11745 **/ 11746 void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp) 11747 { 11748 LIST_HEAD(completions); 11749 struct lpfc_hba *phba = ndlp->phba; 11750 struct lpfc_iocbq *tmp_iocb, *piocb; 11751 struct lpfc_sli_ring *pring; 11752 11753 pring = lpfc_phba_elsring(phba); 11754 11755 if (unlikely(!pring)) 11756 return; 11757 11758 spin_lock_irq(&phba->hbalock); 11759 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, 11760 list) { 11761 if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) { 11762 11763 list_move_tail(&piocb->list, &completions); 11764 } 11765 } 11766 spin_unlock_irq(&phba->hbalock); 11767 11768 /* Cancel all the IOCBs from the completions list */ 11769 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 11770 IOERR_SLI_ABORTED); 11771 } 11772 11773 /** 11774 * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list 11775 * @phba: pointer to lpfc hba data structure. 11776 * 11777 * This routine aborts all the IOCBs currently on the driver internal 11778 * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS 11779 * IOCB ring. This function takes the entire IOCB list off the fabric IOCB 11780 * list, removes IOCBs off the list, set the status field to 11781 * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with 11782 * the IOCB. 11783 **/ 11784 void lpfc_fabric_abort_hba(struct lpfc_hba *phba) 11785 { 11786 LIST_HEAD(completions); 11787 11788 spin_lock_irq(&phba->hbalock); 11789 list_splice_init(&phba->fabric_iocb_list, &completions); 11790 spin_unlock_irq(&phba->hbalock); 11791 11792 /* Cancel all the IOCBs from the completions list */ 11793 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 11794 IOERR_SLI_ABORTED); 11795 } 11796 11797 /** 11798 * lpfc_sli4_vport_delete_els_xri_aborted -Remove all ndlp references for vport 11799 * @vport: pointer to lpfc vport data structure. 11800 * 11801 * This routine is invoked by the vport cleanup for deletions and the cleanup 11802 * for an ndlp on removal. 11803 **/ 11804 void 11805 lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport) 11806 { 11807 struct lpfc_hba *phba = vport->phba; 11808 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 11809 struct lpfc_nodelist *ndlp = NULL; 11810 unsigned long iflag = 0; 11811 11812 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag); 11813 list_for_each_entry_safe(sglq_entry, sglq_next, 11814 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { 11815 if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport) { 11816 lpfc_nlp_put(sglq_entry->ndlp); 11817 ndlp = sglq_entry->ndlp; 11818 sglq_entry->ndlp = NULL; 11819 11820 /* If the xri on the abts_els_sgl list is for the Fport 11821 * node and the vport is unloading, the xri aborted wcqe 11822 * likely isn't coming back. Just release the sgl. 
11823 */ 11824 if ((vport->load_flag & FC_UNLOADING) && 11825 ndlp->nlp_DID == Fabric_DID) { 11826 list_del(&sglq_entry->list); 11827 sglq_entry->state = SGL_FREED; 11828 list_add_tail(&sglq_entry->list, 11829 &phba->sli4_hba.lpfc_els_sgl_list); 11830 } 11831 } 11832 } 11833 spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag); 11834 return; 11835 } 11836 11837 /** 11838 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort 11839 * @phba: pointer to lpfc hba data structure. 11840 * @axri: pointer to the els xri abort wcqe structure. 11841 * 11842 * This routine is invoked by the worker thread to process a SLI4 slow-path 11843 * ELS aborted xri. 11844 **/ 11845 void 11846 lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba, 11847 struct sli4_wcqe_xri_aborted *axri) 11848 { 11849 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); 11850 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); 11851 uint16_t lxri = 0; 11852 11853 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 11854 unsigned long iflag = 0; 11855 struct lpfc_nodelist *ndlp; 11856 struct lpfc_sli_ring *pring; 11857 11858 pring = lpfc_phba_elsring(phba); 11859 11860 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag); 11861 list_for_each_entry_safe(sglq_entry, sglq_next, 11862 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { 11863 if (sglq_entry->sli4_xritag == xri) { 11864 list_del(&sglq_entry->list); 11865 ndlp = sglq_entry->ndlp; 11866 sglq_entry->ndlp = NULL; 11867 list_add_tail(&sglq_entry->list, 11868 &phba->sli4_hba.lpfc_els_sgl_list); 11869 sglq_entry->state = SGL_FREED; 11870 spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, 11871 iflag); 11872 11873 if (ndlp) { 11874 lpfc_set_rrq_active(phba, ndlp, 11875 sglq_entry->sli4_lxritag, 11876 rxid, 1); 11877 lpfc_nlp_put(ndlp); 11878 } 11879 11880 /* Check if TXQ queue needs to be serviced */ 11881 if (pring && !list_empty(&pring->txq)) 11882 lpfc_worker_wake_up(phba); 11883 return; 11884 } 11885 } 11886 spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag); 11887 lxri = lpfc_sli4_xri_inrange(phba, xri); 11888 if (lxri == NO_XRI) 11889 return; 11890 11891 spin_lock_irqsave(&phba->hbalock, iflag); 11892 sglq_entry = __lpfc_get_active_sglq(phba, lxri); 11893 if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) { 11894 spin_unlock_irqrestore(&phba->hbalock, iflag); 11895 return; 11896 } 11897 sglq_entry->state = SGL_XRI_ABORTED; 11898 spin_unlock_irqrestore(&phba->hbalock, iflag); 11899 return; 11900 } 11901 11902 /* lpfc_sli_abts_recover_port - Recover a port that failed a BLS_ABORT req. 11903 * @vport: pointer to virtual port object. 11904 * @ndlp: nodelist pointer for the impacted node. 11905 * 11906 * The driver calls this routine in response to an SLI4 XRI ABORT CQE 11907 * or an SLI3 ASYNC_STATUS_CN event from the port. For either event, 11908 * the driver is required to send a LOGO to the remote node before it 11909 * attempts to recover its login to the remote node. 11910 */ 11911 void 11912 lpfc_sli_abts_recover_port(struct lpfc_vport *vport, 11913 struct lpfc_nodelist *ndlp) 11914 { 11915 struct Scsi_Host *shost; 11916 struct lpfc_hba *phba; 11917 unsigned long flags = 0; 11918 11919 shost = lpfc_shost_from_vport(vport); 11920 phba = vport->phba; 11921 if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) { 11922 lpfc_printf_log(phba, KERN_INFO, 11923 LOG_SLI, "3093 No rport recovery needed. 
" 11924 "rport in state 0x%x\n", ndlp->nlp_state); 11925 return; 11926 } 11927 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11928 "3094 Start rport recovery on shost id 0x%x " 11929 "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x " 11930 "flags 0x%x\n", 11931 shost->host_no, ndlp->nlp_DID, 11932 vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state, 11933 ndlp->nlp_flag); 11934 /* 11935 * The rport is not responding. Remove the FCP-2 flag to prevent 11936 * an ADISC in the follow-up recovery code. 11937 */ 11938 spin_lock_irqsave(&ndlp->lock, flags); 11939 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 11940 ndlp->nlp_flag |= NLP_ISSUE_LOGO; 11941 spin_unlock_irqrestore(&ndlp->lock, flags); 11942 lpfc_unreg_rpi(vport, ndlp); 11943 } 11944 11945 static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport) 11946 { 11947 bitmap_zero(vport->vmid_priority_range, LPFC_VMID_MAX_PRIORITY_RANGE); 11948 } 11949 11950 static void 11951 lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max) 11952 { 11953 u32 i; 11954 11955 if ((min > max) || (max > LPFC_VMID_MAX_PRIORITY_RANGE)) 11956 return; 11957 11958 for (i = min; i <= max; i++) 11959 set_bit(i, vport->vmid_priority_range); 11960 } 11961 11962 static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid) 11963 { 11964 set_bit(ctcl_vmid, vport->vmid_priority_range); 11965 } 11966 11967 u32 lpfc_vmid_get_cs_ctl(struct lpfc_vport *vport) 11968 { 11969 u32 i; 11970 11971 i = find_first_bit(vport->vmid_priority_range, 11972 LPFC_VMID_MAX_PRIORITY_RANGE); 11973 11974 if (i == LPFC_VMID_MAX_PRIORITY_RANGE) 11975 return 0; 11976 11977 clear_bit(i, vport->vmid_priority_range); 11978 return i; 11979 } 11980 11981 #define MAX_PRIORITY_DESC 255 11982 11983 static void 11984 lpfc_cmpl_els_qfpa(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11985 struct lpfc_iocbq *rspiocb) 11986 { 11987 struct lpfc_vport *vport = cmdiocb->vport; 11988 struct priority_range_desc *desc; 11989 struct lpfc_dmabuf *prsp = NULL; 11990 struct lpfc_vmid_priority_range *vmid_range = NULL; 11991 u32 *data; 11992 struct lpfc_dmabuf *dmabuf = cmdiocb->context2; 11993 u32 ulp_status = get_job_ulpstatus(phba, rspiocb); 11994 u32 ulp_word4 = get_job_word4(phba, rspiocb); 11995 u8 *pcmd, max_desc; 11996 u32 len, i; 11997 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)cmdiocb->context1; 11998 11999 prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list); 12000 if (!prsp) 12001 goto out; 12002 12003 pcmd = prsp->virt; 12004 data = (u32 *)pcmd; 12005 if (data[0] == ELS_CMD_LS_RJT) { 12006 lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI, 12007 "3277 QFPA LS_RJT x%x x%x\n", 12008 data[0], data[1]); 12009 goto out; 12010 } 12011 if (ulp_status) { 12012 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI, 12013 "6529 QFPA failed with status x%x x%x\n", 12014 ulp_status, ulp_word4); 12015 goto out; 12016 } 12017 12018 if (!vport->qfpa_res) { 12019 max_desc = FCELSSIZE / sizeof(*vport->qfpa_res); 12020 vport->qfpa_res = kcalloc(max_desc, sizeof(*vport->qfpa_res), 12021 GFP_KERNEL); 12022 if (!vport->qfpa_res) 12023 goto out; 12024 } 12025 12026 len = *((u32 *)(pcmd + 4)); 12027 len = be32_to_cpu(len); 12028 memcpy(vport->qfpa_res, pcmd, len + 8); 12029 len = len / LPFC_PRIORITY_RANGE_DESC_SIZE; 12030 12031 desc = (struct priority_range_desc *)(pcmd + 8); 12032 vmid_range = vport->vmid_priority.vmid_range; 12033 if (!vmid_range) { 12034 vmid_range = kcalloc(MAX_PRIORITY_DESC, sizeof(*vmid_range), 12035 GFP_KERNEL); 12036 if (!vmid_range) { 12037 kfree(vport->qfpa_res); 12038 goto out; 
/**
 * lpfc_cmpl_els_qfpa - Completion callback for the QFPA ELS command
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * Parses the priority-range descriptors returned by the fabric, caches them
 * in the vport's vmid_priority structure, and seeds the CS_CTL priority
 * bitmap from the returned ranges.
 **/
static void
lpfc_cmpl_els_qfpa(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		   struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct priority_range_desc *desc;
	struct lpfc_dmabuf *prsp = NULL;
	struct lpfc_vmid_priority_range *vmid_range = NULL;
	u32 *data;
	struct lpfc_dmabuf *dmabuf = cmdiocb->context2;
	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
	u32 ulp_word4 = get_job_word4(phba, rspiocb);
	u8 *pcmd, max_desc;
	u32 len, i;
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)cmdiocb->context1;

	prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list);
	if (!prsp)
		goto out;

	pcmd = prsp->virt;
	data = (u32 *)pcmd;
	if (data[0] == ELS_CMD_LS_RJT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
				 "3277 QFPA LS_RJT x%x x%x\n",
				 data[0], data[1]);
		goto out;
	}
	if (ulp_status) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
				 "6529 QFPA failed with status x%x x%x\n",
				 ulp_status, ulp_word4);
		goto out;
	}

	if (!vport->qfpa_res) {
		max_desc = FCELSSIZE / sizeof(*vport->qfpa_res);
		vport->qfpa_res = kcalloc(max_desc, sizeof(*vport->qfpa_res),
					  GFP_KERNEL);
		if (!vport->qfpa_res)
			goto out;
	}

	len = *((u32 *)(pcmd + 4));
	len = be32_to_cpu(len);
	memcpy(vport->qfpa_res, pcmd, len + 8);
	len = len / LPFC_PRIORITY_RANGE_DESC_SIZE;

	desc = (struct priority_range_desc *)(pcmd + 8);
	vmid_range = vport->vmid_priority.vmid_range;
	if (!vmid_range) {
		vmid_range = kcalloc(MAX_PRIORITY_DESC, sizeof(*vmid_range),
				     GFP_KERNEL);
		if (!vmid_range) {
			kfree(vport->qfpa_res);
			vport->qfpa_res = NULL;
			goto out;
		}
		vport->vmid_priority.vmid_range = vmid_range;
	}
	vport->vmid_priority.num_descriptors = len;

	for (i = 0; i < len; i++, vmid_range++, desc++) {
		lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS,
				 "6539 vmid values low=%d, high=%d, qos=%d, "
				 "local ve id=%d\n", desc->lo_range,
				 desc->hi_range, desc->qos_priority,
				 desc->local_ve_id);

		vmid_range->low = desc->lo_range << 1;
		if (desc->local_ve_id == QFPA_ODD_ONLY)
			vmid_range->low++;
		if (desc->qos_priority)
			vport->vmid_flag |= LPFC_VMID_QOS_ENABLED;
		vmid_range->qos = desc->qos_priority;

		vmid_range->high = desc->hi_range << 1;
		if ((desc->local_ve_id == QFPA_ODD_ONLY) ||
		    (desc->local_ve_id == QFPA_EVEN_ODD))
			vmid_range->high++;
	}
	lpfc_init_cs_ctl_bitmap(vport);
	for (i = 0; i < vport->vmid_priority.num_descriptors; i++) {
		lpfc_vmid_set_cs_ctl_range(vport,
				vport->vmid_priority.vmid_range[i].low,
				vport->vmid_priority.vmid_range[i].high);
	}

	vport->vmid_flag |= LPFC_VMID_QFPA_CMPL;
out:
	lpfc_els_free_iocb(phba, cmdiocb);
	lpfc_nlp_put(ndlp);
}

/**
 * lpfc_issue_els_qfpa - Issue a QFPA ELS command to the fabric controller
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * Builds and issues a Query Fabric Priority Allocation (QFPA) ELS command
 * to the fabric to learn the CS_CTL priority ranges available for VMID use.
 *
 * Return codes
 *   0 - successfully issued the QFPA command
 *   negative errno - failed to issue the command
 **/
int lpfc_issue_els_qfpa(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *elsiocb;
	u8 *pcmd;
	int ret;

	ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
	if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
		return -ENXIO;

	elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_QFPA_SIZE, 2, ndlp,
				     ndlp->nlp_DID, ELS_CMD_QFPA);
	if (!elsiocb)
		return -ENOMEM;

	pcmd = (u8 *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);

	*((u32 *)(pcmd)) = ELS_CMD_QFPA;
	pcmd += 4;

	elsiocb->cmd_cmpl = lpfc_cmpl_els_qfpa;

	elsiocb->context1 = lpfc_nlp_get(ndlp);
	if (!elsiocb->context1) {
		lpfc_els_free_iocb(vport->phba, elsiocb);
		return -ENXIO;
	}

	ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 2);
	if (ret != IOCB_SUCCESS) {
		lpfc_els_free_iocb(phba, elsiocb);
		lpfc_nlp_put(ndlp);
		return -EIO;
	}
	vport->vmid_flag &= ~LPFC_VMID_QOS_ENABLED;
	return 0;
}

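/*
 * Illustrative sketch (not driver code): lpfc_issue_els_qfpa() above and
 * lpfc_vmid_uvem() below follow the usual lpfc ELS ownership pattern: prepare
 * the iocb, take a node reference for the completion path, and undo both if
 * the issue fails (size, retry, did, cmd and cmpl_fn are placeholder names):
 *
 *	elsiocb = lpfc_prep_els_iocb(vport, 1, size, retry, ndlp, did, cmd);
 *	elsiocb->cmd_cmpl = cmpl_fn;
 *	elsiocb->context1 = lpfc_nlp_get(ndlp);
 *	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) != IOCB_SUCCESS) {
 *		lpfc_els_free_iocb(phba, elsiocb);
 *		lpfc_nlp_put(ndlp);
 *	}
 *
 * On success the completion handler is responsible for freeing the iocb and
 * dropping the node reference.
 */
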
/**
 * lpfc_vmid_uvem - Issue a UVEM ELS command to update a VE mapping
 * @vport: pointer to a host virtual N_Port data structure.
 * @vmid: pointer to the VMID entry being instantiated or deinstantiated.
 * @instantiated: true to instantiate the mapping, false to remove it.
 *
 * Builds and issues a UVEM ELS command to the fabric controller carrying the
 * host VEM identifier and the instantiated (or deinstantiated) VE descriptor
 * for @vmid.
 *
 * Return codes
 *   0 - successfully issued the UVEM command
 *   negative errno - failed to issue the command
 **/
int
lpfc_vmid_uvem(struct lpfc_vport *vport,
	       struct lpfc_vmid *vmid, bool instantiated)
{
	struct lpfc_vem_id_desc *vem_id_desc;
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *elsiocb;
	struct instantiated_ve_desc *inst_desc;
	struct lpfc_vmid_context *vmid_context;
	u8 *pcmd;
	u32 *len;
	int ret = 0;

	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
		return -ENXIO;

	vmid_context = kmalloc(sizeof(*vmid_context), GFP_KERNEL);
	if (!vmid_context)
		return -ENOMEM;
	elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_UVEM_SIZE, 2,
				     ndlp, Fabric_DID, ELS_CMD_UVEM);
	if (!elsiocb)
		goto out;

	lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS,
			 "3427 Host vmid %s %d\n",
			 vmid->host_vmid, instantiated);
	vmid_context->vmp = vmid;
	vmid_context->nlp = ndlp;
	vmid_context->instantiated = instantiated;
	elsiocb->vmid_tag.vmid_context = vmid_context;
	pcmd = (u8 *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);

	if (uuid_is_null((uuid_t *)vport->lpfc_vmid_host_uuid))
		memcpy(vport->lpfc_vmid_host_uuid, vmid->host_vmid,
		       LPFC_COMPRESS_VMID_SIZE);

	*((u32 *)(pcmd)) = ELS_CMD_UVEM;
	len = (u32 *)(pcmd + 4);
	*len = cpu_to_be32(LPFC_UVEM_SIZE - 8);

	vem_id_desc = (struct lpfc_vem_id_desc *)(pcmd + 8);
	vem_id_desc->tag = be32_to_cpu(VEM_ID_DESC_TAG);
	vem_id_desc->length = be32_to_cpu(LPFC_UVEM_VEM_ID_DESC_SIZE);
	memcpy(vem_id_desc->vem_id, vport->lpfc_vmid_host_uuid,
	       LPFC_COMPRESS_VMID_SIZE);

	inst_desc = (struct instantiated_ve_desc *)(pcmd + 32);
	inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG);
	inst_desc->length = be32_to_cpu(LPFC_UVEM_VE_MAP_DESC_SIZE);
	memcpy(inst_desc->global_vem_id, vmid->host_vmid,
	       LPFC_COMPRESS_VMID_SIZE);

	bf_set(lpfc_instantiated_nport_id, inst_desc, vport->fc_myDID);
	bf_set(lpfc_instantiated_local_id, inst_desc,
	       vmid->un.cs_ctl_vmid);
	if (instantiated) {
		inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG);
	} else {
		inst_desc->tag = be32_to_cpu(DEINSTANTIATED_VE_DESC_TAG);
		lpfc_vmid_put_cs_ctl(vport, vmid->un.cs_ctl_vmid);
	}
	inst_desc->word6 = cpu_to_be32(inst_desc->word6);

	elsiocb->cmd_cmpl = lpfc_cmpl_els_uvem;

	elsiocb->context1 = lpfc_nlp_get(ndlp);
	if (!elsiocb->context1) {
		lpfc_els_free_iocb(vport->phba, elsiocb);
		goto out;
	}

	ret = lpfc_sli_issue_iocb(vport->phba, LPFC_ELS_RING, elsiocb, 0);
	if (ret != IOCB_SUCCESS) {
		lpfc_els_free_iocb(vport->phba, elsiocb);
		lpfc_nlp_put(ndlp);
		goto out;
	}

	return 0;
out:
	kfree(vmid_context);
	return -EIO;
}

/**
 * lpfc_cmpl_els_uvem - Completion callback for the UVEM ELS command
 * @phba: pointer to lpfc hba data structure.
 * @icmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * On a successful UVEM exchange, marks VMID support as in use on the vport
 * and, for an instantiation, flags the VMID entry as registered.
 **/
static void
lpfc_cmpl_els_uvem(struct lpfc_hba *phba, struct lpfc_iocbq *icmdiocb,
		   struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = icmdiocb->vport;
	struct lpfc_dmabuf *prsp = NULL;
	struct lpfc_vmid_context *vmid_context =
	    icmdiocb->vmid_tag.vmid_context;
	struct lpfc_nodelist *ndlp = icmdiocb->context1;
	u8 *pcmd;
	u32 *data;
	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
	u32 ulp_word4 = get_job_word4(phba, rspiocb);
	struct lpfc_dmabuf *dmabuf = icmdiocb->context2;
	struct lpfc_vmid *vmid;

	vmid = vmid_context->vmp;
	if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
		ndlp = NULL;

	prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list);
	if (!prsp)
		goto out;
	pcmd = prsp->virt;
	data = (u32 *)pcmd;
	if (data[0] == ELS_CMD_LS_RJT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
				 "4532 UVEM LS_RJT %x %x\n", data[0], data[1]);
		goto out;
	}
	if (ulp_status) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
				 "4533 UVEM error status %x: %x\n",
				 ulp_status, ulp_word4);
		goto out;
	}
	spin_lock(&phba->hbalock);
	/* Set IN USE flag */
	vport->vmid_flag |= LPFC_VMID_IN_USE;
	phba->pport->vmid_flag |= LPFC_VMID_IN_USE;
	spin_unlock(&phba->hbalock);

	if (vmid_context->instantiated) {
		write_lock(&vport->vmid_lock);
		vmid->flag |= LPFC_VMID_REGISTERED;
		vmid->flag &= ~LPFC_VMID_REQ_REGISTER;
		write_unlock(&vport->vmid_lock);
	}

out:
	kfree(vmid_context);
	lpfc_els_free_iocb(phba, icmdiocb);
	lpfc_nlp_put(ndlp);
}

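/*
 * Illustrative sketch (not driver code): after lpfc_cmpl_els_uvem() marks a
 * VMID as registered, a hypothetical consumer would test that state under
 * the reader side of the vport's vmid_lock before using the CS_CTL value:
 *
 *	read_lock(&vport->vmid_lock);
 *	registered = !!(vmid->flag & LPFC_VMID_REGISTERED);
 *	read_unlock(&vport->vmid_lock);
 */
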