1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term * 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 * EMULEX and SLI are trademarks of Emulex. * 8 * www.broadcom.com * 9 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 10 * * 11 * This program is free software; you can redistribute it and/or * 12 * modify it under the terms of version 2 of the GNU General * 13 * Public License as published by the Free Software Foundation. * 14 * This program is distributed in the hope that it will be useful. * 15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 19 * TO BE LEGALLY INVALID. See the GNU General Public License for * 20 * more details, a copy of which can be found in the file COPYING * 21 * included with this package. * 22 *******************************************************************/ 23 /* See Fibre Channel protocol T11 FC-LS for details */ 24 #include <linux/blkdev.h> 25 #include <linux/pci.h> 26 #include <linux/slab.h> 27 #include <linux/interrupt.h> 28 #include <linux/delay.h> 29 30 #include <scsi/scsi.h> 31 #include <scsi/scsi_device.h> 32 #include <scsi/scsi_host.h> 33 #include <scsi/scsi_transport_fc.h> 34 #include <uapi/scsi/fc/fc_fs.h> 35 #include <uapi/scsi/fc/fc_els.h> 36 37 #include "lpfc_hw4.h" 38 #include "lpfc_hw.h" 39 #include "lpfc_sli.h" 40 #include "lpfc_sli4.h" 41 #include "lpfc_nl.h" 42 #include "lpfc_disc.h" 43 #include "lpfc_scsi.h" 44 #include "lpfc.h" 45 #include "lpfc_logmsg.h" 46 #include "lpfc_crtn.h" 47 #include "lpfc_vport.h" 48 #include "lpfc_debugfs.h" 49 50 static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *, 51 struct lpfc_iocbq *); 52 static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *, 53 struct lpfc_iocbq *); 54 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport); 55 static int lpfc_issue_els_fdisc(struct lpfc_vport *vport, 56 struct lpfc_nodelist *ndlp, uint8_t retry); 57 static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba, 58 struct lpfc_iocbq *iocb); 59 static void lpfc_cmpl_els_edc(struct lpfc_hba *phba, 60 struct lpfc_iocbq *cmdiocb, 61 struct lpfc_iocbq *rspiocb); 62 static void lpfc_cmpl_els_uvem(struct lpfc_hba *, struct lpfc_iocbq *, 63 struct lpfc_iocbq *); 64 65 static int lpfc_max_els_tries = 3; 66 67 static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport); 68 static void lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max); 69 static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid); 70 71 /** 72 * lpfc_els_chk_latt - Check host link attention event for a vport 73 * @vport: pointer to a host virtual N_Port data structure. 74 * 75 * This routine checks whether there is an outstanding host link 76 * attention event during the discovery process with the @vport. It is done 77 * by reading the HBA's Host Attention (HA) register. 
If there is any host 78 * link attention events during this @vport's discovery process, the @vport 79 * shall be marked as FC_ABORT_DISCOVERY, a host link attention clear shall 80 * be issued if the link state is not already in host link cleared state, 81 * and a return code shall indicate whether the host link attention event 82 * had happened. 83 * 84 * Note that, if either the host link is in state LPFC_LINK_DOWN or @vport 85 * state in LPFC_VPORT_READY, the request for checking host link attention 86 * event will be ignored and a return code shall indicate no host link 87 * attention event had happened. 88 * 89 * Return codes 90 * 0 - no host link attention event happened 91 * 1 - host link attention event happened 92 **/ 93 int 94 lpfc_els_chk_latt(struct lpfc_vport *vport) 95 { 96 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 97 struct lpfc_hba *phba = vport->phba; 98 uint32_t ha_copy; 99 100 if (vport->port_state >= LPFC_VPORT_READY || 101 phba->link_state == LPFC_LINK_DOWN || 102 phba->sli_rev > LPFC_SLI_REV3) 103 return 0; 104 105 /* Read the HBA Host Attention Register */ 106 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 107 return 1; 108 109 if (!(ha_copy & HA_LATT)) 110 return 0; 111 112 /* Pending Link Event during Discovery */ 113 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 114 "0237 Pending Link Event during " 115 "Discovery: State x%x\n", 116 phba->pport->port_state); 117 118 /* CLEAR_LA should re-enable link attention events and 119 * we should then immediately take a LATT event. The 120 * LATT processing should call lpfc_linkdown() which 121 * will cleanup any left over in-progress discovery 122 * events. 123 */ 124 spin_lock_irq(shost->host_lock); 125 vport->fc_flag |= FC_ABORT_DISCOVERY; 126 spin_unlock_irq(shost->host_lock); 127 128 if (phba->link_state != LPFC_CLEAR_LA) 129 lpfc_issue_clear_la(phba, vport); 130 131 return 1; 132 } 133 134 /** 135 * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure 136 * @vport: pointer to a host virtual N_Port data structure. 137 * @expect_rsp: flag indicating whether response is expected. 138 * @cmd_size: size of the ELS command. 139 * @retry: number of retries to the command when it fails. 140 * @ndlp: pointer to a node-list data structure. 141 * @did: destination identifier. 142 * @elscmd: the ELS command code. 143 * 144 * This routine is used for allocating a lpfc-IOCB data structure from 145 * the driver lpfc-IOCB free-list and prepare the IOCB with the parameters 146 * passed into the routine for discovery state machine to issue an Extended 147 * Link Service (ELS) commands. It is a generic lpfc-IOCB allocation 148 * and preparation routine that is used by all the discovery state machine 149 * routines and the ELS command-specific fields will be later set up by 150 * the individual discovery machine routines after calling this routine 151 * allocating and preparing a generic IOCB data structure. It fills in the 152 * Buffer Descriptor Entries (BDEs), allocates buffers for both command 153 * payload and response payload (if expected). The reference count on the 154 * ndlp is incremented by 1 and the reference to the ndlp is put into 155 * ndlp of the IOCB data structure for this IOCB to hold the ndlp 156 * reference for the command's callback function to access later. 
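 *
 * For a command that expects a response, the buffer pointer list built
 * below ends up with two 64-bit BDEs, roughly:
 *
 *   bpl[0] -> command payload   (cmd_size bytes)
 *   bpl[1] -> response payload  (FCELSSIZE bytes)
 *
 * When @expect_rsp is zero (the iocb carries an ELS response rather than
 * a request), only the single command BDE is used.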
157 * 158 * Return code 159 * Pointer to the newly allocated/prepared els iocb data structure 160 * NULL - when els iocb data structure allocation/preparation failed 161 **/ 162 struct lpfc_iocbq * 163 lpfc_prep_els_iocb(struct lpfc_vport *vport, u8 expect_rsp, 164 u16 cmd_size, u8 retry, 165 struct lpfc_nodelist *ndlp, u32 did, 166 u32 elscmd) 167 { 168 struct lpfc_hba *phba = vport->phba; 169 struct lpfc_iocbq *elsiocb; 170 struct lpfc_dmabuf *pcmd, *prsp, *pbuflist, *bmp; 171 struct ulp_bde64_le *bpl; 172 u32 timeout = 0; 173 174 if (!lpfc_is_link_up(phba)) 175 return NULL; 176 177 /* Allocate buffer for command iocb */ 178 elsiocb = lpfc_sli_get_iocbq(phba); 179 if (!elsiocb) 180 return NULL; 181 182 /* 183 * If this command is for fabric controller and HBA running 184 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames. 185 */ 186 if ((did == Fabric_DID) && 187 (phba->hba_flag & HBA_FIP_SUPPORT) && 188 ((elscmd == ELS_CMD_FLOGI) || 189 (elscmd == ELS_CMD_FDISC) || 190 (elscmd == ELS_CMD_LOGO))) 191 switch (elscmd) { 192 case ELS_CMD_FLOGI: 193 elsiocb->cmd_flag |= 194 ((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT) 195 & LPFC_FIP_ELS_ID_MASK); 196 break; 197 case ELS_CMD_FDISC: 198 elsiocb->cmd_flag |= 199 ((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT) 200 & LPFC_FIP_ELS_ID_MASK); 201 break; 202 case ELS_CMD_LOGO: 203 elsiocb->cmd_flag |= 204 ((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT) 205 & LPFC_FIP_ELS_ID_MASK); 206 break; 207 } 208 else 209 elsiocb->cmd_flag &= ~LPFC_FIP_ELS_ID_MASK; 210 211 /* fill in BDEs for command */ 212 /* Allocate buffer for command payload */ 213 pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL); 214 if (pcmd) 215 pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys); 216 if (!pcmd || !pcmd->virt) 217 goto els_iocb_free_pcmb_exit; 218 219 INIT_LIST_HEAD(&pcmd->list); 220 221 /* Allocate buffer for response payload */ 222 if (expect_rsp) { 223 prsp = kmalloc(sizeof(*prsp), GFP_KERNEL); 224 if (prsp) 225 prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 226 &prsp->phys); 227 if (!prsp || !prsp->virt) 228 goto els_iocb_free_prsp_exit; 229 INIT_LIST_HEAD(&prsp->list); 230 } else { 231 prsp = NULL; 232 } 233 234 /* Allocate buffer for Buffer ptr list */ 235 pbuflist = kmalloc(sizeof(*pbuflist), GFP_KERNEL); 236 if (pbuflist) 237 pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 238 &pbuflist->phys); 239 if (!pbuflist || !pbuflist->virt) 240 goto els_iocb_free_pbuf_exit; 241 242 INIT_LIST_HEAD(&pbuflist->list); 243 244 if (expect_rsp) { 245 switch (elscmd) { 246 case ELS_CMD_FLOGI: 247 timeout = FF_DEF_RATOV * 2; 248 break; 249 case ELS_CMD_LOGO: 250 timeout = phba->fc_ratov; 251 break; 252 default: 253 timeout = phba->fc_ratov * 2; 254 } 255 256 /* Fill SGE for the num bde count */ 257 elsiocb->num_bdes = 2; 258 } 259 260 if (phba->sli_rev == LPFC_SLI_REV4) 261 bmp = pcmd; 262 else 263 bmp = pbuflist; 264 265 lpfc_sli_prep_els_req_rsp(phba, elsiocb, vport, bmp, cmd_size, did, 266 elscmd, timeout, expect_rsp); 267 268 bpl = (struct ulp_bde64_le *)pbuflist->virt; 269 bpl->addr_low = cpu_to_le32(putPaddrLow(pcmd->phys)); 270 bpl->addr_high = cpu_to_le32(putPaddrHigh(pcmd->phys)); 271 bpl->type_size = cpu_to_le32(cmd_size); 272 bpl->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64); 273 274 if (expect_rsp) { 275 bpl++; 276 bpl->addr_low = cpu_to_le32(putPaddrLow(prsp->phys)); 277 bpl->addr_high = cpu_to_le32(putPaddrHigh(prsp->phys)); 278 bpl->type_size = cpu_to_le32(FCELSSIZE); 279 bpl->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64); 280 } 281 282 elsiocb->cmd_dmabuf = pcmd; 
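	/* Keep the DMA buffers attached to the iocb; the completion path
	 * uses these pointers to locate and free the payload and buffer
	 * list buffers when the iocb is released.
	 */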
283 elsiocb->bpl_dmabuf = pbuflist; 284 elsiocb->retry = retry; 285 elsiocb->vport = vport; 286 elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT; 287 288 if (prsp) 289 list_add(&prsp->list, &pcmd->list); 290 if (expect_rsp) { 291 /* Xmit ELS command <elsCmd> to remote NPORT <did> */ 292 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 293 "0116 Xmit ELS command x%x to remote " 294 "NPORT x%x I/O tag: x%x, port state:x%x " 295 "rpi x%x fc_flag:x%x\n", 296 elscmd, did, elsiocb->iotag, 297 vport->port_state, ndlp->nlp_rpi, 298 vport->fc_flag); 299 } else { 300 /* Xmit ELS response <elsCmd> to remote NPORT <did> */ 301 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 302 "0117 Xmit ELS response x%x to remote " 303 "NPORT x%x I/O tag: x%x, size: x%x " 304 "port_state x%x rpi x%x fc_flag x%x\n", 305 elscmd, ndlp->nlp_DID, elsiocb->iotag, 306 cmd_size, vport->port_state, 307 ndlp->nlp_rpi, vport->fc_flag); 308 } 309 310 return elsiocb; 311 312 els_iocb_free_pbuf_exit: 313 if (expect_rsp) 314 lpfc_mbuf_free(phba, prsp->virt, prsp->phys); 315 kfree(pbuflist); 316 317 els_iocb_free_prsp_exit: 318 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys); 319 kfree(prsp); 320 321 els_iocb_free_pcmb_exit: 322 kfree(pcmd); 323 lpfc_sli_release_iocbq(phba, elsiocb); 324 return NULL; 325 } 326 327 /** 328 * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport 329 * @vport: pointer to a host virtual N_Port data structure. 330 * 331 * This routine issues a fabric registration login for a @vport. An 332 * active ndlp node with Fabric_DID must already exist for this @vport. 333 * The routine invokes two mailbox commands to carry out fabric registration 334 * login through the HBA firmware: the first mailbox command requests the 335 * HBA to perform link configuration for the @vport; and the second mailbox 336 * command requests the HBA to perform the actual fabric registration login 337 * with the @vport. 338 * 339 * Return code 340 * 0 - successfully issued fabric registration login for @vport 341 * -ENXIO -- failed to issue fabric registration login for @vport 342 **/ 343 int 344 lpfc_issue_fabric_reglogin(struct lpfc_vport *vport) 345 { 346 struct lpfc_hba *phba = vport->phba; 347 LPFC_MBOXQ_t *mbox; 348 struct lpfc_nodelist *ndlp; 349 struct serv_parm *sp; 350 int rc; 351 int err = 0; 352 353 sp = &phba->fc_fabparam; 354 ndlp = lpfc_findnode_did(vport, Fabric_DID); 355 if (!ndlp) { 356 err = 1; 357 goto fail; 358 } 359 360 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 361 if (!mbox) { 362 err = 2; 363 goto fail; 364 } 365 366 vport->port_state = LPFC_FABRIC_CFG_LINK; 367 lpfc_config_link(phba, mbox); 368 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 369 mbox->vport = vport; 370 371 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 372 if (rc == MBX_NOT_FINISHED) { 373 err = 3; 374 goto fail_free_mbox; 375 } 376 377 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 378 if (!mbox) { 379 err = 4; 380 goto fail; 381 } 382 rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, 383 ndlp->nlp_rpi); 384 if (rc) { 385 err = 5; 386 goto fail_free_mbox; 387 } 388 389 mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login; 390 mbox->vport = vport; 391 /* increment the reference count on ndlp to hold reference 392 * for the callback routine. 
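	 * lpfc_nlp_get() may return NULL if the node is already being
	 * released; in that case the mailbox is cleaned up and this
	 * routine fails.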
393 */ 394 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 395 if (!mbox->ctx_ndlp) { 396 err = 6; 397 goto fail_free_mbox; 398 } 399 400 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 401 if (rc == MBX_NOT_FINISHED) { 402 err = 7; 403 goto fail_issue_reg_login; 404 } 405 406 return 0; 407 408 fail_issue_reg_login: 409 /* decrement the reference count on ndlp just incremented 410 * for the failed mbox command. 411 */ 412 lpfc_nlp_put(ndlp); 413 fail_free_mbox: 414 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 415 fail: 416 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 417 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 418 "0249 Cannot issue Register Fabric login: Err %d\n", 419 err); 420 return -ENXIO; 421 } 422 423 /** 424 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login 425 * @vport: pointer to a host virtual N_Port data structure. 426 * 427 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for 428 * the @vport. This mailbox command is necessary for SLI4 port only. 429 * 430 * Return code 431 * 0 - successfully issued REG_VFI for @vport 432 * A failure code otherwise. 433 **/ 434 int 435 lpfc_issue_reg_vfi(struct lpfc_vport *vport) 436 { 437 struct lpfc_hba *phba = vport->phba; 438 LPFC_MBOXQ_t *mboxq = NULL; 439 struct lpfc_nodelist *ndlp; 440 struct lpfc_dmabuf *dmabuf = NULL; 441 int rc = 0; 442 443 /* move forward in case of SLI4 FC port loopback test and pt2pt mode */ 444 if ((phba->sli_rev == LPFC_SLI_REV4) && 445 !(phba->link_flag & LS_LOOPBACK_MODE) && 446 !(vport->fc_flag & FC_PT2PT)) { 447 ndlp = lpfc_findnode_did(vport, Fabric_DID); 448 if (!ndlp) { 449 rc = -ENODEV; 450 goto fail; 451 } 452 } 453 454 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 455 if (!mboxq) { 456 rc = -ENOMEM; 457 goto fail; 458 } 459 460 /* Supply CSP's only if we are fabric connect or pt-to-pt connect */ 461 if ((vport->fc_flag & FC_FABRIC) || (vport->fc_flag & FC_PT2PT)) { 462 rc = lpfc_mbox_rsrc_prep(phba, mboxq); 463 if (rc) { 464 rc = -ENOMEM; 465 goto fail_mbox; 466 } 467 dmabuf = mboxq->ctx_buf; 468 memcpy(dmabuf->virt, &phba->fc_fabparam, 469 sizeof(struct serv_parm)); 470 } 471 472 vport->port_state = LPFC_FABRIC_CFG_LINK; 473 if (dmabuf) { 474 lpfc_reg_vfi(mboxq, vport, dmabuf->phys); 475 /* lpfc_reg_vfi memsets the mailbox. Restore the ctx_buf. */ 476 mboxq->ctx_buf = dmabuf; 477 } else { 478 lpfc_reg_vfi(mboxq, vport, 0); 479 } 480 481 mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi; 482 mboxq->vport = vport; 483 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 484 if (rc == MBX_NOT_FINISHED) { 485 rc = -ENXIO; 486 goto fail_mbox; 487 } 488 return 0; 489 490 fail_mbox: 491 lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED); 492 fail: 493 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 494 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 495 "0289 Issue Register VFI failed: Err %d\n", rc); 496 return rc; 497 } 498 499 /** 500 * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login 501 * @vport: pointer to a host virtual N_Port data structure. 502 * 503 * This routine issues a UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for 504 * the @vport. This mailbox command is necessary for SLI4 port only. 505 * 506 * Return code 507 * 0 - successfully issued REG_VFI for @vport 508 * A failure code otherwise. 
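 *
 * On success the FC_VFI_REGISTERED flag is cleared on the @vport once the
 * UNREG_VFI mailbox has been successfully queued.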
509 **/ 510 int 511 lpfc_issue_unreg_vfi(struct lpfc_vport *vport) 512 { 513 struct lpfc_hba *phba = vport->phba; 514 struct Scsi_Host *shost; 515 LPFC_MBOXQ_t *mboxq; 516 int rc; 517 518 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 519 if (!mboxq) { 520 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 521 "2556 UNREG_VFI mbox allocation failed" 522 "HBA state x%x\n", phba->pport->port_state); 523 return -ENOMEM; 524 } 525 526 lpfc_unreg_vfi(mboxq, vport); 527 mboxq->vport = vport; 528 mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl; 529 530 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 531 if (rc == MBX_NOT_FINISHED) { 532 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 533 "2557 UNREG_VFI issue mbox failed rc x%x " 534 "HBA state x%x\n", 535 rc, phba->pport->port_state); 536 mempool_free(mboxq, phba->mbox_mem_pool); 537 return -EIO; 538 } 539 540 shost = lpfc_shost_from_vport(vport); 541 spin_lock_irq(shost->host_lock); 542 vport->fc_flag &= ~FC_VFI_REGISTERED; 543 spin_unlock_irq(shost->host_lock); 544 return 0; 545 } 546 547 /** 548 * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean. 549 * @vport: pointer to a host virtual N_Port data structure. 550 * @sp: pointer to service parameter data structure. 551 * 552 * This routine is called from FLOGI/FDISC completion handler functions. 553 * lpfc_check_clean_addr_bit return 1 when FCID/Fabric portname/ Fabric 554 * node nodename is changed in the completion service parameter else return 555 * 0. This function also set flag in the vport data structure to delay 556 * NP_Port discovery after the FLOGI/FDISC completion if Clean address bit 557 * in FLOGI/FDISC response is cleared and FCID/Fabric portname/ Fabric 558 * node nodename is changed in the completion service parameter. 559 * 560 * Return code 561 * 0 - FCID and Fabric Nodename and Fabric portname is not changed. 562 * 1 - FCID or Fabric Nodename or Fabric portname is changed. 563 * 564 **/ 565 static uint8_t 566 lpfc_check_clean_addr_bit(struct lpfc_vport *vport, 567 struct serv_parm *sp) 568 { 569 struct lpfc_hba *phba = vport->phba; 570 uint8_t fabric_param_changed = 0; 571 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 572 573 if ((vport->fc_prevDID != vport->fc_myDID) || 574 memcmp(&vport->fabric_portname, &sp->portName, 575 sizeof(struct lpfc_name)) || 576 memcmp(&vport->fabric_nodename, &sp->nodeName, 577 sizeof(struct lpfc_name)) || 578 (vport->vport_flag & FAWWPN_PARAM_CHG)) { 579 fabric_param_changed = 1; 580 vport->vport_flag &= ~FAWWPN_PARAM_CHG; 581 } 582 /* 583 * Word 1 Bit 31 in common service parameter is overloaded. 584 * Word 1 Bit 31 in FLOGI request is multiple NPort request 585 * Word 1 Bit 31 in FLOGI response is clean address bit 586 * 587 * If fabric parameter is changed and clean address bit is 588 * cleared delay nport discovery if 589 * - vport->fc_prevDID != 0 (not initial discovery) OR 590 * - lpfc_delay_discovery module parameter is set. 591 */ 592 if (fabric_param_changed && !sp->cmn.clean_address_bit && 593 (vport->fc_prevDID || phba->cfg_delay_discovery)) { 594 spin_lock_irq(shost->host_lock); 595 vport->fc_flag |= FC_DISC_DELAYED; 596 spin_unlock_irq(shost->host_lock); 597 } 598 599 return fabric_param_changed; 600 } 601 602 603 /** 604 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port 605 * @vport: pointer to a host virtual N_Port data structure. 606 * @ndlp: pointer to a node-list data structure. 607 * @sp: pointer to service parameter data structure. 
608 * @ulp_word4: command response value 609 * 610 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback 611 * function to handle the completion of a Fabric Login (FLOGI) into a fabric 612 * port in a fabric topology. It properly sets up the parameters to the @ndlp 613 * from the IOCB response. It also check the newly assigned N_Port ID to the 614 * @vport against the previously assigned N_Port ID. If it is different from 615 * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine 616 * is invoked on all the remaining nodes with the @vport to unregister the 617 * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin() 618 * is invoked to register login to the fabric. 619 * 620 * Return code 621 * 0 - Success (currently, always return 0) 622 **/ 623 static int 624 lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 625 struct serv_parm *sp, uint32_t ulp_word4) 626 { 627 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 628 struct lpfc_hba *phba = vport->phba; 629 struct lpfc_nodelist *np; 630 struct lpfc_nodelist *next_np; 631 uint8_t fabric_param_changed; 632 633 spin_lock_irq(shost->host_lock); 634 vport->fc_flag |= FC_FABRIC; 635 spin_unlock_irq(shost->host_lock); 636 637 phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov); 638 if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */ 639 phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000; 640 641 phba->fc_edtovResol = sp->cmn.edtovResolution; 642 phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000; 643 644 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 645 spin_lock_irq(shost->host_lock); 646 vport->fc_flag |= FC_PUBLIC_LOOP; 647 spin_unlock_irq(shost->host_lock); 648 } 649 650 vport->fc_myDID = ulp_word4 & Mask_DID; 651 memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name)); 652 memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name)); 653 ndlp->nlp_class_sup = 0; 654 if (sp->cls1.classValid) 655 ndlp->nlp_class_sup |= FC_COS_CLASS1; 656 if (sp->cls2.classValid) 657 ndlp->nlp_class_sup |= FC_COS_CLASS2; 658 if (sp->cls3.classValid) 659 ndlp->nlp_class_sup |= FC_COS_CLASS3; 660 if (sp->cls4.classValid) 661 ndlp->nlp_class_sup |= FC_COS_CLASS4; 662 ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | 663 sp->cmn.bbRcvSizeLsb; 664 665 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp); 666 if (fabric_param_changed) { 667 /* Reset FDMI attribute masks based on config parameter */ 668 if (phba->cfg_enable_SmartSAN || 669 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) { 670 /* Setup appropriate attribute masks */ 671 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR; 672 if (phba->cfg_enable_SmartSAN) 673 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR; 674 else 675 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; 676 } else { 677 vport->fdmi_hba_mask = 0; 678 vport->fdmi_port_mask = 0; 679 } 680 681 } 682 memcpy(&vport->fabric_portname, &sp->portName, 683 sizeof(struct lpfc_name)); 684 memcpy(&vport->fabric_nodename, &sp->nodeName, 685 sizeof(struct lpfc_name)); 686 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); 687 688 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { 689 if (sp->cmn.response_multiple_NPort) { 690 lpfc_printf_vlog(vport, KERN_WARNING, 691 LOG_ELS | LOG_VPORT, 692 "1816 FLOGI NPIV supported, " 693 "response data 0x%x\n", 694 sp->cmn.response_multiple_NPort); 695 spin_lock_irq(&phba->hbalock); 696 phba->link_flag |= LS_NPIV_FAB_SUPPORTED; 697 spin_unlock_irq(&phba->hbalock); 698 
} else { 699 /* Because we asked f/w for NPIV it still expects us 700 to call reg_vnpid at least for the physical host */ 701 lpfc_printf_vlog(vport, KERN_WARNING, 702 LOG_ELS | LOG_VPORT, 703 "1817 Fabric does not support NPIV " 704 "- configuring single port mode.\n"); 705 spin_lock_irq(&phba->hbalock); 706 phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED; 707 spin_unlock_irq(&phba->hbalock); 708 } 709 } 710 711 /* 712 * For FC we need to do some special processing because of the SLI 713 * Port's default settings of the Common Service Parameters. 714 */ 715 if ((phba->sli_rev == LPFC_SLI_REV4) && 716 (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)) { 717 /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */ 718 if (fabric_param_changed) 719 lpfc_unregister_fcf_prep(phba); 720 721 /* This should just update the VFI CSPs*/ 722 if (vport->fc_flag & FC_VFI_REGISTERED) 723 lpfc_issue_reg_vfi(vport); 724 } 725 726 if (fabric_param_changed && 727 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 728 729 /* If our NportID changed, we need to ensure all 730 * remaining NPORTs get unreg_login'ed. 731 */ 732 list_for_each_entry_safe(np, next_np, 733 &vport->fc_nodes, nlp_listp) { 734 if ((np->nlp_state != NLP_STE_NPR_NODE) || 735 !(np->nlp_flag & NLP_NPR_ADISC)) 736 continue; 737 spin_lock_irq(&np->lock); 738 np->nlp_flag &= ~NLP_NPR_ADISC; 739 spin_unlock_irq(&np->lock); 740 lpfc_unreg_rpi(vport, np); 741 } 742 lpfc_cleanup_pending_mbox(vport); 743 744 if (phba->sli_rev == LPFC_SLI_REV4) { 745 lpfc_sli4_unreg_all_rpis(vport); 746 lpfc_mbx_unreg_vpi(vport); 747 spin_lock_irq(shost->host_lock); 748 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 749 spin_unlock_irq(shost->host_lock); 750 } 751 752 /* 753 * For SLI3 and SLI4, the VPI needs to be reregistered in 754 * response to this fabric parameter change event. 755 */ 756 spin_lock_irq(shost->host_lock); 757 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 758 spin_unlock_irq(shost->host_lock); 759 } else if ((phba->sli_rev == LPFC_SLI_REV4) && 760 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 761 /* 762 * Driver needs to re-reg VPI in order for f/w 763 * to update the MAC address. 764 */ 765 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 766 lpfc_register_new_vport(phba, vport, ndlp); 767 return 0; 768 } 769 770 if (phba->sli_rev < LPFC_SLI_REV4) { 771 lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE); 772 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED && 773 vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) 774 lpfc_register_new_vport(phba, vport, ndlp); 775 else 776 lpfc_issue_fabric_reglogin(vport); 777 } else { 778 ndlp->nlp_type |= NLP_FABRIC; 779 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 780 if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) && 781 (vport->vpi_state & LPFC_VPI_REGISTERED)) { 782 lpfc_start_fdiscs(phba); 783 lpfc_do_scr_ns_plogi(phba, vport); 784 } else if (vport->fc_flag & FC_VFI_REGISTERED) 785 lpfc_issue_init_vpi(vport); 786 else { 787 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 788 "3135 Need register VFI: (x%x/%x)\n", 789 vport->fc_prevDID, vport->fc_myDID); 790 lpfc_issue_reg_vfi(vport); 791 } 792 } 793 return 0; 794 } 795 796 /** 797 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port 798 * @vport: pointer to a host virtual N_Port data structure. 799 * @ndlp: pointer to a node-list data structure. 800 * @sp: pointer to service parameter data structure. 
801 * 802 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback 803 * function to handle the completion of a Fabric Login (FLOGI) into an N_Port 804 * in a point-to-point topology. First, the @vport's N_Port Name is compared 805 * with the received N_Port Name: if the @vport's N_Port Name is greater than 806 * the received N_Port Name lexicographically, this node shall assign local 807 * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and 808 * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise, 809 * this node shall just wait for the remote node to issue PLOGI and assign 810 * N_Port IDs. 811 * 812 * Return code 813 * 0 - Success 814 * -ENXIO - Fail 815 **/ 816 static int 817 lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 818 struct serv_parm *sp) 819 { 820 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 821 struct lpfc_hba *phba = vport->phba; 822 LPFC_MBOXQ_t *mbox; 823 int rc; 824 825 spin_lock_irq(shost->host_lock); 826 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 827 vport->fc_flag |= FC_PT2PT; 828 spin_unlock_irq(shost->host_lock); 829 830 /* If we are pt2pt with another NPort, force NPIV off! */ 831 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED; 832 833 /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */ 834 if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) { 835 lpfc_unregister_fcf_prep(phba); 836 837 spin_lock_irq(shost->host_lock); 838 vport->fc_flag &= ~FC_VFI_REGISTERED; 839 spin_unlock_irq(shost->host_lock); 840 phba->fc_topology_changed = 0; 841 } 842 843 rc = memcmp(&vport->fc_portname, &sp->portName, 844 sizeof(vport->fc_portname)); 845 846 if (rc >= 0) { 847 /* This side will initiate the PLOGI */ 848 spin_lock_irq(shost->host_lock); 849 vport->fc_flag |= FC_PT2PT_PLOGI; 850 spin_unlock_irq(shost->host_lock); 851 852 /* 853 * N_Port ID cannot be 0, set our Id to LocalID 854 * the other side will be RemoteID. 855 */ 856 857 /* not equal */ 858 if (rc) 859 vport->fc_myDID = PT2PT_LocalID; 860 861 /* If not registered with a transport, decrement ndlp reference 862 * count indicating that ndlp can be safely released when other 863 * references are removed. 864 */ 865 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) 866 lpfc_nlp_put(ndlp); 867 868 ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID); 869 if (!ndlp) { 870 /* 871 * Cannot find existing Fabric ndlp, so allocate a 872 * new one 873 */ 874 ndlp = lpfc_nlp_init(vport, PT2PT_RemoteID); 875 if (!ndlp) 876 goto fail; 877 } 878 879 memcpy(&ndlp->nlp_portname, &sp->portName, 880 sizeof(struct lpfc_name)); 881 memcpy(&ndlp->nlp_nodename, &sp->nodeName, 882 sizeof(struct lpfc_name)); 883 /* Set state will put ndlp onto node list if not already done */ 884 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 885 spin_lock_irq(&ndlp->lock); 886 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 887 spin_unlock_irq(&ndlp->lock); 888 889 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 890 if (!mbox) 891 goto fail; 892 893 lpfc_config_link(phba, mbox); 894 895 mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link; 896 mbox->vport = vport; 897 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 898 if (rc == MBX_NOT_FINISHED) { 899 mempool_free(mbox, phba->mbox_mem_pool); 900 goto fail; 901 } 902 } else { 903 /* This side will wait for the PLOGI. If not registered with 904 * a transport, decrement node reference count indicating that 905 * ndlp can be released when other references are removed. 
906 */ 907 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) 908 lpfc_nlp_put(ndlp); 909 910 /* Start discovery - this should just do CLEAR_LA */ 911 lpfc_disc_start(vport); 912 } 913 914 return 0; 915 fail: 916 return -ENXIO; 917 } 918 919 /** 920 * lpfc_cmpl_els_flogi - Completion callback function for flogi 921 * @phba: pointer to lpfc hba data structure. 922 * @cmdiocb: pointer to lpfc command iocb data structure. 923 * @rspiocb: pointer to lpfc response iocb data structure. 924 * 925 * This routine is the top-level completion callback function for issuing 926 * a Fabric Login (FLOGI) command. If the response IOCB reported error, 927 * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If 928 * retry has been made (either immediately or delayed with lpfc_els_retry() 929 * returning 1), the command IOCB will be released and function returned. 930 * If the retry attempt has been given up (possibly reach the maximum 931 * number of retries), one additional decrement of ndlp reference shall be 932 * invoked before going out after releasing the command IOCB. This will 933 * actually release the remote node (Note, lpfc_els_free_iocb() will also 934 * invoke one decrement of ndlp reference count). If no error reported in 935 * the IOCB status, the command Port ID field is used to determine whether 936 * this is a point-to-point topology or a fabric topology: if the Port ID 937 * field is assigned, it is a fabric topology; otherwise, it is a 938 * point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or 939 * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the 940 * specific topology completion conditions. 941 **/ 942 static void 943 lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 944 struct lpfc_iocbq *rspiocb) 945 { 946 struct lpfc_vport *vport = cmdiocb->vport; 947 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 948 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 949 IOCB_t *irsp; 950 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf, *prsp; 951 struct serv_parm *sp; 952 uint16_t fcf_index; 953 int rc; 954 u32 ulp_status, ulp_word4, tmo; 955 956 /* Check to see if link went down during discovery */ 957 if (lpfc_els_chk_latt(vport)) { 958 /* One additional decrement on node reference count to 959 * trigger the release of the node 960 */ 961 if (!(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) 962 lpfc_nlp_put(ndlp); 963 goto out; 964 } 965 966 ulp_status = get_job_ulpstatus(phba, rspiocb); 967 ulp_word4 = get_job_word4(phba, rspiocb); 968 969 if (phba->sli_rev == LPFC_SLI_REV4) { 970 tmo = get_wqe_tmo(cmdiocb); 971 } else { 972 irsp = &rspiocb->iocb; 973 tmo = irsp->ulpTimeout; 974 } 975 976 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 977 "FLOGI cmpl: status:x%x/x%x state:x%x", 978 ulp_status, ulp_word4, 979 vport->port_state); 980 981 if (ulp_status) { 982 /* 983 * In case of FIP mode, perform roundrobin FCF failover 984 * due to new FCF discovery 985 */ 986 if ((phba->hba_flag & HBA_FIP_SUPPORT) && 987 (phba->fcf.fcf_flag & FCF_DISCOVERY)) { 988 if (phba->link_state < LPFC_LINK_UP) 989 goto stop_rr_fcf_flogi; 990 if ((phba->fcoe_cvl_eventtag_attn == 991 phba->fcoe_cvl_eventtag) && 992 (ulp_status == IOSTAT_LOCAL_REJECT) && 993 ((ulp_word4 & IOERR_PARAM_MASK) == 994 IOERR_SLI_ABORTED)) 995 goto stop_rr_fcf_flogi; 996 else 997 phba->fcoe_cvl_eventtag_attn = 998 phba->fcoe_cvl_eventtag; 999 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, 1000 "2611 FLOGI failed on FCF (x%x), " 1001 "status:x%x/x%x, 
tmo:x%x, perform " 1002 "roundrobin FCF failover\n", 1003 phba->fcf.current_rec.fcf_indx, 1004 ulp_status, ulp_word4, tmo); 1005 lpfc_sli4_set_fcf_flogi_fail(phba, 1006 phba->fcf.current_rec.fcf_indx); 1007 fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba); 1008 rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index); 1009 if (rc) 1010 goto out; 1011 } 1012 1013 stop_rr_fcf_flogi: 1014 /* FLOGI failure */ 1015 if (!(ulp_status == IOSTAT_LOCAL_REJECT && 1016 ((ulp_word4 & IOERR_PARAM_MASK) == 1017 IOERR_LOOP_OPEN_FAILURE))) 1018 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1019 "2858 FLOGI failure Status:x%x/x%x TMO" 1020 ":x%x Data x%x x%x\n", 1021 ulp_status, ulp_word4, tmo, 1022 phba->hba_flag, phba->fcf.fcf_flag); 1023 1024 /* Check for retry */ 1025 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) 1026 goto out; 1027 1028 lpfc_printf_vlog(vport, KERN_WARNING, LOG_TRACE_EVENT, 1029 "0150 FLOGI failure Status:x%x/x%x " 1030 "xri x%x TMO:x%x refcnt %d\n", 1031 ulp_status, ulp_word4, cmdiocb->sli4_xritag, 1032 tmo, kref_read(&ndlp->kref)); 1033 1034 /* If this is not a loop open failure, bail out */ 1035 if (!(ulp_status == IOSTAT_LOCAL_REJECT && 1036 ((ulp_word4 & IOERR_PARAM_MASK) == 1037 IOERR_LOOP_OPEN_FAILURE))) { 1038 /* FLOGI failure */ 1039 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1040 "0100 FLOGI failure Status:x%x/x%x " 1041 "TMO:x%x\n", 1042 ulp_status, ulp_word4, tmo); 1043 goto flogifail; 1044 } 1045 1046 /* FLOGI failed, so there is no fabric */ 1047 spin_lock_irq(shost->host_lock); 1048 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP | 1049 FC_PT2PT_NO_NVME); 1050 spin_unlock_irq(shost->host_lock); 1051 1052 /* If private loop, then allow max outstanding els to be 1053 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no 1054 * alpa map would take too long otherwise. 1055 */ 1056 if (phba->alpa_map[0] == 0) 1057 vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS; 1058 if ((phba->sli_rev == LPFC_SLI_REV4) && 1059 (!(vport->fc_flag & FC_VFI_REGISTERED) || 1060 (vport->fc_prevDID != vport->fc_myDID) || 1061 phba->fc_topology_changed)) { 1062 if (vport->fc_flag & FC_VFI_REGISTERED) { 1063 if (phba->fc_topology_changed) { 1064 lpfc_unregister_fcf_prep(phba); 1065 spin_lock_irq(shost->host_lock); 1066 vport->fc_flag &= ~FC_VFI_REGISTERED; 1067 spin_unlock_irq(shost->host_lock); 1068 phba->fc_topology_changed = 0; 1069 } else { 1070 lpfc_sli4_unreg_all_rpis(vport); 1071 } 1072 } 1073 1074 /* Do not register VFI if the driver aborted FLOGI */ 1075 if (!lpfc_error_lost_link(ulp_status, ulp_word4)) 1076 lpfc_issue_reg_vfi(vport); 1077 1078 lpfc_nlp_put(ndlp); 1079 goto out; 1080 } 1081 goto flogifail; 1082 } 1083 spin_lock_irq(shost->host_lock); 1084 vport->fc_flag &= ~FC_VPORT_CVL_RCVD; 1085 vport->fc_flag &= ~FC_VPORT_LOGO_RCVD; 1086 spin_unlock_irq(shost->host_lock); 1087 1088 /* 1089 * The FLogI succeeded. Sync the data for the CPU before 1090 * accessing it. 
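	 * The service parameters begin just past the 4-byte ELS command
	 * code at the start of the response payload, which is where sp
	 * is pointed below.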
1091 */ 1092 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 1093 if (!prsp) 1094 goto out; 1095 sp = prsp->virt + sizeof(uint32_t); 1096 1097 /* FLOGI completes successfully */ 1098 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1099 "0101 FLOGI completes successfully, I/O tag:x%x " 1100 "xri x%x Data: x%x x%x x%x x%x x%x x%x x%x %d\n", 1101 cmdiocb->iotag, cmdiocb->sli4_xritag, 1102 ulp_word4, sp->cmn.e_d_tov, 1103 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution, 1104 vport->port_state, vport->fc_flag, 1105 sp->cmn.priority_tagging, kref_read(&ndlp->kref)); 1106 1107 if (sp->cmn.priority_tagging) 1108 vport->phba->pport->vmid_flag |= (LPFC_VMID_ISSUE_QFPA | 1109 LPFC_VMID_TYPE_PRIO); 1110 1111 if (vport->port_state == LPFC_FLOGI) { 1112 /* 1113 * If Common Service Parameters indicate Nport 1114 * we are point to point, if Fport we are Fabric. 1115 */ 1116 if (sp->cmn.fPort) 1117 rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, 1118 ulp_word4); 1119 else if (!(phba->hba_flag & HBA_FCOE_MODE)) 1120 rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp); 1121 else { 1122 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1123 "2831 FLOGI response with cleared Fabric " 1124 "bit fcf_index 0x%x " 1125 "Switch Name %02x%02x%02x%02x%02x%02x%02x%02x " 1126 "Fabric Name " 1127 "%02x%02x%02x%02x%02x%02x%02x%02x\n", 1128 phba->fcf.current_rec.fcf_indx, 1129 phba->fcf.current_rec.switch_name[0], 1130 phba->fcf.current_rec.switch_name[1], 1131 phba->fcf.current_rec.switch_name[2], 1132 phba->fcf.current_rec.switch_name[3], 1133 phba->fcf.current_rec.switch_name[4], 1134 phba->fcf.current_rec.switch_name[5], 1135 phba->fcf.current_rec.switch_name[6], 1136 phba->fcf.current_rec.switch_name[7], 1137 phba->fcf.current_rec.fabric_name[0], 1138 phba->fcf.current_rec.fabric_name[1], 1139 phba->fcf.current_rec.fabric_name[2], 1140 phba->fcf.current_rec.fabric_name[3], 1141 phba->fcf.current_rec.fabric_name[4], 1142 phba->fcf.current_rec.fabric_name[5], 1143 phba->fcf.current_rec.fabric_name[6], 1144 phba->fcf.current_rec.fabric_name[7]); 1145 1146 lpfc_nlp_put(ndlp); 1147 spin_lock_irq(&phba->hbalock); 1148 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 1149 phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO); 1150 spin_unlock_irq(&phba->hbalock); 1151 phba->fcf.fcf_redisc_attempted = 0; /* reset */ 1152 goto out; 1153 } 1154 if (!rc) { 1155 /* Mark the FCF discovery process done */ 1156 if (phba->hba_flag & HBA_FIP_SUPPORT) 1157 lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP | 1158 LOG_ELS, 1159 "2769 FLOGI to FCF (x%x) " 1160 "completed successfully\n", 1161 phba->fcf.current_rec.fcf_indx); 1162 spin_lock_irq(&phba->hbalock); 1163 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 1164 phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO); 1165 spin_unlock_irq(&phba->hbalock); 1166 phba->fcf.fcf_redisc_attempted = 0; /* reset */ 1167 goto out; 1168 } 1169 } else if (vport->port_state > LPFC_FLOGI && 1170 vport->fc_flag & FC_PT2PT) { 1171 /* 1172 * In a p2p topology, it is possible that discovery has 1173 * already progressed, and this completion can be ignored. 1174 * Recheck the indicated topology. 
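		 * If the response still indicates an F_Port, fall through
		 * to the flogifail handling below.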
1175 */ 1176 if (!sp->cmn.fPort) 1177 goto out; 1178 } 1179 1180 flogifail: 1181 spin_lock_irq(&phba->hbalock); 1182 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 1183 spin_unlock_irq(&phba->hbalock); 1184 1185 if (!lpfc_error_lost_link(ulp_status, ulp_word4)) { 1186 /* FLOGI failed, so just use loop map to make discovery list */ 1187 lpfc_disc_list_loopmap(vport); 1188 1189 /* Start discovery */ 1190 lpfc_disc_start(vport); 1191 } else if (((ulp_status != IOSTAT_LOCAL_REJECT) || 1192 (((ulp_word4 & IOERR_PARAM_MASK) != 1193 IOERR_SLI_ABORTED) && 1194 ((ulp_word4 & IOERR_PARAM_MASK) != 1195 IOERR_SLI_DOWN))) && 1196 (phba->link_state != LPFC_CLEAR_LA)) { 1197 /* If FLOGI failed enable link interrupt. */ 1198 lpfc_issue_clear_la(phba, vport); 1199 } 1200 out: 1201 phba->hba_flag &= ~HBA_FLOGI_OUTSTANDING; 1202 lpfc_els_free_iocb(phba, cmdiocb); 1203 lpfc_nlp_put(ndlp); 1204 } 1205 1206 /** 1207 * lpfc_cmpl_els_link_down - Completion callback function for ELS command 1208 * aborted during a link down 1209 * @phba: pointer to lpfc hba data structure. 1210 * @cmdiocb: pointer to lpfc command iocb data structure. 1211 * @rspiocb: pointer to lpfc response iocb data structure. 1212 * 1213 */ 1214 static void 1215 lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1216 struct lpfc_iocbq *rspiocb) 1217 { 1218 uint32_t *pcmd; 1219 uint32_t cmd; 1220 u32 ulp_status, ulp_word4; 1221 1222 pcmd = (uint32_t *)cmdiocb->cmd_dmabuf->virt; 1223 cmd = *pcmd; 1224 1225 ulp_status = get_job_ulpstatus(phba, rspiocb); 1226 ulp_word4 = get_job_word4(phba, rspiocb); 1227 1228 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 1229 "6445 ELS completes after LINK_DOWN: " 1230 " Status %x/%x cmd x%x flg x%x\n", 1231 ulp_status, ulp_word4, cmd, 1232 cmdiocb->cmd_flag); 1233 1234 if (cmdiocb->cmd_flag & LPFC_IO_FABRIC) { 1235 cmdiocb->cmd_flag &= ~LPFC_IO_FABRIC; 1236 atomic_dec(&phba->fabric_iocb_count); 1237 } 1238 lpfc_els_free_iocb(phba, cmdiocb); 1239 } 1240 1241 /** 1242 * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport 1243 * @vport: pointer to a host virtual N_Port data structure. 1244 * @ndlp: pointer to a node-list data structure. 1245 * @retry: number of retries to the command IOCB. 1246 * 1247 * This routine issues a Fabric Login (FLOGI) Request ELS command 1248 * for a @vport. The initiator service parameters are put into the payload 1249 * of the FLOGI Request IOCB and the top-level callback function pointer 1250 * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback 1251 * function field. The lpfc_issue_fabric_iocb routine is invoked to send 1252 * out FLOGI ELS command with one outstanding fabric IOCB at a time. 1253 * 1254 * Note that the ndlp reference count will be incremented by 1 for holding the 1255 * ndlp and the reference to ndlp will be stored into the ndlp field of 1256 * the IOCB for the completion callback function to the FLOGI ELS command. 
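 *
 * If a FLOGI was already received from the remote port and its ACC was
 * deferred (phba->defer_flogi_acc_flag), the deferred FLOGI ACC is also
 * transmitted here after this FLOGI has been queued.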
1257 * 1258 * Return code 1259 * 0 - successfully issued flogi iocb for @vport 1260 * 1 - failed to issue flogi iocb for @vport 1261 **/ 1262 static int 1263 lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1264 uint8_t retry) 1265 { 1266 struct lpfc_hba *phba = vport->phba; 1267 struct serv_parm *sp; 1268 union lpfc_wqe128 *wqe = NULL; 1269 IOCB_t *icmd = NULL; 1270 struct lpfc_iocbq *elsiocb; 1271 struct lpfc_iocbq defer_flogi_acc; 1272 u8 *pcmd, ct; 1273 uint16_t cmdsize; 1274 uint32_t tmo, did; 1275 int rc; 1276 1277 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 1278 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 1279 ndlp->nlp_DID, ELS_CMD_FLOGI); 1280 1281 if (!elsiocb) 1282 return 1; 1283 1284 wqe = &elsiocb->wqe; 1285 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 1286 icmd = &elsiocb->iocb; 1287 1288 /* For FLOGI request, remainder of payload is service parameters */ 1289 *((uint32_t *) (pcmd)) = ELS_CMD_FLOGI; 1290 pcmd += sizeof(uint32_t); 1291 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); 1292 sp = (struct serv_parm *) pcmd; 1293 1294 /* Setup CSPs accordingly for Fabric */ 1295 sp->cmn.e_d_tov = 0; 1296 sp->cmn.w2.r_a_tov = 0; 1297 sp->cmn.virtual_fabric_support = 0; 1298 sp->cls1.classValid = 0; 1299 if (sp->cmn.fcphLow < FC_PH3) 1300 sp->cmn.fcphLow = FC_PH3; 1301 if (sp->cmn.fcphHigh < FC_PH3) 1302 sp->cmn.fcphHigh = FC_PH3; 1303 1304 /* Determine if switch supports priority tagging */ 1305 if (phba->cfg_vmid_priority_tagging) { 1306 sp->cmn.priority_tagging = 1; 1307 /* lpfc_vmid_host_uuid is combination of wwpn and wwnn */ 1308 if (uuid_is_null((uuid_t *)vport->lpfc_vmid_host_uuid)) { 1309 memcpy(vport->lpfc_vmid_host_uuid, phba->wwpn, 1310 sizeof(phba->wwpn)); 1311 memcpy(&vport->lpfc_vmid_host_uuid[8], phba->wwnn, 1312 sizeof(phba->wwnn)); 1313 } 1314 } 1315 1316 if (phba->sli_rev == LPFC_SLI_REV4) { 1317 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 1318 LPFC_SLI_INTF_IF_TYPE_0) { 1319 /* FLOGI needs to be 3 for WQE FCFI */ 1320 ct = SLI4_CT_FCFI; 1321 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct); 1322 1323 /* Set the fcfi to the fcfi we registered with */ 1324 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 1325 phba->fcf.fcfi); 1326 } 1327 1328 /* Can't do SLI4 class2 without support sequence coalescing */ 1329 sp->cls2.classValid = 0; 1330 sp->cls2.seqDelivery = 0; 1331 } else { 1332 /* Historical, setting sequential-delivery bit for SLI3 */ 1333 sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0; 1334 sp->cls3.seqDelivery = (sp->cls3.classValid) ? 
1 : 0; 1335 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { 1336 sp->cmn.request_multiple_Nport = 1; 1337 /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */ 1338 icmd->ulpCt_h = 1; 1339 icmd->ulpCt_l = 0; 1340 } else { 1341 sp->cmn.request_multiple_Nport = 0; 1342 } 1343 1344 if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) { 1345 icmd->un.elsreq64.myID = 0; 1346 icmd->un.elsreq64.fl = 1; 1347 } 1348 } 1349 1350 tmo = phba->fc_ratov; 1351 phba->fc_ratov = LPFC_DISC_FLOGI_TMO; 1352 lpfc_set_disctmo(vport); 1353 phba->fc_ratov = tmo; 1354 1355 phba->fc_stat.elsXmitFLOGI++; 1356 elsiocb->cmd_cmpl = lpfc_cmpl_els_flogi; 1357 1358 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1359 "Issue FLOGI: opt:x%x", 1360 phba->sli3_options, 0, 0); 1361 1362 elsiocb->ndlp = lpfc_nlp_get(ndlp); 1363 if (!elsiocb->ndlp) { 1364 lpfc_els_free_iocb(phba, elsiocb); 1365 return 1; 1366 } 1367 1368 rc = lpfc_issue_fabric_iocb(phba, elsiocb); 1369 if (rc == IOCB_ERROR) { 1370 lpfc_els_free_iocb(phba, elsiocb); 1371 lpfc_nlp_put(ndlp); 1372 return 1; 1373 } 1374 1375 phba->hba_flag |= (HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING); 1376 1377 /* Clear external loopback plug detected flag */ 1378 phba->link_flag &= ~LS_EXTERNAL_LOOPBACK; 1379 1380 /* Check for a deferred FLOGI ACC condition */ 1381 if (phba->defer_flogi_acc_flag) { 1382 /* lookup ndlp for received FLOGI */ 1383 ndlp = lpfc_findnode_did(vport, 0); 1384 if (!ndlp) 1385 return 0; 1386 1387 did = vport->fc_myDID; 1388 vport->fc_myDID = Fabric_DID; 1389 1390 memset(&defer_flogi_acc, 0, sizeof(struct lpfc_iocbq)); 1391 1392 if (phba->sli_rev == LPFC_SLI_REV4) { 1393 bf_set(wqe_ctxt_tag, 1394 &defer_flogi_acc.wqe.xmit_els_rsp.wqe_com, 1395 phba->defer_flogi_acc_rx_id); 1396 bf_set(wqe_rcvoxid, 1397 &defer_flogi_acc.wqe.xmit_els_rsp.wqe_com, 1398 phba->defer_flogi_acc_ox_id); 1399 } else { 1400 icmd = &defer_flogi_acc.iocb; 1401 icmd->ulpContext = phba->defer_flogi_acc_rx_id; 1402 icmd->unsli3.rcvsli3.ox_id = 1403 phba->defer_flogi_acc_ox_id; 1404 } 1405 1406 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1407 "3354 Xmit deferred FLOGI ACC: rx_id: x%x," 1408 " ox_id: x%x, hba_flag x%x\n", 1409 phba->defer_flogi_acc_rx_id, 1410 phba->defer_flogi_acc_ox_id, phba->hba_flag); 1411 1412 /* Send deferred FLOGI ACC */ 1413 lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, &defer_flogi_acc, 1414 ndlp, NULL); 1415 1416 phba->defer_flogi_acc_flag = false; 1417 vport->fc_myDID = did; 1418 1419 /* Decrement ndlp reference count to indicate the node can be 1420 * released when other references are removed. 1421 */ 1422 lpfc_nlp_put(ndlp); 1423 } 1424 1425 return 0; 1426 } 1427 1428 /** 1429 * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs 1430 * @phba: pointer to lpfc hba data structure. 1431 * 1432 * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs 1433 * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq 1434 * list and issues an abort IOCB commond on each outstanding IOCB that 1435 * contains a active Fabric_DID ndlp. Note that this function is to issue 1436 * the abort IOCB command on all the outstanding IOCBs, thus when this 1437 * function returns, it does not guarantee all the IOCBs are actually aborted. 
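 *
 * In point-to-point mode, when the local port is not the PLOGI initiator,
 * each matching iocb's fabric completion routine is first switched to
 * lpfc_ignore_els_cmpl so the aborted FLOGI's completion is discarded.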
1438 * 1439 * Return code 1440 * 0 - Successfully issued abort iocb on all outstanding flogis (Always 0) 1441 **/ 1442 int 1443 lpfc_els_abort_flogi(struct lpfc_hba *phba) 1444 { 1445 struct lpfc_sli_ring *pring; 1446 struct lpfc_iocbq *iocb, *next_iocb; 1447 struct lpfc_nodelist *ndlp; 1448 u32 ulp_command; 1449 1450 /* Abort outstanding I/O on NPort <nlp_DID> */ 1451 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 1452 "0201 Abort outstanding I/O on NPort x%x\n", 1453 Fabric_DID); 1454 1455 pring = lpfc_phba_elsring(phba); 1456 if (unlikely(!pring)) 1457 return -EIO; 1458 1459 /* 1460 * Check the txcmplq for an iocb that matches the nport the driver is 1461 * searching for. 1462 */ 1463 spin_lock_irq(&phba->hbalock); 1464 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { 1465 ulp_command = get_job_cmnd(phba, iocb); 1466 if (ulp_command == CMD_ELS_REQUEST64_CR) { 1467 ndlp = iocb->ndlp; 1468 if (ndlp && ndlp->nlp_DID == Fabric_DID) { 1469 if ((phba->pport->fc_flag & FC_PT2PT) && 1470 !(phba->pport->fc_flag & FC_PT2PT_PLOGI)) 1471 iocb->fabric_cmd_cmpl = 1472 lpfc_ignore_els_cmpl; 1473 lpfc_sli_issue_abort_iotag(phba, pring, iocb, 1474 NULL); 1475 } 1476 } 1477 } 1478 /* Make sure HBA is alive */ 1479 lpfc_issue_hb_tmo(phba); 1480 1481 spin_unlock_irq(&phba->hbalock); 1482 1483 return 0; 1484 } 1485 1486 /** 1487 * lpfc_initial_flogi - Issue an initial fabric login for a vport 1488 * @vport: pointer to a host virtual N_Port data structure. 1489 * 1490 * This routine issues an initial Fabric Login (FLOGI) for the @vport 1491 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from 1492 * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and 1493 * put it into the @vport's ndlp list. If an inactive ndlp found on the list, 1494 * it will just be enabled and made active. The lpfc_issue_els_flogi() routine 1495 * is then invoked with the @vport and the ndlp to perform the FLOGI for the 1496 * @vport. 1497 * 1498 * Return code 1499 * 0 - failed to issue initial flogi for @vport 1500 * 1 - successfully issued initial flogi for @vport 1501 **/ 1502 int 1503 lpfc_initial_flogi(struct lpfc_vport *vport) 1504 { 1505 struct lpfc_nodelist *ndlp; 1506 1507 vport->port_state = LPFC_FLOGI; 1508 lpfc_set_disctmo(vport); 1509 1510 /* First look for the Fabric ndlp */ 1511 ndlp = lpfc_findnode_did(vport, Fabric_DID); 1512 if (!ndlp) { 1513 /* Cannot find existing Fabric ndlp, so allocate a new one */ 1514 ndlp = lpfc_nlp_init(vport, Fabric_DID); 1515 if (!ndlp) 1516 return 0; 1517 /* Set the node type */ 1518 ndlp->nlp_type |= NLP_FABRIC; 1519 1520 /* Put ndlp onto node list */ 1521 lpfc_enqueue_node(vport, ndlp); 1522 } 1523 1524 /* Reset the Fabric flag, topology change may have happened */ 1525 vport->fc_flag &= ~FC_FABRIC; 1526 if (lpfc_issue_els_flogi(vport, ndlp, 0)) { 1527 /* A node reference should be retained while registered with a 1528 * transport or dev-loss-evt work is pending. 1529 * Otherwise, decrement node reference to trigger release. 1530 */ 1531 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && 1532 !(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) 1533 lpfc_nlp_put(ndlp); 1534 return 0; 1535 } 1536 return 1; 1537 } 1538 1539 /** 1540 * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport 1541 * @vport: pointer to a host virtual N_Port data structure. 1542 * 1543 * This routine issues an initial Fabric Discover (FDISC) for the @vport 1544 * specified. 
It first searches the ndlp with the Fabric_DID (0xfffffe) from 1545 * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and 1546 * put it into the @vport's ndlp list. If an inactive ndlp found on the list, 1547 * it will just be enabled and made active. The lpfc_issue_els_fdisc() routine 1548 * is then invoked with the @vport and the ndlp to perform the FDISC for the 1549 * @vport. 1550 * 1551 * Return code 1552 * 0 - failed to issue initial fdisc for @vport 1553 * 1 - successfully issued initial fdisc for @vport 1554 **/ 1555 int 1556 lpfc_initial_fdisc(struct lpfc_vport *vport) 1557 { 1558 struct lpfc_nodelist *ndlp; 1559 1560 /* First look for the Fabric ndlp */ 1561 ndlp = lpfc_findnode_did(vport, Fabric_DID); 1562 if (!ndlp) { 1563 /* Cannot find existing Fabric ndlp, so allocate a new one */ 1564 ndlp = lpfc_nlp_init(vport, Fabric_DID); 1565 if (!ndlp) 1566 return 0; 1567 1568 /* NPIV is only supported in Fabrics. */ 1569 ndlp->nlp_type |= NLP_FABRIC; 1570 1571 /* Put ndlp onto node list */ 1572 lpfc_enqueue_node(vport, ndlp); 1573 } 1574 1575 if (lpfc_issue_els_fdisc(vport, ndlp, 0)) { 1576 /* A node reference should be retained while registered with a 1577 * transport or dev-loss-evt work is pending. 1578 * Otherwise, decrement node reference to trigger release. 1579 */ 1580 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && 1581 !(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) 1582 lpfc_nlp_put(ndlp); 1583 return 0; 1584 } 1585 return 1; 1586 } 1587 1588 /** 1589 * lpfc_more_plogi - Check and issue remaining plogis for a vport 1590 * @vport: pointer to a host virtual N_Port data structure. 1591 * 1592 * This routine checks whether there are more remaining Port Logins 1593 * (PLOGI) to be issued for the @vport. If so, it will invoke the routine 1594 * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes 1595 * to issue ELS PLOGIs up to the configured discover threads with the 1596 * @vport (@vport->cfg_discovery_threads). The function also decrement 1597 * the @vport's num_disc_node by 1 if it is not already 0. 1598 **/ 1599 void 1600 lpfc_more_plogi(struct lpfc_vport *vport) 1601 { 1602 if (vport->num_disc_nodes) 1603 vport->num_disc_nodes--; 1604 1605 /* Continue discovery with <num_disc_nodes> PLOGIs to go */ 1606 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 1607 "0232 Continue discovery with %d PLOGIs to go " 1608 "Data: x%x x%x x%x\n", 1609 vport->num_disc_nodes, vport->fc_plogi_cnt, 1610 vport->fc_flag, vport->port_state); 1611 /* Check to see if there are more PLOGIs to be sent */ 1612 if (vport->fc_flag & FC_NLP_MORE) 1613 /* go thru NPR nodes and issue any remaining ELS PLOGIs */ 1614 lpfc_els_disc_plogi(vport); 1615 1616 return; 1617 } 1618 1619 /** 1620 * lpfc_plogi_confirm_nport - Confirm plogi wwpn matches stored ndlp 1621 * @phba: pointer to lpfc hba data structure. 1622 * @prsp: pointer to response IOCB payload. 1623 * @ndlp: pointer to a node-list data structure. 1624 * 1625 * This routine checks and indicates whether the WWPN of an N_Port, retrieved 1626 * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_POrt. 1627 * The following cases are considered N_Port confirmed: 1628 * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on vport list and matches 1629 * the WWPN of the N_Port logged into; 3) The @ndlp is not on vport list but 1630 * it does not have WWPN assigned either. If the WWPN is confirmed, the 1631 * pointer to the @ndlp will be returned. 
If the WWPN is not confirmed: 1632 * 1) if there is a node on vport list other than the @ndlp with the same 1633 * WWPN of the N_Port PLOGI logged into, the lpfc_unreg_rpi() will be invoked 1634 * on that node to release the RPI associated with the node; 2) if there is 1635 * no node found on vport list with the same WWPN of the N_Port PLOGI logged 1636 * into, a new node shall be allocated (or activated). In either case, the 1637 * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall 1638 * be released and the new_ndlp shall be put on to the vport node list and 1639 * its pointer returned as the confirmed node. 1640 * 1641 * Note that before the @ndlp got "released", the keepDID from not-matching 1642 * or inactive "new_ndlp" on the vport node list is assigned to the nlp_DID 1643 * of the @ndlp. This is because the release of @ndlp is actually to put it 1644 * into an inactive state on the vport node list and the vport node list 1645 * management algorithm does not allow two node with a same DID. 1646 * 1647 * Return code 1648 * pointer to the PLOGI N_Port @ndlp 1649 **/ 1650 static struct lpfc_nodelist * 1651 lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, 1652 struct lpfc_nodelist *ndlp) 1653 { 1654 struct lpfc_vport *vport = ndlp->vport; 1655 struct lpfc_nodelist *new_ndlp; 1656 struct serv_parm *sp; 1657 uint8_t name[sizeof(struct lpfc_name)]; 1658 uint32_t keepDID = 0, keep_nlp_flag = 0; 1659 uint32_t keep_new_nlp_flag = 0; 1660 uint16_t keep_nlp_state; 1661 u32 keep_nlp_fc4_type = 0; 1662 struct lpfc_nvme_rport *keep_nrport = NULL; 1663 unsigned long *active_rrqs_xri_bitmap = NULL; 1664 1665 /* Fabric nodes can have the same WWPN so we don't bother searching 1666 * by WWPN. Just return the ndlp that was given to us. 1667 */ 1668 if (ndlp->nlp_type & NLP_FABRIC) 1669 return ndlp; 1670 1671 sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t)); 1672 memset(name, 0, sizeof(struct lpfc_name)); 1673 1674 /* Now we find out if the NPort we are logging into, matches the WWPN 1675 * we have for that ndlp. If not, we have some work to do. 1676 */ 1677 new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName); 1678 1679 /* return immediately if the WWPN matches ndlp */ 1680 if (!new_ndlp || (new_ndlp == ndlp)) 1681 return ndlp; 1682 1683 /* 1684 * Unregister from backend if not done yet. Could have been skipped 1685 * due to ADISC 1686 */ 1687 lpfc_nlp_unreg_node(vport, new_ndlp); 1688 1689 if (phba->sli_rev == LPFC_SLI_REV4) { 1690 active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool, 1691 GFP_KERNEL); 1692 if (active_rrqs_xri_bitmap) 1693 memset(active_rrqs_xri_bitmap, 0, 1694 phba->cfg_rrq_xri_bitmap_sz); 1695 } 1696 1697 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE, 1698 "3178 PLOGI confirm: ndlp x%x x%x x%x: " 1699 "new_ndlp x%x x%x x%x\n", 1700 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_fc4_type, 1701 (new_ndlp ? new_ndlp->nlp_DID : 0), 1702 (new_ndlp ? new_ndlp->nlp_flag : 0), 1703 (new_ndlp ? new_ndlp->nlp_fc4_type : 0)); 1704 1705 keepDID = new_ndlp->nlp_DID; 1706 1707 if (phba->sli_rev == LPFC_SLI_REV4 && active_rrqs_xri_bitmap) 1708 memcpy(active_rrqs_xri_bitmap, new_ndlp->active_rrqs_xri_bitmap, 1709 phba->cfg_rrq_xri_bitmap_sz); 1710 1711 /* At this point in this routine, we know new_ndlp will be 1712 * returned. however, any previous GID_FTs that were done 1713 * would have updated nlp_fc4_type in ndlp, so we must ensure 1714 * new_ndlp has the right value. 
1715 */ 1716 if (vport->fc_flag & FC_FABRIC) { 1717 keep_nlp_fc4_type = new_ndlp->nlp_fc4_type; 1718 new_ndlp->nlp_fc4_type = ndlp->nlp_fc4_type; 1719 } 1720 1721 lpfc_unreg_rpi(vport, new_ndlp); 1722 new_ndlp->nlp_DID = ndlp->nlp_DID; 1723 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state; 1724 if (phba->sli_rev == LPFC_SLI_REV4) 1725 memcpy(new_ndlp->active_rrqs_xri_bitmap, 1726 ndlp->active_rrqs_xri_bitmap, 1727 phba->cfg_rrq_xri_bitmap_sz); 1728 1729 /* Lock both ndlps */ 1730 spin_lock_irq(&ndlp->lock); 1731 spin_lock_irq(&new_ndlp->lock); 1732 keep_new_nlp_flag = new_ndlp->nlp_flag; 1733 keep_nlp_flag = ndlp->nlp_flag; 1734 new_ndlp->nlp_flag = ndlp->nlp_flag; 1735 1736 /* if new_ndlp had NLP_UNREG_INP set, keep it */ 1737 if (keep_new_nlp_flag & NLP_UNREG_INP) 1738 new_ndlp->nlp_flag |= NLP_UNREG_INP; 1739 else 1740 new_ndlp->nlp_flag &= ~NLP_UNREG_INP; 1741 1742 /* if new_ndlp had NLP_RPI_REGISTERED set, keep it */ 1743 if (keep_new_nlp_flag & NLP_RPI_REGISTERED) 1744 new_ndlp->nlp_flag |= NLP_RPI_REGISTERED; 1745 else 1746 new_ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; 1747 1748 /* 1749 * Retain the DROPPED flag. This will take care of the init 1750 * refcount when affecting the state change 1751 */ 1752 if (keep_new_nlp_flag & NLP_DROPPED) 1753 new_ndlp->nlp_flag |= NLP_DROPPED; 1754 else 1755 new_ndlp->nlp_flag &= ~NLP_DROPPED; 1756 1757 ndlp->nlp_flag = keep_new_nlp_flag; 1758 1759 /* if ndlp had NLP_UNREG_INP set, keep it */ 1760 if (keep_nlp_flag & NLP_UNREG_INP) 1761 ndlp->nlp_flag |= NLP_UNREG_INP; 1762 else 1763 ndlp->nlp_flag &= ~NLP_UNREG_INP; 1764 1765 /* if ndlp had NLP_RPI_REGISTERED set, keep it */ 1766 if (keep_nlp_flag & NLP_RPI_REGISTERED) 1767 ndlp->nlp_flag |= NLP_RPI_REGISTERED; 1768 else 1769 ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; 1770 1771 /* 1772 * Retain the DROPPED flag. This will take care of the init 1773 * refcount when affecting the state change 1774 */ 1775 if (keep_nlp_flag & NLP_DROPPED) 1776 ndlp->nlp_flag |= NLP_DROPPED; 1777 else 1778 ndlp->nlp_flag &= ~NLP_DROPPED; 1779 1780 spin_unlock_irq(&new_ndlp->lock); 1781 spin_unlock_irq(&ndlp->lock); 1782 1783 /* Set nlp_states accordingly */ 1784 keep_nlp_state = new_ndlp->nlp_state; 1785 lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state); 1786 1787 /* interchange the nvme remoteport structs */ 1788 keep_nrport = new_ndlp->nrport; 1789 new_ndlp->nrport = ndlp->nrport; 1790 1791 /* Move this back to NPR state */ 1792 if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) { 1793 /* The ndlp doesn't have a portname yet, but does have an 1794 * NPort ID. The new_ndlp portname matches the Rport's 1795 * portname. Reinstantiate the new_ndlp and reset the ndlp. 1796 */ 1797 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1798 "3179 PLOGI confirm NEW: %x %x\n", 1799 new_ndlp->nlp_DID, keepDID); 1800 1801 /* Two ndlps cannot have the same did on the nodelist. 1802 * The KeepDID and keep_nlp_fc4_type need to be swapped 1803 * because ndlp is inflight with no WWPN. 
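 *
 * DID swap sketch, mirroring the assignments that follow (reading aid
 * only, no additional behavior is implied):
 *
 *	ndlp->nlp_DID      = keepDID;            // new_ndlp's original DID
 *	ndlp->nlp_fc4_type = keep_nlp_fc4_type;
 *	// new_ndlp already took over ndlp's old DID earlier in this routine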
1804 */ 1805 ndlp->nlp_DID = keepDID; 1806 ndlp->nlp_fc4_type = keep_nlp_fc4_type; 1807 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state); 1808 if (phba->sli_rev == LPFC_SLI_REV4 && 1809 active_rrqs_xri_bitmap) 1810 memcpy(ndlp->active_rrqs_xri_bitmap, 1811 active_rrqs_xri_bitmap, 1812 phba->cfg_rrq_xri_bitmap_sz); 1813 1814 } else { 1815 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1816 "3180 PLOGI confirm SWAP: %x %x\n", 1817 new_ndlp->nlp_DID, keepDID); 1818 1819 lpfc_unreg_rpi(vport, ndlp); 1820 1821 /* The ndlp and new_ndlp both have WWPNs but are swapping 1822 * NPort Ids and attributes. 1823 */ 1824 ndlp->nlp_DID = keepDID; 1825 ndlp->nlp_fc4_type = keep_nlp_fc4_type; 1826 1827 if (phba->sli_rev == LPFC_SLI_REV4 && 1828 active_rrqs_xri_bitmap) 1829 memcpy(ndlp->active_rrqs_xri_bitmap, 1830 active_rrqs_xri_bitmap, 1831 phba->cfg_rrq_xri_bitmap_sz); 1832 1833 /* Since we are switching over to the new_ndlp, 1834 * reset the old ndlp state 1835 */ 1836 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || 1837 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) 1838 keep_nlp_state = NLP_STE_NPR_NODE; 1839 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state); 1840 ndlp->nrport = keep_nrport; 1841 } 1842 1843 /* 1844 * If ndlp is not associated with any rport we can drop it here else 1845 * let dev_loss_tmo_callbk trigger DEVICE_RM event 1846 */ 1847 if (!ndlp->rport && (ndlp->nlp_state == NLP_STE_NPR_NODE)) 1848 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 1849 1850 if (phba->sli_rev == LPFC_SLI_REV4 && 1851 active_rrqs_xri_bitmap) 1852 mempool_free(active_rrqs_xri_bitmap, 1853 phba->active_rrq_pool); 1854 1855 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE, 1856 "3173 PLOGI confirm exit: new_ndlp x%x x%x x%x\n", 1857 new_ndlp->nlp_DID, new_ndlp->nlp_flag, 1858 new_ndlp->nlp_fc4_type); 1859 1860 return new_ndlp; 1861 } 1862 1863 /** 1864 * lpfc_end_rscn - Check and handle more rscn for a vport 1865 * @vport: pointer to a host virtual N_Port data structure. 1866 * 1867 * This routine checks whether more Registration State Change 1868 * Notifications (RSCNs) came in while the discovery state machine was in 1869 * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be 1870 * invoked to handle the additional RSCNs for the @vport. Otherwise, the 1871 * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of 1872 * handling the RSCNs. 1873 **/ 1874 void 1875 lpfc_end_rscn(struct lpfc_vport *vport) 1876 { 1877 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1878 1879 if (vport->fc_flag & FC_RSCN_MODE) { 1880 /* 1881 * Check to see if more RSCNs came in while we were 1882 * processing this one. 1883 */ 1884 if (vport->fc_rscn_id_cnt || 1885 (vport->fc_flag & FC_RSCN_DISCOVERY) != 0) 1886 lpfc_els_handle_rscn(vport); 1887 else { 1888 spin_lock_irq(shost->host_lock); 1889 vport->fc_flag &= ~FC_RSCN_MODE; 1890 spin_unlock_irq(shost->host_lock); 1891 } 1892 } 1893 } 1894 1895 /** 1896 * lpfc_cmpl_els_rrq - Completion handled for els RRQs. 1897 * @phba: pointer to lpfc hba data structure. 1898 * @cmdiocb: pointer to lpfc command iocb data structure. 1899 * @rspiocb: pointer to lpfc response iocb data structure. 1900 * 1901 * This routine will call the clear rrq function to free the rrq and 1902 * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not 1903 * exist then the clear_rrq is still called because the rrq needs to 1904 * be freed. 
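 *
 * Completion teardown sketch, mirroring the body below (reading aid only,
 * not additional behavior):
 *
 *	lpfc_clr_rrq_active(phba, rrq->xritag, rrq);	// always free the rrq
 *	lpfc_els_free_iocb(phba, cmdiocb);		// release the command IOCB
 *	lpfc_nlp_put(ndlp);				// drop the node reference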
1905 **/ 1906 1907 static void 1908 lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1909 struct lpfc_iocbq *rspiocb) 1910 { 1911 struct lpfc_vport *vport = cmdiocb->vport; 1912 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 1913 struct lpfc_node_rrq *rrq; 1914 u32 ulp_status = get_job_ulpstatus(phba, rspiocb); 1915 u32 ulp_word4 = get_job_word4(phba, rspiocb); 1916 1917 /* we pass cmdiocb to state machine which needs rspiocb as well */ 1918 rrq = cmdiocb->context_un.rrq; 1919 cmdiocb->rsp_iocb = rspiocb; 1920 1921 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1922 "RRQ cmpl: status:x%x/x%x did:x%x", 1923 ulp_status, ulp_word4, 1924 get_job_els_rsp64_did(phba, cmdiocb)); 1925 1926 1927 /* rrq completes to NPort <nlp_DID> */ 1928 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1929 "2880 RRQ completes to DID x%x " 1930 "Data: x%x x%x x%x x%x x%x\n", 1931 ndlp->nlp_DID, ulp_status, ulp_word4, 1932 get_wqe_tmo(cmdiocb), rrq->xritag, rrq->rxid); 1933 1934 if (ulp_status) { 1935 /* Check for retry */ 1936 /* RRQ failed. Don't print the vport to vport rjts */ 1937 if (ulp_status != IOSTAT_LS_RJT || 1938 (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) && 1939 ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) || 1940 (phba)->pport->cfg_log_verbose & LOG_ELS) 1941 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1942 "2881 RRQ failure DID:%06X Status:" 1943 "x%x/x%x\n", 1944 ndlp->nlp_DID, ulp_status, 1945 ulp_word4); 1946 } 1947 1948 lpfc_clr_rrq_active(phba, rrq->xritag, rrq); 1949 lpfc_els_free_iocb(phba, cmdiocb); 1950 lpfc_nlp_put(ndlp); 1951 return; 1952 } 1953 /** 1954 * lpfc_cmpl_els_plogi - Completion callback function for plogi 1955 * @phba: pointer to lpfc hba data structure. 1956 * @cmdiocb: pointer to lpfc command iocb data structure. 1957 * @rspiocb: pointer to lpfc response iocb data structure. 1958 * 1959 * This routine is the completion callback function for issuing the Port 1960 * Login (PLOGI) command. For PLOGI completion, there must be an active 1961 * ndlp on the vport node list that matches the remote node ID from the 1962 * PLOGI response IOCB. If no such ndlp exists, the PLOGI is simply 1963 * ignored and the command IOCB released. The PLOGI response IOCB status is 1964 * checked for error conditions. If an error status is reported, a PLOGI 1965 * retry shall be attempted by invoking the lpfc_els_retry() routine. 1966 * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on 1967 * the ndlp and the NLP_EVT_CMPL_PLOGI event shall be sent to the Discover 1968 * State Machine (DSM) for this PLOGI completion. Finally, it checks whether 1969 * there are additional N_Port nodes on the vport that need to perform 1970 * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue additional 1971 * PLOGIs.
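 *
 * Issue-side wiring sketch, taken from lpfc_issue_els_plogi() later in this
 * file and repeated here only to show how this callback gets attached:
 *
 *	elsiocb->cmd_cmpl = lpfc_cmpl_els_plogi;
 *	elsiocb->ndlp = lpfc_nlp_get(ndlp);	// node ref held for the callback
 *	lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);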
1972 **/ 1973 static void 1974 lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1975 struct lpfc_iocbq *rspiocb) 1976 { 1977 struct lpfc_vport *vport = cmdiocb->vport; 1978 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1979 IOCB_t *irsp; 1980 struct lpfc_nodelist *ndlp, *free_ndlp; 1981 struct lpfc_dmabuf *prsp; 1982 int disc; 1983 struct serv_parm *sp = NULL; 1984 u32 ulp_status, ulp_word4, did, iotag; 1985 bool release_node = false; 1986 1987 /* we pass cmdiocb to state machine which needs rspiocb as well */ 1988 cmdiocb->rsp_iocb = rspiocb; 1989 1990 ulp_status = get_job_ulpstatus(phba, rspiocb); 1991 ulp_word4 = get_job_word4(phba, rspiocb); 1992 did = get_job_els_rsp64_did(phba, cmdiocb); 1993 1994 if (phba->sli_rev == LPFC_SLI_REV4) { 1995 iotag = get_wqe_reqtag(cmdiocb); 1996 } else { 1997 irsp = &rspiocb->iocb; 1998 iotag = irsp->ulpIoTag; 1999 } 2000 2001 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2002 "PLOGI cmpl: status:x%x/x%x did:x%x", 2003 ulp_status, ulp_word4, did); 2004 2005 ndlp = lpfc_findnode_did(vport, did); 2006 if (!ndlp) { 2007 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2008 "0136 PLOGI completes to NPort x%x " 2009 "with no ndlp. Data: x%x x%x x%x\n", 2010 did, ulp_status, ulp_word4, iotag); 2011 goto out_freeiocb; 2012 } 2013 2014 /* Since ndlp can be freed in the disc state machine, note if this node 2015 * is being used during discovery. 2016 */ 2017 spin_lock_irq(&ndlp->lock); 2018 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 2019 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2020 spin_unlock_irq(&ndlp->lock); 2021 2022 /* PLOGI completes to NPort <nlp_DID> */ 2023 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2024 "0102 PLOGI completes to NPort x%06x " 2025 "Data: x%x x%x x%x x%x x%x\n", 2026 ndlp->nlp_DID, ndlp->nlp_fc4_type, 2027 ulp_status, ulp_word4, 2028 disc, vport->num_disc_nodes); 2029 2030 /* Check to see if link went down during discovery */ 2031 if (lpfc_els_chk_latt(vport)) { 2032 spin_lock_irq(&ndlp->lock); 2033 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2034 spin_unlock_irq(&ndlp->lock); 2035 goto out; 2036 } 2037 2038 if (ulp_status) { 2039 /* Check for retry */ 2040 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2041 /* ELS command is being retried */ 2042 if (disc) { 2043 spin_lock_irq(&ndlp->lock); 2044 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2045 spin_unlock_irq(&ndlp->lock); 2046 } 2047 goto out; 2048 } 2049 /* PLOGI failed Don't print the vport to vport rjts */ 2050 if (ulp_status != IOSTAT_LS_RJT || 2051 (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) && 2052 ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) || 2053 (phba)->pport->cfg_log_verbose & LOG_ELS) 2054 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2055 "2753 PLOGI failure DID:%06X " 2056 "Status:x%x/x%x\n", 2057 ndlp->nlp_DID, ulp_status, 2058 ulp_word4); 2059 2060 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2061 if (!lpfc_error_lost_link(ulp_status, ulp_word4)) 2062 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2063 NLP_EVT_CMPL_PLOGI); 2064 2065 /* If a PLOGI collision occurred, the node needs to continue 2066 * with the reglogin process. 2067 */ 2068 spin_lock_irq(&ndlp->lock); 2069 if ((ndlp->nlp_flag & (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI)) && 2070 ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) { 2071 spin_unlock_irq(&ndlp->lock); 2072 goto out; 2073 } 2074 2075 /* No PLOGI collision and the node is not registered with the 2076 * scsi or nvme transport. It is no longer an active node. Just 2077 * start the device remove process. 
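 *
 * Decision sketch (same checks as the code that follows):
 *   registered with the SCSI/NVME transport -> leave the node alone
 *   NLP_IN_DEV_LOSS still pending           -> leave the node alone
 *   otherwise                               -> post NLP_EVT_DEVICE_RM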
2078 */ 2079 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { 2080 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2081 if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) 2082 release_node = true; 2083 } 2084 spin_unlock_irq(&ndlp->lock); 2085 2086 if (release_node) 2087 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2088 NLP_EVT_DEVICE_RM); 2089 } else { 2090 /* Good status, call state machine */ 2091 prsp = list_entry(cmdiocb->cmd_dmabuf->list.next, 2092 struct lpfc_dmabuf, list); 2093 ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp); 2094 2095 sp = (struct serv_parm *)((u8 *)prsp->virt + 2096 sizeof(u32)); 2097 2098 ndlp->vmid_support = 0; 2099 if ((phba->cfg_vmid_app_header && sp->cmn.app_hdr_support) || 2100 (phba->cfg_vmid_priority_tagging && 2101 sp->cmn.priority_tagging)) { 2102 lpfc_printf_log(phba, KERN_DEBUG, LOG_ELS, 2103 "4018 app_hdr_support %d tagging %d DID x%x\n", 2104 sp->cmn.app_hdr_support, 2105 sp->cmn.priority_tagging, 2106 ndlp->nlp_DID); 2107 /* if the dest port supports VMID, mark it in ndlp */ 2108 ndlp->vmid_support = 1; 2109 } 2110 2111 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2112 NLP_EVT_CMPL_PLOGI); 2113 } 2114 2115 if (disc && vport->num_disc_nodes) { 2116 /* Check to see if there are more PLOGIs to be sent */ 2117 lpfc_more_plogi(vport); 2118 2119 if (vport->num_disc_nodes == 0) { 2120 spin_lock_irq(shost->host_lock); 2121 vport->fc_flag &= ~FC_NDISC_ACTIVE; 2122 spin_unlock_irq(shost->host_lock); 2123 2124 lpfc_can_disctmo(vport); 2125 lpfc_end_rscn(vport); 2126 } 2127 } 2128 2129 out: 2130 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, 2131 "PLOGI Cmpl PUT: did:x%x refcnt %d", 2132 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2133 2134 out_freeiocb: 2135 /* Release the reference on the original I/O request. */ 2136 free_ndlp = cmdiocb->ndlp; 2137 2138 lpfc_els_free_iocb(phba, cmdiocb); 2139 lpfc_nlp_put(free_ndlp); 2140 return; 2141 } 2142 2143 /** 2144 * lpfc_issue_els_plogi - Issue an plogi iocb command for a vport 2145 * @vport: pointer to a host virtual N_Port data structure. 2146 * @did: destination port identifier. 2147 * @retry: number of retries to the command IOCB. 2148 * 2149 * This routine issues a Port Login (PLOGI) command to a remote N_Port 2150 * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port, 2151 * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list. 2152 * This routine constructs the proper fields of the PLOGI IOCB and invokes 2153 * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command. 2154 * 2155 * Note that the ndlp reference count will be incremented by 1 for holding 2156 * the ndlp and the reference to ndlp will be stored into the ndlp field 2157 * of the IOCB for the completion callback function to the PLOGI ELS command. 2158 * 2159 * Return code 2160 * 0 - Successfully issued a plogi for @vport 2161 * 1 - failed to issue a plogi for @vport 2162 **/ 2163 int 2164 lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) 2165 { 2166 struct lpfc_hba *phba = vport->phba; 2167 struct serv_parm *sp; 2168 struct lpfc_nodelist *ndlp; 2169 struct lpfc_iocbq *elsiocb; 2170 uint8_t *pcmd; 2171 uint16_t cmdsize; 2172 int ret; 2173 2174 ndlp = lpfc_findnode_did(vport, did); 2175 if (!ndlp) 2176 return 1; 2177 2178 /* Defer the processing of the issue PLOGI until after the 2179 * outstanding UNREG_RPI mbox command completes, unless we 2180 * are going offline. 
This logic does not apply for Fabric DIDs 2181 */ 2182 if ((ndlp->nlp_flag & NLP_UNREG_INP) && 2183 ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) && 2184 !(vport->fc_flag & FC_OFFLINE_MODE)) { 2185 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2186 "4110 Issue PLOGI x%x deferred " 2187 "on NPort x%x rpi x%x Data: x%px\n", 2188 ndlp->nlp_defer_did, ndlp->nlp_DID, 2189 ndlp->nlp_rpi, ndlp); 2190 2191 /* We can only defer 1st PLOGI */ 2192 if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING) 2193 ndlp->nlp_defer_did = did; 2194 return 0; 2195 } 2196 2197 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 2198 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 2199 ELS_CMD_PLOGI); 2200 if (!elsiocb) 2201 return 1; 2202 2203 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 2204 2205 /* For PLOGI request, remainder of payload is service parameters */ 2206 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI; 2207 pcmd += sizeof(uint32_t); 2208 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); 2209 sp = (struct serv_parm *) pcmd; 2210 2211 /* 2212 * If we are a N-port connected to a Fabric, fix-up paramm's so logins 2213 * to device on remote loops work. 2214 */ 2215 if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP)) 2216 sp->cmn.altBbCredit = 1; 2217 2218 if (sp->cmn.fcphLow < FC_PH_4_3) 2219 sp->cmn.fcphLow = FC_PH_4_3; 2220 2221 if (sp->cmn.fcphHigh < FC_PH3) 2222 sp->cmn.fcphHigh = FC_PH3; 2223 2224 sp->cmn.valid_vendor_ver_level = 0; 2225 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); 2226 sp->cmn.bbRcvSizeMsb &= 0xF; 2227 2228 /* Check if the destination port supports VMID */ 2229 ndlp->vmid_support = 0; 2230 if (vport->vmid_priority_tagging) 2231 sp->cmn.priority_tagging = 1; 2232 else if (phba->cfg_vmid_app_header && 2233 bf_get(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags)) 2234 sp->cmn.app_hdr_support = 1; 2235 2236 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2237 "Issue PLOGI: did:x%x", 2238 did, 0, 0); 2239 2240 /* If our firmware supports this feature, convey that 2241 * information to the target using the vendor specific field. 2242 */ 2243 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) { 2244 sp->cmn.valid_vendor_ver_level = 1; 2245 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID); 2246 sp->un.vv.flags = cpu_to_be32(LPFC_VV_SUPPRESS_RSP); 2247 } 2248 2249 phba->fc_stat.elsXmitPLOGI++; 2250 elsiocb->cmd_cmpl = lpfc_cmpl_els_plogi; 2251 2252 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2253 "Issue PLOGI: did:x%x refcnt %d", 2254 did, kref_read(&ndlp->kref), 0); 2255 elsiocb->ndlp = lpfc_nlp_get(ndlp); 2256 if (!elsiocb->ndlp) { 2257 lpfc_els_free_iocb(phba, elsiocb); 2258 return 1; 2259 } 2260 2261 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2262 if (ret) { 2263 lpfc_els_free_iocb(phba, elsiocb); 2264 lpfc_nlp_put(ndlp); 2265 return 1; 2266 } 2267 2268 return 0; 2269 } 2270 2271 /** 2272 * lpfc_cmpl_els_prli - Completion callback function for prli 2273 * @phba: pointer to lpfc hba data structure. 2274 * @cmdiocb: pointer to lpfc command iocb data structure. 2275 * @rspiocb: pointer to lpfc response iocb data structure. 2276 * 2277 * This routine is the completion callback function for a Process Login 2278 * (PRLI) ELS command. The PRLI response IOCB status is checked for error 2279 * status. If there is error status reported, PRLI retry shall be attempted 2280 * by invoking the lpfc_els_retry() routine. 
Otherwise, the state 2281 * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this 2282 * ndlp to mark the PRLI completion. 2283 **/ 2284 static void 2285 lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2286 struct lpfc_iocbq *rspiocb) 2287 { 2288 struct lpfc_vport *vport = cmdiocb->vport; 2289 struct lpfc_nodelist *ndlp; 2290 char *mode; 2291 u32 loglevel; 2292 u32 ulp_status; 2293 u32 ulp_word4; 2294 bool release_node = false; 2295 2296 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2297 cmdiocb->rsp_iocb = rspiocb; 2298 2299 ndlp = cmdiocb->ndlp; 2300 2301 ulp_status = get_job_ulpstatus(phba, rspiocb); 2302 ulp_word4 = get_job_word4(phba, rspiocb); 2303 2304 spin_lock_irq(&ndlp->lock); 2305 ndlp->nlp_flag &= ~NLP_PRLI_SND; 2306 2307 /* Driver supports multiple FC4 types. Counters matter. */ 2308 vport->fc_prli_sent--; 2309 ndlp->fc4_prli_sent--; 2310 spin_unlock_irq(&ndlp->lock); 2311 2312 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2313 "PRLI cmpl: status:x%x/x%x did:x%x", 2314 ulp_status, ulp_word4, 2315 ndlp->nlp_DID); 2316 2317 /* PRLI completes to NPort <nlp_DID> */ 2318 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2319 "0103 PRLI completes to NPort x%06x " 2320 "Data: x%x x%x x%x x%x\n", 2321 ndlp->nlp_DID, ulp_status, ulp_word4, 2322 vport->num_disc_nodes, ndlp->fc4_prli_sent); 2323 2324 /* Check to see if link went down during discovery */ 2325 if (lpfc_els_chk_latt(vport)) 2326 goto out; 2327 2328 if (ulp_status) { 2329 /* Check for retry */ 2330 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2331 /* ELS command is being retried */ 2332 goto out; 2333 } 2334 2335 /* If we don't send GFT_ID to Fabric, a PRLI error 2336 * could be expected. 2337 */ 2338 if ((vport->fc_flag & FC_FABRIC) || 2339 (vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH)) { 2340 mode = KERN_ERR; 2341 loglevel = LOG_TRACE_EVENT; 2342 } else { 2343 mode = KERN_INFO; 2344 loglevel = LOG_ELS; 2345 } 2346 2347 /* PRLI failed */ 2348 lpfc_printf_vlog(vport, mode, loglevel, 2349 "2754 PRLI failure DID:%06X Status:x%x/x%x, " 2350 "data: x%x\n", 2351 ndlp->nlp_DID, ulp_status, 2352 ulp_word4, ndlp->fc4_prli_sent); 2353 2354 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2355 if (!lpfc_error_lost_link(ulp_status, ulp_word4)) 2356 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2357 NLP_EVT_CMPL_PRLI); 2358 2359 /* 2360 * For P2P topology, retain the node so that PLOGI can be 2361 * attempted on it again. 2362 */ 2363 if (vport->fc_flag & FC_PT2PT) 2364 goto out; 2365 2366 /* As long as this node is not registered with the SCSI 2367 * or NVMe transport and no other PRLIs are outstanding, 2368 * it is no longer an active node. Otherwise devloss 2369 * handles the final cleanup. 2370 */ 2371 spin_lock_irq(&ndlp->lock); 2372 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && 2373 !ndlp->fc4_prli_sent) { 2374 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2375 if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) 2376 release_node = true; 2377 } 2378 spin_unlock_irq(&ndlp->lock); 2379 2380 if (release_node) 2381 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2382 NLP_EVT_DEVICE_RM); 2383 } else { 2384 /* Good status, call state machine. However, if another 2385 * PRLI is outstanding, don't call the state machine 2386 * because final disposition to Mapped or Unmapped is 2387 * completed there. 
2388 */ 2389 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2390 NLP_EVT_CMPL_PRLI); 2391 } 2392 2393 out: 2394 lpfc_els_free_iocb(phba, cmdiocb); 2395 lpfc_nlp_put(ndlp); 2396 return; 2397 } 2398 2399 /** 2400 * lpfc_issue_els_prli - Issue a prli iocb command for a vport 2401 * @vport: pointer to a host virtual N_Port data structure. 2402 * @ndlp: pointer to a node-list data structure. 2403 * @retry: number of retries to the command IOCB. 2404 * 2405 * This routine issues a Process Login (PRLI) ELS command for the 2406 * @vport. The PRLI service parameters are set up in the payload of the 2407 * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine 2408 * is put to the IOCB completion callback func field before invoking the 2409 * routine lpfc_sli_issue_iocb() to send out PRLI command. 2410 * 2411 * Note that the ndlp reference count will be incremented by 1 for holding the 2412 * ndlp and the reference to ndlp will be stored into the ndlp field of 2413 * the IOCB for the completion callback function to the PRLI ELS command. 2414 * 2415 * Return code 2416 * 0 - successfully issued prli iocb command for @vport 2417 * 1 - failed to issue prli iocb command for @vport 2418 **/ 2419 int 2420 lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2421 uint8_t retry) 2422 { 2423 int rc = 0; 2424 struct lpfc_hba *phba = vport->phba; 2425 PRLI *npr; 2426 struct lpfc_nvme_prli *npr_nvme; 2427 struct lpfc_iocbq *elsiocb; 2428 uint8_t *pcmd; 2429 uint16_t cmdsize; 2430 u32 local_nlp_type, elscmd; 2431 2432 /* 2433 * If we are in RSCN mode, the FC4 types supported from a 2434 * previous GFT_ID command may not be accurate. So, if we 2435 * are a NVME Initiator, always look for the possibility of 2436 * the remote NPort beng a NVME Target. 2437 */ 2438 if (phba->sli_rev == LPFC_SLI_REV4 && 2439 vport->fc_flag & FC_RSCN_MODE && 2440 vport->nvmei_support) 2441 ndlp->nlp_fc4_type |= NLP_FC4_NVME; 2442 local_nlp_type = ndlp->nlp_fc4_type; 2443 2444 /* This routine will issue 1 or 2 PRLIs, so zero all the ndlp 2445 * fields here before any of them can complete. 2446 */ 2447 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); 2448 ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR); 2449 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 2450 ndlp->nlp_flag &= ~(NLP_FIRSTBURST | NLP_NPR_2B_DISC); 2451 ndlp->nvme_fb_size = 0; 2452 2453 send_next_prli: 2454 if (local_nlp_type & NLP_FC4_FCP) { 2455 /* Payload is 4 + 16 = 20 x14 bytes. */ 2456 cmdsize = (sizeof(uint32_t) + sizeof(PRLI)); 2457 elscmd = ELS_CMD_PRLI; 2458 } else if (local_nlp_type & NLP_FC4_NVME) { 2459 /* Payload is 4 + 20 = 24 x18 bytes. */ 2460 cmdsize = (sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli)); 2461 elscmd = ELS_CMD_NVMEPRLI; 2462 } else { 2463 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2464 "3083 Unknown FC_TYPE x%x ndlp x%06x\n", 2465 ndlp->nlp_fc4_type, ndlp->nlp_DID); 2466 return 1; 2467 } 2468 2469 /* SLI3 ports don't support NVME. If this rport is a strict NVME 2470 * FC4 type, implicitly LOGO. 
2471 */ 2472 if (phba->sli_rev == LPFC_SLI_REV3 && 2473 ndlp->nlp_fc4_type == NLP_FC4_NVME) { 2474 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2475 "3088 Rport fc4 type 0x%x not supported by SLI3 adapter\n", 2476 ndlp->nlp_type); 2477 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 2478 return 1; 2479 } 2480 2481 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2482 ndlp->nlp_DID, elscmd); 2483 if (!elsiocb) 2484 return 1; 2485 2486 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 2487 2488 /* For PRLI request, remainder of payload is service parameters */ 2489 memset(pcmd, 0, cmdsize); 2490 2491 if (local_nlp_type & NLP_FC4_FCP) { 2492 /* Remainder of payload is FCP PRLI parameter page. 2493 * Note: this data structure is defined as 2494 * BE/LE in the structure definition so no 2495 * byte swap call is made. 2496 */ 2497 *((uint32_t *)(pcmd)) = ELS_CMD_PRLI; 2498 pcmd += sizeof(uint32_t); 2499 npr = (PRLI *)pcmd; 2500 2501 /* 2502 * If our firmware version is 3.20 or later, 2503 * set the following bits for FC-TAPE support. 2504 */ 2505 if (phba->vpd.rev.feaLevelHigh >= 0x02) { 2506 npr->ConfmComplAllowed = 1; 2507 npr->Retry = 1; 2508 npr->TaskRetryIdReq = 1; 2509 } 2510 npr->estabImagePair = 1; 2511 npr->readXferRdyDis = 1; 2512 if (vport->cfg_first_burst_size) 2513 npr->writeXferRdyDis = 1; 2514 2515 /* For FCP support */ 2516 npr->prliType = PRLI_FCP_TYPE; 2517 npr->initiatorFunc = 1; 2518 elsiocb->cmd_flag |= LPFC_PRLI_FCP_REQ; 2519 2520 /* Remove FCP type - processed. */ 2521 local_nlp_type &= ~NLP_FC4_FCP; 2522 } else if (local_nlp_type & NLP_FC4_NVME) { 2523 /* Remainder of payload is NVME PRLI parameter page. 2524 * This data structure is the newer definition that 2525 * uses bf macros so a byte swap is required. 2526 */ 2527 *((uint32_t *)(pcmd)) = ELS_CMD_NVMEPRLI; 2528 pcmd += sizeof(uint32_t); 2529 npr_nvme = (struct lpfc_nvme_prli *)pcmd; 2530 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); 2531 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ 2532 if (phba->nsler) { 2533 bf_set(prli_nsler, npr_nvme, 1); 2534 bf_set(prli_conf, npr_nvme, 1); 2535 } 2536 2537 /* Only initiators request first burst. */ 2538 if ((phba->cfg_nvme_enable_fb) && 2539 !phba->nvmet_support) 2540 bf_set(prli_fba, npr_nvme, 1); 2541 2542 if (phba->nvmet_support) { 2543 bf_set(prli_tgt, npr_nvme, 1); 2544 bf_set(prli_disc, npr_nvme, 1); 2545 } else { 2546 bf_set(prli_init, npr_nvme, 1); 2547 bf_set(prli_conf, npr_nvme, 1); 2548 } 2549 2550 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); 2551 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); 2552 elsiocb->cmd_flag |= LPFC_PRLI_NVME_REQ; 2553 2554 /* Remove NVME type - processed. */ 2555 local_nlp_type &= ~NLP_FC4_NVME; 2556 } 2557 2558 phba->fc_stat.elsXmitPRLI++; 2559 elsiocb->cmd_cmpl = lpfc_cmpl_els_prli; 2560 2561 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2562 "Issue PRLI: did:x%x refcnt %d", 2563 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2564 elsiocb->ndlp = lpfc_nlp_get(ndlp); 2565 if (!elsiocb->ndlp) { 2566 lpfc_els_free_iocb(phba, elsiocb); 2567 return 1; 2568 } 2569 2570 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2571 if (rc == IOCB_ERROR) { 2572 lpfc_els_free_iocb(phba, elsiocb); 2573 lpfc_nlp_put(ndlp); 2574 return 1; 2575 } 2576 2577 /* The vport counters are used for lpfc_scan_finished, but 2578 * the ndlp is used to track outstanding PRLIs for different 2579 * FC4 types. 
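 *
 * Counter sketch for an rport reporting both FCP and NVME on SLI4
 * (mirrors the send_next_prli loop in this routine, illustration only):
 *   pass 1: ELS_CMD_PRLI     issued -> ndlp->fc4_prli_sent == 1
 *   pass 2: ELS_CMD_NVMEPRLI issued -> ndlp->fc4_prli_sent == 2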
2580 */ 2581 spin_lock_irq(&ndlp->lock); 2582 ndlp->nlp_flag |= NLP_PRLI_SND; 2583 vport->fc_prli_sent++; 2584 ndlp->fc4_prli_sent++; 2585 spin_unlock_irq(&ndlp->lock); 2586 2587 /* The driver supports 2 FC4 types. Make sure 2588 * a PRLI is issued for all types before exiting. 2589 */ 2590 if (phba->sli_rev == LPFC_SLI_REV4 && 2591 local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME)) 2592 goto send_next_prli; 2593 else 2594 return 0; 2595 } 2596 2597 /** 2598 * lpfc_rscn_disc - Perform rscn discovery for a vport 2599 * @vport: pointer to a host virtual N_Port data structure. 2600 * 2601 * This routine performs Registration State Change Notification (RSCN) 2602 * discovery for a @vport. If the @vport's node port recovery count is not 2603 * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all 2604 * the nodes that need recovery. If none of the PLOGI were needed through 2605 * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be 2606 * invoked to check and handle possible more RSCN came in during the period 2607 * of processing the current ones. 2608 **/ 2609 static void 2610 lpfc_rscn_disc(struct lpfc_vport *vport) 2611 { 2612 lpfc_can_disctmo(vport); 2613 2614 /* RSCN discovery */ 2615 /* go thru NPR nodes and issue ELS PLOGIs */ 2616 if (vport->fc_npr_cnt) 2617 if (lpfc_els_disc_plogi(vport)) 2618 return; 2619 2620 lpfc_end_rscn(vport); 2621 } 2622 2623 /** 2624 * lpfc_adisc_done - Complete the adisc phase of discovery 2625 * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs. 2626 * 2627 * This function is called when the final ADISC is completed during discovery. 2628 * This function handles clearing link attention or issuing reg_vpi depending 2629 * on whether npiv is enabled. This function also kicks off the PLOGI phase of 2630 * discovery. 2631 * This function is called with no locks held. 2632 **/ 2633 static void 2634 lpfc_adisc_done(struct lpfc_vport *vport) 2635 { 2636 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2637 struct lpfc_hba *phba = vport->phba; 2638 2639 /* 2640 * For NPIV, cmpl_reg_vpi will set port_state to READY, 2641 * and continue discovery. 2642 */ 2643 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 2644 !(vport->fc_flag & FC_RSCN_MODE) && 2645 (phba->sli_rev < LPFC_SLI_REV4)) { 2646 2647 /* 2648 * If link is down, clear_la and reg_vpi will be done after 2649 * flogi following a link up event 2650 */ 2651 if (!lpfc_is_link_up(phba)) 2652 return; 2653 2654 /* The ADISCs are complete. Doesn't matter if they 2655 * succeeded or failed because the ADISC completion 2656 * routine guarantees to call the state machine and 2657 * the RPI is either unregistered (failed ADISC response) 2658 * or the RPI is still valid and the node is marked 2659 * mapped for a target. The exchanges should be in the 2660 * correct state. This code is specific to SLI3. 2661 */ 2662 lpfc_issue_clear_la(phba, vport); 2663 lpfc_issue_reg_vpi(phba, vport); 2664 return; 2665 } 2666 /* 2667 * For SLI2, we need to set port_state to READY 2668 * and continue discovery. 
2669 */ 2670 if (vport->port_state < LPFC_VPORT_READY) { 2671 /* If we get here, there is nothing to ADISC */ 2672 lpfc_issue_clear_la(phba, vport); 2673 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) { 2674 vport->num_disc_nodes = 0; 2675 /* go thru NPR list, issue ELS PLOGIs */ 2676 if (vport->fc_npr_cnt) 2677 lpfc_els_disc_plogi(vport); 2678 if (!vport->num_disc_nodes) { 2679 spin_lock_irq(shost->host_lock); 2680 vport->fc_flag &= ~FC_NDISC_ACTIVE; 2681 spin_unlock_irq(shost->host_lock); 2682 lpfc_can_disctmo(vport); 2683 lpfc_end_rscn(vport); 2684 } 2685 } 2686 vport->port_state = LPFC_VPORT_READY; 2687 } else 2688 lpfc_rscn_disc(vport); 2689 } 2690 2691 /** 2692 * lpfc_more_adisc - Issue more adisc as needed 2693 * @vport: pointer to a host virtual N_Port data structure. 2694 * 2695 * This routine determines whether there are more ndlps on a @vport 2696 * node list need to have Address Discover (ADISC) issued. If so, it will 2697 * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's 2698 * remaining nodes which need to have ADISC sent. 2699 **/ 2700 void 2701 lpfc_more_adisc(struct lpfc_vport *vport) 2702 { 2703 if (vport->num_disc_nodes) 2704 vport->num_disc_nodes--; 2705 /* Continue discovery with <num_disc_nodes> ADISCs to go */ 2706 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2707 "0210 Continue discovery with %d ADISCs to go " 2708 "Data: x%x x%x x%x\n", 2709 vport->num_disc_nodes, vport->fc_adisc_cnt, 2710 vport->fc_flag, vport->port_state); 2711 /* Check to see if there are more ADISCs to be sent */ 2712 if (vport->fc_flag & FC_NLP_MORE) { 2713 lpfc_set_disctmo(vport); 2714 /* go thru NPR nodes and issue any remaining ELS ADISCs */ 2715 lpfc_els_disc_adisc(vport); 2716 } 2717 if (!vport->num_disc_nodes) 2718 lpfc_adisc_done(vport); 2719 return; 2720 } 2721 2722 /** 2723 * lpfc_cmpl_els_adisc - Completion callback function for adisc 2724 * @phba: pointer to lpfc hba data structure. 2725 * @cmdiocb: pointer to lpfc command iocb data structure. 2726 * @rspiocb: pointer to lpfc response iocb data structure. 2727 * 2728 * This routine is the completion function for issuing the Address Discover 2729 * (ADISC) command. It first checks to see whether link went down during 2730 * the discovery process. If so, the node will be marked as node port 2731 * recovery for issuing discover IOCB by the link attention handler and 2732 * exit. Otherwise, the response status is checked. If error was reported 2733 * in the response status, the ADISC command shall be retried by invoking 2734 * the lpfc_els_retry() routine. Otherwise, if no error was reported in 2735 * the response status, the state machine is invoked to set transition 2736 * with respect to NLP_EVT_CMPL_ADISC event. 
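 *
 * Issue-side wiring sketch, taken from lpfc_issue_els_adisc() just below and
 * repeated here only to show how this callback gets attached:
 *
 *	elsiocb->cmd_cmpl = lpfc_cmpl_els_adisc;
 *	ndlp->nlp_flag |= NLP_ADISC_SND;	// cleared again in this callback
 *	elsiocb->ndlp = lpfc_nlp_get(ndlp);
 *	lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);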
2737 **/ 2738 static void 2739 lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2740 struct lpfc_iocbq *rspiocb) 2741 { 2742 struct lpfc_vport *vport = cmdiocb->vport; 2743 IOCB_t *irsp; 2744 struct lpfc_nodelist *ndlp; 2745 int disc; 2746 u32 ulp_status, ulp_word4, tmo; 2747 bool release_node = false; 2748 2749 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2750 cmdiocb->rsp_iocb = rspiocb; 2751 2752 ndlp = cmdiocb->ndlp; 2753 2754 ulp_status = get_job_ulpstatus(phba, rspiocb); 2755 ulp_word4 = get_job_word4(phba, rspiocb); 2756 2757 if (phba->sli_rev == LPFC_SLI_REV4) { 2758 tmo = get_wqe_tmo(cmdiocb); 2759 } else { 2760 irsp = &rspiocb->iocb; 2761 tmo = irsp->ulpTimeout; 2762 } 2763 2764 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2765 "ADISC cmpl: status:x%x/x%x did:x%x", 2766 ulp_status, ulp_word4, 2767 ndlp->nlp_DID); 2768 2769 /* Since ndlp can be freed in the disc state machine, note if this node 2770 * is being used during discovery. 2771 */ 2772 spin_lock_irq(&ndlp->lock); 2773 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 2774 ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC); 2775 spin_unlock_irq(&ndlp->lock); 2776 /* ADISC completes to NPort <nlp_DID> */ 2777 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2778 "0104 ADISC completes to NPort x%x " 2779 "Data: x%x x%x x%x x%x x%x\n", 2780 ndlp->nlp_DID, ulp_status, ulp_word4, 2781 tmo, disc, vport->num_disc_nodes); 2782 /* Check to see if link went down during discovery */ 2783 if (lpfc_els_chk_latt(vport)) { 2784 spin_lock_irq(&ndlp->lock); 2785 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2786 spin_unlock_irq(&ndlp->lock); 2787 goto out; 2788 } 2789 2790 if (ulp_status) { 2791 /* Check for retry */ 2792 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2793 /* ELS command is being retried */ 2794 if (disc) { 2795 spin_lock_irq(&ndlp->lock); 2796 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2797 spin_unlock_irq(&ndlp->lock); 2798 lpfc_set_disctmo(vport); 2799 } 2800 goto out; 2801 } 2802 /* ADISC failed */ 2803 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2804 "2755 ADISC failure DID:%06X Status:x%x/x%x\n", 2805 ndlp->nlp_DID, ulp_status, 2806 ulp_word4); 2807 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2808 NLP_EVT_CMPL_ADISC); 2809 2810 /* As long as this node is not registered with the SCSI or NVMe 2811 * transport, it is no longer an active node. Otherwise 2812 * devloss handles the final cleanup. 2813 */ 2814 spin_lock_irq(&ndlp->lock); 2815 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { 2816 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2817 if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) 2818 release_node = true; 2819 } 2820 spin_unlock_irq(&ndlp->lock); 2821 2822 if (release_node) 2823 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2824 NLP_EVT_DEVICE_RM); 2825 } else 2826 /* Good status, call state machine */ 2827 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2828 NLP_EVT_CMPL_ADISC); 2829 2830 /* Check to see if there are more ADISCs to be sent */ 2831 if (disc && vport->num_disc_nodes) 2832 lpfc_more_adisc(vport); 2833 out: 2834 lpfc_els_free_iocb(phba, cmdiocb); 2835 lpfc_nlp_put(ndlp); 2836 return; 2837 } 2838 2839 /** 2840 * lpfc_issue_els_adisc - Issue an address discover iocb to an node on a vport 2841 * @vport: pointer to a virtual N_Port data structure. 2842 * @ndlp: pointer to a node-list data structure. 2843 * @retry: number of retries to the command IOCB. 2844 * 2845 * This routine issues an Address Discover (ADISC) for an @ndlp on a 2846 * @vport. 
It prepares the payload of the ADISC ELS command, updates the 2847 * and states of the ndlp, and invokes the lpfc_sli_issue_iocb() routine 2848 * to issue the ADISC ELS command. 2849 * 2850 * Note that the ndlp reference count will be incremented by 1 for holding the 2851 * ndlp and the reference to ndlp will be stored into the ndlp field of 2852 * the IOCB for the completion callback function to the ADISC ELS command. 2853 * 2854 * Return code 2855 * 0 - successfully issued adisc 2856 * 1 - failed to issue adisc 2857 **/ 2858 int 2859 lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2860 uint8_t retry) 2861 { 2862 int rc = 0; 2863 struct lpfc_hba *phba = vport->phba; 2864 ADISC *ap; 2865 struct lpfc_iocbq *elsiocb; 2866 uint8_t *pcmd; 2867 uint16_t cmdsize; 2868 2869 cmdsize = (sizeof(uint32_t) + sizeof(ADISC)); 2870 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2871 ndlp->nlp_DID, ELS_CMD_ADISC); 2872 if (!elsiocb) 2873 return 1; 2874 2875 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 2876 2877 /* For ADISC request, remainder of payload is service parameters */ 2878 *((uint32_t *) (pcmd)) = ELS_CMD_ADISC; 2879 pcmd += sizeof(uint32_t); 2880 2881 /* Fill in ADISC payload */ 2882 ap = (ADISC *) pcmd; 2883 ap->hardAL_PA = phba->fc_pref_ALPA; 2884 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 2885 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 2886 ap->DID = be32_to_cpu(vport->fc_myDID); 2887 2888 phba->fc_stat.elsXmitADISC++; 2889 elsiocb->cmd_cmpl = lpfc_cmpl_els_adisc; 2890 spin_lock_irq(&ndlp->lock); 2891 ndlp->nlp_flag |= NLP_ADISC_SND; 2892 spin_unlock_irq(&ndlp->lock); 2893 elsiocb->ndlp = lpfc_nlp_get(ndlp); 2894 if (!elsiocb->ndlp) { 2895 lpfc_els_free_iocb(phba, elsiocb); 2896 goto err; 2897 } 2898 2899 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2900 "Issue ADISC: did:x%x refcnt %d", 2901 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2902 2903 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2904 if (rc == IOCB_ERROR) { 2905 lpfc_els_free_iocb(phba, elsiocb); 2906 lpfc_nlp_put(ndlp); 2907 goto err; 2908 } 2909 2910 return 0; 2911 2912 err: 2913 spin_lock_irq(&ndlp->lock); 2914 ndlp->nlp_flag &= ~NLP_ADISC_SND; 2915 spin_unlock_irq(&ndlp->lock); 2916 return 1; 2917 } 2918 2919 /** 2920 * lpfc_cmpl_els_logo - Completion callback function for logo 2921 * @phba: pointer to lpfc hba data structure. 2922 * @cmdiocb: pointer to lpfc command iocb data structure. 2923 * @rspiocb: pointer to lpfc response iocb data structure. 2924 * 2925 * This routine is the completion function for issuing the ELS Logout (LOGO) 2926 * command. If no error status was reported from the LOGO response, the 2927 * state machine of the associated ndlp shall be invoked for transition with 2928 * respect to NLP_EVT_CMPL_LOGO event. 
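 *
 * Issue-side wiring sketch, taken from lpfc_issue_els_logo() later in this
 * file and repeated here only to show how this callback gets attached:
 *
 *	elsiocb->cmd_cmpl = lpfc_cmpl_els_logo;
 *	ndlp->nlp_flag |= NLP_LOGO_SND;		// cleared again in this callback
 *	elsiocb->ndlp = lpfc_nlp_get(ndlp);
 *	lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);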
2929 **/ 2930 static void 2931 lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2932 struct lpfc_iocbq *rspiocb) 2933 { 2934 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 2935 struct lpfc_vport *vport = ndlp->vport; 2936 IOCB_t *irsp; 2937 unsigned long flags; 2938 uint32_t skip_recovery = 0; 2939 int wake_up_waiter = 0; 2940 u32 ulp_status; 2941 u32 ulp_word4; 2942 u32 tmo; 2943 2944 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2945 cmdiocb->rsp_iocb = rspiocb; 2946 2947 ulp_status = get_job_ulpstatus(phba, rspiocb); 2948 ulp_word4 = get_job_word4(phba, rspiocb); 2949 2950 if (phba->sli_rev == LPFC_SLI_REV4) { 2951 tmo = get_wqe_tmo(cmdiocb); 2952 } else { 2953 irsp = &rspiocb->iocb; 2954 tmo = irsp->ulpTimeout; 2955 } 2956 2957 spin_lock_irq(&ndlp->lock); 2958 ndlp->nlp_flag &= ~NLP_LOGO_SND; 2959 if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) { 2960 wake_up_waiter = 1; 2961 ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO; 2962 } 2963 spin_unlock_irq(&ndlp->lock); 2964 2965 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2966 "LOGO cmpl: status:x%x/x%x did:x%x", 2967 ulp_status, ulp_word4, 2968 ndlp->nlp_DID); 2969 2970 /* LOGO completes to NPort <nlp_DID> */ 2971 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2972 "0105 LOGO completes to NPort x%x " 2973 "refcnt %d nflags x%x Data: x%x x%x x%x x%x\n", 2974 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag, 2975 ulp_status, ulp_word4, 2976 tmo, vport->num_disc_nodes); 2977 2978 if (lpfc_els_chk_latt(vport)) { 2979 skip_recovery = 1; 2980 goto out; 2981 } 2982 2983 /* The LOGO will not be retried on failure. A LOGO was 2984 * issued to the remote rport and a ACC or RJT or no Answer are 2985 * all acceptable. Note the failure and move forward with 2986 * discovery. The PLOGI will retry. 2987 */ 2988 if (ulp_status) { 2989 /* LOGO failed */ 2990 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2991 "2756 LOGO failure, No Retry DID:%06X " 2992 "Status:x%x/x%x\n", 2993 ndlp->nlp_DID, ulp_status, 2994 ulp_word4); 2995 2996 if (lpfc_error_lost_link(ulp_status, ulp_word4)) { 2997 skip_recovery = 1; 2998 goto out; 2999 } 3000 } 3001 3002 /* Call state machine. This will unregister the rpi if needed. */ 3003 lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO); 3004 3005 /* The driver sets this flag for an NPIV instance that doesn't want to 3006 * log into the remote port. 3007 */ 3008 if (ndlp->nlp_flag & NLP_TARGET_REMOVE) { 3009 spin_lock_irq(&ndlp->lock); 3010 if (phba->sli_rev == LPFC_SLI_REV4) 3011 ndlp->nlp_flag |= NLP_RELEASE_RPI; 3012 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 3013 spin_unlock_irq(&ndlp->lock); 3014 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 3015 NLP_EVT_DEVICE_RM); 3016 goto out_rsrc_free; 3017 } 3018 3019 out: 3020 /* At this point, the LOGO processing is complete. NOTE: For a 3021 * pt2pt topology, we are assuming the NPortID will only change 3022 * on link up processing. For a LOGO / PLOGI initiated by the 3023 * Initiator, we are assuming the NPortID is not going to change. 3024 */ 3025 3026 if (wake_up_waiter && ndlp->logo_waitq) 3027 wake_up(ndlp->logo_waitq); 3028 /* 3029 * If the node is a target, the handling attempts to recover the port. 3030 * For any other port type, the rpi is unregistered as an implicit 3031 * LOGO. 
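 *
 * Outcome sketch (same branches as the code below, reading aid only):
 *   FCP/NVME target and recovery not skipped -> set NLP_NPR_2B_DISC and
 *                                               restart discovery
 *   node not registered with a transport     -> post NLP_EVT_DEVICE_RM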
3032 */ 3033 if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) && 3034 skip_recovery == 0) { 3035 lpfc_cancel_retry_delay_tmo(vport, ndlp); 3036 spin_lock_irqsave(&ndlp->lock, flags); 3037 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 3038 spin_unlock_irqrestore(&ndlp->lock, flags); 3039 3040 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3041 "3187 LOGO completes to NPort x%x: Start " 3042 "Recovery Data: x%x x%x x%x x%x\n", 3043 ndlp->nlp_DID, ulp_status, 3044 ulp_word4, tmo, 3045 vport->num_disc_nodes); 3046 3047 lpfc_els_free_iocb(phba, cmdiocb); 3048 lpfc_nlp_put(ndlp); 3049 3050 lpfc_disc_start(vport); 3051 return; 3052 } 3053 3054 /* Cleanup path for failed REG_RPI handling. If REG_RPI fails, the 3055 * driver sends a LOGO to the rport to cleanup. For fabric and 3056 * initiator ports, clean up the node as long as the node is not 3057 * registered with the transport. 3058 */ 3059 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { 3060 spin_lock_irq(&ndlp->lock); 3061 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 3062 spin_unlock_irq(&ndlp->lock); 3063 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 3064 NLP_EVT_DEVICE_RM); 3065 } 3066 out_rsrc_free: 3067 /* Driver is done with the I/O. */ 3068 lpfc_els_free_iocb(phba, cmdiocb); 3069 lpfc_nlp_put(ndlp); 3070 } 3071 3072 /** 3073 * lpfc_issue_els_logo - Issue a logo to a node on a vport 3074 * @vport: pointer to a virtual N_Port data structure. 3075 * @ndlp: pointer to a node-list data structure. 3076 * @retry: number of retries to the command IOCB. 3077 * 3078 * This routine constructs and issues an ELS Logout (LOGO) iocb command 3079 * to a remote node, referred to by an @ndlp on a @vport. It constructs the 3080 * payload of the IOCB, properly sets up the @ndlp state, and invokes the 3081 * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command. 3082 * 3083 * Note that the ndlp reference count will be incremented by 1 for holding the 3084 * ndlp and the reference to ndlp will be stored into the ndlp field of 3085 * the IOCB for the completion callback function to the LOGO ELS command.
3086 * 3087 * Callers of this routine are expected to unregister the RPI first 3088 * 3089 * Return code 3090 * 0 - successfully issued logo 3091 * 1 - failed to issue logo 3092 **/ 3093 int 3094 lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 3095 uint8_t retry) 3096 { 3097 struct lpfc_hba *phba = vport->phba; 3098 struct lpfc_iocbq *elsiocb; 3099 uint8_t *pcmd; 3100 uint16_t cmdsize; 3101 int rc; 3102 3103 spin_lock_irq(&ndlp->lock); 3104 if (ndlp->nlp_flag & NLP_LOGO_SND) { 3105 spin_unlock_irq(&ndlp->lock); 3106 return 0; 3107 } 3108 spin_unlock_irq(&ndlp->lock); 3109 3110 cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name); 3111 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3112 ndlp->nlp_DID, ELS_CMD_LOGO); 3113 if (!elsiocb) 3114 return 1; 3115 3116 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 3117 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 3118 pcmd += sizeof(uint32_t); 3119 3120 /* Fill in LOGO payload */ 3121 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 3122 pcmd += sizeof(uint32_t); 3123 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 3124 3125 phba->fc_stat.elsXmitLOGO++; 3126 elsiocb->cmd_cmpl = lpfc_cmpl_els_logo; 3127 spin_lock_irq(&ndlp->lock); 3128 ndlp->nlp_flag |= NLP_LOGO_SND; 3129 ndlp->nlp_flag &= ~NLP_ISSUE_LOGO; 3130 spin_unlock_irq(&ndlp->lock); 3131 elsiocb->ndlp = lpfc_nlp_get(ndlp); 3132 if (!elsiocb->ndlp) { 3133 lpfc_els_free_iocb(phba, elsiocb); 3134 goto err; 3135 } 3136 3137 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3138 "Issue LOGO: did:x%x refcnt %d", 3139 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3140 3141 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3142 if (rc == IOCB_ERROR) { 3143 lpfc_els_free_iocb(phba, elsiocb); 3144 lpfc_nlp_put(ndlp); 3145 goto err; 3146 } 3147 3148 spin_lock_irq(&ndlp->lock); 3149 ndlp->nlp_prev_state = ndlp->nlp_state; 3150 spin_unlock_irq(&ndlp->lock); 3151 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 3152 return 0; 3153 3154 err: 3155 spin_lock_irq(&ndlp->lock); 3156 ndlp->nlp_flag &= ~NLP_LOGO_SND; 3157 spin_unlock_irq(&ndlp->lock); 3158 return 1; 3159 } 3160 3161 /** 3162 * lpfc_cmpl_els_cmd - Completion callback function for generic els command 3163 * @phba: pointer to lpfc hba data structure. 3164 * @cmdiocb: pointer to lpfc command iocb data structure. 3165 * @rspiocb: pointer to lpfc response iocb data structure. 3166 * 3167 * This routine is a generic completion callback function for ELS commands. 3168 * Specifically, it is the callback function which does not need to perform 3169 * any command specific operations. It is currently used by the ELS command 3170 * issuing routines for RSCN, lpfc_issue_els_rscn, and the ELS Fibre Channel 3171 * Address Resolution Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). 3172 * Other than certain debug loggings, this callback function simply invokes the 3173 * lpfc_els_chk_latt() routine to check whether link went down during the 3174 * discovery process. 
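 *
 * Issue-side wiring sketch, as done for RSCN in lpfc_issue_els_rscn() later
 * in this file (shown here only to illustrate how this callback is attached):
 *
 *	elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd;
 *	elsiocb->ndlp = lpfc_nlp_get(ndlp);
 *	lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);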
3175 **/ 3176 static void 3177 lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3178 struct lpfc_iocbq *rspiocb) 3179 { 3180 struct lpfc_vport *vport = cmdiocb->vport; 3181 struct lpfc_nodelist *free_ndlp; 3182 IOCB_t *irsp; 3183 u32 ulp_status, ulp_word4, tmo, did, iotag; 3184 3185 ulp_status = get_job_ulpstatus(phba, rspiocb); 3186 ulp_word4 = get_job_word4(phba, rspiocb); 3187 did = get_job_els_rsp64_did(phba, cmdiocb); 3188 3189 if (phba->sli_rev == LPFC_SLI_REV4) { 3190 tmo = get_wqe_tmo(cmdiocb); 3191 iotag = get_wqe_reqtag(cmdiocb); 3192 } else { 3193 irsp = &rspiocb->iocb; 3194 tmo = irsp->ulpTimeout; 3195 iotag = irsp->ulpIoTag; 3196 } 3197 3198 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3199 "ELS cmd cmpl: status:x%x/x%x did:x%x", 3200 ulp_status, ulp_word4, did); 3201 3202 /* ELS cmd tag <ulpIoTag> completes */ 3203 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3204 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n", 3205 iotag, ulp_status, ulp_word4, tmo); 3206 3207 /* Check to see if link went down during discovery */ 3208 lpfc_els_chk_latt(vport); 3209 3210 free_ndlp = cmdiocb->ndlp; 3211 3212 lpfc_els_free_iocb(phba, cmdiocb); 3213 lpfc_nlp_put(free_ndlp); 3214 } 3215 3216 /** 3217 * lpfc_reg_fab_ctrl_node - RPI register the fabric controller node. 3218 * @vport: pointer to lpfc_vport data structure. 3219 * @fc_ndlp: pointer to the fabric controller (0xfffffd) node. 3220 * 3221 * This routine registers the rpi assigned to the fabric controller 3222 * NPort_ID (0xfffffd) with the port and moves the node to UNMAPPED 3223 * state triggering a registration with the SCSI transport. 3224 * 3225 * This routine is single out because the fabric controller node 3226 * does not receive a PLOGI. This routine is consumed by the 3227 * SCR and RDF ELS commands. Callers are expected to qualify 3228 * with SLI4 first. 3229 **/ 3230 static int 3231 lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp) 3232 { 3233 int rc = 0; 3234 struct lpfc_hba *phba = vport->phba; 3235 struct lpfc_nodelist *ns_ndlp; 3236 LPFC_MBOXQ_t *mbox; 3237 3238 if (fc_ndlp->nlp_flag & NLP_RPI_REGISTERED) 3239 return rc; 3240 3241 ns_ndlp = lpfc_findnode_did(vport, NameServer_DID); 3242 if (!ns_ndlp) 3243 return -ENODEV; 3244 3245 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 3246 "0935 %s: Reg FC RPI x%x on FC DID x%x NSSte: x%x\n", 3247 __func__, fc_ndlp->nlp_rpi, fc_ndlp->nlp_DID, 3248 ns_ndlp->nlp_state); 3249 if (ns_ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 3250 return -ENODEV; 3251 3252 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3253 if (!mbox) { 3254 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3255 "0936 %s: no memory for reg_login " 3256 "Data: x%x x%x x%x x%x\n", __func__, 3257 fc_ndlp->nlp_DID, fc_ndlp->nlp_state, 3258 fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi); 3259 return -ENOMEM; 3260 } 3261 rc = lpfc_reg_rpi(phba, vport->vpi, fc_ndlp->nlp_DID, 3262 (u8 *)&vport->fc_sparam, mbox, fc_ndlp->nlp_rpi); 3263 if (rc) { 3264 rc = -EACCES; 3265 goto out; 3266 } 3267 3268 fc_ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; 3269 mbox->mbox_cmpl = lpfc_mbx_cmpl_fc_reg_login; 3270 mbox->ctx_ndlp = lpfc_nlp_get(fc_ndlp); 3271 if (!mbox->ctx_ndlp) { 3272 rc = -ENOMEM; 3273 goto out; 3274 } 3275 3276 mbox->vport = vport; 3277 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 3278 if (rc == MBX_NOT_FINISHED) { 3279 rc = -ENODEV; 3280 lpfc_nlp_put(fc_ndlp); 3281 goto out; 3282 } 3283 /* Success path. Exit. 
*/ 3284 lpfc_nlp_set_state(vport, fc_ndlp, 3285 NLP_STE_REG_LOGIN_ISSUE); 3286 return 0; 3287 3288 out: 3289 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 3290 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3291 "0938 %s: failed to format reg_login " 3292 "Data: x%x x%x x%x x%x\n", __func__, 3293 fc_ndlp->nlp_DID, fc_ndlp->nlp_state, 3294 fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi); 3295 return rc; 3296 } 3297 3298 /** 3299 * lpfc_cmpl_els_disc_cmd - Completion callback function for Discovery ELS cmd 3300 * @phba: pointer to lpfc hba data structure. 3301 * @cmdiocb: pointer to lpfc command iocb data structure. 3302 * @rspiocb: pointer to lpfc response iocb data structure. 3303 * 3304 * This routine is a generic completion callback function for Discovery ELS cmd. 3305 * Currently used by the ELS command issuing routines for the ELS State Change 3306 * Request (SCR), lpfc_issue_els_scr() and the ELS RDF, lpfc_issue_els_rdf(). 3307 * These commands will be retried once only for ELS timeout errors. 3308 **/ 3309 static void 3310 lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3311 struct lpfc_iocbq *rspiocb) 3312 { 3313 struct lpfc_vport *vport = cmdiocb->vport; 3314 IOCB_t *irsp; 3315 struct lpfc_els_rdf_rsp *prdf; 3316 struct lpfc_dmabuf *pcmd, *prsp; 3317 u32 *pdata; 3318 u32 cmd; 3319 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 3320 u32 ulp_status, ulp_word4, tmo, did, iotag; 3321 3322 ulp_status = get_job_ulpstatus(phba, rspiocb); 3323 ulp_word4 = get_job_word4(phba, rspiocb); 3324 did = get_job_els_rsp64_did(phba, cmdiocb); 3325 3326 if (phba->sli_rev == LPFC_SLI_REV4) { 3327 tmo = get_wqe_tmo(cmdiocb); 3328 iotag = get_wqe_reqtag(cmdiocb); 3329 } else { 3330 irsp = &rspiocb->iocb; 3331 tmo = irsp->ulpTimeout; 3332 iotag = irsp->ulpIoTag; 3333 } 3334 3335 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3336 "ELS cmd cmpl: status:x%x/x%x did:x%x", 3337 ulp_status, ulp_word4, did); 3338 3339 /* ELS cmd tag <ulpIoTag> completes */ 3340 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3341 "0217 ELS cmd tag x%x completes Data: x%x x%x x%x x%x\n", 3342 iotag, ulp_status, ulp_word4, tmo, cmdiocb->retry); 3343 3344 pcmd = cmdiocb->cmd_dmabuf; 3345 if (!pcmd) 3346 goto out; 3347 3348 pdata = (u32 *)pcmd->virt; 3349 if (!pdata) 3350 goto out; 3351 cmd = *pdata; 3352 3353 /* Only 1 retry for ELS Timeout only */ 3354 if (ulp_status == IOSTAT_LOCAL_REJECT && 3355 ((ulp_word4 & IOERR_PARAM_MASK) == 3356 IOERR_SEQUENCE_TIMEOUT)) { 3357 cmdiocb->retry++; 3358 if (cmdiocb->retry <= 1) { 3359 switch (cmd) { 3360 case ELS_CMD_SCR: 3361 lpfc_issue_els_scr(vport, cmdiocb->retry); 3362 break; 3363 case ELS_CMD_EDC: 3364 lpfc_issue_els_edc(vport, cmdiocb->retry); 3365 break; 3366 case ELS_CMD_RDF: 3367 lpfc_issue_els_rdf(vport, cmdiocb->retry); 3368 break; 3369 } 3370 goto out; 3371 } 3372 phba->fc_stat.elsRetryExceeded++; 3373 } 3374 if (cmd == ELS_CMD_EDC) { 3375 /* must be called before checking uplStatus and returning */ 3376 lpfc_cmpl_els_edc(phba, cmdiocb, rspiocb); 3377 return; 3378 } 3379 if (ulp_status) { 3380 /* ELS discovery cmd completes with error */ 3381 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT, 3382 "4203 ELS cmd x%x error: x%x x%X\n", cmd, 3383 ulp_status, ulp_word4); 3384 goto out; 3385 } 3386 3387 /* The RDF response doesn't have any impact on the running driver 3388 * but the notification descriptors are dumped here for support. 
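 *
 * The loop below walks at most ELS_RDF_REG_TAG_CNT granted descriptor
 * tags, bounded by the count reported by the fabric, and logs each tag
 * together with the currently registered signal and FPIN settings.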
3389 */ 3390 if (cmd == ELS_CMD_RDF) { 3391 int i; 3392 3393 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 3394 if (!prsp) 3395 goto out; 3396 3397 prdf = (struct lpfc_els_rdf_rsp *)prsp->virt; 3398 if (!prdf) 3399 goto out; 3400 3401 for (i = 0; i < ELS_RDF_REG_TAG_CNT && 3402 i < be32_to_cpu(prdf->reg_d1.reg_desc.count); i++) 3403 lpfc_printf_vlog(vport, KERN_INFO, 3404 LOG_ELS | LOG_CGN_MGMT, 3405 "4677 Fabric RDF Notification Grant " 3406 "Data: 0x%08x Reg: %x %x\n", 3407 be32_to_cpu( 3408 prdf->reg_d1.desc_tags[i]), 3409 phba->cgn_reg_signal, 3410 phba->cgn_reg_fpin); 3411 } 3412 3413 out: 3414 /* Check to see if link went down during discovery */ 3415 lpfc_els_chk_latt(vport); 3416 lpfc_els_free_iocb(phba, cmdiocb); 3417 lpfc_nlp_put(ndlp); 3418 return; 3419 } 3420 3421 /** 3422 * lpfc_issue_els_scr - Issue a scr to an node on a vport 3423 * @vport: pointer to a host virtual N_Port data structure. 3424 * @retry: retry counter for the command IOCB. 3425 * 3426 * This routine issues a State Change Request (SCR) to a fabric node 3427 * on a @vport. The remote node is Fabric Controller (0xfffffd). It 3428 * first search the @vport node list to find the matching ndlp. If no such 3429 * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An 3430 * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb() 3431 * routine is invoked to send the SCR IOCB. 3432 * 3433 * Note that the ndlp reference count will be incremented by 1 for holding the 3434 * ndlp and the reference to ndlp will be stored into the ndlp field of 3435 * the IOCB for the completion callback function to the SCR ELS command. 3436 * 3437 * Return code 3438 * 0 - Successfully issued scr command 3439 * 1 - Failed to issue scr command 3440 **/ 3441 int 3442 lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry) 3443 { 3444 int rc = 0; 3445 struct lpfc_hba *phba = vport->phba; 3446 struct lpfc_iocbq *elsiocb; 3447 uint8_t *pcmd; 3448 uint16_t cmdsize; 3449 struct lpfc_nodelist *ndlp; 3450 3451 cmdsize = (sizeof(uint32_t) + sizeof(SCR)); 3452 3453 ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID); 3454 if (!ndlp) { 3455 ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID); 3456 if (!ndlp) 3457 return 1; 3458 lpfc_enqueue_node(vport, ndlp); 3459 } 3460 3461 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3462 ndlp->nlp_DID, ELS_CMD_SCR); 3463 if (!elsiocb) 3464 return 1; 3465 3466 if (phba->sli_rev == LPFC_SLI_REV4) { 3467 rc = lpfc_reg_fab_ctrl_node(vport, ndlp); 3468 if (rc) { 3469 lpfc_els_free_iocb(phba, elsiocb); 3470 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3471 "0937 %s: Failed to reg fc node, rc %d\n", 3472 __func__, rc); 3473 return 1; 3474 } 3475 } 3476 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 3477 3478 *((uint32_t *) (pcmd)) = ELS_CMD_SCR; 3479 pcmd += sizeof(uint32_t); 3480 3481 /* For SCR, remainder of payload is SCR parameter page */ 3482 memset(pcmd, 0, sizeof(SCR)); 3483 ((SCR *) pcmd)->Function = SCR_FUNC_FULL; 3484 3485 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3486 "Issue SCR: did:x%x", 3487 ndlp->nlp_DID, 0, 0); 3488 3489 phba->fc_stat.elsXmitSCR++; 3490 elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd; 3491 elsiocb->ndlp = lpfc_nlp_get(ndlp); 3492 if (!elsiocb->ndlp) { 3493 lpfc_els_free_iocb(phba, elsiocb); 3494 return 1; 3495 } 3496 3497 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3498 "Issue SCR: did:x%x refcnt %d", 3499 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3500 3501 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3502 if 
(rc == IOCB_ERROR) { 3503 lpfc_els_free_iocb(phba, elsiocb); 3504 lpfc_nlp_put(ndlp); 3505 return 1; 3506 } 3507 3508 return 0; 3509 } 3510 3511 /** 3512 * lpfc_issue_els_rscn - Issue an RSCN to the Fabric Controller (Fabric) 3513 * or the other nport (pt2pt). 3514 * @vport: pointer to a host virtual N_Port data structure. 3515 * @retry: number of retries to the command IOCB. 3516 * 3517 * This routine issues a RSCN to the Fabric Controller (DID 0xFFFFFD) 3518 * when connected to a fabric, or to the remote port when connected 3519 * in point-to-point mode. When sent to the Fabric Controller, it will 3520 * replay the RSCN to registered recipients. 3521 * 3522 * Note that the ndlp reference count will be incremented by 1 for holding the 3523 * ndlp and the reference to ndlp will be stored into the ndlp field of 3524 * the IOCB for the completion callback function to the RSCN ELS command. 3525 * 3526 * Return code 3527 * 0 - Successfully issued RSCN command 3528 * 1 - Failed to issue RSCN command 3529 **/ 3530 int 3531 lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry) 3532 { 3533 int rc = 0; 3534 struct lpfc_hba *phba = vport->phba; 3535 struct lpfc_iocbq *elsiocb; 3536 struct lpfc_nodelist *ndlp; 3537 struct { 3538 struct fc_els_rscn rscn; 3539 struct fc_els_rscn_page portid; 3540 } *event; 3541 uint32_t nportid; 3542 uint16_t cmdsize = sizeof(*event); 3543 3544 /* Not supported for private loop */ 3545 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP && 3546 !(vport->fc_flag & FC_PUBLIC_LOOP)) 3547 return 1; 3548 3549 if (vport->fc_flag & FC_PT2PT) { 3550 /* find any mapped nport - that would be the other nport */ 3551 ndlp = lpfc_findnode_mapped(vport); 3552 if (!ndlp) 3553 return 1; 3554 } else { 3555 nportid = FC_FID_FCTRL; 3556 /* find the fabric controller node */ 3557 ndlp = lpfc_findnode_did(vport, nportid); 3558 if (!ndlp) { 3559 /* if one didn't exist, make one */ 3560 ndlp = lpfc_nlp_init(vport, nportid); 3561 if (!ndlp) 3562 return 1; 3563 lpfc_enqueue_node(vport, ndlp); 3564 } 3565 } 3566 3567 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3568 ndlp->nlp_DID, ELS_CMD_RSCN_XMT); 3569 3570 if (!elsiocb) 3571 return 1; 3572 3573 event = elsiocb->cmd_dmabuf->virt; 3574 3575 event->rscn.rscn_cmd = ELS_RSCN; 3576 event->rscn.rscn_page_len = sizeof(struct fc_els_rscn_page); 3577 event->rscn.rscn_plen = cpu_to_be16(cmdsize); 3578 3579 nportid = vport->fc_myDID; 3580 /* appears that page flags must be 0 for fabric to broadcast RSCN */ 3581 event->portid.rscn_page_flags = 0; 3582 event->portid.rscn_fid[0] = (nportid & 0x00FF0000) >> 16; 3583 event->portid.rscn_fid[1] = (nportid & 0x0000FF00) >> 8; 3584 event->portid.rscn_fid[2] = nportid & 0x000000FF; 3585 3586 phba->fc_stat.elsXmitRSCN++; 3587 elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd; 3588 elsiocb->ndlp = lpfc_nlp_get(ndlp); 3589 if (!elsiocb->ndlp) { 3590 lpfc_els_free_iocb(phba, elsiocb); 3591 return 1; 3592 } 3593 3594 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3595 "Issue RSCN: did:x%x", 3596 ndlp->nlp_DID, 0, 0); 3597 3598 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3599 if (rc == IOCB_ERROR) { 3600 lpfc_els_free_iocb(phba, elsiocb); 3601 lpfc_nlp_put(ndlp); 3602 return 1; 3603 } 3604 3605 return 0; 3606 } 3607 3608 /** 3609 * lpfc_issue_els_farpr - Issue a farp to an node on a vport 3610 * @vport: pointer to a host virtual N_Port data structure. 3611 * @nportid: N_Port identifier to the remote node. 3612 * @retry: number of retries to the command IOCB. 
3613 * 3614 * This routine issues a Fibre Channel Address Resolution Response 3615 * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid) 3616 * is passed into the function. It first search the @vport node list to find 3617 * the matching ndlp. If no such ndlp is found, a new ndlp shall be created 3618 * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the 3619 * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command. 3620 * 3621 * Note that the ndlp reference count will be incremented by 1 for holding the 3622 * ndlp and the reference to ndlp will be stored into the ndlp field of 3623 * the IOCB for the completion callback function to the FARPR ELS command. 3624 * 3625 * Return code 3626 * 0 - Successfully issued farpr command 3627 * 1 - Failed to issue farpr command 3628 **/ 3629 static int 3630 lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) 3631 { 3632 int rc = 0; 3633 struct lpfc_hba *phba = vport->phba; 3634 struct lpfc_iocbq *elsiocb; 3635 FARP *fp; 3636 uint8_t *pcmd; 3637 uint32_t *lp; 3638 uint16_t cmdsize; 3639 struct lpfc_nodelist *ondlp; 3640 struct lpfc_nodelist *ndlp; 3641 3642 cmdsize = (sizeof(uint32_t) + sizeof(FARP)); 3643 3644 ndlp = lpfc_findnode_did(vport, nportid); 3645 if (!ndlp) { 3646 ndlp = lpfc_nlp_init(vport, nportid); 3647 if (!ndlp) 3648 return 1; 3649 lpfc_enqueue_node(vport, ndlp); 3650 } 3651 3652 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3653 ndlp->nlp_DID, ELS_CMD_FARPR); 3654 if (!elsiocb) 3655 return 1; 3656 3657 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 3658 3659 *((uint32_t *) (pcmd)) = ELS_CMD_FARPR; 3660 pcmd += sizeof(uint32_t); 3661 3662 /* Fill in FARPR payload */ 3663 fp = (FARP *) (pcmd); 3664 memset(fp, 0, sizeof(FARP)); 3665 lp = (uint32_t *) pcmd; 3666 *lp++ = be32_to_cpu(nportid); 3667 *lp++ = be32_to_cpu(vport->fc_myDID); 3668 fp->Rflags = 0; 3669 fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE); 3670 3671 memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name)); 3672 memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 3673 ondlp = lpfc_findnode_did(vport, nportid); 3674 if (ondlp) { 3675 memcpy(&fp->OportName, &ondlp->nlp_portname, 3676 sizeof(struct lpfc_name)); 3677 memcpy(&fp->OnodeName, &ondlp->nlp_nodename, 3678 sizeof(struct lpfc_name)); 3679 } 3680 3681 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3682 "Issue FARPR: did:x%x", 3683 ndlp->nlp_DID, 0, 0); 3684 3685 phba->fc_stat.elsXmitFARPR++; 3686 elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd; 3687 elsiocb->ndlp = lpfc_nlp_get(ndlp); 3688 if (!elsiocb->ndlp) { 3689 lpfc_els_free_iocb(phba, elsiocb); 3690 return 1; 3691 } 3692 3693 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3694 if (rc == IOCB_ERROR) { 3695 /* The additional lpfc_nlp_put will cause the following 3696 * lpfc_els_free_iocb routine to trigger the release of 3697 * the node. 3698 */ 3699 lpfc_els_free_iocb(phba, elsiocb); 3700 lpfc_nlp_put(ndlp); 3701 return 1; 3702 } 3703 /* This will cause the callback-function lpfc_cmpl_els_cmd to 3704 * trigger the release of the node. 3705 */ 3706 /* Don't release reference count as RDF is likely outstanding */ 3707 return 0; 3708 } 3709 3710 /** 3711 * lpfc_issue_els_rdf - Register for diagnostic functions from the fabric. 3712 * @vport: pointer to a host virtual N_Port data structure. 3713 * @retry: retry counter for the command IOCB. 
3714 * 3715 * This routine issues an ELS RDF to the Fabric Controller to register 3716 * for diagnostic functions. 3717 * 3718 * Note that the ndlp reference count will be incremented by 1 for holding the 3719 * ndlp and the reference to ndlp will be stored into the ndlp field of 3720 * the IOCB for the completion callback function to the RDF ELS command. 3721 * 3722 * Return code 3723 * 0 - Successfully issued rdf command 3724 * 1 - Failed to issue rdf command 3725 **/ 3726 int 3727 lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry) 3728 { 3729 struct lpfc_hba *phba = vport->phba; 3730 struct lpfc_iocbq *elsiocb; 3731 struct lpfc_els_rdf_req *prdf; 3732 struct lpfc_nodelist *ndlp; 3733 uint16_t cmdsize; 3734 int rc; 3735 3736 cmdsize = sizeof(*prdf); 3737 3738 ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID); 3739 if (!ndlp) { 3740 ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID); 3741 if (!ndlp) 3742 return -ENODEV; 3743 lpfc_enqueue_node(vport, ndlp); 3744 } 3745 3746 /* RDF ELS is not required on an NPIV VN_Port. */ 3747 if (vport->port_type == LPFC_NPIV_PORT) 3748 return -EACCES; 3749 3750 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3751 ndlp->nlp_DID, ELS_CMD_RDF); 3752 if (!elsiocb) 3753 return -ENOMEM; 3754 3755 /* Configure the payload for the supported FPIN events. */ 3756 prdf = (struct lpfc_els_rdf_req *)elsiocb->cmd_dmabuf->virt; 3757 memset(prdf, 0, cmdsize); 3758 prdf->rdf.fpin_cmd = ELS_RDF; 3759 prdf->rdf.desc_len = cpu_to_be32(sizeof(struct lpfc_els_rdf_req) - 3760 sizeof(struct fc_els_rdf)); 3761 prdf->reg_d1.reg_desc.desc_tag = cpu_to_be32(ELS_DTAG_FPIN_REGISTER); 3762 prdf->reg_d1.reg_desc.desc_len = cpu_to_be32( 3763 FC_TLV_DESC_LENGTH_FROM_SZ(prdf->reg_d1)); 3764 prdf->reg_d1.reg_desc.count = cpu_to_be32(ELS_RDF_REG_TAG_CNT); 3765 prdf->reg_d1.desc_tags[0] = cpu_to_be32(ELS_DTAG_LNK_INTEGRITY); 3766 prdf->reg_d1.desc_tags[1] = cpu_to_be32(ELS_DTAG_DELIVERY); 3767 prdf->reg_d1.desc_tags[2] = cpu_to_be32(ELS_DTAG_PEER_CONGEST); 3768 prdf->reg_d1.desc_tags[3] = cpu_to_be32(ELS_DTAG_CONGESTION); 3769 3770 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3771 "6444 Xmit RDF to remote NPORT x%x Reg: %x %x\n", 3772 ndlp->nlp_DID, phba->cgn_reg_signal, 3773 phba->cgn_reg_fpin); 3774 3775 phba->cgn_fpin_frequency = LPFC_FPIN_INIT_FREQ; 3776 elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd; 3777 elsiocb->ndlp = lpfc_nlp_get(ndlp); 3778 if (!elsiocb->ndlp) { 3779 lpfc_els_free_iocb(phba, elsiocb); 3780 return -EIO; 3781 } 3782 3783 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3784 "Issue RDF: did:x%x refcnt %d", 3785 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3786 3787 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3788 if (rc == IOCB_ERROR) { 3789 lpfc_els_free_iocb(phba, elsiocb); 3790 lpfc_nlp_put(ndlp); 3791 return -EIO; 3792 } 3793 return 0; 3794 } 3795 3796 /** 3797 * lpfc_els_rcv_rdf - Receive RDF ELS request from the fabric. 3798 * @vport: pointer to a host virtual N_Port data structure. 3799 * @cmdiocb: pointer to lpfc command iocb data structure. 3800 * @ndlp: pointer to a node-list data structure. 3801 * 3802 * A received RDF implies a possible change to fabric supported diagnostic 3803 * functions. This routine sends LS_ACC and then has the Nx_Port issue a new 3804 * RDF request to reregister for supported diagnostic functions. 
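 * The LS_ACC is sent with lpfc_els_rsp_acc(ELS_CMD_RDF, ...) and the
 * re-registration simply reuses lpfc_issue_els_rdf() with a retry
 * count of zero, as can be seen in the body below.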
3805 *
3806 * Return code
3807 * 0 - Success
3808 * -EIO - Failed to process received RDF
3809 **/
3810 static int
3811 lpfc_els_rcv_rdf(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3812 struct lpfc_nodelist *ndlp)
3813 {
3814 /* Send LS_ACC */
3815 if (lpfc_els_rsp_acc(vport, ELS_CMD_RDF, cmdiocb, ndlp, NULL)) {
3816 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
3817 "1623 Failed to RDF_ACC from x%x for x%x\n",
3818 ndlp->nlp_DID, vport->fc_myDID);
3819 return -EIO;
3820 }
3821
3822 /* Issue new RDF for reregistering */
3823 if (lpfc_issue_els_rdf(vport, 0)) {
3824 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
3825 "2623 Failed to re register RDF for x%x\n",
3826 vport->fc_myDID);
3827 return -EIO;
3828 }
3829
3830 return 0;
3831 }
3832
3833 /**
3834 * lpfc_least_capable_settings - helper function for EDC rsp processing
3835 * @phba: pointer to lpfc hba data structure.
3836 * @pcgd: pointer to congestion detection descriptor in EDC rsp.
3837 *
3838 * This helper routine determines the least capable setting for the
3839 * congestion signals and signal frequency, including scale, from the
3840 * congestion detection descriptor in the EDC rsp. The routine
3841 * sets @phba values in preparation for a set_features mailbox.
3842 **/
3843 static void
3844 lpfc_least_capable_settings(struct lpfc_hba *phba,
3845 struct fc_diag_cg_sig_desc *pcgd)
3846 {
3847 u32 rsp_sig_cap = 0, drv_sig_cap = 0;
3848 u32 rsp_sig_freq_cyc = 0, rsp_sig_freq_scale = 0;
3849
3850 /* Get rsp signal and frequency capabilities. */
3851 rsp_sig_cap = be32_to_cpu(pcgd->xmt_signal_capability);
3852 rsp_sig_freq_cyc = be16_to_cpu(pcgd->xmt_signal_frequency.count);
3853 rsp_sig_freq_scale = be16_to_cpu(pcgd->xmt_signal_frequency.units);
3854
3855 /* If the Fport does not support signals, set FPIN only */
3856 if (rsp_sig_cap == EDC_CG_SIG_NOTSUPPORTED)
3857 goto out_no_support;
3858
3859 /* Apply the xmt scale to the xmt cycle to get the correct frequency.
3860 * Adapter default is 100 milliseconds. Convert all xmt cycle values
3861 * to milliseconds.
3862 */
3863 switch (rsp_sig_freq_scale) {
3864 case EDC_CG_SIGFREQ_SEC:
3865 rsp_sig_freq_cyc *= MSEC_PER_SEC;
3866 break;
3867 case EDC_CG_SIGFREQ_MSEC:
3868 rsp_sig_freq_cyc = 1;
3869 break;
3870 default:
3871 goto out_no_support;
3872 }
3873
3874 /* Convenient shorthand. */
3875 drv_sig_cap = phba->cgn_reg_signal;
3876
3877 /* Choose the least capable frequency. */
3878 if (rsp_sig_freq_cyc > phba->cgn_sig_freq)
3879 phba->cgn_sig_freq = rsp_sig_freq_cyc;
3880
3881 /* There should be some common signal support. Settle on the least
3882 * capable signal and adjust FPIN values. Initialize defaults to ease
3883 * the decision.
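 *
 * The checks below can be read as a small decision table (F_Port
 * response capability vs. driver capability):
 *
 *   rsp WARN_ONLY,  drv WARN_ONLY or WARN_ALARM -> signal WARN_ONLY,
 *                                                  FPIN ALARM only
 *   rsp WARN_ALARM, drv WARN_ALARM              -> signal WARN_ALARM,
 *                                                  no FPIN
 *   rsp WARN_ALARM, drv WARN_ONLY               -> signal WARN_ONLY,
 *                                                  FPIN ALARM only
 *   any other combination                       -> no signals,
 *                                                  FPIN WARN + ALARM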
3884 */ 3885 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM; 3886 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 3887 if (rsp_sig_cap == EDC_CG_SIG_WARN_ONLY && 3888 (drv_sig_cap == EDC_CG_SIG_WARN_ONLY || 3889 drv_sig_cap == EDC_CG_SIG_WARN_ALARM)) { 3890 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY; 3891 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN; 3892 } 3893 if (rsp_sig_cap == EDC_CG_SIG_WARN_ALARM) { 3894 if (drv_sig_cap == EDC_CG_SIG_WARN_ALARM) { 3895 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ALARM; 3896 phba->cgn_reg_fpin = LPFC_CGN_FPIN_NONE; 3897 } 3898 if (drv_sig_cap == EDC_CG_SIG_WARN_ONLY) { 3899 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY; 3900 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN; 3901 } 3902 } 3903 3904 /* We are NOT recording signal frequency in congestion info buffer */ 3905 return; 3906 3907 out_no_support: 3908 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 3909 phba->cgn_sig_freq = 0; 3910 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; 3911 } 3912 3913 DECLARE_ENUM2STR_LOOKUP(lpfc_get_tlv_dtag_nm, fc_ls_tlv_dtag, 3914 FC_LS_TLV_DTAG_INIT); 3915 3916 /** 3917 * lpfc_cmpl_els_edc - Completion callback function for EDC 3918 * @phba: pointer to lpfc hba data structure. 3919 * @cmdiocb: pointer to lpfc command iocb data structure. 3920 * @rspiocb: pointer to lpfc response iocb data structure. 3921 * 3922 * This routine is the completion callback function for issuing the Exchange 3923 * Diagnostic Capabilities (EDC) command. The driver issues an EDC to 3924 * notify the FPort of its Congestion and Link Fault capabilities. This 3925 * routine parses the FPort's response and decides on the least common 3926 * values applicable to both FPort and NPort for Warnings and Alarms that 3927 * are communicated via hardware signals. 3928 **/ 3929 static void 3930 lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3931 struct lpfc_iocbq *rspiocb) 3932 { 3933 IOCB_t *irsp_iocb; 3934 struct fc_els_edc_resp *edc_rsp; 3935 struct fc_tlv_desc *tlv; 3936 struct fc_diag_cg_sig_desc *pcgd; 3937 struct fc_diag_lnkflt_desc *plnkflt; 3938 struct lpfc_dmabuf *pcmd, *prsp; 3939 const char *dtag_nm; 3940 u32 *pdata, dtag; 3941 int desc_cnt = 0, bytes_remain; 3942 bool rcv_cap_desc = false; 3943 struct lpfc_nodelist *ndlp; 3944 u32 ulp_status, ulp_word4, tmo, did, iotag; 3945 3946 ndlp = cmdiocb->ndlp; 3947 3948 ulp_status = get_job_ulpstatus(phba, rspiocb); 3949 ulp_word4 = get_job_word4(phba, rspiocb); 3950 did = get_job_els_rsp64_did(phba, rspiocb); 3951 3952 if (phba->sli_rev == LPFC_SLI_REV4) { 3953 tmo = get_wqe_tmo(rspiocb); 3954 iotag = get_wqe_reqtag(rspiocb); 3955 } else { 3956 irsp_iocb = &rspiocb->iocb; 3957 tmo = irsp_iocb->ulpTimeout; 3958 iotag = irsp_iocb->ulpIoTag; 3959 } 3960 3961 lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD, 3962 "EDC cmpl: status:x%x/x%x did:x%x", 3963 ulp_status, ulp_word4, did); 3964 3965 /* ELS cmd tag <ulpIoTag> completes */ 3966 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3967 "4201 EDC cmd tag x%x completes Data: x%x x%x x%x\n", 3968 iotag, ulp_status, ulp_word4, tmo); 3969 3970 pcmd = cmdiocb->cmd_dmabuf; 3971 if (!pcmd) 3972 goto out; 3973 3974 pdata = (u32 *)pcmd->virt; 3975 if (!pdata) 3976 goto out; 3977 3978 /* Need to clear signal values, send features MB and RDF with FPIN. 
*/ 3979 if (ulp_status) 3980 goto out; 3981 3982 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 3983 if (!prsp) 3984 goto out; 3985 3986 edc_rsp = prsp->virt; 3987 if (!edc_rsp) 3988 goto out; 3989 3990 /* ELS cmd tag <ulpIoTag> completes */ 3991 lpfc_printf_log(phba, KERN_INFO, 3992 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, 3993 "4676 Fabric EDC Rsp: " 3994 "0x%02x, 0x%08x\n", 3995 edc_rsp->acc_hdr.la_cmd, 3996 be32_to_cpu(edc_rsp->desc_list_len)); 3997 3998 /* 3999 * Payload length in bytes is the response descriptor list 4000 * length minus the 12 bytes of Link Service Request 4001 * Information descriptor in the reply. 4002 */ 4003 bytes_remain = be32_to_cpu(edc_rsp->desc_list_len) - 4004 sizeof(struct fc_els_lsri_desc); 4005 if (bytes_remain <= 0) 4006 goto out; 4007 4008 tlv = edc_rsp->desc; 4009 4010 /* 4011 * cycle through EDC diagnostic descriptors to find the 4012 * congestion signaling capability descriptor 4013 */ 4014 while (bytes_remain) { 4015 if (bytes_remain < FC_TLV_DESC_HDR_SZ) { 4016 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 4017 "6461 Truncated TLV hdr on " 4018 "Diagnostic descriptor[%d]\n", 4019 desc_cnt); 4020 goto out; 4021 } 4022 4023 dtag = be32_to_cpu(tlv->desc_tag); 4024 switch (dtag) { 4025 case ELS_DTAG_LNK_FAULT_CAP: 4026 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 4027 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 4028 sizeof(struct fc_diag_lnkflt_desc)) { 4029 lpfc_printf_log(phba, KERN_WARNING, 4030 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, 4031 "6462 Truncated Link Fault Diagnostic " 4032 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 4033 desc_cnt, bytes_remain, 4034 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 4035 sizeof(struct fc_diag_lnkflt_desc)); 4036 goto out; 4037 } 4038 plnkflt = (struct fc_diag_lnkflt_desc *)tlv; 4039 lpfc_printf_log(phba, KERN_INFO, 4040 LOG_ELS | LOG_LDS_EVENT, 4041 "4617 Link Fault Desc Data: 0x%08x 0x%08x " 4042 "0x%08x 0x%08x 0x%08x\n", 4043 be32_to_cpu(plnkflt->desc_tag), 4044 be32_to_cpu(plnkflt->desc_len), 4045 be32_to_cpu( 4046 plnkflt->degrade_activate_threshold), 4047 be32_to_cpu( 4048 plnkflt->degrade_deactivate_threshold), 4049 be32_to_cpu(plnkflt->fec_degrade_interval)); 4050 break; 4051 case ELS_DTAG_CG_SIGNAL_CAP: 4052 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 4053 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 4054 sizeof(struct fc_diag_cg_sig_desc)) { 4055 lpfc_printf_log( 4056 phba, KERN_WARNING, LOG_CGN_MGMT, 4057 "6463 Truncated Cgn Signal Diagnostic " 4058 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 4059 desc_cnt, bytes_remain, 4060 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 4061 sizeof(struct fc_diag_cg_sig_desc)); 4062 goto out; 4063 } 4064 4065 pcgd = (struct fc_diag_cg_sig_desc *)tlv; 4066 lpfc_printf_log( 4067 phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 4068 "4616 CGN Desc Data: 0x%08x 0x%08x " 4069 "0x%08x 0x%04x 0x%04x 0x%08x 0x%04x 0x%04x\n", 4070 be32_to_cpu(pcgd->desc_tag), 4071 be32_to_cpu(pcgd->desc_len), 4072 be32_to_cpu(pcgd->xmt_signal_capability), 4073 be16_to_cpu(pcgd->xmt_signal_frequency.count), 4074 be16_to_cpu(pcgd->xmt_signal_frequency.units), 4075 be32_to_cpu(pcgd->rcv_signal_capability), 4076 be16_to_cpu(pcgd->rcv_signal_frequency.count), 4077 be16_to_cpu(pcgd->rcv_signal_frequency.units)); 4078 4079 /* Compare driver and Fport capabilities and choose 4080 * least common. 
4081 */ 4082 lpfc_least_capable_settings(phba, pcgd); 4083 rcv_cap_desc = true; 4084 break; 4085 default: 4086 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 4087 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 4088 "4919 unknown Diagnostic " 4089 "Descriptor[%d]: tag x%x (%s)\n", 4090 desc_cnt, dtag, dtag_nm); 4091 } 4092 4093 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); 4094 tlv = fc_tlv_next_desc(tlv); 4095 desc_cnt++; 4096 } 4097 4098 out: 4099 if (!rcv_cap_desc) { 4100 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; 4101 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 4102 phba->cgn_sig_freq = 0; 4103 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT, 4104 "4202 EDC rsp error - sending RDF " 4105 "for FPIN only.\n"); 4106 } 4107 4108 lpfc_config_cgn_signal(phba); 4109 4110 /* Check to see if link went down during discovery */ 4111 lpfc_els_chk_latt(phba->pport); 4112 lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD, 4113 "EDC Cmpl: did:x%x refcnt %d", 4114 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 4115 lpfc_els_free_iocb(phba, cmdiocb); 4116 lpfc_nlp_put(ndlp); 4117 } 4118 4119 static void 4120 lpfc_format_edc_lft_desc(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 4121 { 4122 struct fc_diag_lnkflt_desc *lft = (struct fc_diag_lnkflt_desc *)tlv; 4123 4124 lft->desc_tag = cpu_to_be32(ELS_DTAG_LNK_FAULT_CAP); 4125 lft->desc_len = cpu_to_be32( 4126 FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_diag_lnkflt_desc)); 4127 4128 lft->degrade_activate_threshold = 4129 cpu_to_be32(phba->degrade_activate_threshold); 4130 lft->degrade_deactivate_threshold = 4131 cpu_to_be32(phba->degrade_deactivate_threshold); 4132 lft->fec_degrade_interval = cpu_to_be32(phba->fec_degrade_interval); 4133 } 4134 4135 static void 4136 lpfc_format_edc_cgn_desc(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 4137 { 4138 struct fc_diag_cg_sig_desc *cgd = (struct fc_diag_cg_sig_desc *)tlv; 4139 4140 /* We are assuming cgd was zero'ed before calling this routine */ 4141 4142 /* Configure the congestion detection capability */ 4143 cgd->desc_tag = cpu_to_be32(ELS_DTAG_CG_SIGNAL_CAP); 4144 4145 /* Descriptor len doesn't include the tag or len fields. */ 4146 cgd->desc_len = cpu_to_be32( 4147 FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_diag_cg_sig_desc)); 4148 4149 /* xmt_signal_capability already set to EDC_CG_SIG_NOTSUPPORTED. 4150 * xmt_signal_frequency.count already set to 0. 4151 * xmt_signal_frequency.units already set to 0. 4152 */ 4153 4154 if (phba->cmf_active_mode == LPFC_CFG_OFF) { 4155 /* rcv_signal_capability already set to EDC_CG_SIG_NOTSUPPORTED. 4156 * rcv_signal_frequency.count already set to 0. 4157 * rcv_signal_frequency.units already set to 0. 4158 */ 4159 phba->cgn_sig_freq = 0; 4160 return; 4161 } 4162 switch (phba->cgn_reg_signal) { 4163 case EDC_CG_SIG_WARN_ONLY: 4164 cgd->rcv_signal_capability = cpu_to_be32(EDC_CG_SIG_WARN_ONLY); 4165 break; 4166 case EDC_CG_SIG_WARN_ALARM: 4167 cgd->rcv_signal_capability = cpu_to_be32(EDC_CG_SIG_WARN_ALARM); 4168 break; 4169 default: 4170 /* rcv_signal_capability left 0 thus no support */ 4171 break; 4172 } 4173 4174 /* We start negotiation with lpfc_fabric_cgn_frequency, after 4175 * the completion we settle on the higher frequency. 
4176 */ 4177 cgd->rcv_signal_frequency.count = 4178 cpu_to_be16(lpfc_fabric_cgn_frequency); 4179 cgd->rcv_signal_frequency.units = 4180 cpu_to_be16(EDC_CG_SIGFREQ_MSEC); 4181 } 4182 4183 static bool 4184 lpfc_link_is_lds_capable(struct lpfc_hba *phba) 4185 { 4186 if (!(phba->lmt & LMT_64Gb)) 4187 return false; 4188 if (phba->sli_rev != LPFC_SLI_REV4) 4189 return false; 4190 4191 if (phba->sli4_hba.conf_trunk) { 4192 if (phba->trunk_link.phy_lnk_speed == LPFC_USER_LINK_SPEED_64G) 4193 return true; 4194 } else if (phba->fc_linkspeed == LPFC_LINK_SPEED_64GHZ) { 4195 return true; 4196 } 4197 return false; 4198 } 4199 4200 /** 4201 * lpfc_issue_els_edc - Exchange Diagnostic Capabilities with the fabric. 4202 * @vport: pointer to a host virtual N_Port data structure. 4203 * @retry: retry counter for the command iocb. 4204 * 4205 * This routine issues an ELS EDC to the F-Port Controller to communicate 4206 * this N_Port's support of hardware signals in its Congestion 4207 * Capabilities Descriptor. 4208 * 4209 * Note: This routine does not check if one or more signals are 4210 * set in the cgn_reg_signal parameter. The caller makes the 4211 * decision to enforce cgn_reg_signal as nonzero or zero depending 4212 * on the conditions. During Fabric requests, the driver 4213 * requires cgn_reg_signals to be nonzero. But a dynamic request 4214 * to set the congestion mode to OFF from Monitor or Manage 4215 * would correctly issue an EDC with no signals enabled to 4216 * turn off switch functionality and then update the FW. 4217 * 4218 * Return code 4219 * 0 - Successfully issued edc command 4220 * 1 - Failed to issue edc command 4221 **/ 4222 int 4223 lpfc_issue_els_edc(struct lpfc_vport *vport, uint8_t retry) 4224 { 4225 struct lpfc_hba *phba = vport->phba; 4226 struct lpfc_iocbq *elsiocb; 4227 struct fc_els_edc *edc_req; 4228 struct fc_tlv_desc *tlv; 4229 u16 cmdsize; 4230 struct lpfc_nodelist *ndlp; 4231 u8 *pcmd = NULL; 4232 u32 cgn_desc_size, lft_desc_size; 4233 int rc; 4234 4235 if (vport->port_type == LPFC_NPIV_PORT) 4236 return -EACCES; 4237 4238 ndlp = lpfc_findnode_did(vport, Fabric_DID); 4239 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 4240 return -ENODEV; 4241 4242 cgn_desc_size = (phba->cgn_init_reg_signal) ? 4243 sizeof(struct fc_diag_cg_sig_desc) : 0; 4244 lft_desc_size = (lpfc_link_is_lds_capable(phba)) ? 4245 sizeof(struct fc_diag_lnkflt_desc) : 0; 4246 cmdsize = cgn_desc_size + lft_desc_size; 4247 4248 /* Skip EDC if no applicable descriptors */ 4249 if (!cmdsize) 4250 goto try_rdf; 4251 4252 cmdsize += sizeof(struct fc_els_edc); 4253 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 4254 ndlp->nlp_DID, ELS_CMD_EDC); 4255 if (!elsiocb) 4256 goto try_rdf; 4257 4258 /* Configure the payload for the supported Diagnostics capabilities. 
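 *
 * The request built here is laid out as:
 *
 *   struct fc_els_edc            - ELS_EDC command + descriptor list len
 *   struct fc_diag_cg_sig_desc   - only when cgn_init_reg_signal is set
 *   struct fc_diag_lnkflt_desc   - only when the link is LDS capable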
*/
4259 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
4260 memset(pcmd, 0, cmdsize);
4261 edc_req = (struct fc_els_edc *)pcmd;
4262 edc_req->desc_len = cpu_to_be32(cgn_desc_size + lft_desc_size);
4263 edc_req->edc_cmd = ELS_EDC;
4264 tlv = edc_req->desc;
4265
4266 if (cgn_desc_size) {
4267 lpfc_format_edc_cgn_desc(phba, tlv);
4268 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
4269 tlv = fc_tlv_next_desc(tlv);
4270 }
4271
4272 if (lft_desc_size)
4273 lpfc_format_edc_lft_desc(phba, tlv);
4274
4275 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
4276 "4623 Xmit EDC to remote "
4277 "NPORT x%x reg_sig x%x reg_fpin:x%x\n",
4278 ndlp->nlp_DID, phba->cgn_reg_signal,
4279 phba->cgn_reg_fpin);
4280
4281 elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd;
4282 elsiocb->ndlp = lpfc_nlp_get(ndlp);
4283 if (!elsiocb->ndlp) {
4284 lpfc_els_free_iocb(phba, elsiocb);
4285 return -EIO;
4286 }
4287
4288 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4289 "Issue EDC: did:x%x refcnt %d",
4290 ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
4291 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
4292 if (rc == IOCB_ERROR) {
4293 /* The additional lpfc_nlp_put will cause the following
4294 * lpfc_els_free_iocb routine to trigger the release of
4295 * the node.
4296 */
4297 lpfc_els_free_iocb(phba, elsiocb);
4298 lpfc_nlp_put(ndlp);
4299 goto try_rdf;
4300 }
4301 return 0;
4302 try_rdf:
4303 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
4304 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
4305 rc = lpfc_issue_els_rdf(vport, 0);
4306 return rc;
4307 }
4308
4309 /**
4310 * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry
4311 * @vport: pointer to a host virtual N_Port data structure.
4312 * @nlp: pointer to a node-list data structure.
4313 *
4314 * This routine cancels the delayed IOCB-command retry timer for a
4315 * @vport's @nlp. It stops the timer for the delayed function retry and
4316 * removes the ELS retry event if one is present. In addition, if the
4317 * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB
4318 * commands are sent for the @vport's nodes that require issuing discovery
4319 * ADISC.
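 *
 * Note that this routine takes @nlp's lock and the shost host_lock
 * internally and, when discovery is still pending, may itself issue
 * further ELS traffic through lpfc_more_adisc() or lpfc_more_plogi().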
4320 **/ 4321 void 4322 lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) 4323 { 4324 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4325 struct lpfc_work_evt *evtp; 4326 4327 if (!(nlp->nlp_flag & NLP_DELAY_TMO)) 4328 return; 4329 spin_lock_irq(&nlp->lock); 4330 nlp->nlp_flag &= ~NLP_DELAY_TMO; 4331 spin_unlock_irq(&nlp->lock); 4332 del_timer_sync(&nlp->nlp_delayfunc); 4333 nlp->nlp_last_elscmd = 0; 4334 if (!list_empty(&nlp->els_retry_evt.evt_listp)) { 4335 list_del_init(&nlp->els_retry_evt.evt_listp); 4336 /* Decrement nlp reference count held for the delayed retry */ 4337 evtp = &nlp->els_retry_evt; 4338 lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1); 4339 } 4340 if (nlp->nlp_flag & NLP_NPR_2B_DISC) { 4341 spin_lock_irq(&nlp->lock); 4342 nlp->nlp_flag &= ~NLP_NPR_2B_DISC; 4343 spin_unlock_irq(&nlp->lock); 4344 if (vport->num_disc_nodes) { 4345 if (vport->port_state < LPFC_VPORT_READY) { 4346 /* Check if there are more ADISCs to be sent */ 4347 lpfc_more_adisc(vport); 4348 } else { 4349 /* Check if there are more PLOGIs to be sent */ 4350 lpfc_more_plogi(vport); 4351 if (vport->num_disc_nodes == 0) { 4352 spin_lock_irq(shost->host_lock); 4353 vport->fc_flag &= ~FC_NDISC_ACTIVE; 4354 spin_unlock_irq(shost->host_lock); 4355 lpfc_can_disctmo(vport); 4356 lpfc_end_rscn(vport); 4357 } 4358 } 4359 } 4360 } 4361 return; 4362 } 4363 4364 /** 4365 * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer 4366 * @t: pointer to the timer function associated data (ndlp). 4367 * 4368 * This routine is invoked by the ndlp delayed-function timer to check 4369 * whether there is any pending ELS retry event(s) with the node. If not, it 4370 * simply returns. Otherwise, if there is at least one ELS delayed event, it 4371 * adds the delayed events to the HBA work list and invokes the 4372 * lpfc_worker_wake_up() routine to wake up worker thread to process the 4373 * event. Note that lpfc_nlp_get() is called before posting the event to 4374 * the work list to hold reference count of ndlp so that it guarantees the 4375 * reference to ndlp will still be available when the worker thread gets 4376 * to the event associated with the ndlp. 4377 **/ 4378 void 4379 lpfc_els_retry_delay(struct timer_list *t) 4380 { 4381 struct lpfc_nodelist *ndlp = from_timer(ndlp, t, nlp_delayfunc); 4382 struct lpfc_vport *vport = ndlp->vport; 4383 struct lpfc_hba *phba = vport->phba; 4384 unsigned long flags; 4385 struct lpfc_work_evt *evtp = &ndlp->els_retry_evt; 4386 4387 spin_lock_irqsave(&phba->hbalock, flags); 4388 if (!list_empty(&evtp->evt_listp)) { 4389 spin_unlock_irqrestore(&phba->hbalock, flags); 4390 return; 4391 } 4392 4393 /* We need to hold the node by incrementing the reference 4394 * count until the queued work is done 4395 */ 4396 evtp->evt_arg1 = lpfc_nlp_get(ndlp); 4397 if (evtp->evt_arg1) { 4398 evtp->evt = LPFC_EVT_ELS_RETRY; 4399 list_add_tail(&evtp->evt_listp, &phba->work_list); 4400 lpfc_worker_wake_up(phba); 4401 } 4402 spin_unlock_irqrestore(&phba->hbalock, flags); 4403 return; 4404 } 4405 4406 /** 4407 * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function 4408 * @ndlp: pointer to a node-list data structure. 4409 * 4410 * This routine is the worker-thread handler for processing the @ndlp delayed 4411 * event(s), posted by the lpfc_els_retry_delay() routine. 
It simply retrieves 4412 * the last ELS command from the associated ndlp and invokes the proper ELS 4413 * function according to the delayed ELS command to retry the command. 4414 **/ 4415 void 4416 lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) 4417 { 4418 struct lpfc_vport *vport = ndlp->vport; 4419 uint32_t cmd, retry; 4420 4421 spin_lock_irq(&ndlp->lock); 4422 cmd = ndlp->nlp_last_elscmd; 4423 ndlp->nlp_last_elscmd = 0; 4424 4425 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { 4426 spin_unlock_irq(&ndlp->lock); 4427 return; 4428 } 4429 4430 ndlp->nlp_flag &= ~NLP_DELAY_TMO; 4431 spin_unlock_irq(&ndlp->lock); 4432 /* 4433 * If a discovery event readded nlp_delayfunc after timer 4434 * firing and before processing the timer, cancel the 4435 * nlp_delayfunc. 4436 */ 4437 del_timer_sync(&ndlp->nlp_delayfunc); 4438 retry = ndlp->nlp_retry; 4439 ndlp->nlp_retry = 0; 4440 4441 switch (cmd) { 4442 case ELS_CMD_FLOGI: 4443 lpfc_issue_els_flogi(vport, ndlp, retry); 4444 break; 4445 case ELS_CMD_PLOGI: 4446 if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) { 4447 ndlp->nlp_prev_state = ndlp->nlp_state; 4448 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 4449 } 4450 break; 4451 case ELS_CMD_ADISC: 4452 if (!lpfc_issue_els_adisc(vport, ndlp, retry)) { 4453 ndlp->nlp_prev_state = ndlp->nlp_state; 4454 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 4455 } 4456 break; 4457 case ELS_CMD_PRLI: 4458 case ELS_CMD_NVMEPRLI: 4459 if (!lpfc_issue_els_prli(vport, ndlp, retry)) { 4460 ndlp->nlp_prev_state = ndlp->nlp_state; 4461 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 4462 } 4463 break; 4464 case ELS_CMD_LOGO: 4465 if (!lpfc_issue_els_logo(vport, ndlp, retry)) { 4466 ndlp->nlp_prev_state = ndlp->nlp_state; 4467 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 4468 } 4469 break; 4470 case ELS_CMD_FDISC: 4471 if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)) 4472 lpfc_issue_els_fdisc(vport, ndlp, retry); 4473 break; 4474 } 4475 return; 4476 } 4477 4478 /** 4479 * lpfc_link_reset - Issue link reset 4480 * @vport: pointer to a virtual N_Port data structure. 4481 * 4482 * This routine performs link reset by sending INIT_LINK mailbox command. 4483 * For SLI-3 adapter, link attention interrupt is enabled before issuing 4484 * INIT_LINK mailbox command. 
4485 *
4486 * Return code
4487 * 0 - Link reset initiated successfully
4488 * 1 - Failed to initiate link reset
4489 **/
4490 int
4491 lpfc_link_reset(struct lpfc_vport *vport)
4492 {
4493 struct lpfc_hba *phba = vport->phba;
4494 LPFC_MBOXQ_t *mbox;
4495 uint32_t control;
4496 int rc;
4497
4498 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4499 "2851 Attempt link reset\n");
4500 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4501 if (!mbox) {
4502 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4503 "2852 Failed to allocate mbox memory");
4504 return 1;
4505 }
4506
4507 /* Enable Link attention interrupts */
4508 if (phba->sli_rev <= LPFC_SLI_REV3) {
4509 spin_lock_irq(&phba->hbalock);
4510 phba->sli.sli_flag |= LPFC_PROCESS_LA;
4511 control = readl(phba->HCregaddr);
4512 control |= HC_LAINT_ENA;
4513 writel(control, phba->HCregaddr);
4514 readl(phba->HCregaddr); /* flush */
4515 spin_unlock_irq(&phba->hbalock);
4516 }
4517
4518 lpfc_init_link(phba, mbox, phba->cfg_topology,
4519 phba->cfg_link_speed);
4520 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4521 mbox->vport = vport;
4522 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
4523 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
4524 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4525 "2853 Failed to issue INIT_LINK "
4526 "mbox command, rc:x%x\n", rc);
4527 mempool_free(mbox, phba->mbox_mem_pool);
4528 return 1;
4529 }
4530
4531 return 0;
4532 }
4533
4534 /**
4535 * lpfc_els_retry - Make retry decision on an els command iocb
4536 * @phba: pointer to lpfc hba data structure.
4537 * @cmdiocb: pointer to lpfc command iocb data structure.
4538 * @rspiocb: pointer to lpfc response iocb data structure.
4539 *
4540 * This routine makes a retry decision on an ELS command IOCB which has
4541 * failed. The following ELS IOCBs use this function for retrying the command
4542 * when a previously issued command responded with an error status: FLOGI,
4543 * PLOGI, PRLI, ADISC and FDISC. Based on the ELS command type and the
4544 * returned error status, it makes the decision whether a retry shall be
4545 * issued for the command, and whether a retry shall be made immediately or
4546 * delayed. In the former case, the corresponding ELS command issuing routine
4547 * is called to retry the command. In the latter case, the ELS command shall
4548 * be posted to the ndlp delayed event and the delayed function timer set on
4549 * the ndlp for the delayed command issuing.
4550 *
4551 * Return code
4552 * 0 - No retry of els command is made
4553 * 1 - Immediate or delayed retry of els command is made
4554 **/
4555 static int
4556 lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4557 struct lpfc_iocbq *rspiocb)
4558 {
4559 struct lpfc_vport *vport = cmdiocb->vport;
4560 union lpfc_wqe128 *irsp = &rspiocb->wqe;
4561 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
4562 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;
4563 uint32_t *elscmd;
4564 struct ls_rjt stat;
4565 int retry = 0, maxretry = lpfc_max_els_tries, delay = 0;
4566 int logerr = 0;
4567 uint32_t cmd = 0;
4568 uint32_t did;
4569 int link_reset = 0, rc;
4570 u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
4571 u32 ulp_word4 = get_job_word4(phba, rspiocb);
4572
4573
4574 /* Note: cmd_dmabuf may be 0 for internal driver abort
4575 * of a delayed ELS command.
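 * In that case 'cmd' stays 0, so none of the command specific checks
 * below (PLOGI, FLOGI, FDISC, ...) can match and only the status based
 * retry policy applies.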
4576 */ 4577 4578 if (pcmd && pcmd->virt) { 4579 elscmd = (uint32_t *) (pcmd->virt); 4580 cmd = *elscmd++; 4581 } 4582 4583 if (ndlp) 4584 did = ndlp->nlp_DID; 4585 else { 4586 /* We should only hit this case for retrying PLOGI */ 4587 did = get_job_els_rsp64_did(phba, rspiocb); 4588 ndlp = lpfc_findnode_did(vport, did); 4589 if (!ndlp && (cmd != ELS_CMD_PLOGI)) 4590 return 0; 4591 } 4592 4593 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 4594 "Retry ELS: wd7:x%x wd4:x%x did:x%x", 4595 *(((uint32_t *)irsp) + 7), ulp_word4, did); 4596 4597 switch (ulp_status) { 4598 case IOSTAT_FCP_RSP_ERROR: 4599 break; 4600 case IOSTAT_REMOTE_STOP: 4601 if (phba->sli_rev == LPFC_SLI_REV4) { 4602 /* This IO was aborted by the target, we don't 4603 * know the rxid and because we did not send the 4604 * ABTS we cannot generate and RRQ. 4605 */ 4606 lpfc_set_rrq_active(phba, ndlp, 4607 cmdiocb->sli4_lxritag, 0, 0); 4608 } 4609 break; 4610 case IOSTAT_LOCAL_REJECT: 4611 switch ((ulp_word4 & IOERR_PARAM_MASK)) { 4612 case IOERR_LOOP_OPEN_FAILURE: 4613 if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0) 4614 delay = 1000; 4615 retry = 1; 4616 break; 4617 4618 case IOERR_ILLEGAL_COMMAND: 4619 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4620 "0124 Retry illegal cmd x%x " 4621 "retry:x%x delay:x%x\n", 4622 cmd, cmdiocb->retry, delay); 4623 retry = 1; 4624 /* All command's retry policy */ 4625 maxretry = 8; 4626 if (cmdiocb->retry > 2) 4627 delay = 1000; 4628 break; 4629 4630 case IOERR_NO_RESOURCES: 4631 logerr = 1; /* HBA out of resources */ 4632 retry = 1; 4633 if (cmdiocb->retry > 100) 4634 delay = 100; 4635 maxretry = 250; 4636 break; 4637 4638 case IOERR_ILLEGAL_FRAME: 4639 delay = 100; 4640 retry = 1; 4641 break; 4642 4643 case IOERR_INVALID_RPI: 4644 if (cmd == ELS_CMD_PLOGI && 4645 did == NameServer_DID) { 4646 /* Continue forever if plogi to */ 4647 /* the nameserver fails */ 4648 maxretry = 0; 4649 delay = 100; 4650 } 4651 retry = 1; 4652 break; 4653 4654 case IOERR_SEQUENCE_TIMEOUT: 4655 if (cmd == ELS_CMD_PLOGI && 4656 did == NameServer_DID && 4657 (cmdiocb->retry + 1) == maxretry) { 4658 /* Reset the Link */ 4659 link_reset = 1; 4660 break; 4661 } 4662 retry = 1; 4663 delay = 100; 4664 break; 4665 case IOERR_SLI_ABORTED: 4666 /* Retry ELS PLOGI command? 4667 * Possibly the rport just wasn't ready. 
4668 */ 4669 if (cmd == ELS_CMD_PLOGI) { 4670 /* No retry if state change */ 4671 if (ndlp && 4672 ndlp->nlp_state != NLP_STE_PLOGI_ISSUE) 4673 goto out_retry; 4674 retry = 1; 4675 maxretry = 2; 4676 } 4677 break; 4678 } 4679 break; 4680 4681 case IOSTAT_NPORT_RJT: 4682 case IOSTAT_FABRIC_RJT: 4683 if (ulp_word4 & RJT_UNAVAIL_TEMP) { 4684 retry = 1; 4685 break; 4686 } 4687 break; 4688 4689 case IOSTAT_NPORT_BSY: 4690 case IOSTAT_FABRIC_BSY: 4691 logerr = 1; /* Fabric / Remote NPort out of resources */ 4692 retry = 1; 4693 break; 4694 4695 case IOSTAT_LS_RJT: 4696 stat.un.ls_rjt_error_be = cpu_to_be32(ulp_word4); 4697 /* Added for Vendor specifc support 4698 * Just keep retrying for these Rsn / Exp codes 4699 */ 4700 if ((vport->fc_flag & FC_PT2PT) && 4701 cmd == ELS_CMD_NVMEPRLI) { 4702 switch (stat.un.b.lsRjtRsnCode) { 4703 case LSRJT_UNABLE_TPC: 4704 case LSRJT_INVALID_CMD: 4705 case LSRJT_LOGICAL_ERR: 4706 case LSRJT_CMD_UNSUPPORTED: 4707 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 4708 "0168 NVME PRLI LS_RJT " 4709 "reason %x port doesn't " 4710 "support NVME, disabling NVME\n", 4711 stat.un.b.lsRjtRsnCode); 4712 retry = 0; 4713 vport->fc_flag |= FC_PT2PT_NO_NVME; 4714 goto out_retry; 4715 } 4716 } 4717 switch (stat.un.b.lsRjtRsnCode) { 4718 case LSRJT_UNABLE_TPC: 4719 /* Special case for PRLI LS_RJTs. Recall that lpfc 4720 * uses a single routine to issue both PRLI FC4 types. 4721 * If the PRLI is rejected because that FC4 type 4722 * isn't really supported, don't retry and cause 4723 * multiple transport registrations. Otherwise, parse 4724 * the reason code/reason code explanation and take the 4725 * appropriate action. 4726 */ 4727 lpfc_printf_vlog(vport, KERN_INFO, 4728 LOG_DISCOVERY | LOG_ELS | LOG_NODE, 4729 "0153 ELS cmd x%x LS_RJT by x%x. " 4730 "RsnCode x%x RsnCodeExp x%x\n", 4731 cmd, did, stat.un.b.lsRjtRsnCode, 4732 stat.un.b.lsRjtRsnCodeExp); 4733 4734 switch (stat.un.b.lsRjtRsnCodeExp) { 4735 case LSEXP_CANT_GIVE_DATA: 4736 case LSEXP_CMD_IN_PROGRESS: 4737 if (cmd == ELS_CMD_PLOGI) { 4738 delay = 1000; 4739 maxretry = 48; 4740 } 4741 retry = 1; 4742 break; 4743 case LSEXP_REQ_UNSUPPORTED: 4744 case LSEXP_NO_RSRC_ASSIGN: 4745 /* These explanation codes get no retry. */ 4746 if (cmd == ELS_CMD_PRLI || 4747 cmd == ELS_CMD_NVMEPRLI) 4748 break; 4749 fallthrough; 4750 default: 4751 /* Limit the delay and retry action to a limited 4752 * cmd set. There are other ELS commands where 4753 * a retry is not expected. 4754 */ 4755 if (cmd == ELS_CMD_PLOGI || 4756 cmd == ELS_CMD_PRLI || 4757 cmd == ELS_CMD_NVMEPRLI) { 4758 delay = 1000; 4759 maxretry = lpfc_max_els_tries + 1; 4760 retry = 1; 4761 } 4762 break; 4763 } 4764 4765 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 4766 (cmd == ELS_CMD_FDISC) && 4767 (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){ 4768 lpfc_printf_vlog(vport, KERN_ERR, 4769 LOG_TRACE_EVENT, 4770 "0125 FDISC Failed (x%x). 
" 4771 "Fabric out of resources\n", 4772 stat.un.lsRjtError); 4773 lpfc_vport_set_state(vport, 4774 FC_VPORT_NO_FABRIC_RSCS); 4775 } 4776 break; 4777 4778 case LSRJT_LOGICAL_BSY: 4779 if ((cmd == ELS_CMD_PLOGI) || 4780 (cmd == ELS_CMD_PRLI) || 4781 (cmd == ELS_CMD_NVMEPRLI)) { 4782 delay = 1000; 4783 maxretry = 48; 4784 } else if (cmd == ELS_CMD_FDISC) { 4785 /* FDISC retry policy */ 4786 maxretry = 48; 4787 if (cmdiocb->retry >= 32) 4788 delay = 1000; 4789 } 4790 retry = 1; 4791 break; 4792 4793 case LSRJT_LOGICAL_ERR: 4794 /* There are some cases where switches return this 4795 * error when they are not ready and should be returning 4796 * Logical Busy. We should delay every time. 4797 */ 4798 if (cmd == ELS_CMD_FDISC && 4799 stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) { 4800 maxretry = 3; 4801 delay = 1000; 4802 retry = 1; 4803 } else if (cmd == ELS_CMD_FLOGI && 4804 stat.un.b.lsRjtRsnCodeExp == 4805 LSEXP_NOTHING_MORE) { 4806 vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf; 4807 retry = 1; 4808 lpfc_printf_vlog(vport, KERN_ERR, 4809 LOG_TRACE_EVENT, 4810 "0820 FLOGI Failed (x%x). " 4811 "BBCredit Not Supported\n", 4812 stat.un.lsRjtError); 4813 } 4814 break; 4815 4816 case LSRJT_PROTOCOL_ERR: 4817 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 4818 (cmd == ELS_CMD_FDISC) && 4819 ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) || 4820 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID)) 4821 ) { 4822 lpfc_printf_vlog(vport, KERN_ERR, 4823 LOG_TRACE_EVENT, 4824 "0122 FDISC Failed (x%x). " 4825 "Fabric Detected Bad WWN\n", 4826 stat.un.lsRjtError); 4827 lpfc_vport_set_state(vport, 4828 FC_VPORT_FABRIC_REJ_WWN); 4829 } 4830 break; 4831 case LSRJT_VENDOR_UNIQUE: 4832 if ((stat.un.b.vendorUnique == 0x45) && 4833 (cmd == ELS_CMD_FLOGI)) { 4834 goto out_retry; 4835 } 4836 break; 4837 case LSRJT_CMD_UNSUPPORTED: 4838 /* lpfc nvmet returns this type of LS_RJT when it 4839 * receives an FCP PRLI because lpfc nvmet only 4840 * support NVME. ELS request is terminated for FCP4 4841 * on this rport. 4842 */ 4843 if (stat.un.b.lsRjtRsnCodeExp == 4844 LSEXP_REQ_UNSUPPORTED) { 4845 if (cmd == ELS_CMD_PRLI) 4846 goto out_retry; 4847 } 4848 break; 4849 } 4850 break; 4851 4852 case IOSTAT_INTERMED_RSP: 4853 case IOSTAT_BA_RJT: 4854 break; 4855 4856 default: 4857 break; 4858 } 4859 4860 if (link_reset) { 4861 rc = lpfc_link_reset(vport); 4862 if (rc) { 4863 /* Do not give up. Retry PLOGI one more time and attempt 4864 * link reset if PLOGI fails again. 
4865 */ 4866 retry = 1; 4867 delay = 100; 4868 goto out_retry; 4869 } 4870 return 1; 4871 } 4872 4873 if (did == FDMI_DID) 4874 retry = 1; 4875 4876 if ((cmd == ELS_CMD_FLOGI) && 4877 (phba->fc_topology != LPFC_TOPOLOGY_LOOP) && 4878 !lpfc_error_lost_link(ulp_status, ulp_word4)) { 4879 /* FLOGI retry policy */ 4880 retry = 1; 4881 /* retry FLOGI forever */ 4882 if (phba->link_flag != LS_LOOPBACK_MODE) 4883 maxretry = 0; 4884 else 4885 maxretry = 2; 4886 4887 if (cmdiocb->retry >= 100) 4888 delay = 5000; 4889 else if (cmdiocb->retry >= 32) 4890 delay = 1000; 4891 } else if ((cmd == ELS_CMD_FDISC) && 4892 !lpfc_error_lost_link(ulp_status, ulp_word4)) { 4893 /* retry FDISCs every second up to devloss */ 4894 retry = 1; 4895 maxretry = vport->cfg_devloss_tmo; 4896 delay = 1000; 4897 } 4898 4899 cmdiocb->retry++; 4900 if (maxretry && (cmdiocb->retry >= maxretry)) { 4901 phba->fc_stat.elsRetryExceeded++; 4902 retry = 0; 4903 } 4904 4905 if ((vport->load_flag & FC_UNLOADING) != 0) 4906 retry = 0; 4907 4908 out_retry: 4909 if (retry) { 4910 if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) { 4911 /* Stop retrying PLOGI and FDISC if in FCF discovery */ 4912 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 4913 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4914 "2849 Stop retry ELS command " 4915 "x%x to remote NPORT x%x, " 4916 "Data: x%x x%x\n", cmd, did, 4917 cmdiocb->retry, delay); 4918 return 0; 4919 } 4920 } 4921 4922 /* Retry ELS command <elsCmd> to remote NPORT <did> */ 4923 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4924 "0107 Retry ELS command x%x to remote " 4925 "NPORT x%x Data: x%x x%x\n", 4926 cmd, did, cmdiocb->retry, delay); 4927 4928 if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) && 4929 ((ulp_status != IOSTAT_LOCAL_REJECT) || 4930 ((ulp_word4 & IOERR_PARAM_MASK) != 4931 IOERR_NO_RESOURCES))) { 4932 /* Don't reset timer for no resources */ 4933 4934 /* If discovery / RSCN timer is running, reset it */ 4935 if (timer_pending(&vport->fc_disctmo) || 4936 (vport->fc_flag & FC_RSCN_MODE)) 4937 lpfc_set_disctmo(vport); 4938 } 4939 4940 phba->fc_stat.elsXmitRetry++; 4941 if (ndlp && delay) { 4942 phba->fc_stat.elsDelayRetry++; 4943 ndlp->nlp_retry = cmdiocb->retry; 4944 4945 /* delay is specified in milliseconds */ 4946 mod_timer(&ndlp->nlp_delayfunc, 4947 jiffies + msecs_to_jiffies(delay)); 4948 spin_lock_irq(&ndlp->lock); 4949 ndlp->nlp_flag |= NLP_DELAY_TMO; 4950 spin_unlock_irq(&ndlp->lock); 4951 4952 ndlp->nlp_prev_state = ndlp->nlp_state; 4953 if ((cmd == ELS_CMD_PRLI) || 4954 (cmd == ELS_CMD_NVMEPRLI)) 4955 lpfc_nlp_set_state(vport, ndlp, 4956 NLP_STE_PRLI_ISSUE); 4957 else if (cmd != ELS_CMD_ADISC) 4958 lpfc_nlp_set_state(vport, ndlp, 4959 NLP_STE_NPR_NODE); 4960 ndlp->nlp_last_elscmd = cmd; 4961 4962 return 1; 4963 } 4964 switch (cmd) { 4965 case ELS_CMD_FLOGI: 4966 lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry); 4967 return 1; 4968 case ELS_CMD_FDISC: 4969 lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry); 4970 return 1; 4971 case ELS_CMD_PLOGI: 4972 if (ndlp) { 4973 ndlp->nlp_prev_state = ndlp->nlp_state; 4974 lpfc_nlp_set_state(vport, ndlp, 4975 NLP_STE_PLOGI_ISSUE); 4976 } 4977 lpfc_issue_els_plogi(vport, did, cmdiocb->retry); 4978 return 1; 4979 case ELS_CMD_ADISC: 4980 ndlp->nlp_prev_state = ndlp->nlp_state; 4981 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 4982 lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry); 4983 return 1; 4984 case ELS_CMD_PRLI: 4985 case ELS_CMD_NVMEPRLI: 4986 ndlp->nlp_prev_state = ndlp->nlp_state; 4987 lpfc_nlp_set_state(vport, ndlp, 
NLP_STE_PRLI_ISSUE); 4988 lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry); 4989 return 1; 4990 case ELS_CMD_LOGO: 4991 ndlp->nlp_prev_state = ndlp->nlp_state; 4992 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 4993 lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry); 4994 return 1; 4995 } 4996 } 4997 /* No retry ELS command <elsCmd> to remote NPORT <did> */ 4998 if (logerr) { 4999 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5000 "0137 No retry ELS command x%x to remote " 5001 "NPORT x%x: Out of Resources: Error:x%x/%x\n", 5002 cmd, did, ulp_status, 5003 ulp_word4); 5004 } 5005 else { 5006 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5007 "0108 No retry ELS command x%x to remote " 5008 "NPORT x%x Retried:%d Error:x%x/%x\n", 5009 cmd, did, cmdiocb->retry, ulp_status, 5010 ulp_word4); 5011 } 5012 return 0; 5013 } 5014 5015 /** 5016 * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb 5017 * @phba: pointer to lpfc hba data structure. 5018 * @buf_ptr1: pointer to the lpfc DMA buffer data structure. 5019 * 5020 * This routine releases the lpfc DMA (Direct Memory Access) buffer(s) 5021 * associated with a command IOCB back to the lpfc DMA buffer pool. It first 5022 * checks to see whether there is a lpfc DMA buffer associated with the 5023 * response of the command IOCB. If so, it will be released before releasing 5024 * the lpfc DMA buffer associated with the IOCB itself. 5025 * 5026 * Return code 5027 * 0 - Successfully released lpfc DMA buffer (currently, always return 0) 5028 **/ 5029 static int 5030 lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1) 5031 { 5032 struct lpfc_dmabuf *buf_ptr; 5033 5034 /* Free the response before processing the command. */ 5035 if (!list_empty(&buf_ptr1->list)) { 5036 list_remove_head(&buf_ptr1->list, buf_ptr, 5037 struct lpfc_dmabuf, 5038 list); 5039 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 5040 kfree(buf_ptr); 5041 } 5042 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys); 5043 kfree(buf_ptr1); 5044 return 0; 5045 } 5046 5047 /** 5048 * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl 5049 * @phba: pointer to lpfc hba data structure. 5050 * @buf_ptr: pointer to the lpfc dma buffer data structure. 5051 * 5052 * This routine releases the lpfc Direct Memory Access (DMA) buffer 5053 * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer 5054 * pool. 5055 * 5056 * Return code 5057 * 0 - Successfully released lpfc DMA buffer (currently, always return 0) 5058 **/ 5059 static int 5060 lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr) 5061 { 5062 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 5063 kfree(buf_ptr); 5064 return 0; 5065 } 5066 5067 /** 5068 * lpfc_els_free_iocb - Free a command iocb and its associated resources 5069 * @phba: pointer to lpfc hba data structure. 5070 * @elsiocb: pointer to lpfc els command iocb data structure. 5071 * 5072 * This routine frees a command IOCB and its associated resources. The 5073 * command IOCB data structure contains the reference to various associated 5074 * resources, these fields must be set to NULL if the associated reference 5075 * not present: 5076 * cmd_dmabuf - reference to cmd. 5077 * cmd_dmabuf->next - reference to rsp 5078 * rsp_dmabuf - unused 5079 * bpl_dmabuf - reference to bpl 5080 * 5081 * It first properly decrements the reference count held on ndlp for the 5082 * IOCB completion callback function. 
If the LPFC_DELAY_MEM_FREE flag is not
5083 * set, it invokes the lpfc_els_free_data() routine to release the Direct
5084 * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it
5085 * adds the DMA buffer to the @phba data structure for the delayed release.
5086 * If a reference to the Buffer Pointer List (BPL) is present, the
5087 * lpfc_els_free_bpl() routine is invoked to release the DMA memory
5088 * associated with the BPL. Finally, the lpfc_sli_release_iocbq() routine is
5089 * invoked to release the IOCB data structure back to the @phba IOCBQ list.
5090 *
5091 * Return code
5092 * 0 - Success (currently, always return 0)
5093 **/
5094 int
5095 lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
5096 {
5097 struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
5098
5099 /* The I/O iocb is complete. Clear the node and first dmabuf */
5100 elsiocb->ndlp = NULL;
5101
5102 /* cmd_dmabuf = cmd, cmd_dmabuf->next = rsp, bpl_dmabuf = bpl */
5103 if (elsiocb->cmd_dmabuf) {
5104 if (elsiocb->cmd_flag & LPFC_DELAY_MEM_FREE) {
5105 /* Firmware could still be in progress of DMAing
5106 * payload, so don't free data buffer till after
5107 * a hbeat.
5108 */
5109 elsiocb->cmd_flag &= ~LPFC_DELAY_MEM_FREE;
5110 buf_ptr = elsiocb->cmd_dmabuf;
5111 elsiocb->cmd_dmabuf = NULL;
5112 if (buf_ptr) {
5113 buf_ptr1 = NULL;
5114 spin_lock_irq(&phba->hbalock);
5115 if (!list_empty(&buf_ptr->list)) {
5116 list_remove_head(&buf_ptr->list,
5117 buf_ptr1, struct lpfc_dmabuf,
5118 list);
5119 INIT_LIST_HEAD(&buf_ptr1->list);
5120 list_add_tail(&buf_ptr1->list,
5121 &phba->elsbuf);
5122 phba->elsbuf_cnt++;
5123 }
5124 INIT_LIST_HEAD(&buf_ptr->list);
5125 list_add_tail(&buf_ptr->list, &phba->elsbuf);
5126 phba->elsbuf_cnt++;
5127 spin_unlock_irq(&phba->hbalock);
5128 }
5129 } else {
5130 buf_ptr1 = elsiocb->cmd_dmabuf;
5131 lpfc_els_free_data(phba, buf_ptr1);
5132 elsiocb->cmd_dmabuf = NULL;
5133 }
5134 }
5135
5136 if (elsiocb->bpl_dmabuf) {
5137 buf_ptr = elsiocb->bpl_dmabuf;
5138 lpfc_els_free_bpl(phba, buf_ptr);
5139 elsiocb->bpl_dmabuf = NULL;
5140 }
5141 lpfc_sli_release_iocbq(phba, elsiocb);
5142 return 0;
5143 }
5144
5145 /**
5146 * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response
5147 * @phba: pointer to lpfc hba data structure.
5148 * @cmdiocb: pointer to lpfc command iocb data structure.
5149 * @rspiocb: pointer to lpfc response iocb data structure.
5150 *
5151 * This routine is the completion callback function to the Logout (LOGO)
5152 * Accept (ACC) Response ELS command. This routine is invoked to indicate
5153 * the completion of the LOGO process. It invokes lpfc_nlp_not_used() to
5154 * release the ndlp if it holds the last remaining reference (reference
5155 * count is 1). If that succeeds (meaning the ndlp was released), it sets
5156 * the iocb ndlp field to NULL to inform the following lpfc_els_free_iocb()
5157 * routine that no ndlp reference count needs to be decremented. Otherwise,
5158 * the ndlp reference use-count shall be decremented by the
5159 * lpfc_els_free_iocb() routine. Finally, lpfc_els_free_iocb() is invoked
5160 * to release the IOCB data structure.
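 *
 * See the body for two special cases: fabric-type nodes outside the
 * well known DID range complete without touching the RPI, and an NPR
 * node with a pending PLOGI retry is left for the PLOGI completion to
 * clean up.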
5161 **/ 5162 static void 5163 lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 5164 struct lpfc_iocbq *rspiocb) 5165 { 5166 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 5167 struct lpfc_vport *vport = cmdiocb->vport; 5168 u32 ulp_status, ulp_word4; 5169 5170 ulp_status = get_job_ulpstatus(phba, rspiocb); 5171 ulp_word4 = get_job_word4(phba, rspiocb); 5172 5173 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5174 "ACC LOGO cmpl: status:x%x/x%x did:x%x", 5175 ulp_status, ulp_word4, ndlp->nlp_DID); 5176 /* ACC to LOGO completes to NPort <nlp_DID> */ 5177 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5178 "0109 ACC to LOGO completes to NPort x%x refcnt %d " 5179 "Data: x%x x%x x%x\n", 5180 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag, 5181 ndlp->nlp_state, ndlp->nlp_rpi); 5182 5183 /* This clause allows the LOGO ACC to complete and free resources 5184 * for the Fabric Domain Controller. It does deliberately skip 5185 * the unreg_rpi and release rpi because some fabrics send RDP 5186 * requests after logging out from the initiator. 5187 */ 5188 if (ndlp->nlp_type & NLP_FABRIC && 5189 ((ndlp->nlp_DID & WELL_KNOWN_DID_MASK) != WELL_KNOWN_DID_MASK)) 5190 goto out; 5191 5192 if (ndlp->nlp_state == NLP_STE_NPR_NODE) { 5193 /* If PLOGI is being retried, PLOGI completion will cleanup the 5194 * node. The NLP_NPR_2B_DISC flag needs to be retained to make 5195 * progress on nodes discovered from last RSCN. 5196 */ 5197 if ((ndlp->nlp_flag & NLP_DELAY_TMO) && 5198 (ndlp->nlp_last_elscmd == ELS_CMD_PLOGI)) 5199 goto out; 5200 5201 /* NPort Recovery mode or node is just allocated */ 5202 if (!lpfc_nlp_not_used(ndlp)) { 5203 /* A LOGO is completing and the node is in NPR state. 5204 * Just unregister the RPI because the node is still 5205 * required. 5206 */ 5207 lpfc_unreg_rpi(vport, ndlp); 5208 } else { 5209 /* Indicate the node has already released, should 5210 * not reference to it from within lpfc_els_free_iocb. 5211 */ 5212 cmdiocb->ndlp = NULL; 5213 } 5214 } 5215 out: 5216 /* 5217 * The driver received a LOGO from the rport and has ACK'd it. 5218 * At this point, the driver is done so release the IOCB 5219 */ 5220 lpfc_els_free_iocb(phba, cmdiocb); 5221 lpfc_nlp_put(ndlp); 5222 } 5223 5224 /** 5225 * lpfc_mbx_cmpl_dflt_rpi - Completion callbk func for unreg dflt rpi mbox cmd 5226 * @phba: pointer to lpfc hba data structure. 5227 * @pmb: pointer to the driver internal queue element for mailbox command. 5228 * 5229 * This routine is the completion callback function for unregister default 5230 * RPI (Remote Port Index) mailbox command to the @phba. It simply releases 5231 * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and 5232 * decrements the ndlp reference count held for this completion callback 5233 * function. After that, it invokes the lpfc_nlp_not_used() to check 5234 * whether there is only one reference left on the ndlp. If so, it will 5235 * perform one more decrement and trigger the release of the ndlp. 
 **/
void
lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
	u32 mbx_flag = pmb->mbox_flag;
	u32 mbx_cmd = pmb->u.mb.mbxCommand;

	if (ndlp) {
		lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
				 "0006 rpi x%x DID:%x flg:%x %d x%px "
				 "mbx_cmd x%x mbx_flag x%x x%px\n",
				 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
				 kref_read(&ndlp->kref), ndlp, mbx_cmd,
				 mbx_flag, pmb);

		/* This ends the default/temporary RPI cleanup logic for this
		 * ndlp, and the node and rpi need to be released. Free the rpi
		 * first on an UNREG_LOGIN and then release the final
		 * references.
		 */
		spin_lock_irq(&ndlp->lock);
		ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
		if (mbx_cmd == MBX_UNREG_LOGIN)
			ndlp->nlp_flag &= ~NLP_UNREG_INP;
		spin_unlock_irq(&ndlp->lock);
		lpfc_nlp_put(ndlp);
		lpfc_drop_node(ndlp->vport, ndlp);
	}

	lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
}

/**
 * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is the completion callback function for an ELS Response IOCB
 * command. In the normal case, this callback function simply updates the
 * nlp_flag bitmap in the ndlp data structure; if the mailbox command
 * reference field in the command IOCB is not NULL, the referenced mailbox
 * command will be sent out, and then the lpfc_els_free_iocb() routine is
 * invoked to release the IOCB.
 **/
static void
lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		  struct lpfc_iocbq *rspiocb)
{
	struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
	struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
	struct Scsi_Host *shost = vport ?
lpfc_shost_from_vport(vport) : NULL; 5289 IOCB_t *irsp; 5290 LPFC_MBOXQ_t *mbox = NULL; 5291 u32 ulp_status, ulp_word4, tmo, did, iotag; 5292 5293 if (!vport) { 5294 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5295 "3177 ELS response failed\n"); 5296 goto out; 5297 } 5298 if (cmdiocb->context_un.mbox) 5299 mbox = cmdiocb->context_un.mbox; 5300 5301 ulp_status = get_job_ulpstatus(phba, rspiocb); 5302 ulp_word4 = get_job_word4(phba, rspiocb); 5303 did = get_job_els_rsp64_did(phba, cmdiocb); 5304 5305 if (phba->sli_rev == LPFC_SLI_REV4) { 5306 tmo = get_wqe_tmo(cmdiocb); 5307 iotag = get_wqe_reqtag(cmdiocb); 5308 } else { 5309 irsp = &rspiocb->iocb; 5310 tmo = irsp->ulpTimeout; 5311 iotag = irsp->ulpIoTag; 5312 } 5313 5314 /* Check to see if link went down during discovery */ 5315 if (!ndlp || lpfc_els_chk_latt(vport)) { 5316 if (mbox) 5317 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 5318 goto out; 5319 } 5320 5321 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5322 "ELS rsp cmpl: status:x%x/x%x did:x%x", 5323 ulp_status, ulp_word4, did); 5324 /* ELS response tag <ulpIoTag> completes */ 5325 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5326 "0110 ELS response tag x%x completes " 5327 "Data: x%x x%x x%x x%x x%x x%x x%x x%x %p %p\n", 5328 iotag, ulp_status, ulp_word4, tmo, 5329 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5330 ndlp->nlp_rpi, kref_read(&ndlp->kref), mbox, ndlp); 5331 if (mbox) { 5332 if (ulp_status == 0 5333 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { 5334 if (!lpfc_unreg_rpi(vport, ndlp) && 5335 (!(vport->fc_flag & FC_PT2PT))) { 5336 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || 5337 ndlp->nlp_state == 5338 NLP_STE_REG_LOGIN_ISSUE) { 5339 lpfc_printf_vlog(vport, KERN_INFO, 5340 LOG_DISCOVERY, 5341 "0314 PLOGI recov " 5342 "DID x%x " 5343 "Data: x%x x%x x%x\n", 5344 ndlp->nlp_DID, 5345 ndlp->nlp_state, 5346 ndlp->nlp_rpi, 5347 ndlp->nlp_flag); 5348 goto out_free_mbox; 5349 } 5350 } 5351 5352 /* Increment reference count to ndlp to hold the 5353 * reference to ndlp for the callback function. 5354 */ 5355 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 5356 if (!mbox->ctx_ndlp) 5357 goto out_free_mbox; 5358 5359 mbox->vport = vport; 5360 if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) { 5361 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG; 5362 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 5363 } 5364 else { 5365 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; 5366 ndlp->nlp_prev_state = ndlp->nlp_state; 5367 lpfc_nlp_set_state(vport, ndlp, 5368 NLP_STE_REG_LOGIN_ISSUE); 5369 } 5370 5371 ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; 5372 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 5373 != MBX_NOT_FINISHED) 5374 goto out; 5375 5376 /* Decrement the ndlp reference count we 5377 * set for this failed mailbox command. 5378 */ 5379 lpfc_nlp_put(ndlp); 5380 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 5381 5382 /* ELS rsp: Cannot issue reg_login for <NPortid> */ 5383 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5384 "0138 ELS rsp: Cannot issue reg_login for x%x " 5385 "Data: x%x x%x x%x\n", 5386 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5387 ndlp->nlp_rpi); 5388 } 5389 out_free_mbox: 5390 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 5391 } 5392 out: 5393 if (ndlp && shost) { 5394 spin_lock_irq(&ndlp->lock); 5395 if (mbox) 5396 ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN; 5397 ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI; 5398 spin_unlock_irq(&ndlp->lock); 5399 } 5400 5401 /* An SLI4 NPIV instance wants to drop the node at this point under 5402 * these conditions and release the RPI. 
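	 * That is, the node is not registered with the SCSI/NVME transport,
	 * it is flagged NLP_RELEASE_RPI, and it is not in the middle of a
	 * PLOGI or REG_LOGIN exchange; see the checks that follow.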
5403 */ 5404 if (phba->sli_rev == LPFC_SLI_REV4 && 5405 (vport && vport->port_type == LPFC_NPIV_PORT) && 5406 !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD) && 5407 ndlp->nlp_flag & NLP_RELEASE_RPI) { 5408 if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE && 5409 ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) { 5410 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); 5411 spin_lock_irq(&ndlp->lock); 5412 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; 5413 ndlp->nlp_flag &= ~NLP_RELEASE_RPI; 5414 spin_unlock_irq(&ndlp->lock); 5415 lpfc_drop_node(vport, ndlp); 5416 } 5417 } 5418 5419 /* Release the originating I/O reference. */ 5420 lpfc_els_free_iocb(phba, cmdiocb); 5421 lpfc_nlp_put(ndlp); 5422 return; 5423 } 5424 5425 /** 5426 * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command 5427 * @vport: pointer to a host virtual N_Port data structure. 5428 * @flag: the els command code to be accepted. 5429 * @oldiocb: pointer to the original lpfc command iocb data structure. 5430 * @ndlp: pointer to a node-list data structure. 5431 * @mbox: pointer to the driver internal queue element for mailbox command. 5432 * 5433 * This routine prepares and issues an Accept (ACC) response IOCB 5434 * command. It uses the @flag to properly set up the IOCB field for the 5435 * specific ACC response command to be issued and invokes the 5436 * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a 5437 * @mbox pointer is passed in, it will be put into the context_un.mbox 5438 * field of the IOCB for the completion callback function to issue the 5439 * mailbox command to the HBA later when callback is invoked. 5440 * 5441 * Note that the ndlp reference count will be incremented by 1 for holding the 5442 * ndlp and the reference to ndlp will be stored into the ndlp field of 5443 * the IOCB for the completion callback function to the corresponding 5444 * response ELS IOCB command. 
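 *
 * A minimal illustrative call, as it might appear in a handler for an
 * unsolicited PLOGI (the variable names and the previously prepared
 * REG_LOGIN mailbox are only a sketch of typical usage, not a requirement):
 *
 *	rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, login_mbox);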
5445 * 5446 * Return code 5447 * 0 - Successfully issued acc response 5448 * 1 - Failed to issue acc response 5449 **/ 5450 int 5451 lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, 5452 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 5453 LPFC_MBOXQ_t *mbox) 5454 { 5455 struct lpfc_hba *phba = vport->phba; 5456 IOCB_t *icmd; 5457 IOCB_t *oldcmd; 5458 union lpfc_wqe128 *wqe; 5459 union lpfc_wqe128 *oldwqe = &oldiocb->wqe; 5460 struct lpfc_iocbq *elsiocb; 5461 uint8_t *pcmd; 5462 struct serv_parm *sp; 5463 uint16_t cmdsize; 5464 int rc; 5465 ELS_PKT *els_pkt_ptr; 5466 struct fc_els_rdf_resp *rdf_resp; 5467 5468 switch (flag) { 5469 case ELS_CMD_ACC: 5470 cmdsize = sizeof(uint32_t); 5471 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5472 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5473 if (!elsiocb) { 5474 spin_lock_irq(&ndlp->lock); 5475 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 5476 spin_unlock_irq(&ndlp->lock); 5477 return 1; 5478 } 5479 5480 if (phba->sli_rev == LPFC_SLI_REV4) { 5481 wqe = &elsiocb->wqe; 5482 /* XRI / rx_id */ 5483 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 5484 bf_get(wqe_ctxt_tag, 5485 &oldwqe->xmit_els_rsp.wqe_com)); 5486 5487 /* oxid */ 5488 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5489 bf_get(wqe_rcvoxid, 5490 &oldwqe->xmit_els_rsp.wqe_com)); 5491 } else { 5492 icmd = &elsiocb->iocb; 5493 oldcmd = &oldiocb->iocb; 5494 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5495 icmd->unsli3.rcvsli3.ox_id = 5496 oldcmd->unsli3.rcvsli3.ox_id; 5497 } 5498 5499 pcmd = elsiocb->cmd_dmabuf->virt; 5500 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5501 pcmd += sizeof(uint32_t); 5502 5503 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5504 "Issue ACC: did:x%x flg:x%x", 5505 ndlp->nlp_DID, ndlp->nlp_flag, 0); 5506 break; 5507 case ELS_CMD_FLOGI: 5508 case ELS_CMD_PLOGI: 5509 cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t)); 5510 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5511 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5512 if (!elsiocb) 5513 return 1; 5514 5515 if (phba->sli_rev == LPFC_SLI_REV4) { 5516 wqe = &elsiocb->wqe; 5517 /* XRI / rx_id */ 5518 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 5519 bf_get(wqe_ctxt_tag, 5520 &oldwqe->xmit_els_rsp.wqe_com)); 5521 5522 /* oxid */ 5523 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5524 bf_get(wqe_rcvoxid, 5525 &oldwqe->xmit_els_rsp.wqe_com)); 5526 } else { 5527 icmd = &elsiocb->iocb; 5528 oldcmd = &oldiocb->iocb; 5529 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5530 icmd->unsli3.rcvsli3.ox_id = 5531 oldcmd->unsli3.rcvsli3.ox_id; 5532 } 5533 5534 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt; 5535 5536 if (mbox) 5537 elsiocb->context_un.mbox = mbox; 5538 5539 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5540 pcmd += sizeof(uint32_t); 5541 sp = (struct serv_parm *)pcmd; 5542 5543 if (flag == ELS_CMD_FLOGI) { 5544 /* Copy the received service parameters back */ 5545 memcpy(sp, &phba->fc_fabparam, 5546 sizeof(struct serv_parm)); 5547 5548 /* Clear the F_Port bit */ 5549 sp->cmn.fPort = 0; 5550 5551 /* Mark all class service parameters as invalid */ 5552 sp->cls1.classValid = 0; 5553 sp->cls2.classValid = 0; 5554 sp->cls3.classValid = 0; 5555 sp->cls4.classValid = 0; 5556 5557 /* Copy our worldwide names */ 5558 memcpy(&sp->portName, &vport->fc_sparam.portName, 5559 sizeof(struct lpfc_name)); 5560 memcpy(&sp->nodeName, &vport->fc_sparam.nodeName, 5561 sizeof(struct lpfc_name)); 5562 } else { 5563 memcpy(pcmd, &vport->fc_sparam, 5564 sizeof(struct serv_parm)); 5565 5566 
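			/* The copied service parameters may carry stale
			 * vendor specific data, so reset those fields here;
			 * only the capabilities explicitly set below are
			 * advertised in the ACC.
			 */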
sp->cmn.valid_vendor_ver_level = 0; 5567 memset(sp->un.vendorVersion, 0, 5568 sizeof(sp->un.vendorVersion)); 5569 sp->cmn.bbRcvSizeMsb &= 0xF; 5570 5571 /* If our firmware supports this feature, convey that 5572 * info to the target using the vendor specific field. 5573 */ 5574 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) { 5575 sp->cmn.valid_vendor_ver_level = 1; 5576 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID); 5577 sp->un.vv.flags = 5578 cpu_to_be32(LPFC_VV_SUPPRESS_RSP); 5579 } 5580 } 5581 5582 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5583 "Issue ACC FLOGI/PLOGI: did:x%x flg:x%x", 5584 ndlp->nlp_DID, ndlp->nlp_flag, 0); 5585 break; 5586 case ELS_CMD_PRLO: 5587 cmdsize = sizeof(uint32_t) + sizeof(PRLO); 5588 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5589 ndlp, ndlp->nlp_DID, ELS_CMD_PRLO); 5590 if (!elsiocb) 5591 return 1; 5592 5593 if (phba->sli_rev == LPFC_SLI_REV4) { 5594 wqe = &elsiocb->wqe; 5595 /* XRI / rx_id */ 5596 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 5597 bf_get(wqe_ctxt_tag, 5598 &oldwqe->xmit_els_rsp.wqe_com)); 5599 5600 /* oxid */ 5601 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5602 bf_get(wqe_rcvoxid, 5603 &oldwqe->xmit_els_rsp.wqe_com)); 5604 } else { 5605 icmd = &elsiocb->iocb; 5606 oldcmd = &oldiocb->iocb; 5607 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5608 icmd->unsli3.rcvsli3.ox_id = 5609 oldcmd->unsli3.rcvsli3.ox_id; 5610 } 5611 5612 pcmd = (u8 *) elsiocb->cmd_dmabuf->virt; 5613 5614 memcpy(pcmd, oldiocb->cmd_dmabuf->virt, 5615 sizeof(uint32_t) + sizeof(PRLO)); 5616 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC; 5617 els_pkt_ptr = (ELS_PKT *) pcmd; 5618 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED; 5619 5620 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5621 "Issue ACC PRLO: did:x%x flg:x%x", 5622 ndlp->nlp_DID, ndlp->nlp_flag, 0); 5623 break; 5624 case ELS_CMD_RDF: 5625 cmdsize = sizeof(*rdf_resp); 5626 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5627 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5628 if (!elsiocb) 5629 return 1; 5630 5631 if (phba->sli_rev == LPFC_SLI_REV4) { 5632 wqe = &elsiocb->wqe; 5633 /* XRI / rx_id */ 5634 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 5635 bf_get(wqe_ctxt_tag, 5636 &oldwqe->xmit_els_rsp.wqe_com)); 5637 5638 /* oxid */ 5639 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5640 bf_get(wqe_rcvoxid, 5641 &oldwqe->xmit_els_rsp.wqe_com)); 5642 } else { 5643 icmd = &elsiocb->iocb; 5644 oldcmd = &oldiocb->iocb; 5645 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5646 icmd->unsli3.rcvsli3.ox_id = 5647 oldcmd->unsli3.rcvsli3.ox_id; 5648 } 5649 5650 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt; 5651 rdf_resp = (struct fc_els_rdf_resp *)pcmd; 5652 memset(rdf_resp, 0, sizeof(*rdf_resp)); 5653 rdf_resp->acc_hdr.la_cmd = ELS_LS_ACC; 5654 5655 /* FC-LS-5 specifies desc_list_len shall be set to 12 */ 5656 rdf_resp->desc_list_len = cpu_to_be32(12); 5657 5658 /* FC-LS-5 specifies LS REQ Information descriptor */ 5659 rdf_resp->lsri.desc_tag = cpu_to_be32(1); 5660 rdf_resp->lsri.desc_len = cpu_to_be32(sizeof(u32)); 5661 rdf_resp->lsri.rqst_w0.cmd = ELS_RDF; 5662 break; 5663 default: 5664 return 1; 5665 } 5666 if (ndlp->nlp_flag & NLP_LOGO_ACC) { 5667 spin_lock_irq(&ndlp->lock); 5668 if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED || 5669 ndlp->nlp_flag & NLP_REG_LOGIN_SEND)) 5670 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 5671 spin_unlock_irq(&ndlp->lock); 5672 elsiocb->cmd_cmpl = lpfc_cmpl_els_logo_acc; 5673 } else { 5674 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 5675 } 
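	/* A LOGO that is being ACC'd takes the LOGO specific completion
	 * handler so the node can be cleaned up once the ACC completes; every
	 * other ACC built here shares the generic lpfc_cmpl_els_rsp handler.
	 */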
5676 5677 phba->fc_stat.elsXmitACC++; 5678 elsiocb->ndlp = lpfc_nlp_get(ndlp); 5679 if (!elsiocb->ndlp) { 5680 lpfc_els_free_iocb(phba, elsiocb); 5681 return 1; 5682 } 5683 5684 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5685 if (rc == IOCB_ERROR) { 5686 lpfc_els_free_iocb(phba, elsiocb); 5687 lpfc_nlp_put(ndlp); 5688 return 1; 5689 } 5690 5691 /* Xmit ELS ACC response tag <ulpIoTag> */ 5692 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5693 "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, " 5694 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " 5695 "RPI: x%x, fc_flag x%x refcnt %d\n", 5696 rc, elsiocb->iotag, elsiocb->sli4_xritag, 5697 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5698 ndlp->nlp_rpi, vport->fc_flag, kref_read(&ndlp->kref)); 5699 return 0; 5700 } 5701 5702 /** 5703 * lpfc_els_rsp_reject - Prepare and issue a rjt response iocb command 5704 * @vport: pointer to a virtual N_Port data structure. 5705 * @rejectError: reject response to issue 5706 * @oldiocb: pointer to the original lpfc command iocb data structure. 5707 * @ndlp: pointer to a node-list data structure. 5708 * @mbox: pointer to the driver internal queue element for mailbox command. 5709 * 5710 * This routine prepares and issue an Reject (RJT) response IOCB 5711 * command. If a @mbox pointer is passed in, it will be put into the 5712 * context_un.mbox field of the IOCB for the completion callback function 5713 * to issue to the HBA later. 5714 * 5715 * Note that the ndlp reference count will be incremented by 1 for holding the 5716 * ndlp and the reference to ndlp will be stored into the ndlp field of 5717 * the IOCB for the completion callback function to the reject response 5718 * ELS IOCB command. 5719 * 5720 * Return code 5721 * 0 - Successfully issued reject response 5722 * 1 - Failed to issue reject response 5723 **/ 5724 int 5725 lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, 5726 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 5727 LPFC_MBOXQ_t *mbox) 5728 { 5729 int rc; 5730 struct lpfc_hba *phba = vport->phba; 5731 IOCB_t *icmd; 5732 IOCB_t *oldcmd; 5733 union lpfc_wqe128 *wqe; 5734 struct lpfc_iocbq *elsiocb; 5735 uint8_t *pcmd; 5736 uint16_t cmdsize; 5737 5738 cmdsize = 2 * sizeof(uint32_t); 5739 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 5740 ndlp->nlp_DID, ELS_CMD_LS_RJT); 5741 if (!elsiocb) 5742 return 1; 5743 5744 if (phba->sli_rev == LPFC_SLI_REV4) { 5745 wqe = &elsiocb->wqe; 5746 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 5747 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */ 5748 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5749 get_job_rcvoxid(phba, oldiocb)); 5750 } else { 5751 icmd = &elsiocb->iocb; 5752 oldcmd = &oldiocb->iocb; 5753 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5754 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 5755 } 5756 5757 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 5758 5759 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; 5760 pcmd += sizeof(uint32_t); 5761 *((uint32_t *) (pcmd)) = rejectError; 5762 5763 if (mbox) 5764 elsiocb->context_un.mbox = mbox; 5765 5766 /* Xmit ELS RJT <err> response tag <ulpIoTag> */ 5767 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5768 "0129 Xmit ELS RJT x%x response tag x%x " 5769 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " 5770 "rpi x%x\n", 5771 rejectError, elsiocb->iotag, 5772 get_job_ulpcontext(phba, elsiocb), ndlp->nlp_DID, 5773 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); 5774 lpfc_debugfs_disc_trc(vport, 
LPFC_DISC_TRC_ELS_RSP, 5775 "Issue LS_RJT: did:x%x flg:x%x err:x%x", 5776 ndlp->nlp_DID, ndlp->nlp_flag, rejectError); 5777 5778 phba->fc_stat.elsXmitLSRJT++; 5779 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 5780 elsiocb->ndlp = lpfc_nlp_get(ndlp); 5781 if (!elsiocb->ndlp) { 5782 lpfc_els_free_iocb(phba, elsiocb); 5783 return 1; 5784 } 5785 5786 /* The NPIV instance is rejecting this unsolicited ELS. Make sure the 5787 * node's assigned RPI gets released provided this node is not already 5788 * registered with the transport. 5789 */ 5790 if (phba->sli_rev == LPFC_SLI_REV4 && 5791 vport->port_type == LPFC_NPIV_PORT && 5792 !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) { 5793 spin_lock_irq(&ndlp->lock); 5794 ndlp->nlp_flag |= NLP_RELEASE_RPI; 5795 spin_unlock_irq(&ndlp->lock); 5796 } 5797 5798 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5799 if (rc == IOCB_ERROR) { 5800 lpfc_els_free_iocb(phba, elsiocb); 5801 lpfc_nlp_put(ndlp); 5802 return 1; 5803 } 5804 5805 return 0; 5806 } 5807 5808 /** 5809 * lpfc_issue_els_edc_rsp - Exchange Diagnostic Capabilities with the fabric. 5810 * @vport: pointer to a host virtual N_Port data structure. 5811 * @cmdiocb: pointer to the original lpfc command iocb data structure. 5812 * @ndlp: NPort to where rsp is directed 5813 * 5814 * This routine issues an EDC ACC RSP to the F-Port Controller to communicate 5815 * this N_Port's support of hardware signals in its Congestion 5816 * Capabilities Descriptor. 5817 * 5818 * Return code 5819 * 0 - Successfully issued edc rsp command 5820 * 1 - Failed to issue edc rsp command 5821 **/ 5822 static int 5823 lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5824 struct lpfc_nodelist *ndlp) 5825 { 5826 struct lpfc_hba *phba = vport->phba; 5827 struct fc_els_edc_resp *edc_rsp; 5828 struct fc_tlv_desc *tlv; 5829 struct lpfc_iocbq *elsiocb; 5830 IOCB_t *icmd, *cmd; 5831 union lpfc_wqe128 *wqe; 5832 u32 cgn_desc_size, lft_desc_size; 5833 u16 cmdsize; 5834 uint8_t *pcmd; 5835 int rc; 5836 5837 cmdsize = sizeof(struct fc_els_edc_resp); 5838 cgn_desc_size = sizeof(struct fc_diag_cg_sig_desc); 5839 lft_desc_size = (lpfc_link_is_lds_capable(phba)) ? 
5840 sizeof(struct fc_diag_lnkflt_desc) : 0; 5841 cmdsize += cgn_desc_size + lft_desc_size; 5842 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, cmdiocb->retry, 5843 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5844 if (!elsiocb) 5845 return 1; 5846 5847 if (phba->sli_rev == LPFC_SLI_REV4) { 5848 wqe = &elsiocb->wqe; 5849 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 5850 get_job_ulpcontext(phba, cmdiocb)); /* Xri / rx_id */ 5851 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5852 get_job_rcvoxid(phba, cmdiocb)); 5853 } else { 5854 icmd = &elsiocb->iocb; 5855 cmd = &cmdiocb->iocb; 5856 icmd->ulpContext = cmd->ulpContext; /* Xri / rx_id */ 5857 icmd->unsli3.rcvsli3.ox_id = cmd->unsli3.rcvsli3.ox_id; 5858 } 5859 5860 pcmd = elsiocb->cmd_dmabuf->virt; 5861 memset(pcmd, 0, cmdsize); 5862 5863 edc_rsp = (struct fc_els_edc_resp *)pcmd; 5864 edc_rsp->acc_hdr.la_cmd = ELS_LS_ACC; 5865 edc_rsp->desc_list_len = cpu_to_be32(sizeof(struct fc_els_lsri_desc) + 5866 cgn_desc_size + lft_desc_size); 5867 edc_rsp->lsri.desc_tag = cpu_to_be32(ELS_DTAG_LS_REQ_INFO); 5868 edc_rsp->lsri.desc_len = cpu_to_be32( 5869 FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_els_lsri_desc)); 5870 edc_rsp->lsri.rqst_w0.cmd = ELS_EDC; 5871 tlv = edc_rsp->desc; 5872 lpfc_format_edc_cgn_desc(phba, tlv); 5873 tlv = fc_tlv_next_desc(tlv); 5874 if (lft_desc_size) 5875 lpfc_format_edc_lft_desc(phba, tlv); 5876 5877 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5878 "Issue EDC ACC: did:x%x flg:x%x refcnt %d", 5879 ndlp->nlp_DID, ndlp->nlp_flag, 5880 kref_read(&ndlp->kref)); 5881 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 5882 5883 phba->fc_stat.elsXmitACC++; 5884 elsiocb->ndlp = lpfc_nlp_get(ndlp); 5885 if (!elsiocb->ndlp) { 5886 lpfc_els_free_iocb(phba, elsiocb); 5887 return 1; 5888 } 5889 5890 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5891 if (rc == IOCB_ERROR) { 5892 lpfc_els_free_iocb(phba, elsiocb); 5893 lpfc_nlp_put(ndlp); 5894 return 1; 5895 } 5896 5897 /* Xmit ELS ACC response tag <ulpIoTag> */ 5898 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5899 "0152 Xmit EDC ACC response Status: x%x, IoTag: x%x, " 5900 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " 5901 "RPI: x%x, fc_flag x%x\n", 5902 rc, elsiocb->iotag, elsiocb->sli4_xritag, 5903 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5904 ndlp->nlp_rpi, vport->fc_flag); 5905 5906 return 0; 5907 } 5908 5909 /** 5910 * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd 5911 * @vport: pointer to a virtual N_Port data structure. 5912 * @oldiocb: pointer to the original lpfc command iocb data structure. 5913 * @ndlp: pointer to a node-list data structure. 5914 * 5915 * This routine prepares and issues an Accept (ACC) response to Address 5916 * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB 5917 * and invokes the lpfc_sli_issue_iocb() routine to send out the command. 5918 * 5919 * Note that the ndlp reference count will be incremented by 1 for holding the 5920 * ndlp and the reference to ndlp will be stored into the ndlp field of 5921 * the IOCB for the completion callback function to the ADISC Accept response 5922 * ELS IOCB command. 
5923 * 5924 * Return code 5925 * 0 - Successfully issued acc adisc response 5926 * 1 - Failed to issue adisc acc response 5927 **/ 5928 int 5929 lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 5930 struct lpfc_nodelist *ndlp) 5931 { 5932 struct lpfc_hba *phba = vport->phba; 5933 ADISC *ap; 5934 IOCB_t *icmd, *oldcmd; 5935 union lpfc_wqe128 *wqe; 5936 struct lpfc_iocbq *elsiocb; 5937 uint8_t *pcmd; 5938 uint16_t cmdsize; 5939 int rc; 5940 u32 ulp_context; 5941 5942 cmdsize = sizeof(uint32_t) + sizeof(ADISC); 5943 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 5944 ndlp->nlp_DID, ELS_CMD_ACC); 5945 if (!elsiocb) 5946 return 1; 5947 5948 if (phba->sli_rev == LPFC_SLI_REV4) { 5949 wqe = &elsiocb->wqe; 5950 /* XRI / rx_id */ 5951 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 5952 get_job_ulpcontext(phba, oldiocb)); 5953 ulp_context = get_job_ulpcontext(phba, elsiocb); 5954 /* oxid */ 5955 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5956 get_job_rcvoxid(phba, oldiocb)); 5957 } else { 5958 icmd = &elsiocb->iocb; 5959 oldcmd = &oldiocb->iocb; 5960 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5961 ulp_context = elsiocb->iocb.ulpContext; 5962 icmd->unsli3.rcvsli3.ox_id = 5963 oldcmd->unsli3.rcvsli3.ox_id; 5964 } 5965 5966 /* Xmit ADISC ACC response tag <ulpIoTag> */ 5967 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5968 "0130 Xmit ADISC ACC response iotag x%x xri: " 5969 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n", 5970 elsiocb->iotag, ulp_context, 5971 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5972 ndlp->nlp_rpi); 5973 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 5974 5975 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5976 pcmd += sizeof(uint32_t); 5977 5978 ap = (ADISC *) (pcmd); 5979 ap->hardAL_PA = phba->fc_pref_ALPA; 5980 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 5981 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 5982 ap->DID = be32_to_cpu(vport->fc_myDID); 5983 5984 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5985 "Issue ACC ADISC: did:x%x flg:x%x refcnt %d", 5986 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 5987 5988 phba->fc_stat.elsXmitACC++; 5989 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 5990 elsiocb->ndlp = lpfc_nlp_get(ndlp); 5991 if (!elsiocb->ndlp) { 5992 lpfc_els_free_iocb(phba, elsiocb); 5993 return 1; 5994 } 5995 5996 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5997 if (rc == IOCB_ERROR) { 5998 lpfc_els_free_iocb(phba, elsiocb); 5999 lpfc_nlp_put(ndlp); 6000 return 1; 6001 } 6002 6003 return 0; 6004 } 6005 6006 /** 6007 * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd 6008 * @vport: pointer to a virtual N_Port data structure. 6009 * @oldiocb: pointer to the original lpfc command iocb data structure. 6010 * @ndlp: pointer to a node-list data structure. 6011 * 6012 * This routine prepares and issues an Accept (ACC) response to Process 6013 * Login (PRLI) ELS command. It simply prepares the payload of the IOCB 6014 * and invokes the lpfc_sli_issue_iocb() routine to send out the command. 6015 * 6016 * Note that the ndlp reference count will be incremented by 1 for holding the 6017 * ndlp and the reference to ndlp will be stored into the ndlp field of 6018 * the IOCB for the completion callback function to the PRLI Accept response 6019 * ELS IOCB command. 
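 *
 * The FC4 type of the ACC (FCP or NVME) mirrors the type carried in word 1
 * of the received PRLI, so the response payload is built as either an FCP
 * PRLI or an NVME PRLI parameter page.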
6020 * 6021 * Return code 6022 * 0 - Successfully issued acc prli response 6023 * 1 - Failed to issue acc prli response 6024 **/ 6025 int 6026 lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 6027 struct lpfc_nodelist *ndlp) 6028 { 6029 struct lpfc_hba *phba = vport->phba; 6030 PRLI *npr; 6031 struct lpfc_nvme_prli *npr_nvme; 6032 lpfc_vpd_t *vpd; 6033 IOCB_t *icmd; 6034 IOCB_t *oldcmd; 6035 union lpfc_wqe128 *wqe; 6036 struct lpfc_iocbq *elsiocb; 6037 uint8_t *pcmd; 6038 uint16_t cmdsize; 6039 uint32_t prli_fc4_req, *req_payload; 6040 struct lpfc_dmabuf *req_buf; 6041 int rc; 6042 u32 elsrspcmd, ulp_context; 6043 6044 /* Need the incoming PRLI payload to determine if the ACC is for an 6045 * FC4 or NVME PRLI type. The PRLI type is at word 1. 6046 */ 6047 req_buf = oldiocb->cmd_dmabuf; 6048 req_payload = (((uint32_t *)req_buf->virt) + 1); 6049 6050 /* PRLI type payload is at byte 3 for FCP or NVME. */ 6051 prli_fc4_req = be32_to_cpu(*req_payload); 6052 prli_fc4_req = (prli_fc4_req >> 24) & 0xff; 6053 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6054 "6127 PRLI_ACC: Req Type x%x, Word1 x%08x\n", 6055 prli_fc4_req, *((uint32_t *)req_payload)); 6056 6057 if (prli_fc4_req == PRLI_FCP_TYPE) { 6058 cmdsize = sizeof(uint32_t) + sizeof(PRLI); 6059 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)); 6060 } else if (prli_fc4_req == PRLI_NVME_TYPE) { 6061 cmdsize = sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli); 6062 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_NVMEPRLI & ~ELS_RSP_MASK)); 6063 } else { 6064 return 1; 6065 } 6066 6067 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 6068 ndlp->nlp_DID, elsrspcmd); 6069 if (!elsiocb) 6070 return 1; 6071 6072 if (phba->sli_rev == LPFC_SLI_REV4) { 6073 wqe = &elsiocb->wqe; 6074 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 6075 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */ 6076 ulp_context = get_job_ulpcontext(phba, elsiocb); 6077 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 6078 get_job_rcvoxid(phba, oldiocb)); 6079 } else { 6080 icmd = &elsiocb->iocb; 6081 oldcmd = &oldiocb->iocb; 6082 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 6083 ulp_context = elsiocb->iocb.ulpContext; 6084 icmd->unsli3.rcvsli3.ox_id = 6085 oldcmd->unsli3.rcvsli3.ox_id; 6086 } 6087 6088 /* Xmit PRLI ACC response tag <ulpIoTag> */ 6089 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6090 "0131 Xmit PRLI ACC response tag x%x xri x%x, " 6091 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 6092 elsiocb->iotag, ulp_context, 6093 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 6094 ndlp->nlp_rpi); 6095 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 6096 memset(pcmd, 0, cmdsize); 6097 6098 *((uint32_t *)(pcmd)) = elsrspcmd; 6099 pcmd += sizeof(uint32_t); 6100 6101 /* For PRLI, remainder of payload is PRLI parameter page */ 6102 vpd = &phba->vpd; 6103 6104 if (prli_fc4_req == PRLI_FCP_TYPE) { 6105 /* 6106 * If the remote port is a target and our firmware version 6107 * is 3.20 or later, set the following bits for FC-TAPE 6108 * support. 
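		 * (Retry and Task Retry Identification, together with
		 * confirmed completion allowed, are the bits that advertise
		 * the sequence level error recovery used by FC-TAPE devices.)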
6109 */ 6110 npr = (PRLI *) pcmd; 6111 if ((ndlp->nlp_type & NLP_FCP_TARGET) && 6112 (vpd->rev.feaLevelHigh >= 0x02)) { 6113 npr->ConfmComplAllowed = 1; 6114 npr->Retry = 1; 6115 npr->TaskRetryIdReq = 1; 6116 } 6117 npr->acceptRspCode = PRLI_REQ_EXECUTED; 6118 npr->estabImagePair = 1; 6119 npr->readXferRdyDis = 1; 6120 npr->ConfmComplAllowed = 1; 6121 npr->prliType = PRLI_FCP_TYPE; 6122 npr->initiatorFunc = 1; 6123 } else if (prli_fc4_req == PRLI_NVME_TYPE) { 6124 /* Respond with an NVME PRLI Type */ 6125 npr_nvme = (struct lpfc_nvme_prli *) pcmd; 6126 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); 6127 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ 6128 bf_set(prli_acc_rsp_code, npr_nvme, PRLI_REQ_EXECUTED); 6129 if (phba->nvmet_support) { 6130 bf_set(prli_tgt, npr_nvme, 1); 6131 bf_set(prli_disc, npr_nvme, 1); 6132 if (phba->cfg_nvme_enable_fb) { 6133 bf_set(prli_fba, npr_nvme, 1); 6134 6135 /* TBD. Target mode needs to post buffers 6136 * that support the configured first burst 6137 * byte size. 6138 */ 6139 bf_set(prli_fb_sz, npr_nvme, 6140 phba->cfg_nvmet_fb_size); 6141 } 6142 } else { 6143 bf_set(prli_init, npr_nvme, 1); 6144 } 6145 6146 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 6147 "6015 NVME issue PRLI ACC word1 x%08x " 6148 "word4 x%08x word5 x%08x flag x%x, " 6149 "fcp_info x%x nlp_type x%x\n", 6150 npr_nvme->word1, npr_nvme->word4, 6151 npr_nvme->word5, ndlp->nlp_flag, 6152 ndlp->nlp_fcp_info, ndlp->nlp_type); 6153 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); 6154 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); 6155 npr_nvme->word5 = cpu_to_be32(npr_nvme->word5); 6156 } else 6157 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6158 "6128 Unknown FC_TYPE x%x x%x ndlp x%06x\n", 6159 prli_fc4_req, ndlp->nlp_fc4_type, 6160 ndlp->nlp_DID); 6161 6162 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6163 "Issue ACC PRLI: did:x%x flg:x%x", 6164 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 6165 6166 phba->fc_stat.elsXmitACC++; 6167 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 6168 elsiocb->ndlp = lpfc_nlp_get(ndlp); 6169 if (!elsiocb->ndlp) { 6170 lpfc_els_free_iocb(phba, elsiocb); 6171 return 1; 6172 } 6173 6174 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6175 if (rc == IOCB_ERROR) { 6176 lpfc_els_free_iocb(phba, elsiocb); 6177 lpfc_nlp_put(ndlp); 6178 return 1; 6179 } 6180 6181 return 0; 6182 } 6183 6184 /** 6185 * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command 6186 * @vport: pointer to a virtual N_Port data structure. 6187 * @format: rnid command format. 6188 * @oldiocb: pointer to the original lpfc command iocb data structure. 6189 * @ndlp: pointer to a node-list data structure. 6190 * 6191 * This routine issues a Request Node Identification Data (RNID) Accept 6192 * (ACC) response. It constructs the RNID ACC response command according to 6193 * the proper @format and then calls the lpfc_sli_issue_iocb() routine to 6194 * issue the response. 6195 * 6196 * Note that the ndlp reference count will be incremented by 1 for holding the 6197 * ndlp and the reference to ndlp will be stored into the ndlp field of 6198 * the IOCB for the completion callback function. 
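 *
 * Only the RNID_TOPOLOGY_DISC format carries specific identification data
 * in the ACC; format 0 returns just the common node-name/port-name block,
 * and any other format is answered with zero-length common and specific
 * data.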
6199 * 6200 * Return code 6201 * 0 - Successfully issued acc rnid response 6202 * 1 - Failed to issue acc rnid response 6203 **/ 6204 static int 6205 lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, 6206 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 6207 { 6208 struct lpfc_hba *phba = vport->phba; 6209 RNID *rn; 6210 IOCB_t *icmd, *oldcmd; 6211 union lpfc_wqe128 *wqe; 6212 struct lpfc_iocbq *elsiocb; 6213 uint8_t *pcmd; 6214 uint16_t cmdsize; 6215 int rc; 6216 u32 ulp_context; 6217 6218 cmdsize = sizeof(uint32_t) + sizeof(uint32_t) 6219 + (2 * sizeof(struct lpfc_name)); 6220 if (format) 6221 cmdsize += sizeof(RNID_TOP_DISC); 6222 6223 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 6224 ndlp->nlp_DID, ELS_CMD_ACC); 6225 if (!elsiocb) 6226 return 1; 6227 6228 if (phba->sli_rev == LPFC_SLI_REV4) { 6229 wqe = &elsiocb->wqe; 6230 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 6231 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */ 6232 ulp_context = get_job_ulpcontext(phba, elsiocb); 6233 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 6234 get_job_rcvoxid(phba, oldiocb)); 6235 } else { 6236 icmd = &elsiocb->iocb; 6237 oldcmd = &oldiocb->iocb; 6238 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 6239 ulp_context = elsiocb->iocb.ulpContext; 6240 icmd->unsli3.rcvsli3.ox_id = 6241 oldcmd->unsli3.rcvsli3.ox_id; 6242 } 6243 6244 /* Xmit RNID ACC response tag <ulpIoTag> */ 6245 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6246 "0132 Xmit RNID ACC response tag x%x xri x%x\n", 6247 elsiocb->iotag, ulp_context); 6248 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 6249 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 6250 pcmd += sizeof(uint32_t); 6251 6252 memset(pcmd, 0, sizeof(RNID)); 6253 rn = (RNID *) (pcmd); 6254 rn->Format = format; 6255 rn->CommonLen = (2 * sizeof(struct lpfc_name)); 6256 memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 6257 memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 6258 switch (format) { 6259 case 0: 6260 rn->SpecificLen = 0; 6261 break; 6262 case RNID_TOPOLOGY_DISC: 6263 rn->SpecificLen = sizeof(RNID_TOP_DISC); 6264 memcpy(&rn->un.topologyDisc.portName, 6265 &vport->fc_portname, sizeof(struct lpfc_name)); 6266 rn->un.topologyDisc.unitType = RNID_HBA; 6267 rn->un.topologyDisc.physPort = 0; 6268 rn->un.topologyDisc.attachedNodes = 0; 6269 break; 6270 default: 6271 rn->CommonLen = 0; 6272 rn->SpecificLen = 0; 6273 break; 6274 } 6275 6276 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6277 "Issue ACC RNID: did:x%x flg:x%x refcnt %d", 6278 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 6279 6280 phba->fc_stat.elsXmitACC++; 6281 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 6282 elsiocb->ndlp = lpfc_nlp_get(ndlp); 6283 if (!elsiocb->ndlp) { 6284 lpfc_els_free_iocb(phba, elsiocb); 6285 return 1; 6286 } 6287 6288 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6289 if (rc == IOCB_ERROR) { 6290 lpfc_els_free_iocb(phba, elsiocb); 6291 lpfc_nlp_put(ndlp); 6292 return 1; 6293 } 6294 6295 return 0; 6296 } 6297 6298 /** 6299 * lpfc_els_clear_rrq - Clear the rq that this rrq describes. 6300 * @vport: pointer to a virtual N_Port data structure. 6301 * @iocb: pointer to the lpfc command iocb data structure. 6302 * @ndlp: pointer to a node-list data structure. 
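 *
 * This routine parses the RRQ payload of the received command, selects the
 * exchange that belongs to this port (the OX_ID when the S_ID carried in
 * the RRQ matches our own DID, otherwise the RX_ID), and clears the
 * matching active RRQ resource held for @ndlp.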
6303 * 6304 * Return 6305 **/ 6306 static void 6307 lpfc_els_clear_rrq(struct lpfc_vport *vport, 6308 struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp) 6309 { 6310 struct lpfc_hba *phba = vport->phba; 6311 uint8_t *pcmd; 6312 struct RRQ *rrq; 6313 uint16_t rxid; 6314 uint16_t xri; 6315 struct lpfc_node_rrq *prrq; 6316 6317 6318 pcmd = (uint8_t *)iocb->cmd_dmabuf->virt; 6319 pcmd += sizeof(uint32_t); 6320 rrq = (struct RRQ *)pcmd; 6321 rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg); 6322 rxid = bf_get(rrq_rxid, rrq); 6323 6324 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6325 "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x" 6326 " x%x x%x\n", 6327 be32_to_cpu(bf_get(rrq_did, rrq)), 6328 bf_get(rrq_oxid, rrq), 6329 rxid, 6330 get_wqe_reqtag(iocb), 6331 get_job_ulpcontext(phba, iocb)); 6332 6333 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6334 "Clear RRQ: did:x%x flg:x%x exchg:x%.08x", 6335 ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg); 6336 if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq))) 6337 xri = bf_get(rrq_oxid, rrq); 6338 else 6339 xri = rxid; 6340 prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID); 6341 if (prrq) 6342 lpfc_clr_rrq_active(phba, xri, prrq); 6343 return; 6344 } 6345 6346 /** 6347 * lpfc_els_rsp_echo_acc - Issue echo acc response 6348 * @vport: pointer to a virtual N_Port data structure. 6349 * @data: pointer to echo data to return in the accept. 6350 * @oldiocb: pointer to the original lpfc command iocb data structure. 6351 * @ndlp: pointer to a node-list data structure. 6352 * 6353 * Return code 6354 * 0 - Successfully issued acc echo response 6355 * 1 - Failed to issue acc echo response 6356 **/ 6357 static int 6358 lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data, 6359 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 6360 { 6361 struct lpfc_hba *phba = vport->phba; 6362 IOCB_t *icmd, *oldcmd; 6363 union lpfc_wqe128 *wqe; 6364 struct lpfc_iocbq *elsiocb; 6365 uint8_t *pcmd; 6366 uint16_t cmdsize; 6367 int rc; 6368 u32 ulp_context; 6369 6370 if (phba->sli_rev == LPFC_SLI_REV4) 6371 cmdsize = oldiocb->wcqe_cmpl.total_data_placed; 6372 else 6373 cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len; 6374 6375 /* The accumulated length can exceed the BPL_SIZE. 
For 6376 * now, use this as the limit 6377 */ 6378 if (cmdsize > LPFC_BPL_SIZE) 6379 cmdsize = LPFC_BPL_SIZE; 6380 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 6381 ndlp->nlp_DID, ELS_CMD_ACC); 6382 if (!elsiocb) 6383 return 1; 6384 6385 if (phba->sli_rev == LPFC_SLI_REV4) { 6386 wqe = &elsiocb->wqe; 6387 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 6388 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */ 6389 ulp_context = get_job_ulpcontext(phba, elsiocb); 6390 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 6391 get_job_rcvoxid(phba, oldiocb)); 6392 } else { 6393 icmd = &elsiocb->iocb; 6394 oldcmd = &oldiocb->iocb; 6395 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 6396 ulp_context = elsiocb->iocb.ulpContext; 6397 icmd->unsli3.rcvsli3.ox_id = 6398 oldcmd->unsli3.rcvsli3.ox_id; 6399 } 6400 6401 /* Xmit ECHO ACC response tag <ulpIoTag> */ 6402 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6403 "2876 Xmit ECHO ACC response tag x%x xri x%x\n", 6404 elsiocb->iotag, ulp_context); 6405 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 6406 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 6407 pcmd += sizeof(uint32_t); 6408 memcpy(pcmd, data, cmdsize - sizeof(uint32_t)); 6409 6410 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6411 "Issue ACC ECHO: did:x%x flg:x%x refcnt %d", 6412 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 6413 6414 phba->fc_stat.elsXmitACC++; 6415 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 6416 elsiocb->ndlp = lpfc_nlp_get(ndlp); 6417 if (!elsiocb->ndlp) { 6418 lpfc_els_free_iocb(phba, elsiocb); 6419 return 1; 6420 } 6421 6422 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6423 if (rc == IOCB_ERROR) { 6424 lpfc_els_free_iocb(phba, elsiocb); 6425 lpfc_nlp_put(ndlp); 6426 return 1; 6427 } 6428 6429 return 0; 6430 } 6431 6432 /** 6433 * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport 6434 * @vport: pointer to a host virtual N_Port data structure. 6435 * 6436 * This routine issues Address Discover (ADISC) ELS commands to those 6437 * N_Ports which are in node port recovery state and ADISC has not been issued 6438 * for the @vport. Each time an ELS ADISC IOCB is issued by invoking the 6439 * lpfc_issue_els_adisc() routine, the per @vport number of discover count 6440 * (num_disc_nodes) shall be incremented. If the num_disc_nodes reaches a 6441 * pre-configured threshold (cfg_discovery_threads), the @vport fc_flag will 6442 * be marked with FC_NLP_MORE bit and the process of issuing remaining ADISC 6443 * IOCBs quit for later pick up. On the other hand, after walking through 6444 * all the ndlps with the @vport and there is none ADISC IOCB issued, the 6445 * @vport fc_flag shall be cleared with FC_NLP_MORE bit indicating there is 6446 * no more ADISC need to be sent. 6447 * 6448 * Return code 6449 * The number of N_Ports with adisc issued. 
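 *
 * For example, with cfg_discovery_threads set to, say, 32, once
 * num_disc_nodes reaches that value the walk stops with FC_NLP_MORE set,
 * and the remaining NPR nodes are handled on a subsequent invocation.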
 **/
int
lpfc_els_disc_adisc(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp, *next_ndlp;
	int sentadisc = 0;

	/* go thru NPR nodes and issue any remaining ELS ADISCs */
	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {

		if (ndlp->nlp_state != NLP_STE_NPR_NODE ||
		    !(ndlp->nlp_flag & NLP_NPR_ADISC))
			continue;

		spin_lock_irq(&ndlp->lock);
		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
		spin_unlock_irq(&ndlp->lock);

		if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
			/* This node was marked for ADISC but was not picked
			 * for discovery. This is possible if the node was
			 * missing in the GID_FT response.
			 *
			 * At the time the node was marked for ADISC, the
			 * unregister from the backend was skipped.
			 */
			lpfc_nlp_unreg_node(vport, ndlp);
			lpfc_unreg_rpi(vport, ndlp);
			continue;
		}

		ndlp->nlp_prev_state = ndlp->nlp_state;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
		lpfc_issue_els_adisc(vport, ndlp, 0);
		sentadisc++;
		vport->num_disc_nodes++;
		if (vport->num_disc_nodes >=
		    vport->cfg_discovery_threads) {
			spin_lock_irq(shost->host_lock);
			vport->fc_flag |= FC_NLP_MORE;
			spin_unlock_irq(shost->host_lock);
			break;
		}

	}
	if (sentadisc == 0) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_NLP_MORE;
		spin_unlock_irq(shost->host_lock);
	}
	return sentadisc;
}

/**
 * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues Port Login (PLOGI) ELS commands to the N_Ports of a
 * @vport which are in node port recovery state. Each time an ELS PLOGI
 * IOCB is issued by invoking the lpfc_issue_els_plogi() routine, the
 * per-@vport discovery count (num_disc_nodes) is incremented. If
 * num_disc_nodes reaches the pre-configured threshold
 * (cfg_discovery_threads), the FC_NLP_MORE bit is set in the @vport
 * fc_flag and issuing of the remaining PLOGI IOCBs stops, to be picked up
 * later. On the other hand, if the walk through all the ndlps of the
 * @vport issues no PLOGI IOCB at all, the FC_NLP_MORE bit is cleared from
 * the @vport fc_flag, indicating there are no more PLOGIs to be sent.
 *
 * Return code
 *	The number of N_Ports with plogi issued.
6522 **/ 6523 int 6524 lpfc_els_disc_plogi(struct lpfc_vport *vport) 6525 { 6526 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6527 struct lpfc_nodelist *ndlp, *next_ndlp; 6528 int sentplogi = 0; 6529 6530 /* go thru NPR nodes and issue any remaining ELS PLOGIs */ 6531 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 6532 if (ndlp->nlp_state == NLP_STE_NPR_NODE && 6533 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && 6534 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 && 6535 (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) { 6536 ndlp->nlp_prev_state = ndlp->nlp_state; 6537 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 6538 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 6539 sentplogi++; 6540 vport->num_disc_nodes++; 6541 if (vport->num_disc_nodes >= 6542 vport->cfg_discovery_threads) { 6543 spin_lock_irq(shost->host_lock); 6544 vport->fc_flag |= FC_NLP_MORE; 6545 spin_unlock_irq(shost->host_lock); 6546 break; 6547 } 6548 } 6549 } 6550 6551 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6552 "6452 Discover PLOGI %d flag x%x\n", 6553 sentplogi, vport->fc_flag); 6554 6555 if (sentplogi) { 6556 lpfc_set_disctmo(vport); 6557 } 6558 else { 6559 spin_lock_irq(shost->host_lock); 6560 vport->fc_flag &= ~FC_NLP_MORE; 6561 spin_unlock_irq(shost->host_lock); 6562 } 6563 return sentplogi; 6564 } 6565 6566 static uint32_t 6567 lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc, 6568 uint32_t word0) 6569 { 6570 6571 desc->tag = cpu_to_be32(RDP_LINK_SERVICE_DESC_TAG); 6572 desc->payload.els_req = word0; 6573 desc->length = cpu_to_be32(sizeof(desc->payload)); 6574 6575 return sizeof(struct fc_rdp_link_service_desc); 6576 } 6577 6578 static uint32_t 6579 lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc, 6580 uint8_t *page_a0, uint8_t *page_a2) 6581 { 6582 uint16_t wavelength; 6583 uint16_t temperature; 6584 uint16_t rx_power; 6585 uint16_t tx_bias; 6586 uint16_t tx_power; 6587 uint16_t vcc; 6588 uint16_t flag = 0; 6589 struct sff_trasnceiver_codes_byte4 *trasn_code_byte4; 6590 struct sff_trasnceiver_codes_byte5 *trasn_code_byte5; 6591 6592 desc->tag = cpu_to_be32(RDP_SFP_DESC_TAG); 6593 6594 trasn_code_byte4 = (struct sff_trasnceiver_codes_byte4 *) 6595 &page_a0[SSF_TRANSCEIVER_CODE_B4]; 6596 trasn_code_byte5 = (struct sff_trasnceiver_codes_byte5 *) 6597 &page_a0[SSF_TRANSCEIVER_CODE_B5]; 6598 6599 if ((trasn_code_byte4->fc_sw_laser) || 6600 (trasn_code_byte5->fc_sw_laser_sl) || 6601 (trasn_code_byte5->fc_sw_laser_sn)) { /* check if its short WL */ 6602 flag |= (SFP_FLAG_PT_SWLASER << SFP_FLAG_PT_SHIFT); 6603 } else if (trasn_code_byte4->fc_lw_laser) { 6604 wavelength = (page_a0[SSF_WAVELENGTH_B1] << 8) | 6605 page_a0[SSF_WAVELENGTH_B0]; 6606 if (wavelength == SFP_WAVELENGTH_LC1310) 6607 flag |= SFP_FLAG_PT_LWLASER_LC1310 << SFP_FLAG_PT_SHIFT; 6608 if (wavelength == SFP_WAVELENGTH_LL1550) 6609 flag |= SFP_FLAG_PT_LWLASER_LL1550 << SFP_FLAG_PT_SHIFT; 6610 } 6611 /* check if its SFP+ */ 6612 flag |= ((page_a0[SSF_IDENTIFIER] == SFF_PG0_IDENT_SFP) ? 6613 SFP_FLAG_CT_SFP_PLUS : SFP_FLAG_CT_UNKNOWN) 6614 << SFP_FLAG_CT_SHIFT; 6615 6616 /* check if its OPTICAL */ 6617 flag |= ((page_a0[SSF_CONNECTOR] == SFF_PG0_CONNECTOR_LC) ? 
6618 SFP_FLAG_IS_OPTICAL_PORT : 0) 6619 << SFP_FLAG_IS_OPTICAL_SHIFT; 6620 6621 temperature = (page_a2[SFF_TEMPERATURE_B1] << 8 | 6622 page_a2[SFF_TEMPERATURE_B0]); 6623 vcc = (page_a2[SFF_VCC_B1] << 8 | 6624 page_a2[SFF_VCC_B0]); 6625 tx_power = (page_a2[SFF_TXPOWER_B1] << 8 | 6626 page_a2[SFF_TXPOWER_B0]); 6627 tx_bias = (page_a2[SFF_TX_BIAS_CURRENT_B1] << 8 | 6628 page_a2[SFF_TX_BIAS_CURRENT_B0]); 6629 rx_power = (page_a2[SFF_RXPOWER_B1] << 8 | 6630 page_a2[SFF_RXPOWER_B0]); 6631 desc->sfp_info.temperature = cpu_to_be16(temperature); 6632 desc->sfp_info.rx_power = cpu_to_be16(rx_power); 6633 desc->sfp_info.tx_bias = cpu_to_be16(tx_bias); 6634 desc->sfp_info.tx_power = cpu_to_be16(tx_power); 6635 desc->sfp_info.vcc = cpu_to_be16(vcc); 6636 6637 desc->sfp_info.flags = cpu_to_be16(flag); 6638 desc->length = cpu_to_be32(sizeof(desc->sfp_info)); 6639 6640 return sizeof(struct fc_rdp_sfp_desc); 6641 } 6642 6643 static uint32_t 6644 lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc, 6645 READ_LNK_VAR *stat) 6646 { 6647 uint32_t type; 6648 6649 desc->tag = cpu_to_be32(RDP_LINK_ERROR_STATUS_DESC_TAG); 6650 6651 type = VN_PT_PHY_PF_PORT << VN_PT_PHY_SHIFT; 6652 6653 desc->info.port_type = cpu_to_be32(type); 6654 6655 desc->info.link_status.link_failure_cnt = 6656 cpu_to_be32(stat->linkFailureCnt); 6657 desc->info.link_status.loss_of_synch_cnt = 6658 cpu_to_be32(stat->lossSyncCnt); 6659 desc->info.link_status.loss_of_signal_cnt = 6660 cpu_to_be32(stat->lossSignalCnt); 6661 desc->info.link_status.primitive_seq_proto_err = 6662 cpu_to_be32(stat->primSeqErrCnt); 6663 desc->info.link_status.invalid_trans_word = 6664 cpu_to_be32(stat->invalidXmitWord); 6665 desc->info.link_status.invalid_crc_cnt = cpu_to_be32(stat->crcCnt); 6666 6667 desc->length = cpu_to_be32(sizeof(desc->info)); 6668 6669 return sizeof(struct fc_rdp_link_error_status_desc); 6670 } 6671 6672 static uint32_t 6673 lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat, 6674 struct lpfc_vport *vport) 6675 { 6676 uint32_t bbCredit; 6677 6678 desc->tag = cpu_to_be32(RDP_BBC_DESC_TAG); 6679 6680 bbCredit = vport->fc_sparam.cmn.bbCreditLsb | 6681 (vport->fc_sparam.cmn.bbCreditMsb << 8); 6682 desc->bbc_info.port_bbc = cpu_to_be32(bbCredit); 6683 if (vport->phba->fc_topology != LPFC_TOPOLOGY_LOOP) { 6684 bbCredit = vport->phba->fc_fabparam.cmn.bbCreditLsb | 6685 (vport->phba->fc_fabparam.cmn.bbCreditMsb << 8); 6686 desc->bbc_info.attached_port_bbc = cpu_to_be32(bbCredit); 6687 } else { 6688 desc->bbc_info.attached_port_bbc = 0; 6689 } 6690 6691 desc->bbc_info.rtt = 0; 6692 desc->length = cpu_to_be32(sizeof(desc->bbc_info)); 6693 6694 return sizeof(struct fc_rdp_bbc_desc); 6695 } 6696 6697 static uint32_t 6698 lpfc_rdp_res_oed_temp_desc(struct lpfc_hba *phba, 6699 struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2) 6700 { 6701 uint32_t flags = 0; 6702 6703 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6704 6705 desc->oed_info.hi_alarm = page_a2[SSF_TEMP_HIGH_ALARM]; 6706 desc->oed_info.lo_alarm = page_a2[SSF_TEMP_LOW_ALARM]; 6707 desc->oed_info.hi_warning = page_a2[SSF_TEMP_HIGH_WARNING]; 6708 desc->oed_info.lo_warning = page_a2[SSF_TEMP_LOW_WARNING]; 6709 6710 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TEMPERATURE) 6711 flags |= RDP_OET_HIGH_ALARM; 6712 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TEMPERATURE) 6713 flags |= RDP_OET_LOW_ALARM; 6714 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TEMPERATURE) 6715 flags |= RDP_OET_HIGH_WARNING; 6716 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TEMPERATURE) 6717 
flags |= RDP_OET_LOW_WARNING; 6718 6719 flags |= ((0xf & RDP_OED_TEMPERATURE) << RDP_OED_TYPE_SHIFT); 6720 desc->oed_info.function_flags = cpu_to_be32(flags); 6721 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6722 return sizeof(struct fc_rdp_oed_sfp_desc); 6723 } 6724 6725 static uint32_t 6726 lpfc_rdp_res_oed_voltage_desc(struct lpfc_hba *phba, 6727 struct fc_rdp_oed_sfp_desc *desc, 6728 uint8_t *page_a2) 6729 { 6730 uint32_t flags = 0; 6731 6732 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6733 6734 desc->oed_info.hi_alarm = page_a2[SSF_VOLTAGE_HIGH_ALARM]; 6735 desc->oed_info.lo_alarm = page_a2[SSF_VOLTAGE_LOW_ALARM]; 6736 desc->oed_info.hi_warning = page_a2[SSF_VOLTAGE_HIGH_WARNING]; 6737 desc->oed_info.lo_warning = page_a2[SSF_VOLTAGE_LOW_WARNING]; 6738 6739 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_VOLTAGE) 6740 flags |= RDP_OET_HIGH_ALARM; 6741 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_VOLTAGE) 6742 flags |= RDP_OET_LOW_ALARM; 6743 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_VOLTAGE) 6744 flags |= RDP_OET_HIGH_WARNING; 6745 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_VOLTAGE) 6746 flags |= RDP_OET_LOW_WARNING; 6747 6748 flags |= ((0xf & RDP_OED_VOLTAGE) << RDP_OED_TYPE_SHIFT); 6749 desc->oed_info.function_flags = cpu_to_be32(flags); 6750 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6751 return sizeof(struct fc_rdp_oed_sfp_desc); 6752 } 6753 6754 static uint32_t 6755 lpfc_rdp_res_oed_txbias_desc(struct lpfc_hba *phba, 6756 struct fc_rdp_oed_sfp_desc *desc, 6757 uint8_t *page_a2) 6758 { 6759 uint32_t flags = 0; 6760 6761 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6762 6763 desc->oed_info.hi_alarm = page_a2[SSF_BIAS_HIGH_ALARM]; 6764 desc->oed_info.lo_alarm = page_a2[SSF_BIAS_LOW_ALARM]; 6765 desc->oed_info.hi_warning = page_a2[SSF_BIAS_HIGH_WARNING]; 6766 desc->oed_info.lo_warning = page_a2[SSF_BIAS_LOW_WARNING]; 6767 6768 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXBIAS) 6769 flags |= RDP_OET_HIGH_ALARM; 6770 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXBIAS) 6771 flags |= RDP_OET_LOW_ALARM; 6772 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXBIAS) 6773 flags |= RDP_OET_HIGH_WARNING; 6774 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXBIAS) 6775 flags |= RDP_OET_LOW_WARNING; 6776 6777 flags |= ((0xf & RDP_OED_TXBIAS) << RDP_OED_TYPE_SHIFT); 6778 desc->oed_info.function_flags = cpu_to_be32(flags); 6779 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6780 return sizeof(struct fc_rdp_oed_sfp_desc); 6781 } 6782 6783 static uint32_t 6784 lpfc_rdp_res_oed_txpower_desc(struct lpfc_hba *phba, 6785 struct fc_rdp_oed_sfp_desc *desc, 6786 uint8_t *page_a2) 6787 { 6788 uint32_t flags = 0; 6789 6790 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6791 6792 desc->oed_info.hi_alarm = page_a2[SSF_TXPOWER_HIGH_ALARM]; 6793 desc->oed_info.lo_alarm = page_a2[SSF_TXPOWER_LOW_ALARM]; 6794 desc->oed_info.hi_warning = page_a2[SSF_TXPOWER_HIGH_WARNING]; 6795 desc->oed_info.lo_warning = page_a2[SSF_TXPOWER_LOW_WARNING]; 6796 6797 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXPOWER) 6798 flags |= RDP_OET_HIGH_ALARM; 6799 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXPOWER) 6800 flags |= RDP_OET_LOW_ALARM; 6801 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXPOWER) 6802 flags |= RDP_OET_HIGH_WARNING; 6803 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXPOWER) 6804 flags |= RDP_OET_LOW_WARNING; 6805 6806 flags |= ((0xf & RDP_OED_TXPOWER) << RDP_OED_TYPE_SHIFT); 6807 desc->oed_info.function_flags = cpu_to_be32(flags); 6808 desc->length = 
cpu_to_be32(sizeof(desc->oed_info)); 6809 return sizeof(struct fc_rdp_oed_sfp_desc); 6810 } 6811 6812 6813 static uint32_t 6814 lpfc_rdp_res_oed_rxpower_desc(struct lpfc_hba *phba, 6815 struct fc_rdp_oed_sfp_desc *desc, 6816 uint8_t *page_a2) 6817 { 6818 uint32_t flags = 0; 6819 6820 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6821 6822 desc->oed_info.hi_alarm = page_a2[SSF_RXPOWER_HIGH_ALARM]; 6823 desc->oed_info.lo_alarm = page_a2[SSF_RXPOWER_LOW_ALARM]; 6824 desc->oed_info.hi_warning = page_a2[SSF_RXPOWER_HIGH_WARNING]; 6825 desc->oed_info.lo_warning = page_a2[SSF_RXPOWER_LOW_WARNING]; 6826 6827 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_RXPOWER) 6828 flags |= RDP_OET_HIGH_ALARM; 6829 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_RXPOWER) 6830 flags |= RDP_OET_LOW_ALARM; 6831 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_RXPOWER) 6832 flags |= RDP_OET_HIGH_WARNING; 6833 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_RXPOWER) 6834 flags |= RDP_OET_LOW_WARNING; 6835 6836 flags |= ((0xf & RDP_OED_RXPOWER) << RDP_OED_TYPE_SHIFT); 6837 desc->oed_info.function_flags = cpu_to_be32(flags); 6838 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6839 return sizeof(struct fc_rdp_oed_sfp_desc); 6840 } 6841 6842 static uint32_t 6843 lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc, 6844 uint8_t *page_a0, struct lpfc_vport *vport) 6845 { 6846 desc->tag = cpu_to_be32(RDP_OPD_DESC_TAG); 6847 memcpy(desc->opd_info.vendor_name, &page_a0[SSF_VENDOR_NAME], 16); 6848 memcpy(desc->opd_info.model_number, &page_a0[SSF_VENDOR_PN], 16); 6849 memcpy(desc->opd_info.serial_number, &page_a0[SSF_VENDOR_SN], 16); 6850 memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 4); 6851 memcpy(desc->opd_info.date, &page_a0[SSF_DATE_CODE], 8); 6852 desc->length = cpu_to_be32(sizeof(desc->opd_info)); 6853 return sizeof(struct fc_rdp_opd_sfp_desc); 6854 } 6855 6856 static uint32_t 6857 lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat) 6858 { 6859 if (bf_get(lpfc_read_link_stat_gec2, stat) == 0) 6860 return 0; 6861 desc->tag = cpu_to_be32(RDP_FEC_DESC_TAG); 6862 6863 desc->info.CorrectedBlocks = 6864 cpu_to_be32(stat->fecCorrBlkCount); 6865 desc->info.UncorrectableBlocks = 6866 cpu_to_be32(stat->fecUncorrBlkCount); 6867 6868 desc->length = cpu_to_be32(sizeof(desc->info)); 6869 6870 return sizeof(struct fc_fec_rdp_desc); 6871 } 6872 6873 static uint32_t 6874 lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba) 6875 { 6876 uint16_t rdp_cap = 0; 6877 uint16_t rdp_speed; 6878 6879 desc->tag = cpu_to_be32(RDP_PORT_SPEED_DESC_TAG); 6880 6881 switch (phba->fc_linkspeed) { 6882 case LPFC_LINK_SPEED_1GHZ: 6883 rdp_speed = RDP_PS_1GB; 6884 break; 6885 case LPFC_LINK_SPEED_2GHZ: 6886 rdp_speed = RDP_PS_2GB; 6887 break; 6888 case LPFC_LINK_SPEED_4GHZ: 6889 rdp_speed = RDP_PS_4GB; 6890 break; 6891 case LPFC_LINK_SPEED_8GHZ: 6892 rdp_speed = RDP_PS_8GB; 6893 break; 6894 case LPFC_LINK_SPEED_10GHZ: 6895 rdp_speed = RDP_PS_10GB; 6896 break; 6897 case LPFC_LINK_SPEED_16GHZ: 6898 rdp_speed = RDP_PS_16GB; 6899 break; 6900 case LPFC_LINK_SPEED_32GHZ: 6901 rdp_speed = RDP_PS_32GB; 6902 break; 6903 case LPFC_LINK_SPEED_64GHZ: 6904 rdp_speed = RDP_PS_64GB; 6905 break; 6906 case LPFC_LINK_SPEED_128GHZ: 6907 rdp_speed = RDP_PS_128GB; 6908 break; 6909 case LPFC_LINK_SPEED_256GHZ: 6910 rdp_speed = RDP_PS_256GB; 6911 break; 6912 default: 6913 rdp_speed = RDP_PS_UNKNOWN; 6914 break; 6915 } 6916 6917 desc->info.port_speed.speed = cpu_to_be16(rdp_speed); 6918 6919 if (phba->lmt & LMT_256Gb) 6920 
rdp_cap |= RDP_PS_256GB; 6921 if (phba->lmt & LMT_128Gb) 6922 rdp_cap |= RDP_PS_128GB; 6923 if (phba->lmt & LMT_64Gb) 6924 rdp_cap |= RDP_PS_64GB; 6925 if (phba->lmt & LMT_32Gb) 6926 rdp_cap |= RDP_PS_32GB; 6927 if (phba->lmt & LMT_16Gb) 6928 rdp_cap |= RDP_PS_16GB; 6929 if (phba->lmt & LMT_10Gb) 6930 rdp_cap |= RDP_PS_10GB; 6931 if (phba->lmt & LMT_8Gb) 6932 rdp_cap |= RDP_PS_8GB; 6933 if (phba->lmt & LMT_4Gb) 6934 rdp_cap |= RDP_PS_4GB; 6935 if (phba->lmt & LMT_2Gb) 6936 rdp_cap |= RDP_PS_2GB; 6937 if (phba->lmt & LMT_1Gb) 6938 rdp_cap |= RDP_PS_1GB; 6939 6940 if (rdp_cap == 0) 6941 rdp_cap = RDP_CAP_UNKNOWN; 6942 if (phba->cfg_link_speed != LPFC_USER_LINK_SPEED_AUTO) 6943 rdp_cap |= RDP_CAP_USER_CONFIGURED; 6944 6945 desc->info.port_speed.capabilities = cpu_to_be16(rdp_cap); 6946 desc->length = cpu_to_be32(sizeof(desc->info)); 6947 return sizeof(struct fc_rdp_port_speed_desc); 6948 } 6949 6950 static uint32_t 6951 lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc, 6952 struct lpfc_vport *vport) 6953 { 6954 6955 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG); 6956 6957 memcpy(desc->port_names.wwnn, &vport->fc_nodename, 6958 sizeof(desc->port_names.wwnn)); 6959 6960 memcpy(desc->port_names.wwpn, &vport->fc_portname, 6961 sizeof(desc->port_names.wwpn)); 6962 6963 desc->length = cpu_to_be32(sizeof(desc->port_names)); 6964 return sizeof(struct fc_rdp_port_name_desc); 6965 } 6966 6967 static uint32_t 6968 lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc, 6969 struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 6970 { 6971 6972 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG); 6973 if (vport->fc_flag & FC_FABRIC) { 6974 memcpy(desc->port_names.wwnn, &vport->fabric_nodename, 6975 sizeof(desc->port_names.wwnn)); 6976 6977 memcpy(desc->port_names.wwpn, &vport->fabric_portname, 6978 sizeof(desc->port_names.wwpn)); 6979 } else { /* Point to Point */ 6980 memcpy(desc->port_names.wwnn, &ndlp->nlp_nodename, 6981 sizeof(desc->port_names.wwnn)); 6982 6983 memcpy(desc->port_names.wwpn, &ndlp->nlp_portname, 6984 sizeof(desc->port_names.wwpn)); 6985 } 6986 6987 desc->length = cpu_to_be32(sizeof(desc->port_names)); 6988 return sizeof(struct fc_rdp_port_name_desc); 6989 } 6990 6991 static void 6992 lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context, 6993 int status) 6994 { 6995 struct lpfc_nodelist *ndlp = rdp_context->ndlp; 6996 struct lpfc_vport *vport = ndlp->vport; 6997 struct lpfc_iocbq *elsiocb; 6998 struct ulp_bde64 *bpl; 6999 IOCB_t *icmd; 7000 union lpfc_wqe128 *wqe; 7001 uint8_t *pcmd; 7002 struct ls_rjt *stat; 7003 struct fc_rdp_res_frame *rdp_res; 7004 uint32_t cmdsize, len; 7005 uint16_t *flag_ptr; 7006 int rc; 7007 u32 ulp_context; 7008 7009 if (status != SUCCESS) 7010 goto error; 7011 7012 /* This will change once we know the true size of the RDP payload */ 7013 cmdsize = sizeof(struct fc_rdp_res_frame); 7014 7015 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, 7016 lpfc_max_els_tries, rdp_context->ndlp, 7017 rdp_context->ndlp->nlp_DID, ELS_CMD_ACC); 7018 if (!elsiocb) 7019 goto free_rdp_context; 7020 7021 ulp_context = get_job_ulpcontext(phba, elsiocb); 7022 if (phba->sli_rev == LPFC_SLI_REV4) { 7023 wqe = &elsiocb->wqe; 7024 /* ox-id of the frame */ 7025 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 7026 rdp_context->ox_id); 7027 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 7028 rdp_context->rx_id); 7029 } else { 7030 icmd = &elsiocb->iocb; 7031 icmd->ulpContext = rdp_context->rx_id; 7032 icmd->unsli3.rcvsli3.ox_id = 
rdp_context->ox_id; 7033 } 7034 7035 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7036 "2171 Xmit RDP response tag x%x xri x%x, " 7037 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x", 7038 elsiocb->iotag, ulp_context, 7039 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 7040 ndlp->nlp_rpi); 7041 rdp_res = (struct fc_rdp_res_frame *)elsiocb->cmd_dmabuf->virt; 7042 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 7043 memset(pcmd, 0, sizeof(struct fc_rdp_res_frame)); 7044 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 7045 7046 /* Update Alarm and Warning */ 7047 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_ALARM_FLAGS); 7048 phba->sfp_alarm |= *flag_ptr; 7049 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_WARNING_FLAGS); 7050 phba->sfp_warning |= *flag_ptr; 7051 7052 /* For RDP payload */ 7053 len = 8; 7054 len += lpfc_rdp_res_link_service((struct fc_rdp_link_service_desc *) 7055 (len + pcmd), ELS_CMD_RDP); 7056 7057 len += lpfc_rdp_res_sfp_desc((struct fc_rdp_sfp_desc *)(len + pcmd), 7058 rdp_context->page_a0, rdp_context->page_a2); 7059 len += lpfc_rdp_res_speed((struct fc_rdp_port_speed_desc *)(len + pcmd), 7060 phba); 7061 len += lpfc_rdp_res_link_error((struct fc_rdp_link_error_status_desc *) 7062 (len + pcmd), &rdp_context->link_stat); 7063 len += lpfc_rdp_res_diag_port_names((struct fc_rdp_port_name_desc *) 7064 (len + pcmd), vport); 7065 len += lpfc_rdp_res_attach_port_names((struct fc_rdp_port_name_desc *) 7066 (len + pcmd), vport, ndlp); 7067 len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd), 7068 &rdp_context->link_stat); 7069 len += lpfc_rdp_res_bbc_desc((struct fc_rdp_bbc_desc *)(len + pcmd), 7070 &rdp_context->link_stat, vport); 7071 len += lpfc_rdp_res_oed_temp_desc(phba, 7072 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7073 rdp_context->page_a2); 7074 len += lpfc_rdp_res_oed_voltage_desc(phba, 7075 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7076 rdp_context->page_a2); 7077 len += lpfc_rdp_res_oed_txbias_desc(phba, 7078 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7079 rdp_context->page_a2); 7080 len += lpfc_rdp_res_oed_txpower_desc(phba, 7081 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7082 rdp_context->page_a2); 7083 len += lpfc_rdp_res_oed_rxpower_desc(phba, 7084 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7085 rdp_context->page_a2); 7086 len += lpfc_rdp_res_opd_desc((struct fc_rdp_opd_sfp_desc *)(len + pcmd), 7087 rdp_context->page_a0, vport); 7088 7089 rdp_res->length = cpu_to_be32(len - 8); 7090 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 7091 7092 /* Now that we know the true size of the payload, update the BPL */ 7093 bpl = (struct ulp_bde64 *)elsiocb->bpl_dmabuf->virt; 7094 bpl->tus.f.bdeSize = len; 7095 bpl->tus.f.bdeFlags = 0; 7096 bpl->tus.w = le32_to_cpu(bpl->tus.w); 7097 7098 phba->fc_stat.elsXmitACC++; 7099 elsiocb->ndlp = lpfc_nlp_get(ndlp); 7100 if (!elsiocb->ndlp) { 7101 lpfc_els_free_iocb(phba, elsiocb); 7102 goto free_rdp_context; 7103 } 7104 7105 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7106 if (rc == IOCB_ERROR) { 7107 lpfc_els_free_iocb(phba, elsiocb); 7108 lpfc_nlp_put(ndlp); 7109 } 7110 7111 goto free_rdp_context; 7112 7113 error: 7114 cmdsize = 2 * sizeof(uint32_t); 7115 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, lpfc_max_els_tries, 7116 ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT); 7117 if (!elsiocb) 7118 goto free_rdp_context; 7119 7120 if (phba->sli_rev == LPFC_SLI_REV4) { 7121 wqe = &elsiocb->wqe; 7122 /* ox-id of the frame */ 7123 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 7124 rdp_context->ox_id); 7125 
		bf_set(wqe_ctxt_tag,
		       &wqe->xmit_els_rsp.wqe_com,
		       rdp_context->rx_id);
	} else {
		icmd = &elsiocb->iocb;
		icmd->ulpContext = rdp_context->rx_id;
		icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id;
	}

	pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;

	*((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
	stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t));
	stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;

	phba->fc_stat.elsXmitLSRJT++;
	elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
	elsiocb->ndlp = lpfc_nlp_get(ndlp);
	if (!elsiocb->ndlp) {
		lpfc_els_free_iocb(phba, elsiocb);
		goto free_rdp_context;
	}

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
	if (rc == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
		lpfc_nlp_put(ndlp);
	}

free_rdp_context:
	/* This reference put is for the original unsolicited RDP. If the
	 * prep failed, there is no reference to remove.
	 */
	lpfc_nlp_put(ndlp);
	kfree(rdp_context);
}

static int
lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context)
{
	LPFC_MBOXQ_t *mbox = NULL;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS,
				"7105 failed to allocate mailbox memory\n");
		return 1;
	}

	if (lpfc_sli4_dump_page_a0(phba, mbox))
		goto rdp_fail;
	mbox->vport = rdp_context->ndlp->vport;
	mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0;
	mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
		return 1;
	}

	return 0;

rdp_fail:
	mempool_free(mbox, phba->mbox_mem_pool);
	return 1;
}

/*
 * lpfc_els_rcv_rdp - Process an unsolicited RDP ELS.
 * @vport: pointer to a host virtual N_Port data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine processes an unsolicited RDP (Read Diagnostic Parameters)
 * IOCB. First, the payload of the unsolicited RDP is checked.
 * Then it will (1) send MBX_DUMP_MEMORY, Embedded DMP_LMSD sub command TYPE-3
 * for Page A0, (2) send MBX_DUMP_MEMORY, DMP_LMSD for Page A2,
 * (3) send MBX_READ_LNK_STAT to get the link statistics, and (4) call
 * lpfc_els_rdp_cmpl to gather all the data and send the RDP response.
 *
 * Return code
 * 0 - Sent the acc response
 * 1 - Sent the reject response.
7209 */ 7210 static int 7211 lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7212 struct lpfc_nodelist *ndlp) 7213 { 7214 struct lpfc_hba *phba = vport->phba; 7215 struct lpfc_dmabuf *pcmd; 7216 uint8_t rjt_err, rjt_expl = LSEXP_NOTHING_MORE; 7217 struct fc_rdp_req_frame *rdp_req; 7218 struct lpfc_rdp_context *rdp_context; 7219 union lpfc_wqe128 *cmd = NULL; 7220 struct ls_rjt stat; 7221 7222 if (phba->sli_rev < LPFC_SLI_REV4 || 7223 bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 7224 LPFC_SLI_INTF_IF_TYPE_2) { 7225 rjt_err = LSRJT_UNABLE_TPC; 7226 rjt_expl = LSEXP_REQ_UNSUPPORTED; 7227 goto error; 7228 } 7229 7230 if (phba->sli_rev < LPFC_SLI_REV4 || (phba->hba_flag & HBA_FCOE_MODE)) { 7231 rjt_err = LSRJT_UNABLE_TPC; 7232 rjt_expl = LSEXP_REQ_UNSUPPORTED; 7233 goto error; 7234 } 7235 7236 pcmd = cmdiocb->cmd_dmabuf; 7237 rdp_req = (struct fc_rdp_req_frame *) pcmd->virt; 7238 7239 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7240 "2422 ELS RDP Request " 7241 "dec len %d tag x%x port_id %d len %d\n", 7242 be32_to_cpu(rdp_req->rdp_des_length), 7243 be32_to_cpu(rdp_req->nport_id_desc.tag), 7244 be32_to_cpu(rdp_req->nport_id_desc.nport_id), 7245 be32_to_cpu(rdp_req->nport_id_desc.length)); 7246 7247 if (sizeof(struct fc_rdp_nport_desc) != 7248 be32_to_cpu(rdp_req->rdp_des_length)) 7249 goto rjt_logerr; 7250 if (RDP_N_PORT_DESC_TAG != be32_to_cpu(rdp_req->nport_id_desc.tag)) 7251 goto rjt_logerr; 7252 if (RDP_NPORT_ID_SIZE != 7253 be32_to_cpu(rdp_req->nport_id_desc.length)) 7254 goto rjt_logerr; 7255 rdp_context = kzalloc(sizeof(struct lpfc_rdp_context), GFP_KERNEL); 7256 if (!rdp_context) { 7257 rjt_err = LSRJT_UNABLE_TPC; 7258 goto error; 7259 } 7260 7261 cmd = &cmdiocb->wqe; 7262 rdp_context->ndlp = lpfc_nlp_get(ndlp); 7263 if (!rdp_context->ndlp) { 7264 kfree(rdp_context); 7265 rjt_err = LSRJT_UNABLE_TPC; 7266 goto error; 7267 } 7268 rdp_context->ox_id = bf_get(wqe_rcvoxid, 7269 &cmd->xmit_els_rsp.wqe_com); 7270 rdp_context->rx_id = bf_get(wqe_ctxt_tag, 7271 &cmd->xmit_els_rsp.wqe_com); 7272 rdp_context->cmpl = lpfc_els_rdp_cmpl; 7273 if (lpfc_get_rdp_info(phba, rdp_context)) { 7274 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_ELS, 7275 "2423 Unable to send mailbox"); 7276 kfree(rdp_context); 7277 rjt_err = LSRJT_UNABLE_TPC; 7278 lpfc_nlp_put(ndlp); 7279 goto error; 7280 } 7281 7282 return 0; 7283 7284 rjt_logerr: 7285 rjt_err = LSRJT_LOGICAL_ERR; 7286 7287 error: 7288 memset(&stat, 0, sizeof(stat)); 7289 stat.un.b.lsRjtRsnCode = rjt_err; 7290 stat.un.b.lsRjtRsnCodeExp = rjt_expl; 7291 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 7292 return 1; 7293 } 7294 7295 7296 static void 7297 lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 7298 { 7299 MAILBOX_t *mb; 7300 IOCB_t *icmd; 7301 union lpfc_wqe128 *wqe; 7302 uint8_t *pcmd; 7303 struct lpfc_iocbq *elsiocb; 7304 struct lpfc_nodelist *ndlp; 7305 struct ls_rjt *stat; 7306 union lpfc_sli4_cfg_shdr *shdr; 7307 struct lpfc_lcb_context *lcb_context; 7308 struct fc_lcb_res_frame *lcb_res; 7309 uint32_t cmdsize, shdr_status, shdr_add_status; 7310 int rc; 7311 7312 mb = &pmb->u.mb; 7313 lcb_context = (struct lpfc_lcb_context *)pmb->ctx_ndlp; 7314 ndlp = lcb_context->ndlp; 7315 pmb->ctx_ndlp = NULL; 7316 pmb->ctx_buf = NULL; 7317 7318 shdr = (union lpfc_sli4_cfg_shdr *) 7319 &pmb->u.mqe.un.beacon_config.header.cfg_shdr; 7320 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 7321 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 7322 7323 
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX, 7324 "0194 SET_BEACON_CONFIG mailbox " 7325 "completed with status x%x add_status x%x," 7326 " mbx status x%x\n", 7327 shdr_status, shdr_add_status, mb->mbxStatus); 7328 7329 if ((mb->mbxStatus != MBX_SUCCESS) || shdr_status || 7330 (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) || 7331 (shdr_add_status == ADD_STATUS_INVALID_REQUEST)) { 7332 mempool_free(pmb, phba->mbox_mem_pool); 7333 goto error; 7334 } 7335 7336 mempool_free(pmb, phba->mbox_mem_pool); 7337 cmdsize = sizeof(struct fc_lcb_res_frame); 7338 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 7339 lpfc_max_els_tries, ndlp, 7340 ndlp->nlp_DID, ELS_CMD_ACC); 7341 7342 /* Decrement the ndlp reference count from previous mbox command */ 7343 lpfc_nlp_put(ndlp); 7344 7345 if (!elsiocb) 7346 goto free_lcb_context; 7347 7348 lcb_res = (struct fc_lcb_res_frame *)elsiocb->cmd_dmabuf->virt; 7349 7350 memset(lcb_res, 0, sizeof(struct fc_lcb_res_frame)); 7351 7352 if (phba->sli_rev == LPFC_SLI_REV4) { 7353 wqe = &elsiocb->wqe; 7354 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, lcb_context->rx_id); 7355 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 7356 lcb_context->ox_id); 7357 } else { 7358 icmd = &elsiocb->iocb; 7359 icmd->ulpContext = lcb_context->rx_id; 7360 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; 7361 } 7362 7363 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 7364 *((uint32_t *)(pcmd)) = ELS_CMD_ACC; 7365 lcb_res->lcb_sub_command = lcb_context->sub_command; 7366 lcb_res->lcb_type = lcb_context->type; 7367 lcb_res->capability = lcb_context->capability; 7368 lcb_res->lcb_frequency = lcb_context->frequency; 7369 lcb_res->lcb_duration = lcb_context->duration; 7370 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 7371 phba->fc_stat.elsXmitACC++; 7372 7373 elsiocb->ndlp = lpfc_nlp_get(ndlp); 7374 if (!elsiocb->ndlp) { 7375 lpfc_els_free_iocb(phba, elsiocb); 7376 goto out; 7377 } 7378 7379 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7380 if (rc == IOCB_ERROR) { 7381 lpfc_els_free_iocb(phba, elsiocb); 7382 lpfc_nlp_put(ndlp); 7383 } 7384 out: 7385 kfree(lcb_context); 7386 return; 7387 7388 error: 7389 cmdsize = sizeof(struct fc_lcb_res_frame); 7390 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 7391 lpfc_max_els_tries, ndlp, 7392 ndlp->nlp_DID, ELS_CMD_LS_RJT); 7393 lpfc_nlp_put(ndlp); 7394 if (!elsiocb) 7395 goto free_lcb_context; 7396 7397 if (phba->sli_rev == LPFC_SLI_REV4) { 7398 wqe = &elsiocb->wqe; 7399 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, lcb_context->rx_id); 7400 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 7401 lcb_context->ox_id); 7402 } else { 7403 icmd = &elsiocb->iocb; 7404 icmd->ulpContext = lcb_context->rx_id; 7405 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; 7406 } 7407 7408 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 7409 7410 *((uint32_t *)(pcmd)) = ELS_CMD_LS_RJT; 7411 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t)); 7412 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 7413 7414 if (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) 7415 stat->un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS; 7416 7417 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 7418 phba->fc_stat.elsXmitLSRJT++; 7419 elsiocb->ndlp = lpfc_nlp_get(ndlp); 7420 if (!elsiocb->ndlp) { 7421 lpfc_els_free_iocb(phba, elsiocb); 7422 goto free_lcb_context; 7423 } 7424 7425 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7426 if (rc == IOCB_ERROR) { 7427 lpfc_els_free_iocb(phba, elsiocb); 7428 lpfc_nlp_put(ndlp); 7429 } 7430 free_lcb_context: 7431 kfree(lcb_context); 7432 } 
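
/**
 * lpfc_sli4_set_beacon - Build and issue a SET_BEACON_CONFIG mailbox command
 * @vport: pointer to a host virtual N_Port data structure.
 * @lcb_context: pointer to the LCB context saved from the unsolicited LCB.
 * @beacon_state: 1 to turn the beacon on, 0 to turn it off.
 *
 * Descriptive summary of the routine below: it allocates a mailbox, builds a
 * COMMON_SET_BEACON_CONFIG request (the V1 format when the bv1s capability
 * bit is set, otherwise the V0 format, which only allows a zero beacon
 * duration), and issues it with lpfc_els_lcb_rsp() as the completion handler
 * that transmits the LCB response.
 *
 * Return code
 * 0 - Mailbox issued successfully
 * 1 - Allocation failure, unsupported duration, or mailbox issue failure
 */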
7433 7434 static int 7435 lpfc_sli4_set_beacon(struct lpfc_vport *vport, 7436 struct lpfc_lcb_context *lcb_context, 7437 uint32_t beacon_state) 7438 { 7439 struct lpfc_hba *phba = vport->phba; 7440 union lpfc_sli4_cfg_shdr *cfg_shdr; 7441 LPFC_MBOXQ_t *mbox = NULL; 7442 uint32_t len; 7443 int rc; 7444 7445 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7446 if (!mbox) 7447 return 1; 7448 7449 cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr; 7450 len = sizeof(struct lpfc_mbx_set_beacon_config) - 7451 sizeof(struct lpfc_sli4_cfg_mhdr); 7452 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 7453 LPFC_MBOX_OPCODE_SET_BEACON_CONFIG, len, 7454 LPFC_SLI4_MBX_EMBED); 7455 mbox->ctx_ndlp = (void *)lcb_context; 7456 mbox->vport = phba->pport; 7457 mbox->mbox_cmpl = lpfc_els_lcb_rsp; 7458 bf_set(lpfc_mbx_set_beacon_port_num, &mbox->u.mqe.un.beacon_config, 7459 phba->sli4_hba.physical_port); 7460 bf_set(lpfc_mbx_set_beacon_state, &mbox->u.mqe.un.beacon_config, 7461 beacon_state); 7462 mbox->u.mqe.un.beacon_config.word5 = 0; /* Reserved */ 7463 7464 /* 7465 * Check bv1s bit before issuing the mailbox 7466 * if bv1s == 1, LCB V1 supported 7467 * else, LCB V0 supported 7468 */ 7469 7470 if (phba->sli4_hba.pc_sli4_params.bv1s) { 7471 /* COMMON_SET_BEACON_CONFIG_V1 */ 7472 cfg_shdr->request.word9 = BEACON_VERSION_V1; 7473 lcb_context->capability |= LCB_CAPABILITY_DURATION; 7474 bf_set(lpfc_mbx_set_beacon_port_type, 7475 &mbox->u.mqe.un.beacon_config, 0); 7476 bf_set(lpfc_mbx_set_beacon_duration_v1, 7477 &mbox->u.mqe.un.beacon_config, 7478 be16_to_cpu(lcb_context->duration)); 7479 } else { 7480 /* COMMON_SET_BEACON_CONFIG_V0 */ 7481 if (be16_to_cpu(lcb_context->duration) != 0) { 7482 mempool_free(mbox, phba->mbox_mem_pool); 7483 return 1; 7484 } 7485 cfg_shdr->request.word9 = BEACON_VERSION_V0; 7486 lcb_context->capability &= ~(LCB_CAPABILITY_DURATION); 7487 bf_set(lpfc_mbx_set_beacon_state, 7488 &mbox->u.mqe.un.beacon_config, beacon_state); 7489 bf_set(lpfc_mbx_set_beacon_port_type, 7490 &mbox->u.mqe.un.beacon_config, 1); 7491 bf_set(lpfc_mbx_set_beacon_duration, 7492 &mbox->u.mqe.un.beacon_config, 7493 be16_to_cpu(lcb_context->duration)); 7494 } 7495 7496 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 7497 if (rc == MBX_NOT_FINISHED) { 7498 mempool_free(mbox, phba->mbox_mem_pool); 7499 return 1; 7500 } 7501 7502 return 0; 7503 } 7504 7505 7506 /** 7507 * lpfc_els_rcv_lcb - Process an unsolicited LCB 7508 * @vport: pointer to a host virtual N_Port data structure. 7509 * @cmdiocb: pointer to lpfc command iocb data structure. 7510 * @ndlp: pointer to a node-list data structure. 7511 * 7512 * This routine processes an unsolicited LCB(LINK CABLE BEACON) IOCB. 7513 * First, the payload of the unsolicited LCB is checked. 7514 * Then based on Subcommand beacon will either turn on or off. 7515 * 7516 * Return code 7517 * 0 - Sent the acc response 7518 * 1 - Sent the reject response. 
7519 **/ 7520 static int 7521 lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7522 struct lpfc_nodelist *ndlp) 7523 { 7524 struct lpfc_hba *phba = vport->phba; 7525 struct lpfc_dmabuf *pcmd; 7526 uint8_t *lp; 7527 struct fc_lcb_request_frame *beacon; 7528 struct lpfc_lcb_context *lcb_context; 7529 u8 state, rjt_err = 0; 7530 struct ls_rjt stat; 7531 7532 pcmd = cmdiocb->cmd_dmabuf; 7533 lp = (uint8_t *)pcmd->virt; 7534 beacon = (struct fc_lcb_request_frame *)pcmd->virt; 7535 7536 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7537 "0192 ELS LCB Data x%x x%x x%x x%x sub x%x " 7538 "type x%x frequency %x duration x%x\n", 7539 lp[0], lp[1], lp[2], 7540 beacon->lcb_command, 7541 beacon->lcb_sub_command, 7542 beacon->lcb_type, 7543 beacon->lcb_frequency, 7544 be16_to_cpu(beacon->lcb_duration)); 7545 7546 if (beacon->lcb_sub_command != LPFC_LCB_ON && 7547 beacon->lcb_sub_command != LPFC_LCB_OFF) { 7548 rjt_err = LSRJT_CMD_UNSUPPORTED; 7549 goto rjt; 7550 } 7551 7552 if (phba->sli_rev < LPFC_SLI_REV4 || 7553 phba->hba_flag & HBA_FCOE_MODE || 7554 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 7555 LPFC_SLI_INTF_IF_TYPE_2)) { 7556 rjt_err = LSRJT_CMD_UNSUPPORTED; 7557 goto rjt; 7558 } 7559 7560 lcb_context = kmalloc(sizeof(*lcb_context), GFP_KERNEL); 7561 if (!lcb_context) { 7562 rjt_err = LSRJT_UNABLE_TPC; 7563 goto rjt; 7564 } 7565 7566 state = (beacon->lcb_sub_command == LPFC_LCB_ON) ? 1 : 0; 7567 lcb_context->sub_command = beacon->lcb_sub_command; 7568 lcb_context->capability = 0; 7569 lcb_context->type = beacon->lcb_type; 7570 lcb_context->frequency = beacon->lcb_frequency; 7571 lcb_context->duration = beacon->lcb_duration; 7572 lcb_context->ox_id = get_job_rcvoxid(phba, cmdiocb); 7573 lcb_context->rx_id = get_job_ulpcontext(phba, cmdiocb); 7574 lcb_context->ndlp = lpfc_nlp_get(ndlp); 7575 if (!lcb_context->ndlp) { 7576 rjt_err = LSRJT_UNABLE_TPC; 7577 goto rjt_free; 7578 } 7579 7580 if (lpfc_sli4_set_beacon(vport, lcb_context, state)) { 7581 lpfc_printf_vlog(ndlp->vport, KERN_ERR, LOG_TRACE_EVENT, 7582 "0193 failed to send mail box"); 7583 lpfc_nlp_put(ndlp); 7584 rjt_err = LSRJT_UNABLE_TPC; 7585 goto rjt_free; 7586 } 7587 return 0; 7588 7589 rjt_free: 7590 kfree(lcb_context); 7591 rjt: 7592 memset(&stat, 0, sizeof(stat)); 7593 stat.un.b.lsRjtRsnCode = rjt_err; 7594 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 7595 return 1; 7596 } 7597 7598 7599 /** 7600 * lpfc_els_flush_rscn - Clean up any rscn activities with a vport 7601 * @vport: pointer to a host virtual N_Port data structure. 7602 * 7603 * This routine cleans up any Registration State Change Notification 7604 * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the 7605 * @vport together with the host_lock is used to prevent multiple thread 7606 * trying to access the RSCN array on a same @vport at the same time. 
7607 **/ 7608 void 7609 lpfc_els_flush_rscn(struct lpfc_vport *vport) 7610 { 7611 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7612 struct lpfc_hba *phba = vport->phba; 7613 int i; 7614 7615 spin_lock_irq(shost->host_lock); 7616 if (vport->fc_rscn_flush) { 7617 /* Another thread is walking fc_rscn_id_list on this vport */ 7618 spin_unlock_irq(shost->host_lock); 7619 return; 7620 } 7621 /* Indicate we are walking lpfc_els_flush_rscn on this vport */ 7622 vport->fc_rscn_flush = 1; 7623 spin_unlock_irq(shost->host_lock); 7624 7625 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 7626 lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]); 7627 vport->fc_rscn_id_list[i] = NULL; 7628 } 7629 spin_lock_irq(shost->host_lock); 7630 vport->fc_rscn_id_cnt = 0; 7631 vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY); 7632 spin_unlock_irq(shost->host_lock); 7633 lpfc_can_disctmo(vport); 7634 /* Indicate we are done walking this fc_rscn_id_list */ 7635 vport->fc_rscn_flush = 0; 7636 } 7637 7638 /** 7639 * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did 7640 * @vport: pointer to a host virtual N_Port data structure. 7641 * @did: remote destination port identifier. 7642 * 7643 * This routine checks whether there is any pending Registration State 7644 * Configuration Notification (RSCN) to a @did on @vport. 7645 * 7646 * Return code 7647 * None zero - The @did matched with a pending rscn 7648 * 0 - not able to match @did with a pending rscn 7649 **/ 7650 int 7651 lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did) 7652 { 7653 D_ID ns_did; 7654 D_ID rscn_did; 7655 uint32_t *lp; 7656 uint32_t payload_len, i; 7657 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7658 7659 ns_did.un.word = did; 7660 7661 /* Never match fabric nodes for RSCNs */ 7662 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 7663 return 0; 7664 7665 /* If we are doing a FULL RSCN rediscovery, match everything */ 7666 if (vport->fc_flag & FC_RSCN_DISCOVERY) 7667 return did; 7668 7669 spin_lock_irq(shost->host_lock); 7670 if (vport->fc_rscn_flush) { 7671 /* Another thread is walking fc_rscn_id_list on this vport */ 7672 spin_unlock_irq(shost->host_lock); 7673 return 0; 7674 } 7675 /* Indicate we are walking fc_rscn_id_list on this vport */ 7676 vport->fc_rscn_flush = 1; 7677 spin_unlock_irq(shost->host_lock); 7678 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 7679 lp = vport->fc_rscn_id_list[i]->virt; 7680 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); 7681 payload_len -= sizeof(uint32_t); /* take off word 0 */ 7682 while (payload_len) { 7683 rscn_did.un.word = be32_to_cpu(*lp++); 7684 payload_len -= sizeof(uint32_t); 7685 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) { 7686 case RSCN_ADDRESS_FORMAT_PORT: 7687 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 7688 && (ns_did.un.b.area == rscn_did.un.b.area) 7689 && (ns_did.un.b.id == rscn_did.un.b.id)) 7690 goto return_did_out; 7691 break; 7692 case RSCN_ADDRESS_FORMAT_AREA: 7693 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 7694 && (ns_did.un.b.area == rscn_did.un.b.area)) 7695 goto return_did_out; 7696 break; 7697 case RSCN_ADDRESS_FORMAT_DOMAIN: 7698 if (ns_did.un.b.domain == rscn_did.un.b.domain) 7699 goto return_did_out; 7700 break; 7701 case RSCN_ADDRESS_FORMAT_FABRIC: 7702 goto return_did_out; 7703 } 7704 } 7705 } 7706 /* Indicate we are done with walking fc_rscn_id_list on this vport */ 7707 vport->fc_rscn_flush = 0; 7708 return 0; 7709 return_did_out: 7710 /* Indicate we are done with walking fc_rscn_id_list on this 
vport */ 7711 vport->fc_rscn_flush = 0; 7712 return did; 7713 } 7714 7715 /** 7716 * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn 7717 * @vport: pointer to a host virtual N_Port data structure. 7718 * 7719 * This routine sends recovery (NLP_EVT_DEVICE_RECOVERY) event to the 7720 * state machine for a @vport's nodes that are with pending RSCN (Registration 7721 * State Change Notification). 7722 * 7723 * Return code 7724 * 0 - Successful (currently alway return 0) 7725 **/ 7726 static int 7727 lpfc_rscn_recovery_check(struct lpfc_vport *vport) 7728 { 7729 struct lpfc_nodelist *ndlp = NULL, *n; 7730 7731 /* Move all affected nodes by pending RSCNs to NPR state. */ 7732 list_for_each_entry_safe(ndlp, n, &vport->fc_nodes, nlp_listp) { 7733 if ((ndlp->nlp_state == NLP_STE_UNUSED_NODE) || 7734 !lpfc_rscn_payload_check(vport, ndlp->nlp_DID)) 7735 continue; 7736 7737 /* NVME Target mode does not do RSCN Recovery. */ 7738 if (vport->phba->nvmet_support) 7739 continue; 7740 7741 /* If we are in the process of doing discovery on this 7742 * NPort, let it continue on its own. 7743 */ 7744 switch (ndlp->nlp_state) { 7745 case NLP_STE_PLOGI_ISSUE: 7746 case NLP_STE_ADISC_ISSUE: 7747 case NLP_STE_REG_LOGIN_ISSUE: 7748 case NLP_STE_PRLI_ISSUE: 7749 case NLP_STE_LOGO_ISSUE: 7750 continue; 7751 } 7752 7753 lpfc_disc_state_machine(vport, ndlp, NULL, 7754 NLP_EVT_DEVICE_RECOVERY); 7755 lpfc_cancel_retry_delay_tmo(vport, ndlp); 7756 } 7757 return 0; 7758 } 7759 7760 /** 7761 * lpfc_send_rscn_event - Send an RSCN event to management application 7762 * @vport: pointer to a host virtual N_Port data structure. 7763 * @cmdiocb: pointer to lpfc command iocb data structure. 7764 * 7765 * lpfc_send_rscn_event sends an RSCN netlink event to management 7766 * applications. 7767 */ 7768 static void 7769 lpfc_send_rscn_event(struct lpfc_vport *vport, 7770 struct lpfc_iocbq *cmdiocb) 7771 { 7772 struct lpfc_dmabuf *pcmd; 7773 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7774 uint32_t *payload_ptr; 7775 uint32_t payload_len; 7776 struct lpfc_rscn_event_header *rscn_event_data; 7777 7778 pcmd = cmdiocb->cmd_dmabuf; 7779 payload_ptr = (uint32_t *) pcmd->virt; 7780 payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK); 7781 7782 rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) + 7783 payload_len, GFP_KERNEL); 7784 if (!rscn_event_data) { 7785 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 7786 "0147 Failed to allocate memory for RSCN event\n"); 7787 return; 7788 } 7789 rscn_event_data->event_type = FC_REG_RSCN_EVENT; 7790 rscn_event_data->payload_length = payload_len; 7791 memcpy(rscn_event_data->rscn_payload, payload_ptr, 7792 payload_len); 7793 7794 fc_host_post_vendor_event(shost, 7795 fc_get_event_number(), 7796 sizeof(struct lpfc_rscn_event_header) + payload_len, 7797 (char *)rscn_event_data, 7798 LPFC_NL_VENDOR_ID); 7799 7800 kfree(rscn_event_data); 7801 } 7802 7803 /** 7804 * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb 7805 * @vport: pointer to a host virtual N_Port data structure. 7806 * @cmdiocb: pointer to lpfc command iocb data structure. 7807 * @ndlp: pointer to a node-list data structure. 7808 * 7809 * This routine processes an unsolicited RSCN (Registration State Change 7810 * Notification) IOCB. First, the payload of the unsolicited RSCN is walked 7811 * to invoke fc_host_post_event() routine to the FC transport layer. 
If the 7812 * discover state machine is about to begin discovery, it just accepts the 7813 * RSCN and the discovery process will satisfy the RSCN. If this RSCN only 7814 * contains N_Port IDs for other vports on this HBA, it just accepts the 7815 * RSCN and ignore processing it. If the state machine is in the recovery 7816 * state, the fc_rscn_id_list of this @vport is walked and the 7817 * lpfc_rscn_recovery_check() routine is invoked to send recovery event for 7818 * all nodes that match RSCN payload. Otherwise, the lpfc_els_handle_rscn() 7819 * routine is invoked to handle the RSCN event. 7820 * 7821 * Return code 7822 * 0 - Just sent the acc response 7823 * 1 - Sent the acc response and waited for name server completion 7824 **/ 7825 static int 7826 lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7827 struct lpfc_nodelist *ndlp) 7828 { 7829 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7830 struct lpfc_hba *phba = vport->phba; 7831 struct lpfc_dmabuf *pcmd; 7832 uint32_t *lp, *datap; 7833 uint32_t payload_len, length, nportid, *cmd; 7834 int rscn_cnt; 7835 int rscn_id = 0, hba_id = 0; 7836 int i, tmo; 7837 7838 pcmd = cmdiocb->cmd_dmabuf; 7839 lp = (uint32_t *) pcmd->virt; 7840 7841 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); 7842 payload_len -= sizeof(uint32_t); /* take off word 0 */ 7843 /* RSCN received */ 7844 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 7845 "0214 RSCN received Data: x%x x%x x%x x%x\n", 7846 vport->fc_flag, payload_len, *lp, 7847 vport->fc_rscn_id_cnt); 7848 7849 /* Send an RSCN event to the management application */ 7850 lpfc_send_rscn_event(vport, cmdiocb); 7851 7852 for (i = 0; i < payload_len/sizeof(uint32_t); i++) 7853 fc_host_post_event(shost, fc_get_event_number(), 7854 FCH_EVT_RSCN, lp[i]); 7855 7856 /* Check if RSCN is coming from a direct-connected remote NPort */ 7857 if (vport->fc_flag & FC_PT2PT) { 7858 /* If so, just ACC it, no other action needed for now */ 7859 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7860 "2024 pt2pt RSCN %08x Data: x%x x%x\n", 7861 *lp, vport->fc_flag, payload_len); 7862 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 7863 7864 /* Check to see if we need to NVME rescan this target 7865 * remoteport. 7866 */ 7867 if (ndlp->nlp_fc4_type & NLP_FC4_NVME && 7868 ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY)) 7869 lpfc_nvme_rescan_port(vport, ndlp); 7870 return 0; 7871 } 7872 7873 /* If we are about to begin discovery, just ACC the RSCN. 7874 * Discovery processing will satisfy it. 7875 */ 7876 if (vport->port_state <= LPFC_NS_QRY) { 7877 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 7878 "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x", 7879 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 7880 7881 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 7882 return 0; 7883 } 7884 7885 /* If this RSCN just contains NPortIDs for other vports on this HBA, 7886 * just ACC and ignore it. 
	 */
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    !(vport->cfg_peer_port_login)) {
		i = payload_len;
		datap = lp;
		while (i > 0) {
			nportid = *datap++;
			nportid = ((be32_to_cpu(nportid)) & Mask_DID);
			i -= sizeof(uint32_t);
			rscn_id++;
			if (lpfc_find_vport_by_did(phba, nportid))
				hba_id++;
		}
		if (rscn_id == hba_id) {
			/* ALL NPortIDs in RSCN are on HBA */
			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
					 "0219 Ignore RSCN "
					 "Data: x%x x%x x%x x%x\n",
					 vport->fc_flag, payload_len,
					 *lp, vport->fc_rscn_id_cnt);
			lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
					      "RCV RSCN vport: did:x%x/ste:x%x flg:x%x",
					      ndlp->nlp_DID, vport->port_state,
					      ndlp->nlp_flag);

			lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb,
					 ndlp, NULL);
			/* Restart disctmo if it's already running */
			if (vport->fc_flag & FC_DISC_TMO) {
				tmo = ((phba->fc_ratov * 3) + 3);
				mod_timer(&vport->fc_disctmo,
					  jiffies +
					  msecs_to_jiffies(1000 * tmo));
			}
			return 0;
		}
	}

	spin_lock_irq(shost->host_lock);
	if (vport->fc_rscn_flush) {
		/* Another thread is walking fc_rscn_id_list on this vport */
		vport->fc_flag |= FC_RSCN_DISCOVERY;
		spin_unlock_irq(shost->host_lock);
		/* Send back ACC */
		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
		return 0;
	}
	/* Indicate we are walking fc_rscn_id_list on this vport */
	vport->fc_rscn_flush = 1;
	spin_unlock_irq(shost->host_lock);
	/* Get the array count after we successfully have the token */
	rscn_cnt = vport->fc_rscn_id_cnt;
	/* If we are already processing an RSCN, save the received
	 * RSCN payload buffer, cmdiocb->cmd_dmabuf, to process later.
	 */
	if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) {
		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
				      "RCV RSCN defer: did:x%x/ste:x%x flg:x%x",
				      ndlp->nlp_DID, vport->port_state,
				      ndlp->nlp_flag);

		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_RSCN_DEFERRED;

		/* Restart disctmo if it's already running */
		if (vport->fc_flag & FC_DISC_TMO) {
			tmo = ((phba->fc_ratov * 3) + 3);
			mod_timer(&vport->fc_disctmo,
				  jiffies + msecs_to_jiffies(1000 * tmo));
		}
		if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
		    !(vport->fc_flag & FC_RSCN_DISCOVERY)) {
			vport->fc_flag |= FC_RSCN_MODE;
			spin_unlock_irq(shost->host_lock);
			if (rscn_cnt) {
				cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt;
				length = be32_to_cpu(*cmd & ~ELS_CMD_MASK);
			}
			if ((rscn_cnt) &&
			    (payload_len + length <= LPFC_BPL_SIZE)) {
				*cmd &= ELS_CMD_MASK;
				*cmd |= cpu_to_be32(payload_len + length);
				memcpy(((uint8_t *)cmd) + length, lp,
				       payload_len);
			} else {
				vport->fc_rscn_id_list[rscn_cnt] = pcmd;
				vport->fc_rscn_id_cnt++;
				/* If we zero cmdiocb->cmd_dmabuf, the calling
				 * routine will not try to free it.
7975 */ 7976 cmdiocb->cmd_dmabuf = NULL; 7977 } 7978 /* Deferred RSCN */ 7979 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 7980 "0235 Deferred RSCN " 7981 "Data: x%x x%x x%x\n", 7982 vport->fc_rscn_id_cnt, vport->fc_flag, 7983 vport->port_state); 7984 } else { 7985 vport->fc_flag |= FC_RSCN_DISCOVERY; 7986 spin_unlock_irq(shost->host_lock); 7987 /* ReDiscovery RSCN */ 7988 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 7989 "0234 ReDiscovery RSCN " 7990 "Data: x%x x%x x%x\n", 7991 vport->fc_rscn_id_cnt, vport->fc_flag, 7992 vport->port_state); 7993 } 7994 /* Indicate we are done walking fc_rscn_id_list on this vport */ 7995 vport->fc_rscn_flush = 0; 7996 /* Send back ACC */ 7997 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 7998 /* send RECOVERY event for ALL nodes that match RSCN payload */ 7999 lpfc_rscn_recovery_check(vport); 8000 return 0; 8001 } 8002 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8003 "RCV RSCN: did:x%x/ste:x%x flg:x%x", 8004 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 8005 8006 spin_lock_irq(shost->host_lock); 8007 vport->fc_flag |= FC_RSCN_MODE; 8008 spin_unlock_irq(shost->host_lock); 8009 vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd; 8010 /* Indicate we are done walking fc_rscn_id_list on this vport */ 8011 vport->fc_rscn_flush = 0; 8012 /* 8013 * If we zero, cmdiocb->cmd_dmabuf, the calling routine will 8014 * not try to free it. 8015 */ 8016 cmdiocb->cmd_dmabuf = NULL; 8017 lpfc_set_disctmo(vport); 8018 /* Send back ACC */ 8019 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8020 /* send RECOVERY event for ALL nodes that match RSCN payload */ 8021 lpfc_rscn_recovery_check(vport); 8022 return lpfc_els_handle_rscn(vport); 8023 } 8024 8025 /** 8026 * lpfc_els_handle_rscn - Handle rscn for a vport 8027 * @vport: pointer to a host virtual N_Port data structure. 8028 * 8029 * This routine handles the Registration State Configuration Notification 8030 * (RSCN) for a @vport. If login to NameServer does not exist, a new ndlp shall 8031 * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise, 8032 * if the ndlp to NameServer exists, a Common Transport (CT) command to the 8033 * NameServer shall be issued. If CT command to the NameServer fails to be 8034 * issued, the lpfc_els_flush_rscn() routine shall be invoked to clean up any 8035 * RSCN activities with the @vport. 8036 * 8037 * Return code 8038 * 0 - Cleaned up rscn on the @vport 8039 * 1 - Wait for plogi to name server before proceed 8040 **/ 8041 int 8042 lpfc_els_handle_rscn(struct lpfc_vport *vport) 8043 { 8044 struct lpfc_nodelist *ndlp; 8045 struct lpfc_hba *phba = vport->phba; 8046 8047 /* Ignore RSCN if the port is being torn down. */ 8048 if (vport->load_flag & FC_UNLOADING) { 8049 lpfc_els_flush_rscn(vport); 8050 return 0; 8051 } 8052 8053 /* Start timer for RSCN processing */ 8054 lpfc_set_disctmo(vport); 8055 8056 /* RSCN processed */ 8057 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 8058 "0215 RSCN processed Data: x%x x%x x%x x%x x%x x%x\n", 8059 vport->fc_flag, 0, vport->fc_rscn_id_cnt, 8060 vport->port_state, vport->num_disc_nodes, 8061 vport->gidft_inp); 8062 8063 /* To process RSCN, first compare RSCN data with NameServer */ 8064 vport->fc_ns_retry = 0; 8065 vport->num_disc_nodes = 0; 8066 8067 ndlp = lpfc_findnode_did(vport, NameServer_DID); 8068 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { 8069 /* Good ndlp, issue CT Request to NameServer. Need to 8070 * know how many gidfts were issued. 
		 * If none, then just flush the RSCN. Otherwise, the
		 * outstanding requests need to complete.
		 */
		if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_FT) {
			if (lpfc_issue_gidft(vport) > 0)
				return 1;
		} else if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_PT) {
			if (lpfc_issue_gidpt(vport) > 0)
				return 1;
		} else {
			return 1;
		}
	} else {
		/* Nameserver login in question. Revalidate. */
		if (ndlp) {
			ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
		} else {
			ndlp = lpfc_nlp_init(vport, NameServer_DID);
			if (!ndlp) {
				lpfc_els_flush_rscn(vport);
				return 0;
			}
			ndlp->nlp_prev_state = ndlp->nlp_state;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
		}
		ndlp->nlp_type |= NLP_FABRIC;
		lpfc_issue_els_plogi(vport, NameServer_DID, 0);
		/* Wait for NameServer login cmpl before we can
		 * continue
		 */
		return 1;
	}

	lpfc_els_flush_rscn(vport);
	return 0;
}

/**
 * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb
 * @vport: pointer to a host virtual N_Port data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine processes a Fabric Login (FLOGI) IOCB received as an ELS
 * unsolicited event. An unsolicited FLOGI can be received in a point-to-
 * point topology. As an unsolicited FLOGI should not be received in loop
 * mode, any unsolicited FLOGI received in loop mode shall be ignored. The
 * lpfc_check_sparm() routine is invoked to check the parameters in the
 * unsolicited FLOGI. If parameter validation fails, the routine
 * lpfc_els_rsp_reject() shall be called with reject reason code set to
 * LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the
 * FLOGI shall be compared with the Port WWN of the @vport to determine which
 * port will initiate the PLOGI. The party with the higher lexicographical
 * value shall have higher priority (as the winning port) and will initiate
 * PLOGI and communicate Port_IDs (Addresses) for both nodes in the PLOGI.
 * The result of this will be marked in the @vport fc_flag field with
 * FC_PT2PT_PLOGI and then the lpfc_els_rsp_acc() routine is invoked to
 * accept the FLOGI.
8129 * 8130 * Return code 8131 * 0 - Successfully processed the unsolicited flogi 8132 * 1 - Failed to process the unsolicited flogi 8133 **/ 8134 static int 8135 lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8136 struct lpfc_nodelist *ndlp) 8137 { 8138 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 8139 struct lpfc_hba *phba = vport->phba; 8140 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf; 8141 uint32_t *lp = (uint32_t *) pcmd->virt; 8142 union lpfc_wqe128 *wqe = &cmdiocb->wqe; 8143 struct serv_parm *sp; 8144 LPFC_MBOXQ_t *mbox; 8145 uint32_t cmd, did; 8146 int rc; 8147 uint32_t fc_flag = 0; 8148 uint32_t port_state = 0; 8149 8150 /* Clear external loopback plug detected flag */ 8151 phba->link_flag &= ~LS_EXTERNAL_LOOPBACK; 8152 8153 cmd = *lp++; 8154 sp = (struct serv_parm *) lp; 8155 8156 /* FLOGI received */ 8157 8158 lpfc_set_disctmo(vport); 8159 8160 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 8161 /* We should never receive a FLOGI in loop mode, ignore it */ 8162 did = bf_get(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest); 8163 8164 /* An FLOGI ELS command <elsCmd> was received from DID <did> in 8165 Loop Mode */ 8166 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 8167 "0113 An FLOGI ELS command x%x was " 8168 "received from DID x%x in Loop Mode\n", 8169 cmd, did); 8170 return 1; 8171 } 8172 8173 (void) lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1); 8174 8175 /* 8176 * If our portname is greater than the remote portname, 8177 * then we initiate Nport login. 8178 */ 8179 8180 rc = memcmp(&vport->fc_portname, &sp->portName, 8181 sizeof(struct lpfc_name)); 8182 8183 if (!rc) { 8184 if (phba->sli_rev < LPFC_SLI_REV4) { 8185 mbox = mempool_alloc(phba->mbox_mem_pool, 8186 GFP_KERNEL); 8187 if (!mbox) 8188 return 1; 8189 lpfc_linkdown(phba); 8190 lpfc_init_link(phba, mbox, 8191 phba->cfg_topology, 8192 phba->cfg_link_speed); 8193 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; 8194 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 8195 mbox->vport = vport; 8196 rc = lpfc_sli_issue_mbox(phba, mbox, 8197 MBX_NOWAIT); 8198 lpfc_set_loopback_flag(phba); 8199 if (rc == MBX_NOT_FINISHED) 8200 mempool_free(mbox, phba->mbox_mem_pool); 8201 return 1; 8202 } 8203 8204 /* External loopback plug insertion detected */ 8205 phba->link_flag |= LS_EXTERNAL_LOOPBACK; 8206 8207 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_LIBDFC, 8208 "1119 External Loopback plug detected\n"); 8209 8210 /* abort the flogi coming back to ourselves 8211 * due to external loopback on the port. 8212 */ 8213 lpfc_els_abort_flogi(phba); 8214 return 0; 8215 8216 } else if (rc > 0) { /* greater than */ 8217 spin_lock_irq(shost->host_lock); 8218 vport->fc_flag |= FC_PT2PT_PLOGI; 8219 spin_unlock_irq(shost->host_lock); 8220 8221 /* If we have the high WWPN we can assign our own 8222 * myDID; otherwise, we have to WAIT for a PLOGI 8223 * from the remote NPort to find out what it 8224 * will be. 8225 */ 8226 vport->fc_myDID = PT2PT_LocalID; 8227 } else { 8228 vport->fc_myDID = PT2PT_RemoteID; 8229 } 8230 8231 /* 8232 * The vport state should go to LPFC_FLOGI only 8233 * AFTER we issue a FLOGI, not receive one. 8234 */ 8235 spin_lock_irq(shost->host_lock); 8236 fc_flag = vport->fc_flag; 8237 port_state = vport->port_state; 8238 vport->fc_flag |= FC_PT2PT; 8239 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 8240 8241 /* Acking an unsol FLOGI. Count 1 for link bounce 8242 * work-around. 
8243 */ 8244 vport->rcv_flogi_cnt++; 8245 spin_unlock_irq(shost->host_lock); 8246 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8247 "3311 Rcv Flogi PS x%x new PS x%x " 8248 "fc_flag x%x new fc_flag x%x\n", 8249 port_state, vport->port_state, 8250 fc_flag, vport->fc_flag); 8251 8252 /* 8253 * We temporarily set fc_myDID to make it look like we are 8254 * a Fabric. This is done just so we end up with the right 8255 * did / sid on the FLOGI ACC rsp. 8256 */ 8257 did = vport->fc_myDID; 8258 vport->fc_myDID = Fabric_DID; 8259 8260 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); 8261 8262 /* Defer ACC response until AFTER we issue a FLOGI */ 8263 if (!(phba->hba_flag & HBA_FLOGI_ISSUED)) { 8264 phba->defer_flogi_acc_rx_id = bf_get(wqe_ctxt_tag, 8265 &wqe->xmit_els_rsp.wqe_com); 8266 phba->defer_flogi_acc_ox_id = bf_get(wqe_rcvoxid, 8267 &wqe->xmit_els_rsp.wqe_com); 8268 8269 vport->fc_myDID = did; 8270 8271 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8272 "3344 Deferring FLOGI ACC: rx_id: x%x," 8273 " ox_id: x%x, hba_flag x%x\n", 8274 phba->defer_flogi_acc_rx_id, 8275 phba->defer_flogi_acc_ox_id, phba->hba_flag); 8276 8277 phba->defer_flogi_acc_flag = true; 8278 8279 return 0; 8280 } 8281 8282 /* Send back ACC */ 8283 lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, cmdiocb, ndlp, NULL); 8284 8285 /* Now lets put fc_myDID back to what its supposed to be */ 8286 vport->fc_myDID = did; 8287 8288 return 0; 8289 } 8290 8291 /** 8292 * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb 8293 * @vport: pointer to a host virtual N_Port data structure. 8294 * @cmdiocb: pointer to lpfc command iocb data structure. 8295 * @ndlp: pointer to a node-list data structure. 8296 * 8297 * This routine processes Request Node Identification Data (RNID) IOCB 8298 * received as an ELS unsolicited event. Only when the RNID specified format 8299 * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data) 8300 * present, this routine will invoke the lpfc_els_rsp_rnid_acc() routine to 8301 * Accept (ACC) the RNID ELS command. All the other RNID formats are 8302 * rejected by invoking the lpfc_els_rsp_reject() routine. 8303 * 8304 * Return code 8305 * 0 - Successfully processed rnid iocb (currently always return 0) 8306 **/ 8307 static int 8308 lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8309 struct lpfc_nodelist *ndlp) 8310 { 8311 struct lpfc_dmabuf *pcmd; 8312 uint32_t *lp; 8313 RNID *rn; 8314 struct ls_rjt stat; 8315 8316 pcmd = cmdiocb->cmd_dmabuf; 8317 lp = (uint32_t *) pcmd->virt; 8318 8319 lp++; 8320 rn = (RNID *) lp; 8321 8322 /* RNID received */ 8323 8324 switch (rn->Format) { 8325 case 0: 8326 case RNID_TOPOLOGY_DISC: 8327 /* Send back ACC */ 8328 lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp); 8329 break; 8330 default: 8331 /* Reject this request because format not supported */ 8332 stat.un.b.lsRjtRsvd0 = 0; 8333 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8334 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8335 stat.un.b.vendorUnique = 0; 8336 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 8337 NULL); 8338 } 8339 return 0; 8340 } 8341 8342 /** 8343 * lpfc_els_rcv_echo - Process an unsolicited echo iocb 8344 * @vport: pointer to a host virtual N_Port data structure. 8345 * @cmdiocb: pointer to lpfc command iocb data structure. 8346 * @ndlp: pointer to a node-list data structure. 
 *
 * Return code
 * 0 - Successfully processed echo iocb (currently always returns 0)
 **/
static int
lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
		  struct lpfc_nodelist *ndlp)
{
	uint8_t *pcmd;

	pcmd = (uint8_t *)cmdiocb->cmd_dmabuf->virt;

	/* skip over first word of echo command to find echo data */
	pcmd += sizeof(uint32_t);

	lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp);
	return 0;
}

/**
 * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb
 * @vport: pointer to a host virtual N_Port data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine processes a Link Incident Report Registration (LIRR) IOCB
 * received as an ELS unsolicited event. Currently, this function just invokes
 * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally.
 *
 * Return code
 * 0 - Successfully processed lirr iocb (currently always returns 0)
 **/
static int
lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
		  struct lpfc_nodelist *ndlp)
{
	struct ls_rjt stat;

	/* For now, unconditionally reject this command */
	stat.un.b.lsRjtRsvd0 = 0;
	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
	stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
	stat.un.b.vendorUnique = 0;
	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
	return 0;
}

/**
 * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb
 * @vport: pointer to a host virtual N_Port data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB
 * received as an ELS unsolicited event. A request to RRQ shall only
 * be accepted if the Originator Nx_Port N_Port_ID or the Responder
 * Nx_Port N_Port_ID of the target Exchange is the same as the
 * N_Port_ID of the Nx_Port that makes the request. If the RRQ is
 * not accepted, an LS_RJT with reason code "Unable to perform
 * command request" and reason code explanation "Invalid Originator
 * S_ID" shall be returned. For now, we just unconditionally accept
 * RRQ from the target.
 **/
static void
lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
		 struct lpfc_nodelist *ndlp)
{
	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
	if (vport->phba->sli_rev == LPFC_SLI_REV4)
		lpfc_els_clear_rrq(vport, cmdiocb, ndlp);
}

/**
 * lpfc_els_rsp_rls_acc - Completion callback for MBX_READ_LNK_STAT mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * This routine is the completion callback function for the MBX_READ_LNK_STAT
 * mailbox command. This callback function is to actually send the Accept
 * (ACC) response to a Read Link Status (RLS) unsolicited IOCB event. It
 * collects the link statistics from the completion of the MBX_READ_LNK_STAT
 * mailbox command, constructs the RLS response with the link statistics
 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send the
 * ACC response to the RLS.
8431 * 8432 * Note that the ndlp reference count will be incremented by 1 for holding the 8433 * ndlp and the reference to ndlp will be stored into the ndlp field of 8434 * the IOCB for the completion callback function to the RLS Accept Response 8435 * ELS IOCB command. 8436 * 8437 **/ 8438 static void 8439 lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 8440 { 8441 int rc = 0; 8442 MAILBOX_t *mb; 8443 IOCB_t *icmd; 8444 union lpfc_wqe128 *wqe; 8445 struct RLS_RSP *rls_rsp; 8446 uint8_t *pcmd; 8447 struct lpfc_iocbq *elsiocb; 8448 struct lpfc_nodelist *ndlp; 8449 uint16_t oxid; 8450 uint16_t rxid; 8451 uint32_t cmdsize; 8452 u32 ulp_context; 8453 8454 mb = &pmb->u.mb; 8455 8456 ndlp = pmb->ctx_ndlp; 8457 rxid = (uint16_t)((unsigned long)(pmb->ctx_buf) & 0xffff); 8458 oxid = (uint16_t)(((unsigned long)(pmb->ctx_buf) >> 16) & 0xffff); 8459 pmb->ctx_buf = NULL; 8460 pmb->ctx_ndlp = NULL; 8461 8462 if (mb->mbxStatus) { 8463 mempool_free(pmb, phba->mbox_mem_pool); 8464 return; 8465 } 8466 8467 cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t); 8468 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 8469 lpfc_max_els_tries, ndlp, 8470 ndlp->nlp_DID, ELS_CMD_ACC); 8471 8472 /* Decrement the ndlp reference count from previous mbox command */ 8473 lpfc_nlp_put(ndlp); 8474 8475 if (!elsiocb) { 8476 mempool_free(pmb, phba->mbox_mem_pool); 8477 return; 8478 } 8479 8480 ulp_context = get_job_ulpcontext(phba, elsiocb); 8481 if (phba->sli_rev == LPFC_SLI_REV4) { 8482 wqe = &elsiocb->wqe; 8483 /* Xri / rx_id */ 8484 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, rxid); 8485 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, oxid); 8486 } else { 8487 icmd = &elsiocb->iocb; 8488 icmd->ulpContext = rxid; 8489 icmd->unsli3.rcvsli3.ox_id = oxid; 8490 } 8491 8492 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 8493 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 8494 pcmd += sizeof(uint32_t); /* Skip past command */ 8495 rls_rsp = (struct RLS_RSP *)pcmd; 8496 8497 rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt); 8498 rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt); 8499 rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt); 8500 rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt); 8501 rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord); 8502 rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt); 8503 mempool_free(pmb, phba->mbox_mem_pool); 8504 /* Xmit ELS RLS ACC response tag <ulpIoTag> */ 8505 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 8506 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, " 8507 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 8508 elsiocb->iotag, ulp_context, 8509 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 8510 ndlp->nlp_rpi); 8511 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 8512 phba->fc_stat.elsXmitACC++; 8513 elsiocb->ndlp = lpfc_nlp_get(ndlp); 8514 if (!elsiocb->ndlp) { 8515 lpfc_els_free_iocb(phba, elsiocb); 8516 return; 8517 } 8518 8519 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 8520 if (rc == IOCB_ERROR) { 8521 lpfc_els_free_iocb(phba, elsiocb); 8522 lpfc_nlp_put(ndlp); 8523 } 8524 return; 8525 } 8526 8527 /** 8528 * lpfc_els_rcv_rls - Process an unsolicited rls iocb 8529 * @vport: pointer to a host virtual N_Port data structure. 8530 * @cmdiocb: pointer to lpfc command iocb data structure. 8531 * @ndlp: pointer to a node-list data structure. 8532 * 8533 * This routine processes Read Link Status (RLS) IOCB received as an 8534 * ELS unsolicited event. 
It first checks the remote port state. If the 8535 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE 8536 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject 8537 * response. Otherwise, it issues the MBX_READ_LNK_STAT mailbox command 8538 * to read the HBA link statistics. The callback function, 8539 * lpfc_els_rsp_rls_acc(), set on the MBX_READ_LNK_STAT mailbox command, 8540 * then actually sends out the RLS Accept (ACC) response. 8541 * 8542 * Return codes 8543 * 0 - Successfully processed rls iocb (currently always return 0) 8544 **/ 8545 static int 8546 lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8547 struct lpfc_nodelist *ndlp) 8548 { 8549 struct lpfc_hba *phba = vport->phba; 8550 LPFC_MBOXQ_t *mbox; 8551 struct ls_rjt stat; 8552 u32 ctx = get_job_ulpcontext(phba, cmdiocb); 8553 u32 ox_id = get_job_rcvoxid(phba, cmdiocb); 8554 8555 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 8556 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) 8557 /* reject the unsolicited RLS request and done with it */ 8558 goto reject_out; 8559 8560 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC); 8561 if (mbox) { 8562 lpfc_read_lnk_stat(phba, mbox); 8563 mbox->ctx_buf = (void *)((unsigned long) 8564 (ox_id << 16 | ctx)); 8565 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 8566 if (!mbox->ctx_ndlp) 8567 goto node_err; 8568 mbox->vport = vport; 8569 mbox->mbox_cmpl = lpfc_els_rsp_rls_acc; 8570 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 8571 != MBX_NOT_FINISHED) 8572 /* Mbox completion will send ELS Response */ 8573 return 0; 8574 /* Decrement reference count used for the failed mbox 8575 * command. 8576 */ 8577 lpfc_nlp_put(ndlp); 8578 node_err: 8579 mempool_free(mbox, phba->mbox_mem_pool); 8580 } 8581 reject_out: 8582 /* issue rejection response */ 8583 stat.un.b.lsRjtRsvd0 = 0; 8584 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8585 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8586 stat.un.b.vendorUnique = 0; 8587 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 8588 return 0; 8589 } 8590 8591 /** 8592 * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb 8593 * @vport: pointer to a host virtual N_Port data structure. 8594 * @cmdiocb: pointer to lpfc command iocb data structure. 8595 * @ndlp: pointer to a node-list data structure. 8596 * 8597 * This routine processes a Read Timeout Value (RTV) IOCB received as an 8598 * ELS unsolicited event. It first checks the remote port state. If the 8599 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE 8600 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject 8601 * response. Otherwise, it sends the Accept (ACC) response to a Read Timeout 8602 * Value (RTV) unsolicited IOCB event. 8603 * 8604 * Note that the ndlp reference count will be incremented by 1 for holding the 8605 * ndlp and the reference to ndlp will be stored into the ndlp field of 8606 * the IOCB for the completion callback function to the RTV Accept Response 8607 * ELS IOCB command.
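 *
 * A condensed sketch of the payload this routine builds (field names and helpers are the ones used in the function body, nothing new): R_A_TOV is reported in milliseconds, while E_D_TOV and its resolution bit come straight from the HBA values:
 *
 *        rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000);   // msecs
 *        rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov);
 *        bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0);
 *        bf_set(qtov_rttov, rtv_rsp, 0);         // R_T_TOV is for FC only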
8608 * 8609 * Return codes 8610 * 0 - Successfully processed rtv iocb (currently always return 0) 8611 **/ 8612 static int 8613 lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8614 struct lpfc_nodelist *ndlp) 8615 { 8616 int rc = 0; 8617 IOCB_t *icmd; 8618 union lpfc_wqe128 *wqe; 8619 struct lpfc_hba *phba = vport->phba; 8620 struct ls_rjt stat; 8621 struct RTV_RSP *rtv_rsp; 8622 uint8_t *pcmd; 8623 struct lpfc_iocbq *elsiocb; 8624 uint32_t cmdsize; 8625 u32 ulp_context; 8626 8627 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 8628 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) 8629 /* reject the unsolicited RTV request and done with it */ 8630 goto reject_out; 8631 8632 cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t); 8633 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 8634 lpfc_max_els_tries, ndlp, 8635 ndlp->nlp_DID, ELS_CMD_ACC); 8636 8637 if (!elsiocb) 8638 return 1; 8639 8640 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 8641 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 8642 pcmd += sizeof(uint32_t); /* Skip past command */ 8643 8644 ulp_context = get_job_ulpcontext(phba, elsiocb); 8645 /* use the command's xri in the response */ 8646 if (phba->sli_rev == LPFC_SLI_REV4) { 8647 wqe = &elsiocb->wqe; 8648 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 8649 get_job_ulpcontext(phba, cmdiocb)); 8650 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 8651 get_job_rcvoxid(phba, cmdiocb)); 8652 } else { 8653 icmd = &elsiocb->iocb; 8654 icmd->ulpContext = get_job_ulpcontext(phba, cmdiocb); 8655 icmd->unsli3.rcvsli3.ox_id = get_job_rcvoxid(phba, cmdiocb); 8656 } 8657 8658 rtv_rsp = (struct RTV_RSP *)pcmd; 8659 8660 /* populate RTV payload */ 8661 rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */ 8662 rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov); 8663 bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0); 8664 bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */ 8665 rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov); 8666 8667 /* Xmit ELS RLS ACC response tag <ulpIoTag> */ 8668 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 8669 "2875 Xmit ELS RTV ACC response tag x%x xri x%x, " 8670 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, " 8671 "Data: x%x x%x x%x\n", 8672 elsiocb->iotag, ulp_context, 8673 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 8674 ndlp->nlp_rpi, 8675 rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov); 8676 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 8677 phba->fc_stat.elsXmitACC++; 8678 elsiocb->ndlp = lpfc_nlp_get(ndlp); 8679 if (!elsiocb->ndlp) { 8680 lpfc_els_free_iocb(phba, elsiocb); 8681 return 0; 8682 } 8683 8684 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 8685 if (rc == IOCB_ERROR) { 8686 lpfc_els_free_iocb(phba, elsiocb); 8687 lpfc_nlp_put(ndlp); 8688 } 8689 return 0; 8690 8691 reject_out: 8692 /* issue rejection response */ 8693 stat.un.b.lsRjtRsvd0 = 0; 8694 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8695 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8696 stat.un.b.vendorUnique = 0; 8697 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 8698 return 0; 8699 } 8700 8701 /* lpfc_issue_els_rrq - Process an unsolicited rrq iocb 8702 * @vport: pointer to a host virtual N_Port data structure. 8703 * @ndlp: pointer to a node-list data structure. 8704 * @did: DID of the target. 8705 * @rrq: Pointer to the rrq struct. 8706 * 8707 * Build a ELS RRQ command and send it to the target. If the issue_iocb is 8708 * Successful the the completion handler will clear the RRQ. 
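 *
 * In outline (identifiers exactly as used in the routine below), the RRQ payload names the exchange to be reinstated by its OX_ID/RX_ID pair and carries the requesting N_Port_ID, byte-swapped into wire order:
 *
 *        bf_set(rrq_oxid, els_rrq, phba->sli4_hba.xri_ids[rrq->xritag]);
 *        bf_set(rrq_rxid, els_rrq, rrq->rxid);
 *        bf_set(rrq_did, els_rrq, vport->fc_myDID);
 *        els_rrq->rrq = cpu_to_be32(els_rrq->rrq);
 *        els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg);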
8709 * 8710 * Return codes 8711 * 0 - Successfully sent rrq els iocb. 8712 * 1 - Failed to send rrq els iocb. 8713 **/ 8714 static int 8715 lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 8716 uint32_t did, struct lpfc_node_rrq *rrq) 8717 { 8718 struct lpfc_hba *phba = vport->phba; 8719 struct RRQ *els_rrq; 8720 struct lpfc_iocbq *elsiocb; 8721 uint8_t *pcmd; 8722 uint16_t cmdsize; 8723 int ret; 8724 8725 if (!ndlp) 8726 return 1; 8727 8728 /* If ndlp is not NULL, we will bump the reference count on it */ 8729 cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ)); 8730 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did, 8731 ELS_CMD_RRQ); 8732 if (!elsiocb) 8733 return 1; 8734 8735 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 8736 8737 /* For RRQ request, remainder of payload is Exchange IDs */ 8738 *((uint32_t *) (pcmd)) = ELS_CMD_RRQ; 8739 pcmd += sizeof(uint32_t); 8740 els_rrq = (struct RRQ *) pcmd; 8741 8742 bf_set(rrq_oxid, els_rrq, phba->sli4_hba.xri_ids[rrq->xritag]); 8743 bf_set(rrq_rxid, els_rrq, rrq->rxid); 8744 bf_set(rrq_did, els_rrq, vport->fc_myDID); 8745 els_rrq->rrq = cpu_to_be32(els_rrq->rrq); 8746 els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg); 8747 8748 8749 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 8750 "Issue RRQ: did:x%x", 8751 did, rrq->xritag, rrq->rxid); 8752 elsiocb->context_un.rrq = rrq; 8753 elsiocb->cmd_cmpl = lpfc_cmpl_els_rrq; 8754 8755 elsiocb->ndlp = lpfc_nlp_get(ndlp); 8756 if (!elsiocb->ndlp) 8757 goto io_err; 8758 8759 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 8760 if (ret == IOCB_ERROR) { 8761 lpfc_nlp_put(ndlp); 8762 goto io_err; 8763 } 8764 return 0; 8765 8766 io_err: 8767 lpfc_els_free_iocb(phba, elsiocb); 8768 return 1; 8769 } 8770 8771 /** 8772 * lpfc_send_rrq - Sends ELS RRQ if needed. 8773 * @phba: pointer to lpfc hba data structure. 8774 * @rrq: pointer to the active rrq. 8775 * 8776 * This routine will call the lpfc_issue_els_rrq if the rrq is 8777 * still active for the xri. If this function returns a failure then 8778 * the caller needs to clean up the RRQ by calling lpfc_clr_active_rrq. 8779 * 8780 * Returns 0 Success. 8781 * 1 Failure. 8782 **/ 8783 int 8784 lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq) 8785 { 8786 struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport, 8787 rrq->nlp_DID); 8788 if (!ndlp) 8789 return 1; 8790 8791 if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag)) 8792 return lpfc_issue_els_rrq(rrq->vport, ndlp, 8793 rrq->nlp_DID, rrq); 8794 else 8795 return 1; 8796 } 8797 8798 /** 8799 * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command 8800 * @vport: pointer to a host virtual N_Port data structure. 8801 * @cmdsize: size of the ELS command. 8802 * @oldiocb: pointer to the original lpfc command iocb data structure. 8803 * @ndlp: pointer to a node-list data structure. 8804 * 8805 * This routine issuees an Accept (ACC) Read Port List (RPL) ELS command. 8806 * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL. 8807 * 8808 * Note that the ndlp reference count will be incremented by 1 for holding the 8809 * ndlp and the reference to ndlp will be stored into the ndlp field of 8810 * the IOCB for the completion callback function to the RPL Accept Response 8811 * ELS command. 
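 *
 * A rough sketch of the ACC payload laid out below (statements taken from the routine itself; this driver only ever reports its own single port):
 *
 *        *((uint32_t *)pcmd) = ELS_CMD_ACC;              // command word
 *        pcmd += sizeof(uint16_t);
 *        *((uint16_t *)pcmd) = be16_to_cpu(cmdsize);     // payload length
 *        rpl_rsp.listLen = be32_to_cpu(1);               // one port entry
 *        rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID);
 *        // port_num_blk.portName is copied from vport->fc_portname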
8812 * 8813 * Return code 8814 * 0 - Successfully issued ACC RPL ELS command 8815 * 1 - Failed to issue ACC RPL ELS command 8816 **/ 8817 static int 8818 lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize, 8819 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 8820 { 8821 int rc = 0; 8822 struct lpfc_hba *phba = vport->phba; 8823 IOCB_t *icmd; 8824 union lpfc_wqe128 *wqe; 8825 RPL_RSP rpl_rsp; 8826 struct lpfc_iocbq *elsiocb; 8827 uint8_t *pcmd; 8828 u32 ulp_context; 8829 8830 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 8831 ndlp->nlp_DID, ELS_CMD_ACC); 8832 8833 if (!elsiocb) 8834 return 1; 8835 8836 ulp_context = get_job_ulpcontext(phba, elsiocb); 8837 if (phba->sli_rev == LPFC_SLI_REV4) { 8838 wqe = &elsiocb->wqe; 8839 /* Xri / rx_id */ 8840 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 8841 get_job_ulpcontext(phba, oldiocb)); 8842 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 8843 get_job_rcvoxid(phba, oldiocb)); 8844 } else { 8845 icmd = &elsiocb->iocb; 8846 icmd->ulpContext = get_job_ulpcontext(phba, oldiocb); 8847 icmd->unsli3.rcvsli3.ox_id = get_job_rcvoxid(phba, oldiocb); 8848 } 8849 8850 pcmd = elsiocb->cmd_dmabuf->virt; 8851 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 8852 pcmd += sizeof(uint16_t); 8853 *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize); 8854 pcmd += sizeof(uint16_t); 8855 8856 /* Setup the RPL ACC payload */ 8857 rpl_rsp.listLen = be32_to_cpu(1); 8858 rpl_rsp.index = 0; 8859 rpl_rsp.port_num_blk.portNum = 0; 8860 rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID); 8861 memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname, 8862 sizeof(struct lpfc_name)); 8863 memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t)); 8864 /* Xmit ELS RPL ACC response tag <ulpIoTag> */ 8865 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8866 "0120 Xmit ELS RPL ACC response tag x%x " 8867 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " 8868 "rpi x%x\n", 8869 elsiocb->iotag, ulp_context, 8870 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 8871 ndlp->nlp_rpi); 8872 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 8873 phba->fc_stat.elsXmitACC++; 8874 elsiocb->ndlp = lpfc_nlp_get(ndlp); 8875 if (!elsiocb->ndlp) { 8876 lpfc_els_free_iocb(phba, elsiocb); 8877 return 1; 8878 } 8879 8880 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 8881 if (rc == IOCB_ERROR) { 8882 lpfc_els_free_iocb(phba, elsiocb); 8883 lpfc_nlp_put(ndlp); 8884 return 1; 8885 } 8886 8887 return 0; 8888 } 8889 8890 /** 8891 * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb 8892 * @vport: pointer to a host virtual N_Port data structure. 8893 * @cmdiocb: pointer to lpfc command iocb data structure. 8894 * @ndlp: pointer to a node-list data structure. 8895 * 8896 * This routine processes Read Port List (RPL) IOCB received as an ELS 8897 * unsolicited event. It first checks the remote port state. If the remote 8898 * port is not in NLP_STE_UNMAPPED_NODE and NLP_STE_MAPPED_NODE states, it 8899 * invokes the lpfc_els_rsp_reject() routine to send reject response. 8900 * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine 8901 * to accept the RPL. 
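 *
 * The size of that ACC is bounded by the requester's maxsize field; a condensed view of the decision made in the routine below (all names as in this file):
 *
 *        if (rpl->index == 0 &&
 *            (maxsize == 0 ||
 *             maxsize * sizeof(uint32_t) >= sizeof(RPL_RSP)))
 *                cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP);   // full RPL_RSP
 *        else
 *                cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t);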
8902 * 8903 * Return code 8904 * 0 - Successfully processed rpl iocb (currently always return 0) 8905 **/ 8906 static int 8907 lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8908 struct lpfc_nodelist *ndlp) 8909 { 8910 struct lpfc_dmabuf *pcmd; 8911 uint32_t *lp; 8912 uint32_t maxsize; 8913 uint16_t cmdsize; 8914 RPL *rpl; 8915 struct ls_rjt stat; 8916 8917 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 8918 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { 8919 /* issue rejection response */ 8920 stat.un.b.lsRjtRsvd0 = 0; 8921 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8922 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8923 stat.un.b.vendorUnique = 0; 8924 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 8925 NULL); 8926 /* rejected the unsolicited RPL request and done with it */ 8927 return 0; 8928 } 8929 8930 pcmd = cmdiocb->cmd_dmabuf; 8931 lp = (uint32_t *) pcmd->virt; 8932 rpl = (RPL *) (lp + 1); 8933 maxsize = be32_to_cpu(rpl->maxsize); 8934 8935 /* We support only one port */ 8936 if ((rpl->index == 0) && 8937 ((maxsize == 0) || 8938 ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) { 8939 cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP); 8940 } else { 8941 cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t); 8942 } 8943 lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp); 8944 8945 return 0; 8946 } 8947 8948 /** 8949 * lpfc_els_rcv_farp - Process an unsolicited farp request els command 8950 * @vport: pointer to a virtual N_Port data structure. 8951 * @cmdiocb: pointer to lpfc command iocb data structure. 8952 * @ndlp: pointer to a node-list data structure. 8953 * 8954 * This routine processes Fibre Channel Address Resolution Protocol 8955 * (FARP) Request IOCB received as an ELS unsolicited event. Currently, 8956 * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such, 8957 * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the 8958 * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the 8959 * remote PortName is compared against the FC PortName stored in the @vport 8960 * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is 8961 * compared against the FC NodeName stored in the @vport data structure. 8962 * If any of these matches and the FARP_REQUEST_FARPR flag is set in the 8963 * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is 8964 * invoked to send out FARP Response to the remote node. Before sending the 8965 * FARP Response, however, the FARP_REQUEST_PLOGI flag is check in the FARP 8966 * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi() 8967 * routine is invoked to log into the remote port first. 
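 *
 * The match itself is a plain memcmp of the requested names against this @vport; roughly (identifiers as used in the routine below):
 *
 *        if ((fp->Mflags & FARP_MATCH_PORT) &&
 *            !memcmp(&fp->RportName, &vport->fc_portname,
 *                    sizeof(struct lpfc_name)))
 *                cnt = 1;
 *        if ((fp->Mflags & FARP_MATCH_NODE) &&
 *            !memcmp(&fp->RnodeName, &vport->fc_nodename,
 *                    sizeof(struct lpfc_name)))
 *                cnt = 1;
 *        // if cnt and FARP_REQUEST_FARPR is set: lpfc_issue_els_farpr()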
8968 * 8969 * Return code 8970 * 0 - Either the FARP Match Mode not supported or successfully processed 8971 **/ 8972 static int 8973 lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8974 struct lpfc_nodelist *ndlp) 8975 { 8976 struct lpfc_dmabuf *pcmd; 8977 uint32_t *lp; 8978 FARP *fp; 8979 uint32_t cnt, did; 8980 8981 did = get_job_els_rsp64_did(vport->phba, cmdiocb); 8982 pcmd = cmdiocb->cmd_dmabuf; 8983 lp = (uint32_t *) pcmd->virt; 8984 8985 lp++; 8986 fp = (FARP *) lp; 8987 /* FARP-REQ received from DID <did> */ 8988 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8989 "0601 FARP-REQ received from DID x%x\n", did); 8990 /* We will only support match on WWPN or WWNN */ 8991 if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) { 8992 return 0; 8993 } 8994 8995 cnt = 0; 8996 /* If this FARP command is searching for my portname */ 8997 if (fp->Mflags & FARP_MATCH_PORT) { 8998 if (memcmp(&fp->RportName, &vport->fc_portname, 8999 sizeof(struct lpfc_name)) == 0) 9000 cnt = 1; 9001 } 9002 9003 /* If this FARP command is searching for my nodename */ 9004 if (fp->Mflags & FARP_MATCH_NODE) { 9005 if (memcmp(&fp->RnodeName, &vport->fc_nodename, 9006 sizeof(struct lpfc_name)) == 0) 9007 cnt = 1; 9008 } 9009 9010 if (cnt) { 9011 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || 9012 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) { 9013 /* Log back into the node before sending the FARP. */ 9014 if (fp->Rflags & FARP_REQUEST_PLOGI) { 9015 ndlp->nlp_prev_state = ndlp->nlp_state; 9016 lpfc_nlp_set_state(vport, ndlp, 9017 NLP_STE_PLOGI_ISSUE); 9018 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 9019 } 9020 9021 /* Send a FARP response to that node */ 9022 if (fp->Rflags & FARP_REQUEST_FARPR) 9023 lpfc_issue_els_farpr(vport, did, 0); 9024 } 9025 } 9026 return 0; 9027 } 9028 9029 /** 9030 * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb 9031 * @vport: pointer to a host virtual N_Port data structure. 9032 * @cmdiocb: pointer to lpfc command iocb data structure. 9033 * @ndlp: pointer to a node-list data structure. 9034 * 9035 * This routine processes Fibre Channel Address Resolution Protocol 9036 * Response (FARPR) IOCB received as an ELS unsolicited event. It simply 9037 * invokes the lpfc_els_rsp_acc() routine to the remote node to accept 9038 * the FARP response request. 9039 * 9040 * Return code 9041 * 0 - Successfully processed FARPR IOCB (currently always return 0) 9042 **/ 9043 static int 9044 lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 9045 struct lpfc_nodelist *ndlp) 9046 { 9047 struct lpfc_dmabuf *pcmd; 9048 uint32_t *lp; 9049 uint32_t did; 9050 9051 did = get_job_els_rsp64_did(vport->phba, cmdiocb); 9052 pcmd = cmdiocb->cmd_dmabuf; 9053 lp = (uint32_t *)pcmd->virt; 9054 9055 lp++; 9056 /* FARP-RSP received from DID <did> */ 9057 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 9058 "0600 FARP-RSP received from DID x%x\n", did); 9059 /* ACCEPT the Farp resp request */ 9060 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 9061 9062 return 0; 9063 } 9064 9065 /** 9066 * lpfc_els_rcv_fan - Process an unsolicited fan iocb command 9067 * @vport: pointer to a host virtual N_Port data structure. 9068 * @cmdiocb: pointer to lpfc command iocb data structure. 9069 * @fan_ndlp: pointer to a node-list data structure. 9070 * 9071 * This routine processes a Fabric Address Notification (FAN) IOCB 9072 * command received as an ELS unsolicited event. 
The FAN ELS command will 9073 * only be processed on a physical port (i.e., the @vport represents the 9074 * physical port). The fabric NodeName and PortName from the FAN IOCB are 9075 * compared against those in the phba data structure. If any of those is 9076 * different, the lpfc_issue_init_vfi() routine is invoked to reinitiate 9077 * Fabric Login (FLOGI) to the fabric and start the discovery over. Otherwise, 9078 * if both of those are identical, the lpfc_issue_fabric_reglogin() routine 9079 * (or lpfc_issue_reg_vfi() on SLI-4) is invoked to register login to the fabric. 9080 * 9081 * Return code 9082 * 0 - Successfully processed fan iocb (currently always return 0). 9083 **/ 9084 static int 9085 lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 9086 struct lpfc_nodelist *fan_ndlp) 9087 { 9088 struct lpfc_hba *phba = vport->phba; 9089 uint32_t *lp; 9090 FAN *fp; 9091 9092 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n"); 9093 lp = (uint32_t *)cmdiocb->cmd_dmabuf->virt; 9094 fp = (FAN *) ++lp; 9095 /* FAN received; FAN does not have a reply sequence */ 9096 if ((vport == phba->pport) && 9097 (vport->port_state == LPFC_LOCAL_CFG_LINK)) { 9098 if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName, 9099 sizeof(struct lpfc_name))) || 9100 (memcmp(&phba->fc_fabparam.portName, &fp->FportName, 9101 sizeof(struct lpfc_name)))) { 9102 /* This port has switched fabrics. FLOGI is required */ 9103 lpfc_issue_init_vfi(vport); 9104 } else { 9105 /* FAN verified - skip FLOGI */ 9106 vport->fc_myDID = vport->fc_prevDID; 9107 if (phba->sli_rev < LPFC_SLI_REV4) 9108 lpfc_issue_fabric_reglogin(vport); 9109 else { 9110 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 9111 "3138 Need register VFI: (x%x/%x)\n", 9112 vport->fc_prevDID, vport->fc_myDID); 9113 lpfc_issue_reg_vfi(vport); 9114 } 9115 } 9116 } 9117 return 0; 9118 } 9119 9120 /** 9121 * lpfc_els_rcv_edc - Process an unsolicited EDC iocb 9122 * @vport: pointer to a host virtual N_Port data structure. 9123 * @cmdiocb: pointer to lpfc command iocb data structure. 9124 * @ndlp: pointer to a node-list data structure.
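 *
 * This routine processes an Exchange Diagnostic Capabilities (EDC) IOCB
 * received as an ELS unsolicited event. It walks the request's TLV
 * descriptor list looking for the Link Fault and Congestion Signaling
 * capability descriptors and then always answers with an ACC via
 * lpfc_issue_els_edc_rsp(). A condensed view of the descriptor walk
 * (helpers exactly as used in the function body):
 *
 *        tlv = edc_req->desc;
 *        while (bytes_remain) {
 *                dtag = be32_to_cpu(tlv->desc_tag);
 *                // ELS_DTAG_LNK_FAULT_CAP / ELS_DTAG_CG_SIGNAL_CAP handled
 *                bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
 *                tlv = fc_tlv_next_desc(tlv);
 *        }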
9125 * 9126 * Return code 9127 * 0 - Successfully processed echo iocb (currently always return 0) 9128 **/ 9129 static int 9130 lpfc_els_rcv_edc(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 9131 struct lpfc_nodelist *ndlp) 9132 { 9133 struct lpfc_hba *phba = vport->phba; 9134 struct fc_els_edc *edc_req; 9135 struct fc_tlv_desc *tlv; 9136 uint8_t *payload; 9137 uint32_t *ptr, dtag; 9138 const char *dtag_nm; 9139 int desc_cnt = 0, bytes_remain; 9140 struct fc_diag_lnkflt_desc *plnkflt; 9141 9142 payload = cmdiocb->cmd_dmabuf->virt; 9143 9144 edc_req = (struct fc_els_edc *)payload; 9145 bytes_remain = be32_to_cpu(edc_req->desc_len); 9146 9147 ptr = (uint32_t *)payload; 9148 lpfc_printf_vlog(vport, KERN_INFO, 9149 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, 9150 "3319 Rcv EDC payload len %d: x%x x%x x%x\n", 9151 bytes_remain, be32_to_cpu(*ptr), 9152 be32_to_cpu(*(ptr + 1)), be32_to_cpu(*(ptr + 2))); 9153 9154 /* No signal support unless there is a congestion descriptor */ 9155 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 9156 phba->cgn_sig_freq = 0; 9157 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; 9158 9159 if (bytes_remain <= 0) 9160 goto out; 9161 9162 tlv = edc_req->desc; 9163 9164 /* 9165 * cycle through EDC diagnostic descriptors to find the 9166 * congestion signaling capability descriptor 9167 */ 9168 while (bytes_remain) { 9169 if (bytes_remain < FC_TLV_DESC_HDR_SZ) { 9170 lpfc_printf_log(phba, KERN_WARNING, 9171 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, 9172 "6464 Truncated TLV hdr on " 9173 "Diagnostic descriptor[%d]\n", 9174 desc_cnt); 9175 goto out; 9176 } 9177 9178 dtag = be32_to_cpu(tlv->desc_tag); 9179 switch (dtag) { 9180 case ELS_DTAG_LNK_FAULT_CAP: 9181 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 9182 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 9183 sizeof(struct fc_diag_lnkflt_desc)) { 9184 lpfc_printf_log(phba, KERN_WARNING, 9185 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, 9186 "6465 Truncated Link Fault Diagnostic " 9187 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 9188 desc_cnt, bytes_remain, 9189 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 9190 sizeof(struct fc_diag_lnkflt_desc)); 9191 goto out; 9192 } 9193 plnkflt = (struct fc_diag_lnkflt_desc *)tlv; 9194 lpfc_printf_log(phba, KERN_INFO, 9195 LOG_ELS | LOG_LDS_EVENT, 9196 "4626 Link Fault Desc Data: x%08x len x%x " 9197 "da x%x dd x%x interval x%x\n", 9198 be32_to_cpu(plnkflt->desc_tag), 9199 be32_to_cpu(plnkflt->desc_len), 9200 be32_to_cpu( 9201 plnkflt->degrade_activate_threshold), 9202 be32_to_cpu( 9203 plnkflt->degrade_deactivate_threshold), 9204 be32_to_cpu(plnkflt->fec_degrade_interval)); 9205 break; 9206 case ELS_DTAG_CG_SIGNAL_CAP: 9207 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 9208 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 9209 sizeof(struct fc_diag_cg_sig_desc)) { 9210 lpfc_printf_log( 9211 phba, KERN_WARNING, LOG_CGN_MGMT, 9212 "6466 Truncated cgn signal Diagnostic " 9213 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 9214 desc_cnt, bytes_remain, 9215 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 9216 sizeof(struct fc_diag_cg_sig_desc)); 9217 goto out; 9218 } 9219 9220 phba->cgn_reg_fpin = phba->cgn_init_reg_fpin; 9221 phba->cgn_reg_signal = phba->cgn_init_reg_signal; 9222 9223 /* We start negotiation with lpfc_fabric_cgn_frequency. 9224 * When we process the EDC, we will settle on the 9225 * higher frequency. 
9226 */ 9227 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency; 9228 9229 lpfc_least_capable_settings( 9230 phba, (struct fc_diag_cg_sig_desc *)tlv); 9231 break; 9232 default: 9233 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 9234 lpfc_printf_log(phba, KERN_WARNING, 9235 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, 9236 "6467 unknown Diagnostic " 9237 "Descriptor[%d]: tag x%x (%s)\n", 9238 desc_cnt, dtag, dtag_nm); 9239 } 9240 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); 9241 tlv = fc_tlv_next_desc(tlv); 9242 desc_cnt++; 9243 } 9244 out: 9245 /* Need to send back an ACC */ 9246 lpfc_issue_els_edc_rsp(vport, cmdiocb, ndlp); 9247 9248 lpfc_config_cgn_signal(phba); 9249 return 0; 9250 } 9251 9252 /** 9253 * lpfc_els_timeout - Handler function for the els timer 9254 * @t: timer context used to obtain the vport. 9255 * 9256 * This routine is invoked by the ELS timer after timeout. It posts the ELS 9257 * timer timeout event by setting the WORKER_ELS_TMO bit in the work port 9258 * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake 9259 * up the worker thread. The worker thread then invokes the 9260 * lpfc_els_timeout_handler() routine to work on the posted event WORKER_ELS_TMO. 9261 **/ 9262 void 9263 lpfc_els_timeout(struct timer_list *t) 9264 { 9265 struct lpfc_vport *vport = from_timer(vport, t, els_tmofunc); 9266 struct lpfc_hba *phba = vport->phba; 9267 uint32_t tmo_posted; 9268 unsigned long iflag; 9269 9270 spin_lock_irqsave(&vport->work_port_lock, iflag); 9271 tmo_posted = vport->work_port_events & WORKER_ELS_TMO; 9272 if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING))) 9273 vport->work_port_events |= WORKER_ELS_TMO; 9274 spin_unlock_irqrestore(&vport->work_port_lock, iflag); 9275 9276 if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING))) 9277 lpfc_worker_wake_up(phba); 9278 return; 9279 } 9280 9281 9282 /** 9283 * lpfc_els_timeout_handler - Process an els timeout event 9284 * @vport: pointer to a virtual N_Port data structure. 9285 * 9286 * This routine is the actual handler function that processes an ELS timeout 9287 * event. It walks the ELS ring and aborts all the IOCBs associated with the 9288 * @vport (except the ABORT/CLOSE/FARP/FARPR/FDISC IOCBs) by 9289 * invoking the lpfc_sli_issue_abort_iotag() routine.
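 *
 * The scan period is two R_A_TOV intervals, and IOCBs that have not aged
 * out yet simply have their driver timeout decremented; only fully aged
 * IOCBs are collected for abort. A condensed sketch (values as computed
 * in the handler itself):
 *
 *        timeout = (uint32_t)(phba->fc_ratov << 1);      // 2 * R_A_TOV secs
 *        if (piocb->drvrTimeout > 0)
 *                // not expired yet: age it and look again next period
 *                piocb->drvrTimeout = (piocb->drvrTimeout >= timeout) ?
 *                                     piocb->drvrTimeout - timeout : 0;
 *        else
 *                list_add_tail(&piocb->dlist, &abort_list);   // abort it
 *        ...
 *        mod_timer(&vport->els_tmofunc,
 *                  jiffies + msecs_to_jiffies(1000 * timeout));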
9290 **/ 9291 void 9292 lpfc_els_timeout_handler(struct lpfc_vport *vport) 9293 { 9294 struct lpfc_hba *phba = vport->phba; 9295 struct lpfc_sli_ring *pring; 9296 struct lpfc_iocbq *tmp_iocb, *piocb; 9297 IOCB_t *cmd = NULL; 9298 struct lpfc_dmabuf *pcmd; 9299 uint32_t els_command = 0; 9300 uint32_t timeout; 9301 uint32_t remote_ID = 0xffffffff; 9302 LIST_HEAD(abort_list); 9303 u32 ulp_command = 0, ulp_context = 0, did = 0, iotag = 0; 9304 9305 9306 timeout = (uint32_t)(phba->fc_ratov << 1); 9307 9308 pring = lpfc_phba_elsring(phba); 9309 if (unlikely(!pring)) 9310 return; 9311 9312 if (phba->pport->load_flag & FC_UNLOADING) 9313 return; 9314 9315 spin_lock_irq(&phba->hbalock); 9316 if (phba->sli_rev == LPFC_SLI_REV4) 9317 spin_lock(&pring->ring_lock); 9318 9319 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 9320 ulp_command = get_job_cmnd(phba, piocb); 9321 ulp_context = get_job_ulpcontext(phba, piocb); 9322 did = get_job_els_rsp64_did(phba, piocb); 9323 9324 if (phba->sli_rev == LPFC_SLI_REV4) { 9325 iotag = get_wqe_reqtag(piocb); 9326 } else { 9327 cmd = &piocb->iocb; 9328 iotag = cmd->ulpIoTag; 9329 } 9330 9331 if ((piocb->cmd_flag & LPFC_IO_LIBDFC) != 0 || 9332 ulp_command == CMD_ABORT_XRI_CX || 9333 ulp_command == CMD_ABORT_XRI_CN || 9334 ulp_command == CMD_CLOSE_XRI_CN) 9335 continue; 9336 9337 if (piocb->vport != vport) 9338 continue; 9339 9340 pcmd = piocb->cmd_dmabuf; 9341 if (pcmd) 9342 els_command = *(uint32_t *) (pcmd->virt); 9343 9344 if (els_command == ELS_CMD_FARP || 9345 els_command == ELS_CMD_FARPR || 9346 els_command == ELS_CMD_FDISC) 9347 continue; 9348 9349 if (piocb->drvrTimeout > 0) { 9350 if (piocb->drvrTimeout >= timeout) 9351 piocb->drvrTimeout -= timeout; 9352 else 9353 piocb->drvrTimeout = 0; 9354 continue; 9355 } 9356 9357 remote_ID = 0xffffffff; 9358 if (ulp_command != CMD_GEN_REQUEST64_CR) { 9359 remote_ID = did; 9360 } else { 9361 struct lpfc_nodelist *ndlp; 9362 ndlp = __lpfc_findnode_rpi(vport, ulp_context); 9363 if (ndlp) 9364 remote_ID = ndlp->nlp_DID; 9365 } 9366 list_add_tail(&piocb->dlist, &abort_list); 9367 } 9368 if (phba->sli_rev == LPFC_SLI_REV4) 9369 spin_unlock(&pring->ring_lock); 9370 spin_unlock_irq(&phba->hbalock); 9371 9372 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { 9373 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9374 "0127 ELS timeout Data: x%x x%x x%x " 9375 "x%x\n", els_command, 9376 remote_ID, ulp_command, iotag); 9377 9378 spin_lock_irq(&phba->hbalock); 9379 list_del_init(&piocb->dlist); 9380 lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL); 9381 spin_unlock_irq(&phba->hbalock); 9382 } 9383 9384 /* Make sure HBA is alive */ 9385 lpfc_issue_hb_tmo(phba); 9386 9387 if (!list_empty(&pring->txcmplq)) 9388 if (!(phba->pport->load_flag & FC_UNLOADING)) 9389 mod_timer(&vport->els_tmofunc, 9390 jiffies + msecs_to_jiffies(1000 * timeout)); 9391 } 9392 9393 /** 9394 * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport 9395 * @vport: pointer to a host virtual N_Port data structure. 9396 * 9397 * This routine is used to clean up all the outstanding ELS commands on a 9398 * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport() 9399 * routine. After that, it walks the ELS transmit queue to remove all the 9400 * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. 
For 9401 * the IOCBs with a non-NULL completion callback function, the callback 9402 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and 9403 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion 9404 * callback function, the IOCB will simply be released. Finally, it walks 9405 * the ELS transmit completion queue to issue an abort IOCB to any transmit 9406 * completion queue IOCB that is associated with the @vport and is not 9407 * an IOCB from libdfc (i.e., the management plane IOCBs that are not 9408 * part of the discovery state machine) out to HBA by invoking the 9409 * lpfc_sli_issue_abort_iotag() routine. Note that this function issues the 9410 * abort IOCB to any transmit completion queueed IOCB, it does not guarantee 9411 * the IOCBs are aborted when this function returns. 9412 **/ 9413 void 9414 lpfc_els_flush_cmd(struct lpfc_vport *vport) 9415 { 9416 LIST_HEAD(abort_list); 9417 struct lpfc_hba *phba = vport->phba; 9418 struct lpfc_sli_ring *pring; 9419 struct lpfc_iocbq *tmp_iocb, *piocb; 9420 u32 ulp_command; 9421 unsigned long iflags = 0; 9422 9423 lpfc_fabric_abort_vport(vport); 9424 9425 /* 9426 * For SLI3, only the hbalock is required. But SLI4 needs to coordinate 9427 * with the ring insert operation. Because lpfc_sli_issue_abort_iotag 9428 * ultimately grabs the ring_lock, the driver must splice the list into 9429 * a working list and release the locks before calling the abort. 9430 */ 9431 spin_lock_irqsave(&phba->hbalock, iflags); 9432 pring = lpfc_phba_elsring(phba); 9433 9434 /* Bail out if we've no ELS wq, like in PCI error recovery case. */ 9435 if (unlikely(!pring)) { 9436 spin_unlock_irqrestore(&phba->hbalock, iflags); 9437 return; 9438 } 9439 9440 if (phba->sli_rev == LPFC_SLI_REV4) 9441 spin_lock(&pring->ring_lock); 9442 9443 /* First we need to issue aborts to outstanding cmds on txcmpl */ 9444 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 9445 if (piocb->cmd_flag & LPFC_IO_LIBDFC) 9446 continue; 9447 9448 if (piocb->vport != vport) 9449 continue; 9450 9451 if (piocb->cmd_flag & LPFC_DRIVER_ABORTED) 9452 continue; 9453 9454 /* On the ELS ring we can have ELS_REQUESTs or 9455 * GEN_REQUESTs waiting for a response. 9456 */ 9457 ulp_command = get_job_cmnd(phba, piocb); 9458 if (ulp_command == CMD_ELS_REQUEST64_CR) { 9459 list_add_tail(&piocb->dlist, &abort_list); 9460 9461 /* If the link is down when flushing ELS commands 9462 * the firmware will not complete them till after 9463 * the link comes back up. This may confuse 9464 * discovery for the new link up, so we need to 9465 * change the compl routine to just clean up the iocb 9466 * and avoid any retry logic. 9467 */ 9468 if (phba->link_state == LPFC_LINK_DOWN) 9469 piocb->cmd_cmpl = lpfc_cmpl_els_link_down; 9470 } 9471 if (ulp_command == CMD_GEN_REQUEST64_CR) 9472 list_add_tail(&piocb->dlist, &abort_list); 9473 } 9474 9475 if (phba->sli_rev == LPFC_SLI_REV4) 9476 spin_unlock(&pring->ring_lock); 9477 spin_unlock_irqrestore(&phba->hbalock, iflags); 9478 9479 /* Abort each txcmpl iocb on aborted list and remove the dlist links. 
*/ 9480 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { 9481 spin_lock_irqsave(&phba->hbalock, iflags); 9482 list_del_init(&piocb->dlist); 9483 lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL); 9484 spin_unlock_irqrestore(&phba->hbalock, iflags); 9485 } 9486 /* Make sure HBA is alive */ 9487 lpfc_issue_hb_tmo(phba); 9488 9489 if (!list_empty(&abort_list)) 9490 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9491 "3387 abort list for txq not empty\n"); 9492 INIT_LIST_HEAD(&abort_list); 9493 9494 spin_lock_irqsave(&phba->hbalock, iflags); 9495 if (phba->sli_rev == LPFC_SLI_REV4) 9496 spin_lock(&pring->ring_lock); 9497 9498 /* No need to abort the txq list, 9499 * just queue them up for lpfc_sli_cancel_iocbs 9500 */ 9501 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) { 9502 ulp_command = get_job_cmnd(phba, piocb); 9503 9504 if (piocb->cmd_flag & LPFC_IO_LIBDFC) 9505 continue; 9506 9507 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */ 9508 if (ulp_command == CMD_QUE_RING_BUF_CN || 9509 ulp_command == CMD_QUE_RING_BUF64_CN || 9510 ulp_command == CMD_CLOSE_XRI_CN || 9511 ulp_command == CMD_ABORT_XRI_CN || 9512 ulp_command == CMD_ABORT_XRI_CX) 9513 continue; 9514 9515 if (piocb->vport != vport) 9516 continue; 9517 9518 list_del_init(&piocb->list); 9519 list_add_tail(&piocb->list, &abort_list); 9520 } 9521 9522 /* The same holds true for any FLOGI/FDISC on the fabric_iocb_list */ 9523 if (vport == phba->pport) { 9524 list_for_each_entry_safe(piocb, tmp_iocb, 9525 &phba->fabric_iocb_list, list) { 9526 list_del_init(&piocb->list); 9527 list_add_tail(&piocb->list, &abort_list); 9528 } 9529 } 9530 9531 if (phba->sli_rev == LPFC_SLI_REV4) 9532 spin_unlock(&pring->ring_lock); 9533 spin_unlock_irqrestore(&phba->hbalock, iflags); 9534 9535 /* Cancel all the IOCBs from the completions list */ 9536 lpfc_sli_cancel_iocbs(phba, &abort_list, 9537 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); 9538 9539 return; 9540 } 9541 9542 /** 9543 * lpfc_els_flush_all_cmd - Clean up all the outstanding els commands to a HBA 9544 * @phba: pointer to lpfc hba data structure. 9545 * 9546 * This routine is used to clean up all the outstanding ELS commands on a 9547 * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba() 9548 * routine. After that, it walks the ELS transmit queue to remove all the 9549 * IOCBs to the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For 9550 * the IOCBs with the completion callback function associated, the callback 9551 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and 9552 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without the completion 9553 * callback function associated, the IOCB will simply be released. Finally, 9554 * it walks the ELS transmit completion queue to issue an abort IOCB to any 9555 * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the 9556 * management plane IOCBs that are not part of the discovery state machine) 9557 * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine. 9558 **/ 9559 void 9560 lpfc_els_flush_all_cmd(struct lpfc_hba *phba) 9561 { 9562 struct lpfc_vport *vport; 9563 9564 spin_lock_irq(&phba->port_list_lock); 9565 list_for_each_entry(vport, &phba->port_list, listentry) 9566 lpfc_els_flush_cmd(vport); 9567 spin_unlock_irq(&phba->port_list_lock); 9568 9569 return; 9570 } 9571 9572 /** 9573 * lpfc_send_els_failure_event - Posts an ELS command failure event 9574 * @phba: Pointer to hba context object. 
9575 * @cmdiocbp: Pointer to command iocb which reported error. 9576 * @rspiocbp: Pointer to response iocb which reported error. 9577 * 9578 * This function sends an event when there is an ELS command 9579 * failure. 9580 **/ 9581 void 9582 lpfc_send_els_failure_event(struct lpfc_hba *phba, 9583 struct lpfc_iocbq *cmdiocbp, 9584 struct lpfc_iocbq *rspiocbp) 9585 { 9586 struct lpfc_vport *vport = cmdiocbp->vport; 9587 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 9588 struct lpfc_lsrjt_event lsrjt_event; 9589 struct lpfc_fabric_event_header fabric_event; 9590 struct ls_rjt stat; 9591 struct lpfc_nodelist *ndlp; 9592 uint32_t *pcmd; 9593 u32 ulp_status, ulp_word4; 9594 9595 ndlp = cmdiocbp->ndlp; 9596 if (!ndlp) 9597 return; 9598 9599 ulp_status = get_job_ulpstatus(phba, rspiocbp); 9600 ulp_word4 = get_job_word4(phba, rspiocbp); 9601 9602 if (ulp_status == IOSTAT_LS_RJT) { 9603 lsrjt_event.header.event_type = FC_REG_ELS_EVENT; 9604 lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV; 9605 memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname, 9606 sizeof(struct lpfc_name)); 9607 memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename, 9608 sizeof(struct lpfc_name)); 9609 pcmd = (uint32_t *)cmdiocbp->cmd_dmabuf->virt; 9610 lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0; 9611 stat.un.ls_rjt_error_be = cpu_to_be32(ulp_word4); 9612 lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode; 9613 lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp; 9614 fc_host_post_vendor_event(shost, 9615 fc_get_event_number(), 9616 sizeof(lsrjt_event), 9617 (char *)&lsrjt_event, 9618 LPFC_NL_VENDOR_ID); 9619 return; 9620 } 9621 if (ulp_status == IOSTAT_NPORT_BSY || 9622 ulp_status == IOSTAT_FABRIC_BSY) { 9623 fabric_event.event_type = FC_REG_FABRIC_EVENT; 9624 if (ulp_status == IOSTAT_NPORT_BSY) 9625 fabric_event.subcategory = LPFC_EVENT_PORT_BUSY; 9626 else 9627 fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY; 9628 memcpy(fabric_event.wwpn, &ndlp->nlp_portname, 9629 sizeof(struct lpfc_name)); 9630 memcpy(fabric_event.wwnn, &ndlp->nlp_nodename, 9631 sizeof(struct lpfc_name)); 9632 fc_host_post_vendor_event(shost, 9633 fc_get_event_number(), 9634 sizeof(fabric_event), 9635 (char *)&fabric_event, 9636 LPFC_NL_VENDOR_ID); 9637 return; 9638 } 9639 9640 } 9641 9642 /** 9643 * lpfc_send_els_event - Posts unsolicited els event 9644 * @vport: Pointer to vport object. 9645 * @ndlp: Pointer FC node object. 9646 * @payload: ELS command code type. 9647 * 9648 * This function posts an event when there is an incoming 9649 * unsolicited ELS command. 
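 *
 * Delivery is via the FC transport's vendor-event mechanism; for the LOGO
 * case, in outline (types and helpers exactly as used below, where
 * payload[2] is the start of the WWPN in the LOGO payload):
 *
 *        logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL);
 *        memcpy(logo_data->logo_wwpn, &payload[2], sizeof(struct lpfc_name));
 *        fc_host_post_vendor_event(shost, fc_get_event_number(),
 *                                  sizeof(struct lpfc_logo_event),
 *                                  (char *)logo_data, LPFC_NL_VENDOR_ID);
 *        kfree(logo_data);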
9650 **/ 9651 static void 9652 lpfc_send_els_event(struct lpfc_vport *vport, 9653 struct lpfc_nodelist *ndlp, 9654 uint32_t *payload) 9655 { 9656 struct lpfc_els_event_header *els_data = NULL; 9657 struct lpfc_logo_event *logo_data = NULL; 9658 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 9659 9660 if (*payload == ELS_CMD_LOGO) { 9661 logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL); 9662 if (!logo_data) { 9663 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9664 "0148 Failed to allocate memory " 9665 "for LOGO event\n"); 9666 return; 9667 } 9668 els_data = &logo_data->header; 9669 } else { 9670 els_data = kmalloc(sizeof(struct lpfc_els_event_header), 9671 GFP_KERNEL); 9672 if (!els_data) { 9673 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9674 "0149 Failed to allocate memory " 9675 "for ELS event\n"); 9676 return; 9677 } 9678 } 9679 els_data->event_type = FC_REG_ELS_EVENT; 9680 switch (*payload) { 9681 case ELS_CMD_PLOGI: 9682 els_data->subcategory = LPFC_EVENT_PLOGI_RCV; 9683 break; 9684 case ELS_CMD_PRLO: 9685 els_data->subcategory = LPFC_EVENT_PRLO_RCV; 9686 break; 9687 case ELS_CMD_ADISC: 9688 els_data->subcategory = LPFC_EVENT_ADISC_RCV; 9689 break; 9690 case ELS_CMD_LOGO: 9691 els_data->subcategory = LPFC_EVENT_LOGO_RCV; 9692 /* Copy the WWPN in the LOGO payload */ 9693 memcpy(logo_data->logo_wwpn, &payload[2], 9694 sizeof(struct lpfc_name)); 9695 break; 9696 default: 9697 kfree(els_data); 9698 return; 9699 } 9700 memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name)); 9701 memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name)); 9702 if (*payload == ELS_CMD_LOGO) { 9703 fc_host_post_vendor_event(shost, 9704 fc_get_event_number(), 9705 sizeof(struct lpfc_logo_event), 9706 (char *)logo_data, 9707 LPFC_NL_VENDOR_ID); 9708 kfree(logo_data); 9709 } else { 9710 fc_host_post_vendor_event(shost, 9711 fc_get_event_number(), 9712 sizeof(struct lpfc_els_event_header), 9713 (char *)els_data, 9714 LPFC_NL_VENDOR_ID); 9715 kfree(els_data); 9716 } 9717 9718 return; 9719 } 9720 9721 9722 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_li_event_nm, fc_fpin_li_event_types, 9723 FC_FPIN_LI_EVT_TYPES_INIT); 9724 9725 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_deli_event_nm, fc_fpin_deli_event_types, 9726 FC_FPIN_DELI_EVT_TYPES_INIT); 9727 9728 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_event_nm, fc_fpin_congn_event_types, 9729 FC_FPIN_CONGN_EVT_TYPES_INIT); 9730 9731 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_severity_nm, 9732 fc_fpin_congn_severity_types, 9733 FC_FPIN_CONGN_SEVERITY_INIT); 9734 9735 9736 /** 9737 * lpfc_display_fpin_wwpn - Display WWPNs accessible by the attached port 9738 * @phba: Pointer to phba object. 9739 * @wwnlist: Pointer to list of WWPNs in FPIN payload 9740 * @cnt: count of WWPNs in FPIN payload 9741 * 9742 * This routine is called by LI and PC descriptors. 
9743 * Limit the number of WWPNs displayed to 6 log messages, 6 per log message 9744 */ 9745 static void 9746 lpfc_display_fpin_wwpn(struct lpfc_hba *phba, __be64 *wwnlist, u32 cnt) 9747 { 9748 char buf[LPFC_FPIN_WWPN_LINE_SZ]; 9749 __be64 wwn; 9750 u64 wwpn; 9751 int i, len; 9752 int line = 0; 9753 int wcnt = 0; 9754 bool endit = false; 9755 9756 len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ, "Accessible WWPNs:"); 9757 for (i = 0; i < cnt; i++) { 9758 /* Are we on the last WWPN */ 9759 if (i == (cnt - 1)) 9760 endit = true; 9761 9762 /* Extract the next WWPN from the payload */ 9763 wwn = *wwnlist++; 9764 wwpn = be64_to_cpu(wwn); 9765 len += scnprintf(buf + len, LPFC_FPIN_WWPN_LINE_SZ - len, 9766 " %016llx", wwpn); 9767 9768 /* Log a message if we are on the last WWPN 9769 * or if we hit the max allowed per message. 9770 */ 9771 wcnt++; 9772 if (wcnt == LPFC_FPIN_WWPN_LINE_CNT || endit) { 9773 buf[len] = 0; 9774 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9775 "4686 %s\n", buf); 9776 9777 /* Check if we reached the last WWPN */ 9778 if (endit) 9779 return; 9780 9781 /* Limit the number of log message displayed per FPIN */ 9782 line++; 9783 if (line == LPFC_FPIN_WWPN_NUM_LINE) { 9784 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9785 "4687 %d WWPNs Truncated\n", 9786 cnt - i - 1); 9787 return; 9788 } 9789 9790 /* Start over with next log message */ 9791 wcnt = 0; 9792 len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ, 9793 "Additional WWPNs:"); 9794 } 9795 } 9796 } 9797 9798 /** 9799 * lpfc_els_rcv_fpin_li - Process an FPIN Link Integrity Event. 9800 * @phba: Pointer to phba object. 9801 * @tlv: Pointer to the Link Integrity Notification Descriptor. 9802 * 9803 * This function processes a Link Integrity FPIN event by logging a message. 9804 **/ 9805 static void 9806 lpfc_els_rcv_fpin_li(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 9807 { 9808 struct fc_fn_li_desc *li = (struct fc_fn_li_desc *)tlv; 9809 const char *li_evt_str; 9810 u32 li_evt, cnt; 9811 9812 li_evt = be16_to_cpu(li->event_type); 9813 li_evt_str = lpfc_get_fpin_li_event_nm(li_evt); 9814 cnt = be32_to_cpu(li->pname_count); 9815 9816 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9817 "4680 FPIN Link Integrity %s (x%x) " 9818 "Detecting PN x%016llx Attached PN x%016llx " 9819 "Duration %d mSecs Count %d Port Cnt %d\n", 9820 li_evt_str, li_evt, 9821 be64_to_cpu(li->detecting_wwpn), 9822 be64_to_cpu(li->attached_wwpn), 9823 be32_to_cpu(li->event_threshold), 9824 be32_to_cpu(li->event_count), cnt); 9825 9826 lpfc_display_fpin_wwpn(phba, (__be64 *)&li->pname_list, cnt); 9827 } 9828 9829 /** 9830 * lpfc_els_rcv_fpin_del - Process an FPIN Delivery Event. 9831 * @phba: Pointer to hba object. 9832 * @tlv: Pointer to the Delivery Notification Descriptor TLV 9833 * 9834 * This function processes a Delivery FPIN event by logging a message. 
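 *
 * The fixed part of the Delivery descriptor is followed by the header of
 * the discarded frame; the routine steps past the descriptor and logs the
 * first six words, roughly:
 *
 *        frame = (__be32 *)(del + 1);    // frame header after fixed fields
 *        // DiscHdr0..DiscHdr5 in the log are be32_to_cpu(frame[0..5])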
9835 **/ 9836 static void 9837 lpfc_els_rcv_fpin_del(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 9838 { 9839 struct fc_fn_deli_desc *del = (struct fc_fn_deli_desc *)tlv; 9840 const char *del_rsn_str; 9841 u32 del_rsn; 9842 __be32 *frame; 9843 9844 del_rsn = be16_to_cpu(del->deli_reason_code); 9845 del_rsn_str = lpfc_get_fpin_deli_event_nm(del_rsn); 9846 9847 /* Skip over desc_tag/desc_len header to payload */ 9848 frame = (__be32 *)(del + 1); 9849 9850 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9851 "4681 FPIN Delivery %s (x%x) " 9852 "Detecting PN x%016llx Attached PN x%016llx " 9853 "DiscHdr0 x%08x " 9854 "DiscHdr1 x%08x DiscHdr2 x%08x DiscHdr3 x%08x " 9855 "DiscHdr4 x%08x DiscHdr5 x%08x\n", 9856 del_rsn_str, del_rsn, 9857 be64_to_cpu(del->detecting_wwpn), 9858 be64_to_cpu(del->attached_wwpn), 9859 be32_to_cpu(frame[0]), 9860 be32_to_cpu(frame[1]), 9861 be32_to_cpu(frame[2]), 9862 be32_to_cpu(frame[3]), 9863 be32_to_cpu(frame[4]), 9864 be32_to_cpu(frame[5])); 9865 } 9866 9867 /** 9868 * lpfc_els_rcv_fpin_peer_cgn - Process a FPIN Peer Congestion Event. 9869 * @phba: Pointer to hba object. 9870 * @tlv: Pointer to the Peer Congestion Notification Descriptor TLV 9871 * 9872 * This function processes a Peer Congestion FPIN event by logging a message. 9873 **/ 9874 static void 9875 lpfc_els_rcv_fpin_peer_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 9876 { 9877 struct fc_fn_peer_congn_desc *pc = (struct fc_fn_peer_congn_desc *)tlv; 9878 const char *pc_evt_str; 9879 u32 pc_evt, cnt; 9880 9881 pc_evt = be16_to_cpu(pc->event_type); 9882 pc_evt_str = lpfc_get_fpin_congn_event_nm(pc_evt); 9883 cnt = be32_to_cpu(pc->pname_count); 9884 9885 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_ELS, 9886 "4684 FPIN Peer Congestion %s (x%x) " 9887 "Duration %d mSecs " 9888 "Detecting PN x%016llx Attached PN x%016llx " 9889 "Impacted Port Cnt %d\n", 9890 pc_evt_str, pc_evt, 9891 be32_to_cpu(pc->event_period), 9892 be64_to_cpu(pc->detecting_wwpn), 9893 be64_to_cpu(pc->attached_wwpn), 9894 cnt); 9895 9896 lpfc_display_fpin_wwpn(phba, (__be64 *)&pc->pname_list, cnt); 9897 } 9898 9899 /** 9900 * lpfc_els_rcv_fpin_cgn - Process an FPIN Congestion notification 9901 * @phba: Pointer to hba object. 9902 * @tlv: Pointer to the Congestion Notification Descriptor TLV 9903 * 9904 * This function processes an FPIN Congestion Notifiction. The notification 9905 * could be an Alarm or Warning. This routine feeds that data into driver's 9906 * running congestion algorithm. It also processes the FPIN by 9907 * logging a message. It returns 1 to indicate deliver this message 9908 * to the upper layer or 0 to indicate don't deliver it. 9909 **/ 9910 static int 9911 lpfc_els_rcv_fpin_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 9912 { 9913 struct lpfc_cgn_info *cp; 9914 struct fc_fn_congn_desc *cgn = (struct fc_fn_congn_desc *)tlv; 9915 const char *cgn_evt_str; 9916 u32 cgn_evt; 9917 const char *cgn_sev_str; 9918 u32 cgn_sev; 9919 uint16_t value; 9920 u32 crc; 9921 bool nm_log = false; 9922 int rc = 1; 9923 9924 cgn_evt = be16_to_cpu(cgn->event_type); 9925 cgn_evt_str = lpfc_get_fpin_congn_event_nm(cgn_evt); 9926 cgn_sev = cgn->severity; 9927 cgn_sev_str = lpfc_get_fpin_congn_severity_nm(cgn_sev); 9928 9929 /* The driver only takes action on a Credit Stall or Oversubscription 9930 * event type to engage the IO algorithm. The driver prints an 9931 * unmaskable message only for Lost Credit and Credit Stall. 
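 * A condensed view of the accounting done below for an Alarm-severity
 * descriptor when CMF is active (identifiers as used in this routine):
 *
 *        if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM)
 *                atomic_inc(&phba->cgn_sync_alarm_cnt);  // for SYNC_WQE
 *        atomic_inc(&phba->cgn_fabric_alarm_cnt);        // for cgn_info
 *        phba->cgn_fpin_frequency = be32_to_cpu(cgn->event_period);
 *        rc = 0;   // consumed here, not delivered to the upper layer
 *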
9932 * TODO: Still need to have definition of host action on clear, 9933 * lost credit and device specific event types. 9934 */ 9935 switch (cgn_evt) { 9936 case FPIN_CONGN_LOST_CREDIT: 9937 nm_log = true; 9938 break; 9939 case FPIN_CONGN_CREDIT_STALL: 9940 nm_log = true; 9941 fallthrough; 9942 case FPIN_CONGN_OVERSUBSCRIPTION: 9943 if (cgn_evt == FPIN_CONGN_OVERSUBSCRIPTION) 9944 nm_log = false; 9945 switch (cgn_sev) { 9946 case FPIN_CONGN_SEVERITY_ERROR: 9947 /* Take action here for an Alarm event */ 9948 if (phba->cmf_active_mode != LPFC_CFG_OFF) { 9949 if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) { 9950 /* Track of alarm cnt for SYNC_WQE */ 9951 atomic_inc(&phba->cgn_sync_alarm_cnt); 9952 } 9953 /* Track alarm cnt for cgn_info regardless 9954 * of whether CMF is configured for Signals 9955 * or FPINs. 9956 */ 9957 atomic_inc(&phba->cgn_fabric_alarm_cnt); 9958 goto cleanup; 9959 } 9960 break; 9961 case FPIN_CONGN_SEVERITY_WARNING: 9962 /* Take action here for a Warning event */ 9963 if (phba->cmf_active_mode != LPFC_CFG_OFF) { 9964 if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) { 9965 /* Track of warning cnt for SYNC_WQE */ 9966 atomic_inc(&phba->cgn_sync_warn_cnt); 9967 } 9968 /* Track warning cnt and freq for cgn_info 9969 * regardless of whether CMF is configured for 9970 * Signals or FPINs. 9971 */ 9972 atomic_inc(&phba->cgn_fabric_warn_cnt); 9973 cleanup: 9974 /* Save frequency in ms */ 9975 phba->cgn_fpin_frequency = 9976 be32_to_cpu(cgn->event_period); 9977 value = phba->cgn_fpin_frequency; 9978 if (phba->cgn_i) { 9979 cp = (struct lpfc_cgn_info *) 9980 phba->cgn_i->virt; 9981 cp->cgn_alarm_freq = 9982 cpu_to_le16(value); 9983 cp->cgn_warn_freq = 9984 cpu_to_le16(value); 9985 crc = lpfc_cgn_calc_crc32 9986 (cp, 9987 LPFC_CGN_INFO_SZ, 9988 LPFC_CGN_CRC32_SEED); 9989 cp->cgn_info_crc = cpu_to_le32(crc); 9990 } 9991 9992 /* Don't deliver to upper layer since 9993 * driver took action on this tlv. 9994 */ 9995 rc = 0; 9996 } 9997 break; 9998 } 9999 break; 10000 } 10001 10002 /* Change the log level to unmaskable for the following event types. */ 10003 lpfc_printf_log(phba, (nm_log ? KERN_WARNING : KERN_INFO), 10004 LOG_CGN_MGMT | LOG_ELS, 10005 "4683 FPIN CONGESTION %s type %s (x%x) Event " 10006 "Duration %d mSecs\n", 10007 cgn_sev_str, cgn_evt_str, cgn_evt, 10008 be32_to_cpu(cgn->event_period)); 10009 return rc; 10010 } 10011 10012 void 10013 lpfc_els_rcv_fpin(struct lpfc_vport *vport, void *p, u32 fpin_length) 10014 { 10015 struct lpfc_hba *phba = vport->phba; 10016 struct fc_els_fpin *fpin = (struct fc_els_fpin *)p; 10017 struct fc_tlv_desc *tlv, *first_tlv, *current_tlv; 10018 const char *dtag_nm; 10019 int desc_cnt = 0, bytes_remain, cnt; 10020 u32 dtag, deliver = 0; 10021 int len; 10022 10023 /* FPINs handled only if we are in the right discovery state */ 10024 if (vport->port_state < LPFC_DISC_AUTH) 10025 return; 10026 10027 /* make sure there is the full fpin header */ 10028 if (fpin_length < sizeof(struct fc_els_fpin)) 10029 return; 10030 10031 /* Sanity check descriptor length. The desc_len value does not 10032 * include space for the ELS command and the desc_len fields. 
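 * In other words the frame is only taken as valid when
 *
 *        fpin_length >= be32_to_cpu(fpin->desc_len) + sizeof(struct fc_els_fpin)
 *
 * i.e. the received length must cover the fixed FPIN header plus every
 * byte the descriptor list claims to contain; anything shorter is logged
 * with message 4671 and dropped.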
10033 */ 10034 len = be32_to_cpu(fpin->desc_len); 10035 if (fpin_length < len + sizeof(struct fc_els_fpin)) { 10036 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 10037 "4671 Bad ELS FPIN length %d: %d\n", 10038 len, fpin_length); 10039 return; 10040 } 10041 10042 tlv = (struct fc_tlv_desc *)&fpin->fpin_desc[0]; 10043 first_tlv = tlv; 10044 bytes_remain = fpin_length - offsetof(struct fc_els_fpin, fpin_desc); 10045 bytes_remain = min_t(u32, bytes_remain, be32_to_cpu(fpin->desc_len)); 10046 10047 /* process each descriptor separately */ 10048 while (bytes_remain >= FC_TLV_DESC_HDR_SZ && 10049 bytes_remain >= FC_TLV_DESC_SZ_FROM_LENGTH(tlv)) { 10050 dtag = be32_to_cpu(tlv->desc_tag); 10051 switch (dtag) { 10052 case ELS_DTAG_LNK_INTEGRITY: 10053 lpfc_els_rcv_fpin_li(phba, tlv); 10054 deliver = 1; 10055 break; 10056 case ELS_DTAG_DELIVERY: 10057 lpfc_els_rcv_fpin_del(phba, tlv); 10058 deliver = 1; 10059 break; 10060 case ELS_DTAG_PEER_CONGEST: 10061 lpfc_els_rcv_fpin_peer_cgn(phba, tlv); 10062 deliver = 1; 10063 break; 10064 case ELS_DTAG_CONGESTION: 10065 deliver = lpfc_els_rcv_fpin_cgn(phba, tlv); 10066 break; 10067 default: 10068 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 10069 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 10070 "4678 unknown FPIN descriptor[%d]: " 10071 "tag x%x (%s)\n", 10072 desc_cnt, dtag, dtag_nm); 10073 10074 /* If descriptor is bad, drop the rest of the data */ 10075 return; 10076 } 10077 lpfc_cgn_update_stat(phba, dtag); 10078 cnt = be32_to_cpu(tlv->desc_len); 10079 10080 /* Sanity check descriptor length. The desc_len value does not 10081 * include space for the desc_tag and the desc_len fields. 10082 */ 10083 len -= (cnt + sizeof(struct fc_tlv_desc)); 10084 if (len < 0) { 10085 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 10086 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 10087 "4672 Bad FPIN descriptor TLV length " 10088 "%d: %d %d %s\n", 10089 cnt, len, fpin_length, dtag_nm); 10090 return; 10091 } 10092 10093 current_tlv = tlv; 10094 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); 10095 tlv = fc_tlv_next_desc(tlv); 10096 10097 /* Format payload such that the FPIN delivered to the 10098 * upper layer is a single descriptor FPIN. 10099 */ 10100 if (desc_cnt) 10101 memcpy(first_tlv, current_tlv, 10102 (cnt + sizeof(struct fc_els_fpin))); 10103 10104 /* Adjust the length so that it only reflects a 10105 * single descriptor FPIN. 10106 */ 10107 fpin_length = cnt + sizeof(struct fc_els_fpin); 10108 fpin->desc_len = cpu_to_be32(fpin_length); 10109 fpin_length += sizeof(struct fc_els_fpin); /* the entire FPIN */ 10110 10111 /* Send every descriptor individually to the upper layer */ 10112 if (deliver) 10113 fc_host_fpin_rcv(lpfc_shost_from_vport(vport), 10114 fpin_length, (char *)fpin); 10115 desc_cnt++; 10116 } 10117 } 10118 10119 /** 10120 * lpfc_els_unsol_buffer - Process an unsolicited event data buffer 10121 * @phba: pointer to lpfc hba data structure. 10122 * @pring: pointer to a SLI ring. 10123 * @vport: pointer to a host virtual N_Port data structure. 10124 * @elsiocb: pointer to lpfc els command iocb data structure. 10125 * 10126 * This routine is used for processing the IOCB associated with a unsolicited 10127 * event. It first determines whether there is an existing ndlp that matches 10128 * the DID from the unsolicited IOCB. If not, it will create a new one with 10129 * the DID from the unsolicited IOCB. 
The ELS command from the unsolicited 10130 * IOCB is then used to invoke the proper routine and to set up proper state 10131 * of the discovery state machine. 10132 **/ 10133 static void 10134 lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10135 struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb) 10136 { 10137 struct lpfc_nodelist *ndlp; 10138 struct ls_rjt stat; 10139 u32 *payload, payload_len; 10140 u32 cmd = 0, did = 0, newnode, status = 0; 10141 uint8_t rjt_exp, rjt_err = 0, init_link = 0; 10142 struct lpfc_wcqe_complete *wcqe_cmpl = NULL; 10143 LPFC_MBOXQ_t *mbox; 10144 10145 if (!vport || !elsiocb->cmd_dmabuf) 10146 goto dropit; 10147 10148 newnode = 0; 10149 wcqe_cmpl = &elsiocb->wcqe_cmpl; 10150 payload = elsiocb->cmd_dmabuf->virt; 10151 if (phba->sli_rev == LPFC_SLI_REV4) 10152 payload_len = wcqe_cmpl->total_data_placed; 10153 else 10154 payload_len = elsiocb->iocb.unsli3.rcvsli3.acc_len; 10155 status = get_job_ulpstatus(phba, elsiocb); 10156 cmd = *payload; 10157 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0) 10158 lpfc_sli3_post_buffer(phba, pring, 1); 10159 10160 did = get_job_els_rsp64_did(phba, elsiocb); 10161 if (status) { 10162 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10163 "RCV Unsol ELS: status:x%x/x%x did:x%x", 10164 status, get_job_word4(phba, elsiocb), did); 10165 goto dropit; 10166 } 10167 10168 /* Check to see if link went down during discovery */ 10169 if (lpfc_els_chk_latt(vport)) 10170 goto dropit; 10171 10172 /* Ignore traffic received during vport shutdown. */ 10173 if (vport->load_flag & FC_UNLOADING) 10174 goto dropit; 10175 10176 /* If NPort discovery is delayed drop incoming ELS */ 10177 if ((vport->fc_flag & FC_DISC_DELAYED) && 10178 (cmd != ELS_CMD_PLOGI)) 10179 goto dropit; 10180 10181 ndlp = lpfc_findnode_did(vport, did); 10182 if (!ndlp) { 10183 /* Cannot find existing Fabric ndlp, so allocate a new one */ 10184 ndlp = lpfc_nlp_init(vport, did); 10185 if (!ndlp) 10186 goto dropit; 10187 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 10188 newnode = 1; 10189 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 10190 ndlp->nlp_type |= NLP_FABRIC; 10191 } else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 10192 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 10193 newnode = 1; 10194 } 10195 10196 phba->fc_stat.elsRcvFrame++; 10197 10198 /* 10199 * Do not process any unsolicited ELS commands 10200 * if the ndlp is in DEV_LOSS 10201 */ 10202 spin_lock_irq(&ndlp->lock); 10203 if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) { 10204 spin_unlock_irq(&ndlp->lock); 10205 if (newnode) 10206 lpfc_nlp_put(ndlp); 10207 goto dropit; 10208 } 10209 spin_unlock_irq(&ndlp->lock); 10210 10211 elsiocb->ndlp = lpfc_nlp_get(ndlp); 10212 if (!elsiocb->ndlp) 10213 goto dropit; 10214 elsiocb->vport = vport; 10215 10216 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) { 10217 cmd &= ELS_CMD_MASK; 10218 } 10219 /* ELS command <elsCmd> received from NPORT <did> */ 10220 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 10221 "0112 ELS command x%x received from NPORT x%x " 10222 "refcnt %d Data: x%x x%x x%x x%x\n", 10223 cmd, did, kref_read(&ndlp->kref), vport->port_state, 10224 vport->fc_flag, vport->fc_myDID, vport->fc_prevDID); 10225 10226 /* reject till our FLOGI completes or PLOGI assigned DID via PT2PT */ 10227 if ((vport->port_state < LPFC_FABRIC_CFG_LINK) && 10228 (cmd != ELS_CMD_FLOGI) && 10229 !((cmd == ELS_CMD_PLOGI) && (vport->fc_flag & FC_PT2PT))) { 10230 rjt_err = LSRJT_LOGICAL_BSY; 10231 rjt_exp = LSEXP_NOTHING_MORE; 10232 goto lsrjt; 10233 
}
10234
10235 switch (cmd) {
10236 case ELS_CMD_PLOGI:
10237 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10238 "RCV PLOGI: did:x%x/ste:x%x flg:x%x",
10239 did, vport->port_state, ndlp->nlp_flag);
10240
10241 phba->fc_stat.elsRcvPLOGI++;
10242 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
10243 if (phba->sli_rev == LPFC_SLI_REV4 &&
10244 (phba->pport->fc_flag & FC_PT2PT)) {
10245 vport->fc_prevDID = vport->fc_myDID;
10246 /* Our DID needs to be updated before registering
10247 * the vfi. This is done in lpfc_rcv_plogi but
10248 * that is called after the reg_vfi.
10249 */
10250 vport->fc_myDID =
10251 bf_get(els_rsp64_sid,
10252 &elsiocb->wqe.xmit_els_rsp);
10253 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
10254 "3312 Remote port assigned DID x%x "
10255 "%x\n", vport->fc_myDID,
10256 vport->fc_prevDID);
10257 }
10258
10259 lpfc_send_els_event(vport, ndlp, payload);
10260
10261 /* If Nport discovery is delayed, reject PLOGIs */
10262 if (vport->fc_flag & FC_DISC_DELAYED) {
10263 rjt_err = LSRJT_UNABLE_TPC;
10264 rjt_exp = LSEXP_NOTHING_MORE;
10265 break;
10266 }
10267
10268 if (vport->port_state < LPFC_DISC_AUTH) {
10269 if (!(phba->pport->fc_flag & FC_PT2PT) ||
10270 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
10271 rjt_err = LSRJT_UNABLE_TPC;
10272 rjt_exp = LSEXP_NOTHING_MORE;
10273 break;
10274 }
10275 }
10276
10277 spin_lock_irq(&ndlp->lock);
10278 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
10279 spin_unlock_irq(&ndlp->lock);
10280
10281 lpfc_disc_state_machine(vport, ndlp, elsiocb,
10282 NLP_EVT_RCV_PLOGI);
10283
10284 break;
10285 case ELS_CMD_FLOGI:
10286 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10287 "RCV FLOGI: did:x%x/ste:x%x flg:x%x",
10288 did, vport->port_state, ndlp->nlp_flag);
10289
10290 phba->fc_stat.elsRcvFLOGI++;
10291
10292 /* If the driver believes fabric discovery is done and is ready,
10293 * bounce the link. There is some discrepancy.
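* A FLOGI received at this point, while the port is already up in pt2pt
* mode and has already handled a FLOGI (rcv_flogi_cnt), most likely means
* the remote port restarted its login. Reject it with LOGICAL_BSY and set
* init_link so the link is re-initialized (lpfc_linkdown()/lpfc_init_link())
* once the LS_RJT has been queued.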
10294 */ 10295 if (vport->port_state >= LPFC_LOCAL_CFG_LINK && 10296 vport->fc_flag & FC_PT2PT && 10297 vport->rcv_flogi_cnt >= 1) { 10298 rjt_err = LSRJT_LOGICAL_BSY; 10299 rjt_exp = LSEXP_NOTHING_MORE; 10300 init_link++; 10301 goto lsrjt; 10302 } 10303 10304 lpfc_els_rcv_flogi(vport, elsiocb, ndlp); 10305 /* retain node if our response is deferred */ 10306 if (phba->defer_flogi_acc_flag) 10307 break; 10308 if (newnode) 10309 lpfc_disc_state_machine(vport, ndlp, NULL, 10310 NLP_EVT_DEVICE_RM); 10311 break; 10312 case ELS_CMD_LOGO: 10313 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10314 "RCV LOGO: did:x%x/ste:x%x flg:x%x", 10315 did, vport->port_state, ndlp->nlp_flag); 10316 10317 phba->fc_stat.elsRcvLOGO++; 10318 lpfc_send_els_event(vport, ndlp, payload); 10319 if (vport->port_state < LPFC_DISC_AUTH) { 10320 rjt_err = LSRJT_UNABLE_TPC; 10321 rjt_exp = LSEXP_NOTHING_MORE; 10322 break; 10323 } 10324 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO); 10325 if (newnode) 10326 lpfc_disc_state_machine(vport, ndlp, NULL, 10327 NLP_EVT_DEVICE_RM); 10328 break; 10329 case ELS_CMD_PRLO: 10330 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10331 "RCV PRLO: did:x%x/ste:x%x flg:x%x", 10332 did, vport->port_state, ndlp->nlp_flag); 10333 10334 phba->fc_stat.elsRcvPRLO++; 10335 lpfc_send_els_event(vport, ndlp, payload); 10336 if (vport->port_state < LPFC_DISC_AUTH) { 10337 rjt_err = LSRJT_UNABLE_TPC; 10338 rjt_exp = LSEXP_NOTHING_MORE; 10339 break; 10340 } 10341 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO); 10342 break; 10343 case ELS_CMD_LCB: 10344 phba->fc_stat.elsRcvLCB++; 10345 lpfc_els_rcv_lcb(vport, elsiocb, ndlp); 10346 break; 10347 case ELS_CMD_RDP: 10348 phba->fc_stat.elsRcvRDP++; 10349 lpfc_els_rcv_rdp(vport, elsiocb, ndlp); 10350 break; 10351 case ELS_CMD_RSCN: 10352 phba->fc_stat.elsRcvRSCN++; 10353 lpfc_els_rcv_rscn(vport, elsiocb, ndlp); 10354 if (newnode) 10355 lpfc_disc_state_machine(vport, ndlp, NULL, 10356 NLP_EVT_DEVICE_RM); 10357 break; 10358 case ELS_CMD_ADISC: 10359 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10360 "RCV ADISC: did:x%x/ste:x%x flg:x%x", 10361 did, vport->port_state, ndlp->nlp_flag); 10362 10363 lpfc_send_els_event(vport, ndlp, payload); 10364 phba->fc_stat.elsRcvADISC++; 10365 if (vport->port_state < LPFC_DISC_AUTH) { 10366 rjt_err = LSRJT_UNABLE_TPC; 10367 rjt_exp = LSEXP_NOTHING_MORE; 10368 break; 10369 } 10370 lpfc_disc_state_machine(vport, ndlp, elsiocb, 10371 NLP_EVT_RCV_ADISC); 10372 break; 10373 case ELS_CMD_PDISC: 10374 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10375 "RCV PDISC: did:x%x/ste:x%x flg:x%x", 10376 did, vport->port_state, ndlp->nlp_flag); 10377 10378 phba->fc_stat.elsRcvPDISC++; 10379 if (vport->port_state < LPFC_DISC_AUTH) { 10380 rjt_err = LSRJT_UNABLE_TPC; 10381 rjt_exp = LSEXP_NOTHING_MORE; 10382 break; 10383 } 10384 lpfc_disc_state_machine(vport, ndlp, elsiocb, 10385 NLP_EVT_RCV_PDISC); 10386 break; 10387 case ELS_CMD_FARPR: 10388 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10389 "RCV FARPR: did:x%x/ste:x%x flg:x%x", 10390 did, vport->port_state, ndlp->nlp_flag); 10391 10392 phba->fc_stat.elsRcvFARPR++; 10393 lpfc_els_rcv_farpr(vport, elsiocb, ndlp); 10394 break; 10395 case ELS_CMD_FARP: 10396 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10397 "RCV FARP: did:x%x/ste:x%x flg:x%x", 10398 did, vport->port_state, ndlp->nlp_flag); 10399 10400 phba->fc_stat.elsRcvFARP++; 10401 lpfc_els_rcv_farp(vport, elsiocb, ndlp); 10402 break; 10403 case ELS_CMD_FAN: 10404 
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10405 "RCV FAN: did:x%x/ste:x%x flg:x%x", 10406 did, vport->port_state, ndlp->nlp_flag); 10407 10408 phba->fc_stat.elsRcvFAN++; 10409 lpfc_els_rcv_fan(vport, elsiocb, ndlp); 10410 break; 10411 case ELS_CMD_PRLI: 10412 case ELS_CMD_NVMEPRLI: 10413 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10414 "RCV PRLI: did:x%x/ste:x%x flg:x%x", 10415 did, vport->port_state, ndlp->nlp_flag); 10416 10417 phba->fc_stat.elsRcvPRLI++; 10418 if ((vport->port_state < LPFC_DISC_AUTH) && 10419 (vport->fc_flag & FC_FABRIC)) { 10420 rjt_err = LSRJT_UNABLE_TPC; 10421 rjt_exp = LSEXP_NOTHING_MORE; 10422 break; 10423 } 10424 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI); 10425 break; 10426 case ELS_CMD_LIRR: 10427 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10428 "RCV LIRR: did:x%x/ste:x%x flg:x%x", 10429 did, vport->port_state, ndlp->nlp_flag); 10430 10431 phba->fc_stat.elsRcvLIRR++; 10432 lpfc_els_rcv_lirr(vport, elsiocb, ndlp); 10433 if (newnode) 10434 lpfc_disc_state_machine(vport, ndlp, NULL, 10435 NLP_EVT_DEVICE_RM); 10436 break; 10437 case ELS_CMD_RLS: 10438 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10439 "RCV RLS: did:x%x/ste:x%x flg:x%x", 10440 did, vport->port_state, ndlp->nlp_flag); 10441 10442 phba->fc_stat.elsRcvRLS++; 10443 lpfc_els_rcv_rls(vport, elsiocb, ndlp); 10444 if (newnode) 10445 lpfc_disc_state_machine(vport, ndlp, NULL, 10446 NLP_EVT_DEVICE_RM); 10447 break; 10448 case ELS_CMD_RPL: 10449 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10450 "RCV RPL: did:x%x/ste:x%x flg:x%x", 10451 did, vport->port_state, ndlp->nlp_flag); 10452 10453 phba->fc_stat.elsRcvRPL++; 10454 lpfc_els_rcv_rpl(vport, elsiocb, ndlp); 10455 if (newnode) 10456 lpfc_disc_state_machine(vport, ndlp, NULL, 10457 NLP_EVT_DEVICE_RM); 10458 break; 10459 case ELS_CMD_RNID: 10460 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10461 "RCV RNID: did:x%x/ste:x%x flg:x%x", 10462 did, vport->port_state, ndlp->nlp_flag); 10463 10464 phba->fc_stat.elsRcvRNID++; 10465 lpfc_els_rcv_rnid(vport, elsiocb, ndlp); 10466 if (newnode) 10467 lpfc_disc_state_machine(vport, ndlp, NULL, 10468 NLP_EVT_DEVICE_RM); 10469 break; 10470 case ELS_CMD_RTV: 10471 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10472 "RCV RTV: did:x%x/ste:x%x flg:x%x", 10473 did, vport->port_state, ndlp->nlp_flag); 10474 phba->fc_stat.elsRcvRTV++; 10475 lpfc_els_rcv_rtv(vport, elsiocb, ndlp); 10476 if (newnode) 10477 lpfc_disc_state_machine(vport, ndlp, NULL, 10478 NLP_EVT_DEVICE_RM); 10479 break; 10480 case ELS_CMD_RRQ: 10481 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10482 "RCV RRQ: did:x%x/ste:x%x flg:x%x", 10483 did, vport->port_state, ndlp->nlp_flag); 10484 10485 phba->fc_stat.elsRcvRRQ++; 10486 lpfc_els_rcv_rrq(vport, elsiocb, ndlp); 10487 if (newnode) 10488 lpfc_disc_state_machine(vport, ndlp, NULL, 10489 NLP_EVT_DEVICE_RM); 10490 break; 10491 case ELS_CMD_ECHO: 10492 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10493 "RCV ECHO: did:x%x/ste:x%x flg:x%x", 10494 did, vport->port_state, ndlp->nlp_flag); 10495 10496 phba->fc_stat.elsRcvECHO++; 10497 lpfc_els_rcv_echo(vport, elsiocb, ndlp); 10498 if (newnode) 10499 lpfc_disc_state_machine(vport, ndlp, NULL, 10500 NLP_EVT_DEVICE_RM); 10501 break; 10502 case ELS_CMD_REC: 10503 /* receive this due to exchange closed */ 10504 rjt_err = LSRJT_UNABLE_TPC; 10505 rjt_exp = LSEXP_INVALID_OX_RX; 10506 break; 10507 case ELS_CMD_FPIN: 10508 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 
10509 "RCV FPIN: did:x%x/ste:x%x flg:x%x", 10510 did, vport->port_state, ndlp->nlp_flag); 10511 10512 lpfc_els_rcv_fpin(vport, (struct fc_els_fpin *)payload, 10513 payload_len); 10514 10515 /* There are no replies, so no rjt codes */ 10516 break; 10517 case ELS_CMD_EDC: 10518 lpfc_els_rcv_edc(vport, elsiocb, ndlp); 10519 break; 10520 case ELS_CMD_RDF: 10521 phba->fc_stat.elsRcvRDF++; 10522 /* Accept RDF only from fabric controller */ 10523 if (did != Fabric_Cntl_DID) { 10524 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 10525 "1115 Received RDF from invalid DID " 10526 "x%x\n", did); 10527 rjt_err = LSRJT_PROTOCOL_ERR; 10528 rjt_exp = LSEXP_NOTHING_MORE; 10529 goto lsrjt; 10530 } 10531 10532 lpfc_els_rcv_rdf(vport, elsiocb, ndlp); 10533 break; 10534 default: 10535 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10536 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x", 10537 cmd, did, vport->port_state); 10538 10539 /* Unsupported ELS command, reject */ 10540 rjt_err = LSRJT_CMD_UNSUPPORTED; 10541 rjt_exp = LSEXP_NOTHING_MORE; 10542 10543 /* Unknown ELS command <elsCmd> received from NPORT <did> */ 10544 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10545 "0115 Unknown ELS command x%x " 10546 "received from NPORT x%x\n", cmd, did); 10547 if (newnode) 10548 lpfc_disc_state_machine(vport, ndlp, NULL, 10549 NLP_EVT_DEVICE_RM); 10550 break; 10551 } 10552 10553 lsrjt: 10554 /* check if need to LS_RJT received ELS cmd */ 10555 if (rjt_err) { 10556 memset(&stat, 0, sizeof(stat)); 10557 stat.un.b.lsRjtRsnCode = rjt_err; 10558 stat.un.b.lsRjtRsnCodeExp = rjt_exp; 10559 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp, 10560 NULL); 10561 /* Remove the reference from above for new nodes. */ 10562 if (newnode) 10563 lpfc_disc_state_machine(vport, ndlp, NULL, 10564 NLP_EVT_DEVICE_RM); 10565 } 10566 10567 /* Release the reference on this elsiocb, not the ndlp. */ 10568 lpfc_nlp_put(elsiocb->ndlp); 10569 elsiocb->ndlp = NULL; 10570 10571 /* Special case. Driver received an unsolicited command that 10572 * unsupportable given the driver's current state. Reset the 10573 * link and start over. 10574 */ 10575 if (init_link) { 10576 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10577 if (!mbox) 10578 return; 10579 lpfc_linkdown(phba); 10580 lpfc_init_link(phba, mbox, 10581 phba->cfg_topology, 10582 phba->cfg_link_speed); 10583 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; 10584 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 10585 mbox->vport = vport; 10586 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == 10587 MBX_NOT_FINISHED) 10588 mempool_free(mbox, phba->mbox_mem_pool); 10589 } 10590 10591 return; 10592 10593 dropit: 10594 if (vport && !(vport->load_flag & FC_UNLOADING)) 10595 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10596 "0111 Dropping received ELS cmd " 10597 "Data: x%x x%x x%x x%x\n", 10598 cmd, status, get_job_word4(phba, elsiocb), did); 10599 10600 phba->fc_stat.elsRcvDrop++; 10601 } 10602 10603 /** 10604 * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring 10605 * @phba: pointer to lpfc hba data structure. 10606 * @pring: pointer to a SLI ring. 10607 * @elsiocb: pointer to lpfc els iocb data structure. 10608 * 10609 * This routine is used to process an unsolicited event received from a SLI 10610 * (Service Level Interface) ring. 
The actual processing of the data buffer 10611 * associated with the unsolicited event is done by invoking the routine 10612 * lpfc_els_unsol_buffer() after properly set up the iocb buffer from the 10613 * SLI ring on which the unsolicited event was received. 10614 **/ 10615 void 10616 lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10617 struct lpfc_iocbq *elsiocb) 10618 { 10619 struct lpfc_vport *vport = elsiocb->vport; 10620 u32 ulp_command, status, parameter, bde_count = 0; 10621 IOCB_t *icmd; 10622 struct lpfc_wcqe_complete *wcqe_cmpl = NULL; 10623 struct lpfc_dmabuf *bdeBuf1 = elsiocb->cmd_dmabuf; 10624 struct lpfc_dmabuf *bdeBuf2 = elsiocb->bpl_dmabuf; 10625 dma_addr_t paddr; 10626 10627 elsiocb->cmd_dmabuf = NULL; 10628 elsiocb->rsp_dmabuf = NULL; 10629 elsiocb->bpl_dmabuf = NULL; 10630 10631 wcqe_cmpl = &elsiocb->wcqe_cmpl; 10632 ulp_command = get_job_cmnd(phba, elsiocb); 10633 status = get_job_ulpstatus(phba, elsiocb); 10634 parameter = get_job_word4(phba, elsiocb); 10635 if (phba->sli_rev == LPFC_SLI_REV4) 10636 bde_count = wcqe_cmpl->word3; 10637 else 10638 bde_count = elsiocb->iocb.ulpBdeCount; 10639 10640 if (status == IOSTAT_NEED_BUFFER) { 10641 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); 10642 } else if (status == IOSTAT_LOCAL_REJECT && 10643 (parameter & IOERR_PARAM_MASK) == 10644 IOERR_RCV_BUFFER_WAITING) { 10645 phba->fc_stat.NoRcvBuf++; 10646 /* Not enough posted buffers; Try posting more buffers */ 10647 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) 10648 lpfc_sli3_post_buffer(phba, pring, 0); 10649 return; 10650 } 10651 10652 if (phba->sli_rev == LPFC_SLI_REV3) { 10653 icmd = &elsiocb->iocb; 10654 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 10655 (ulp_command == CMD_IOCB_RCV_ELS64_CX || 10656 ulp_command == CMD_IOCB_RCV_SEQ64_CX)) { 10657 if (icmd->unsli3.rcvsli3.vpi == 0xffff) 10658 vport = phba->pport; 10659 else 10660 vport = lpfc_find_vport_by_vpid(phba, 10661 icmd->unsli3.rcvsli3.vpi); 10662 } 10663 } 10664 10665 /* If there are no BDEs associated 10666 * with this IOCB, there is nothing to do. 10667 */ 10668 if (bde_count == 0) 10669 return; 10670 10671 /* Account for SLI2 or SLI3 and later unsolicited buffering */ 10672 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 10673 elsiocb->cmd_dmabuf = bdeBuf1; 10674 if (bde_count == 2) 10675 elsiocb->bpl_dmabuf = bdeBuf2; 10676 } else { 10677 icmd = &elsiocb->iocb; 10678 paddr = getPaddr(icmd->un.cont64[0].addrHigh, 10679 icmd->un.cont64[0].addrLow); 10680 elsiocb->cmd_dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, 10681 paddr); 10682 if (bde_count == 2) { 10683 paddr = getPaddr(icmd->un.cont64[1].addrHigh, 10684 icmd->un.cont64[1].addrLow); 10685 elsiocb->bpl_dmabuf = lpfc_sli_ringpostbuf_get(phba, 10686 pring, 10687 paddr); 10688 } 10689 } 10690 10691 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb); 10692 /* 10693 * The different unsolicited event handlers would tell us 10694 * if they are done with "mp" by setting cmd_dmabuf to NULL. 10695 */ 10696 if (elsiocb->cmd_dmabuf) { 10697 lpfc_in_buf_free(phba, elsiocb->cmd_dmabuf); 10698 elsiocb->cmd_dmabuf = NULL; 10699 } 10700 10701 if (elsiocb->bpl_dmabuf) { 10702 lpfc_in_buf_free(phba, elsiocb->bpl_dmabuf); 10703 elsiocb->bpl_dmabuf = NULL; 10704 } 10705 10706 } 10707 10708 static void 10709 lpfc_start_fdmi(struct lpfc_vport *vport) 10710 { 10711 struct lpfc_nodelist *ndlp; 10712 10713 /* If this is the first time, allocate an ndlp and initialize 10714 * it. 
Otherwise, make sure the node is enabled and then do the 10715 * login. 10716 */ 10717 ndlp = lpfc_findnode_did(vport, FDMI_DID); 10718 if (!ndlp) { 10719 ndlp = lpfc_nlp_init(vport, FDMI_DID); 10720 if (ndlp) { 10721 ndlp->nlp_type |= NLP_FABRIC; 10722 } else { 10723 return; 10724 } 10725 } 10726 10727 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 10728 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 10729 } 10730 10731 /** 10732 * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr 10733 * @phba: pointer to lpfc hba data structure. 10734 * @vport: pointer to a virtual N_Port data structure. 10735 * 10736 * This routine issues a Port Login (PLOGI) to the Name Server with 10737 * State Change Request (SCR) for a @vport. This routine will create an 10738 * ndlp for the Name Server associated to the @vport if such node does 10739 * not already exist. The PLOGI to Name Server is issued by invoking the 10740 * lpfc_issue_els_plogi() routine. If Fabric-Device Management Interface 10741 * (FDMI) is configured to the @vport, a FDMI node will be created and 10742 * the PLOGI to FDMI is issued by invoking lpfc_issue_els_plogi() routine. 10743 **/ 10744 void 10745 lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) 10746 { 10747 struct lpfc_nodelist *ndlp; 10748 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 10749 10750 /* 10751 * If lpfc_delay_discovery parameter is set and the clean address 10752 * bit is cleared and fc fabric parameters chenged, delay FC NPort 10753 * discovery. 10754 */ 10755 spin_lock_irq(shost->host_lock); 10756 if (vport->fc_flag & FC_DISC_DELAYED) { 10757 spin_unlock_irq(shost->host_lock); 10758 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10759 "3334 Delay fc port discovery for %d secs\n", 10760 phba->fc_ratov); 10761 mod_timer(&vport->delayed_disc_tmo, 10762 jiffies + msecs_to_jiffies(1000 * phba->fc_ratov)); 10763 return; 10764 } 10765 spin_unlock_irq(shost->host_lock); 10766 10767 ndlp = lpfc_findnode_did(vport, NameServer_DID); 10768 if (!ndlp) { 10769 ndlp = lpfc_nlp_init(vport, NameServer_DID); 10770 if (!ndlp) { 10771 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 10772 lpfc_disc_start(vport); 10773 return; 10774 } 10775 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10776 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10777 "0251 NameServer login: no memory\n"); 10778 return; 10779 } 10780 } 10781 10782 ndlp->nlp_type |= NLP_FABRIC; 10783 10784 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 10785 10786 if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) { 10787 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10788 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10789 "0252 Cannot issue NameServer login\n"); 10790 return; 10791 } 10792 10793 if ((phba->cfg_enable_SmartSAN || 10794 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) && 10795 (vport->load_flag & FC_ALLOW_FDMI)) 10796 lpfc_start_fdmi(vport); 10797 } 10798 10799 /** 10800 * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport 10801 * @phba: pointer to lpfc hba data structure. 10802 * @pmb: pointer to the driver internal queue element for mailbox command. 10803 * 10804 * This routine is the completion callback function to register new vport 10805 * mailbox command. 
If the new vport mailbox command completes successfully, 10806 * the fabric registration login shall be performed on physical port (the 10807 * new vport created is actually a physical port, with VPI 0) or the port 10808 * login to Name Server for State Change Request (SCR) will be performed 10809 * on virtual port (real virtual port, with VPI greater than 0). 10810 **/ 10811 static void 10812 lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 10813 { 10814 struct lpfc_vport *vport = pmb->vport; 10815 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 10816 struct lpfc_nodelist *ndlp = pmb->ctx_ndlp; 10817 MAILBOX_t *mb = &pmb->u.mb; 10818 int rc; 10819 10820 spin_lock_irq(shost->host_lock); 10821 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 10822 spin_unlock_irq(shost->host_lock); 10823 10824 if (mb->mbxStatus) { 10825 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10826 "0915 Register VPI failed : Status: x%x" 10827 " upd bit: x%x \n", mb->mbxStatus, 10828 mb->un.varRegVpi.upd); 10829 if (phba->sli_rev == LPFC_SLI_REV4 && 10830 mb->un.varRegVpi.upd) 10831 goto mbox_err_exit ; 10832 10833 switch (mb->mbxStatus) { 10834 case 0x11: /* unsupported feature */ 10835 case 0x9603: /* max_vpi exceeded */ 10836 case 0x9602: /* Link event since CLEAR_LA */ 10837 /* giving up on vport registration */ 10838 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10839 spin_lock_irq(shost->host_lock); 10840 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 10841 spin_unlock_irq(shost->host_lock); 10842 lpfc_can_disctmo(vport); 10843 break; 10844 /* If reg_vpi fail with invalid VPI status, re-init VPI */ 10845 case 0x20: 10846 spin_lock_irq(shost->host_lock); 10847 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 10848 spin_unlock_irq(shost->host_lock); 10849 lpfc_init_vpi(phba, pmb, vport->vpi); 10850 pmb->vport = vport; 10851 pmb->mbox_cmpl = lpfc_init_vpi_cmpl; 10852 rc = lpfc_sli_issue_mbox(phba, pmb, 10853 MBX_NOWAIT); 10854 if (rc == MBX_NOT_FINISHED) { 10855 lpfc_printf_vlog(vport, KERN_ERR, 10856 LOG_TRACE_EVENT, 10857 "2732 Failed to issue INIT_VPI" 10858 " mailbox command\n"); 10859 } else { 10860 lpfc_nlp_put(ndlp); 10861 return; 10862 } 10863 fallthrough; 10864 default: 10865 /* Try to recover from this error */ 10866 if (phba->sli_rev == LPFC_SLI_REV4) 10867 lpfc_sli4_unreg_all_rpis(vport); 10868 lpfc_mbx_unreg_vpi(vport); 10869 spin_lock_irq(shost->host_lock); 10870 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 10871 spin_unlock_irq(shost->host_lock); 10872 if (mb->mbxStatus == MBX_NOT_FINISHED) 10873 break; 10874 if ((vport->port_type == LPFC_PHYSICAL_PORT) && 10875 !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) { 10876 if (phba->sli_rev == LPFC_SLI_REV4) 10877 lpfc_issue_init_vfi(vport); 10878 else 10879 lpfc_initial_flogi(vport); 10880 } else { 10881 lpfc_initial_fdisc(vport); 10882 } 10883 break; 10884 } 10885 } else { 10886 spin_lock_irq(shost->host_lock); 10887 vport->vpi_state |= LPFC_VPI_REGISTERED; 10888 spin_unlock_irq(shost->host_lock); 10889 if (vport == phba->pport) { 10890 if (phba->sli_rev < LPFC_SLI_REV4) 10891 lpfc_issue_fabric_reglogin(vport); 10892 else { 10893 /* 10894 * If the physical port is instantiated using 10895 * FDISC, do not start vport discovery. 
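* Only kick off FDISCs for the other vports when this physical port did
* not itself come up via FDISC; either way, fall through to the name
* server PLOGI below.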
10896 */ 10897 if (vport->port_state != LPFC_FDISC) 10898 lpfc_start_fdiscs(phba); 10899 lpfc_do_scr_ns_plogi(phba, vport); 10900 } 10901 } else { 10902 lpfc_do_scr_ns_plogi(phba, vport); 10903 } 10904 } 10905 mbox_err_exit: 10906 /* Now, we decrement the ndlp reference count held for this 10907 * callback function 10908 */ 10909 lpfc_nlp_put(ndlp); 10910 10911 mempool_free(pmb, phba->mbox_mem_pool); 10912 return; 10913 } 10914 10915 /** 10916 * lpfc_register_new_vport - Register a new vport with a HBA 10917 * @phba: pointer to lpfc hba data structure. 10918 * @vport: pointer to a host virtual N_Port data structure. 10919 * @ndlp: pointer to a node-list data structure. 10920 * 10921 * This routine registers the @vport as a new virtual port with a HBA. 10922 * It is done through a registering vpi mailbox command. 10923 **/ 10924 void 10925 lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport, 10926 struct lpfc_nodelist *ndlp) 10927 { 10928 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 10929 LPFC_MBOXQ_t *mbox; 10930 10931 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10932 if (mbox) { 10933 lpfc_reg_vpi(vport, mbox); 10934 mbox->vport = vport; 10935 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 10936 if (!mbox->ctx_ndlp) { 10937 mempool_free(mbox, phba->mbox_mem_pool); 10938 goto mbox_err_exit; 10939 } 10940 10941 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport; 10942 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 10943 == MBX_NOT_FINISHED) { 10944 /* mailbox command not success, decrement ndlp 10945 * reference count for this command 10946 */ 10947 lpfc_nlp_put(ndlp); 10948 mempool_free(mbox, phba->mbox_mem_pool); 10949 10950 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10951 "0253 Register VPI: Can't send mbox\n"); 10952 goto mbox_err_exit; 10953 } 10954 } else { 10955 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10956 "0254 Register VPI: no memory\n"); 10957 goto mbox_err_exit; 10958 } 10959 return; 10960 10961 mbox_err_exit: 10962 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10963 spin_lock_irq(shost->host_lock); 10964 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 10965 spin_unlock_irq(shost->host_lock); 10966 return; 10967 } 10968 10969 /** 10970 * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer 10971 * @phba: pointer to lpfc hba data structure. 10972 * 10973 * This routine cancels the retry delay timers to all the vports. 10974 **/ 10975 void 10976 lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba) 10977 { 10978 struct lpfc_vport **vports; 10979 struct lpfc_nodelist *ndlp; 10980 uint32_t link_state; 10981 int i; 10982 10983 /* Treat this failure as linkdown for all vports */ 10984 link_state = phba->link_state; 10985 lpfc_linkdown(phba); 10986 phba->link_state = link_state; 10987 10988 vports = lpfc_create_vport_work_array(phba); 10989 10990 if (vports) { 10991 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 10992 ndlp = lpfc_findnode_did(vports[i], Fabric_DID); 10993 if (ndlp) 10994 lpfc_cancel_retry_delay_tmo(vports[i], ndlp); 10995 lpfc_els_flush_cmd(vports[i]); 10996 } 10997 lpfc_destroy_vport_work_array(phba, vports); 10998 } 10999 } 11000 11001 /** 11002 * lpfc_retry_pport_discovery - Start timer to retry FLOGI. 11003 * @phba: pointer to lpfc hba data structure. 11004 * 11005 * This routine abort all pending discovery commands and 11006 * start a timer to retry FLOGI for the physical port 11007 * discovery. 
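* The retry is driven through the Fabric_DID node's nlp_delayfunc timer:
* it is armed for one second with NLP_DELAY_TMO set and nlp_last_elscmd
* set to ELS_CMD_FLOGI, so the delayed ELS retry handling can reissue the
* FLOGI once the timer fires.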
11008 **/ 11009 void 11010 lpfc_retry_pport_discovery(struct lpfc_hba *phba) 11011 { 11012 struct lpfc_nodelist *ndlp; 11013 11014 /* Cancel the all vports retry delay retry timers */ 11015 lpfc_cancel_all_vport_retry_delay_timer(phba); 11016 11017 /* If fabric require FLOGI, then re-instantiate physical login */ 11018 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); 11019 if (!ndlp) 11020 return; 11021 11022 mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); 11023 spin_lock_irq(&ndlp->lock); 11024 ndlp->nlp_flag |= NLP_DELAY_TMO; 11025 spin_unlock_irq(&ndlp->lock); 11026 ndlp->nlp_last_elscmd = ELS_CMD_FLOGI; 11027 phba->pport->port_state = LPFC_FLOGI; 11028 return; 11029 } 11030 11031 /** 11032 * lpfc_fabric_login_reqd - Check if FLOGI required. 11033 * @phba: pointer to lpfc hba data structure. 11034 * @cmdiocb: pointer to FDISC command iocb. 11035 * @rspiocb: pointer to FDISC response iocb. 11036 * 11037 * This routine checks if a FLOGI is reguired for FDISC 11038 * to succeed. 11039 **/ 11040 static int 11041 lpfc_fabric_login_reqd(struct lpfc_hba *phba, 11042 struct lpfc_iocbq *cmdiocb, 11043 struct lpfc_iocbq *rspiocb) 11044 { 11045 u32 ulp_status = get_job_ulpstatus(phba, rspiocb); 11046 u32 ulp_word4 = get_job_word4(phba, rspiocb); 11047 11048 if (ulp_status != IOSTAT_FABRIC_RJT || 11049 ulp_word4 != RJT_LOGIN_REQUIRED) 11050 return 0; 11051 else 11052 return 1; 11053 } 11054 11055 /** 11056 * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command 11057 * @phba: pointer to lpfc hba data structure. 11058 * @cmdiocb: pointer to lpfc command iocb data structure. 11059 * @rspiocb: pointer to lpfc response iocb data structure. 11060 * 11061 * This routine is the completion callback function to a Fabric Discover 11062 * (FDISC) ELS command. Since all the FDISC ELS commands are issued 11063 * single threaded, each FDISC completion callback function will reset 11064 * the discovery timer for all vports such that the timers will not get 11065 * unnecessary timeout. The function checks the FDISC IOCB status. If error 11066 * detected, the vport will be set to FC_VPORT_FAILED state. Otherwise,the 11067 * vport will set to FC_VPORT_ACTIVE state. It then checks whether the DID 11068 * assigned to the vport has been changed with the completion of the FDISC 11069 * command. If so, both RPI (Remote Port Index) and VPI (Virtual Port Index) 11070 * are unregistered from the HBA, and then the lpfc_register_new_vport() 11071 * routine is invoked to register new vport with the HBA. Otherwise, the 11072 * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name 11073 * Server for State Change Request (SCR). 11074 **/ 11075 static void 11076 lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11077 struct lpfc_iocbq *rspiocb) 11078 { 11079 struct lpfc_vport *vport = cmdiocb->vport; 11080 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 11081 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 11082 struct lpfc_nodelist *np; 11083 struct lpfc_nodelist *next_np; 11084 struct lpfc_iocbq *piocb; 11085 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf, *prsp; 11086 struct serv_parm *sp; 11087 uint8_t fabric_param_changed; 11088 u32 ulp_status, ulp_word4; 11089 11090 ulp_status = get_job_ulpstatus(phba, rspiocb); 11091 ulp_word4 = get_job_word4(phba, rspiocb); 11092 11093 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 11094 "0123 FDISC completes. 
x%x/x%x prevDID: x%x\n", 11095 ulp_status, ulp_word4, 11096 vport->fc_prevDID); 11097 /* Since all FDISCs are being single threaded, we 11098 * must reset the discovery timer for ALL vports 11099 * waiting to send FDISC when one completes. 11100 */ 11101 list_for_each_entry(piocb, &phba->fabric_iocb_list, list) { 11102 lpfc_set_disctmo(piocb->vport); 11103 } 11104 11105 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 11106 "FDISC cmpl: status:x%x/x%x prevdid:x%x", 11107 ulp_status, ulp_word4, vport->fc_prevDID); 11108 11109 if (ulp_status) { 11110 11111 if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) { 11112 lpfc_retry_pport_discovery(phba); 11113 goto out; 11114 } 11115 11116 /* Check for retry */ 11117 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) 11118 goto out; 11119 /* FDISC failed */ 11120 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 11121 "0126 FDISC failed. (x%x/x%x)\n", 11122 ulp_status, ulp_word4); 11123 goto fdisc_failed; 11124 } 11125 11126 lpfc_check_nlp_post_devloss(vport, ndlp); 11127 11128 spin_lock_irq(shost->host_lock); 11129 vport->fc_flag &= ~FC_VPORT_CVL_RCVD; 11130 vport->fc_flag &= ~FC_VPORT_LOGO_RCVD; 11131 vport->fc_flag |= FC_FABRIC; 11132 if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP) 11133 vport->fc_flag |= FC_PUBLIC_LOOP; 11134 spin_unlock_irq(shost->host_lock); 11135 11136 vport->fc_myDID = ulp_word4 & Mask_DID; 11137 lpfc_vport_set_state(vport, FC_VPORT_ACTIVE); 11138 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 11139 if (!prsp) 11140 goto out; 11141 sp = prsp->virt + sizeof(uint32_t); 11142 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp); 11143 memcpy(&vport->fabric_portname, &sp->portName, 11144 sizeof(struct lpfc_name)); 11145 memcpy(&vport->fabric_nodename, &sp->nodeName, 11146 sizeof(struct lpfc_name)); 11147 if (fabric_param_changed && 11148 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 11149 /* If our NportID changed, we need to ensure all 11150 * remaining NPORTs get unreg_login'ed so we can 11151 * issue unreg_vpi. 11152 */ 11153 list_for_each_entry_safe(np, next_np, 11154 &vport->fc_nodes, nlp_listp) { 11155 if ((np->nlp_state != NLP_STE_NPR_NODE) || 11156 !(np->nlp_flag & NLP_NPR_ADISC)) 11157 continue; 11158 spin_lock_irq(&ndlp->lock); 11159 np->nlp_flag &= ~NLP_NPR_ADISC; 11160 spin_unlock_irq(&ndlp->lock); 11161 lpfc_unreg_rpi(vport, np); 11162 } 11163 lpfc_cleanup_pending_mbox(vport); 11164 11165 if (phba->sli_rev == LPFC_SLI_REV4) 11166 lpfc_sli4_unreg_all_rpis(vport); 11167 11168 lpfc_mbx_unreg_vpi(vport); 11169 spin_lock_irq(shost->host_lock); 11170 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 11171 if (phba->sli_rev == LPFC_SLI_REV4) 11172 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 11173 else 11174 vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG; 11175 spin_unlock_irq(shost->host_lock); 11176 } else if ((phba->sli_rev == LPFC_SLI_REV4) && 11177 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 11178 /* 11179 * Driver needs to re-reg VPI in order for f/w 11180 * to update the MAC address. 11181 */ 11182 lpfc_register_new_vport(phba, vport, ndlp); 11183 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 11184 goto out; 11185 } 11186 11187 if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI) 11188 lpfc_issue_init_vpi(vport); 11189 else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) 11190 lpfc_register_new_vport(phba, vport, ndlp); 11191 else 11192 lpfc_do_scr_ns_plogi(phba, vport); 11193 11194 /* The FDISC completed successfully. Move the fabric ndlp to 11195 * UNMAPPED state and register with the transport. 
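* lpfc_nlp_set_state() below takes care of that transport registration as
* part of the state transition; the fabric node stays UNMAPPED since it is
* not an FCP target.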
11196 */ 11197 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 11198 goto out; 11199 11200 fdisc_failed: 11201 if (vport->fc_vport && 11202 (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS)) 11203 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 11204 /* Cancel discovery timer */ 11205 lpfc_can_disctmo(vport); 11206 out: 11207 lpfc_els_free_iocb(phba, cmdiocb); 11208 lpfc_nlp_put(ndlp); 11209 } 11210 11211 /** 11212 * lpfc_issue_els_fdisc - Issue a fdisc iocb command 11213 * @vport: pointer to a virtual N_Port data structure. 11214 * @ndlp: pointer to a node-list data structure. 11215 * @retry: number of retries to the command IOCB. 11216 * 11217 * This routine prepares and issues a Fabric Discover (FDISC) IOCB to 11218 * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb() 11219 * routine to issue the IOCB, which makes sure only one outstanding fabric 11220 * IOCB will be sent off HBA at any given time. 11221 * 11222 * Note that the ndlp reference count will be incremented by 1 for holding the 11223 * ndlp and the reference to ndlp will be stored into the ndlp field of 11224 * the IOCB for the completion callback function to the FDISC ELS command. 11225 * 11226 * Return code 11227 * 0 - Successfully issued fdisc iocb command 11228 * 1 - Failed to issue fdisc iocb command 11229 **/ 11230 static int 11231 lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 11232 uint8_t retry) 11233 { 11234 struct lpfc_hba *phba = vport->phba; 11235 IOCB_t *icmd; 11236 union lpfc_wqe128 *wqe = NULL; 11237 struct lpfc_iocbq *elsiocb; 11238 struct serv_parm *sp; 11239 uint8_t *pcmd; 11240 uint16_t cmdsize; 11241 int did = ndlp->nlp_DID; 11242 int rc; 11243 11244 vport->port_state = LPFC_FDISC; 11245 vport->fc_myDID = 0; 11246 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 11247 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 11248 ELS_CMD_FDISC); 11249 if (!elsiocb) { 11250 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 11251 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 11252 "0255 Issue FDISC: no IOCB\n"); 11253 return 1; 11254 } 11255 11256 if (phba->sli_rev == LPFC_SLI_REV4) { 11257 wqe = &elsiocb->wqe; 11258 bf_set(els_req64_sid, &wqe->els_req, 0); 11259 bf_set(els_req64_sp, &wqe->els_req, 1); 11260 } else { 11261 icmd = &elsiocb->iocb; 11262 icmd->un.elsreq64.myID = 0; 11263 icmd->un.elsreq64.fl = 1; 11264 icmd->ulpCt_h = 1; 11265 icmd->ulpCt_l = 0; 11266 } 11267 11268 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 11269 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC; 11270 pcmd += sizeof(uint32_t); /* CSP Word 1 */ 11271 memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm)); 11272 sp = (struct serv_parm *) pcmd; 11273 /* Setup CSPs accordingly for Fabric */ 11274 sp->cmn.e_d_tov = 0; 11275 sp->cmn.w2.r_a_tov = 0; 11276 sp->cmn.virtual_fabric_support = 0; 11277 sp->cls1.classValid = 0; 11278 sp->cls2.seqDelivery = 1; 11279 sp->cls3.seqDelivery = 1; 11280 11281 pcmd += sizeof(uint32_t); /* CSP Word 2 */ 11282 pcmd += sizeof(uint32_t); /* CSP Word 3 */ 11283 pcmd += sizeof(uint32_t); /* CSP Word 4 */ 11284 pcmd += sizeof(uint32_t); /* Port Name */ 11285 memcpy(pcmd, &vport->fc_portname, 8); 11286 pcmd += sizeof(uint32_t); /* Node Name */ 11287 pcmd += sizeof(uint32_t); /* Node Name */ 11288 memcpy(pcmd, &vport->fc_nodename, 8); 11289 sp->cmn.valid_vendor_ver_level = 0; 11290 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); 11291 lpfc_set_disctmo(vport); 11292 11293 phba->fc_stat.elsXmitFDISC++; 
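/* Completion is handled by lpfc_cmpl_els_fdisc(); take an ndlp reference
 * for the callback and hand the IOCB to the fabric IOCB path, which keeps
 * only one fabric ELS outstanding at a time.
 */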
11294 elsiocb->cmd_cmpl = lpfc_cmpl_els_fdisc; 11295 11296 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 11297 "Issue FDISC: did:x%x", 11298 did, 0, 0); 11299 11300 elsiocb->ndlp = lpfc_nlp_get(ndlp); 11301 if (!elsiocb->ndlp) 11302 goto err_out; 11303 11304 rc = lpfc_issue_fabric_iocb(phba, elsiocb); 11305 if (rc == IOCB_ERROR) { 11306 lpfc_nlp_put(ndlp); 11307 goto err_out; 11308 } 11309 11310 lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING); 11311 return 0; 11312 11313 err_out: 11314 lpfc_els_free_iocb(phba, elsiocb); 11315 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 11316 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 11317 "0256 Issue FDISC: Cannot send IOCB\n"); 11318 return 1; 11319 } 11320 11321 /** 11322 * lpfc_cmpl_els_npiv_logo - Completion function with vport logo 11323 * @phba: pointer to lpfc hba data structure. 11324 * @cmdiocb: pointer to lpfc command iocb data structure. 11325 * @rspiocb: pointer to lpfc response iocb data structure. 11326 * 11327 * This routine is the completion callback function to the issuing of a LOGO 11328 * ELS command off a vport. It frees the command IOCB and then decrement the 11329 * reference count held on ndlp for this completion function, indicating that 11330 * the reference to the ndlp is no long needed. Note that the 11331 * lpfc_els_free_iocb() routine decrements the ndlp reference held for this 11332 * callback function and an additional explicit ndlp reference decrementation 11333 * will trigger the actual release of the ndlp. 11334 **/ 11335 static void 11336 lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11337 struct lpfc_iocbq *rspiocb) 11338 { 11339 struct lpfc_vport *vport = cmdiocb->vport; 11340 IOCB_t *irsp; 11341 struct lpfc_nodelist *ndlp; 11342 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 11343 u32 ulp_status, ulp_word4, did, tmo; 11344 11345 ndlp = cmdiocb->ndlp; 11346 11347 ulp_status = get_job_ulpstatus(phba, rspiocb); 11348 ulp_word4 = get_job_word4(phba, rspiocb); 11349 11350 if (phba->sli_rev == LPFC_SLI_REV4) { 11351 did = get_job_els_rsp64_did(phba, cmdiocb); 11352 tmo = get_wqe_tmo(cmdiocb); 11353 } else { 11354 irsp = &rspiocb->iocb; 11355 did = get_job_els_rsp64_did(phba, rspiocb); 11356 tmo = irsp->ulpTimeout; 11357 } 11358 11359 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 11360 "LOGO npiv cmpl: status:x%x/x%x did:x%x", 11361 ulp_status, ulp_word4, did); 11362 11363 /* NPIV LOGO completes to NPort <nlp_DID> */ 11364 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 11365 "2928 NPIV LOGO completes to NPort x%x " 11366 "Data: x%x x%x x%x x%x x%x x%x x%x\n", 11367 ndlp->nlp_DID, ulp_status, ulp_word4, 11368 tmo, vport->num_disc_nodes, 11369 kref_read(&ndlp->kref), ndlp->nlp_flag, 11370 ndlp->fc4_xpt_flags); 11371 11372 if (ulp_status == IOSTAT_SUCCESS) { 11373 spin_lock_irq(shost->host_lock); 11374 vport->fc_flag &= ~FC_NDISC_ACTIVE; 11375 vport->fc_flag &= ~FC_FABRIC; 11376 spin_unlock_irq(shost->host_lock); 11377 lpfc_can_disctmo(vport); 11378 } 11379 11380 if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) { 11381 /* Wake up lpfc_vport_delete if waiting...*/ 11382 if (ndlp->logo_waitq) 11383 wake_up(ndlp->logo_waitq); 11384 spin_lock_irq(&ndlp->lock); 11385 ndlp->nlp_flag &= ~(NLP_ISSUE_LOGO | NLP_LOGO_SND); 11386 ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO; 11387 spin_unlock_irq(&ndlp->lock); 11388 } 11389 11390 /* Safe to release resources now. 
*/ 11391 lpfc_els_free_iocb(phba, cmdiocb); 11392 lpfc_nlp_put(ndlp); 11393 } 11394 11395 /** 11396 * lpfc_issue_els_npiv_logo - Issue a logo off a vport 11397 * @vport: pointer to a virtual N_Port data structure. 11398 * @ndlp: pointer to a node-list data structure. 11399 * 11400 * This routine issues a LOGO ELS command to an @ndlp off a @vport. 11401 * 11402 * Note that the ndlp reference count will be incremented by 1 for holding the 11403 * ndlp and the reference to ndlp will be stored into the ndlp field of 11404 * the IOCB for the completion callback function to the LOGO ELS command. 11405 * 11406 * Return codes 11407 * 0 - Successfully issued logo off the @vport 11408 * 1 - Failed to issue logo off the @vport 11409 **/ 11410 int 11411 lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 11412 { 11413 int rc = 0; 11414 struct lpfc_hba *phba = vport->phba; 11415 struct lpfc_iocbq *elsiocb; 11416 uint8_t *pcmd; 11417 uint16_t cmdsize; 11418 11419 cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name); 11420 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID, 11421 ELS_CMD_LOGO); 11422 if (!elsiocb) 11423 return 1; 11424 11425 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 11426 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 11427 pcmd += sizeof(uint32_t); 11428 11429 /* Fill in LOGO payload */ 11430 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 11431 pcmd += sizeof(uint32_t); 11432 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 11433 11434 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 11435 "Issue LOGO npiv did:x%x flg:x%x", 11436 ndlp->nlp_DID, ndlp->nlp_flag, 0); 11437 11438 elsiocb->cmd_cmpl = lpfc_cmpl_els_npiv_logo; 11439 spin_lock_irq(&ndlp->lock); 11440 ndlp->nlp_flag |= NLP_LOGO_SND; 11441 spin_unlock_irq(&ndlp->lock); 11442 elsiocb->ndlp = lpfc_nlp_get(ndlp); 11443 if (!elsiocb->ndlp) { 11444 lpfc_els_free_iocb(phba, elsiocb); 11445 goto err; 11446 } 11447 11448 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 11449 if (rc == IOCB_ERROR) { 11450 lpfc_els_free_iocb(phba, elsiocb); 11451 lpfc_nlp_put(ndlp); 11452 goto err; 11453 } 11454 return 0; 11455 11456 err: 11457 spin_lock_irq(&ndlp->lock); 11458 ndlp->nlp_flag &= ~NLP_LOGO_SND; 11459 spin_unlock_irq(&ndlp->lock); 11460 return 1; 11461 } 11462 11463 /** 11464 * lpfc_fabric_block_timeout - Handler function to the fabric block timer 11465 * @t: timer context used to obtain the lpfc hba. 11466 * 11467 * This routine is invoked by the fabric iocb block timer after 11468 * timeout. It posts the fabric iocb block timeout event by setting the 11469 * WORKER_FABRIC_BLOCK_TMO bit to work port event bitmap and then invokes 11470 * lpfc_worker_wake_up() routine to wake up the worker thread. It is for 11471 * the worker thread to invoke the lpfc_unblock_fabric_iocbs() on the 11472 * posted event WORKER_FABRIC_BLOCK_TMO. 
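* Note that this runs in timer (softirq) context; it only marks the event
* under work_port_lock and leaves the actual unblocking to the worker
* thread.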
11473 **/ 11474 void 11475 lpfc_fabric_block_timeout(struct timer_list *t) 11476 { 11477 struct lpfc_hba *phba = from_timer(phba, t, fabric_block_timer); 11478 unsigned long iflags; 11479 uint32_t tmo_posted; 11480 11481 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 11482 tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO; 11483 if (!tmo_posted) 11484 phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO; 11485 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); 11486 11487 if (!tmo_posted) 11488 lpfc_worker_wake_up(phba); 11489 return; 11490 } 11491 11492 /** 11493 * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list 11494 * @phba: pointer to lpfc hba data structure. 11495 * 11496 * This routine issues one fabric iocb from the driver internal list to 11497 * the HBA. It first checks whether it's ready to issue one fabric iocb to 11498 * the HBA (whether there is no outstanding fabric iocb). If so, it shall 11499 * remove one pending fabric iocb from the driver internal list and invokes 11500 * lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA. 11501 **/ 11502 static void 11503 lpfc_resume_fabric_iocbs(struct lpfc_hba *phba) 11504 { 11505 struct lpfc_iocbq *iocb; 11506 unsigned long iflags; 11507 int ret; 11508 11509 repeat: 11510 iocb = NULL; 11511 spin_lock_irqsave(&phba->hbalock, iflags); 11512 /* Post any pending iocb to the SLI layer */ 11513 if (atomic_read(&phba->fabric_iocb_count) == 0) { 11514 list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb), 11515 list); 11516 if (iocb) 11517 /* Increment fabric iocb count to hold the position */ 11518 atomic_inc(&phba->fabric_iocb_count); 11519 } 11520 spin_unlock_irqrestore(&phba->hbalock, iflags); 11521 if (iocb) { 11522 iocb->fabric_cmd_cmpl = iocb->cmd_cmpl; 11523 iocb->cmd_cmpl = lpfc_cmpl_fabric_iocb; 11524 iocb->cmd_flag |= LPFC_IO_FABRIC; 11525 11526 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD, 11527 "Fabric sched1: ste:x%x", 11528 iocb->vport->port_state, 0, 0); 11529 11530 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); 11531 11532 if (ret == IOCB_ERROR) { 11533 iocb->cmd_cmpl = iocb->fabric_cmd_cmpl; 11534 iocb->fabric_cmd_cmpl = NULL; 11535 iocb->cmd_flag &= ~LPFC_IO_FABRIC; 11536 set_job_ulpstatus(iocb, IOSTAT_LOCAL_REJECT); 11537 iocb->wcqe_cmpl.parameter = IOERR_SLI_ABORTED; 11538 iocb->cmd_cmpl(phba, iocb, iocb); 11539 11540 atomic_dec(&phba->fabric_iocb_count); 11541 goto repeat; 11542 } 11543 } 11544 } 11545 11546 /** 11547 * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command 11548 * @phba: pointer to lpfc hba data structure. 11549 * 11550 * This routine unblocks the issuing fabric iocb command. The function 11551 * will clear the fabric iocb block bit and then invoke the routine 11552 * lpfc_resume_fabric_iocbs() to issue one of the pending fabric iocb 11553 * from the driver internal fabric iocb list. 11554 **/ 11555 void 11556 lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba) 11557 { 11558 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 11559 11560 lpfc_resume_fabric_iocbs(phba); 11561 return; 11562 } 11563 11564 /** 11565 * lpfc_block_fabric_iocbs - Block issuing fabric iocb command 11566 * @phba: pointer to lpfc hba data structure. 11567 * 11568 * This routine blocks the issuing fabric iocb for a specified amount of 11569 * time (currently 100 ms). This is done by set the fabric iocb block bit 11570 * and set up a timeout timer for 100ms. 
When the block bit is set, no more 11571 * fabric iocb will be issued out of the HBA. 11572 **/ 11573 static void 11574 lpfc_block_fabric_iocbs(struct lpfc_hba *phba) 11575 { 11576 int blocked; 11577 11578 blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 11579 /* Start a timer to unblock fabric iocbs after 100ms */ 11580 if (!blocked) 11581 mod_timer(&phba->fabric_block_timer, 11582 jiffies + msecs_to_jiffies(100)); 11583 11584 return; 11585 } 11586 11587 /** 11588 * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb 11589 * @phba: pointer to lpfc hba data structure. 11590 * @cmdiocb: pointer to lpfc command iocb data structure. 11591 * @rspiocb: pointer to lpfc response iocb data structure. 11592 * 11593 * This routine is the callback function that is put to the fabric iocb's 11594 * callback function pointer (iocb->cmd_cmpl). The original iocb's callback 11595 * function pointer has been stored in iocb->fabric_cmd_cmpl. This callback 11596 * function first restores and invokes the original iocb's callback function 11597 * and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next 11598 * fabric bound iocb from the driver internal fabric iocb list onto the wire. 11599 **/ 11600 static void 11601 lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11602 struct lpfc_iocbq *rspiocb) 11603 { 11604 struct ls_rjt stat; 11605 u32 ulp_status = get_job_ulpstatus(phba, rspiocb); 11606 u32 ulp_word4 = get_job_word4(phba, rspiocb); 11607 11608 WARN_ON((cmdiocb->cmd_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC); 11609 11610 switch (ulp_status) { 11611 case IOSTAT_NPORT_RJT: 11612 case IOSTAT_FABRIC_RJT: 11613 if (ulp_word4 & RJT_UNAVAIL_TEMP) 11614 lpfc_block_fabric_iocbs(phba); 11615 break; 11616 11617 case IOSTAT_NPORT_BSY: 11618 case IOSTAT_FABRIC_BSY: 11619 lpfc_block_fabric_iocbs(phba); 11620 break; 11621 11622 case IOSTAT_LS_RJT: 11623 stat.un.ls_rjt_error_be = 11624 cpu_to_be32(ulp_word4); 11625 if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) || 11626 (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY)) 11627 lpfc_block_fabric_iocbs(phba); 11628 break; 11629 } 11630 11631 BUG_ON(atomic_read(&phba->fabric_iocb_count) == 0); 11632 11633 cmdiocb->cmd_cmpl = cmdiocb->fabric_cmd_cmpl; 11634 cmdiocb->fabric_cmd_cmpl = NULL; 11635 cmdiocb->cmd_flag &= ~LPFC_IO_FABRIC; 11636 cmdiocb->cmd_cmpl(phba, cmdiocb, rspiocb); 11637 11638 atomic_dec(&phba->fabric_iocb_count); 11639 if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) { 11640 /* Post any pending iocbs to HBA */ 11641 lpfc_resume_fabric_iocbs(phba); 11642 } 11643 } 11644 11645 /** 11646 * lpfc_issue_fabric_iocb - Issue a fabric iocb command 11647 * @phba: pointer to lpfc hba data structure. 11648 * @iocb: pointer to lpfc command iocb data structure. 11649 * 11650 * This routine is used as the top-level API for issuing a fabric iocb command 11651 * such as FLOGI and FDISC. To accommodate certain switch fabric, this driver 11652 * function makes sure that only one fabric bound iocb will be outstanding at 11653 * any given time. As such, this function will first check to see whether there 11654 * is already an outstanding fabric iocb on the wire. If so, it will put the 11655 * newly issued iocb onto the driver internal fabric iocb list, waiting to be 11656 * issued later. Otherwise, it will issue the iocb on the wire and update the 11657 * fabric iocb count it indicate that there is one fabric iocb on the wire. 
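* For example, lpfc_issue_els_fdisc() hands its prepared FDISC IOCB to this
* routine rather than calling lpfc_sli_issue_iocb() directly, so an FDISC
* issued while another fabric ELS is still outstanding simply waits on the
* fabric_iocb_list until lpfc_resume_fabric_iocbs() sends it.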
11658 *
11659 * Note, this implementation may send fabric IOCBs out of order. The
11660 * problem is that the "ready" check does not include the condition that
11661 * the internal fabric IOCB list is empty. As such, a fabric IOCB issued
11662 * by this routine might jump ahead of the fabric IOCBs already waiting
11663 * on the internal list.
11664 *
11665 * Return code
11666 * IOCB_SUCCESS - either fabric iocb put on the list or issued successfully
11667 * IOCB_ERROR - failed to issue fabric iocb
11668 **/
11669 static int
11670 lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
11671 {
11672 unsigned long iflags;
11673 int ready;
11674 int ret;
11675
11676 BUG_ON(atomic_read(&phba->fabric_iocb_count) > 1);
11677
11678 spin_lock_irqsave(&phba->hbalock, iflags);
11679 ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
11680 !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
11681
11682 if (ready)
11683 /* Increment fabric iocb count to hold the position */
11684 atomic_inc(&phba->fabric_iocb_count);
11685 spin_unlock_irqrestore(&phba->hbalock, iflags);
11686 if (ready) {
11687 iocb->fabric_cmd_cmpl = iocb->cmd_cmpl;
11688 iocb->cmd_cmpl = lpfc_cmpl_fabric_iocb;
11689 iocb->cmd_flag |= LPFC_IO_FABRIC;
11690
11691 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
11692 "Fabric sched2: ste:x%x",
11693 iocb->vport->port_state, 0, 0);
11694
11695 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
11696
11697 if (ret == IOCB_ERROR) {
11698 iocb->cmd_cmpl = iocb->fabric_cmd_cmpl;
11699 iocb->fabric_cmd_cmpl = NULL;
11700 iocb->cmd_flag &= ~LPFC_IO_FABRIC;
11701 atomic_dec(&phba->fabric_iocb_count);
11702 }
11703 } else {
11704 spin_lock_irqsave(&phba->hbalock, iflags);
11705 list_add_tail(&iocb->list, &phba->fabric_iocb_list);
11706 spin_unlock_irqrestore(&phba->hbalock, iflags);
11707 ret = IOCB_SUCCESS;
11708 }
11709 return ret;
11710 }
11711
11712 /**
11713 * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list
11714 * @vport: pointer to a virtual N_Port data structure.
11715 *
11716 * This routine aborts all the IOCBs associated with a @vport from the
11717 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
11718 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
11719 * list, removes each IOCB associated with the @vport off the list, sets the
11720 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
11721 * associated with the IOCB.
11722 **/
11723 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
11724 {
11725 LIST_HEAD(completions);
11726 struct lpfc_hba *phba = vport->phba;
11727 struct lpfc_iocbq *tmp_iocb, *piocb;
11728
11729 spin_lock_irq(&phba->hbalock);
11730 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
11731 list) {
11732
11733 if (piocb->vport != vport)
11734 continue;
11735
11736 list_move_tail(&piocb->list, &completions);
11737 }
11738 spin_unlock_irq(&phba->hbalock);
11739
11740 /* Cancel all the IOCBs from the completions list */
11741 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
11742 IOERR_SLI_ABORTED);
11743 }
11744
11745 /**
11746 * lpfc_fabric_abort_nport - Abort a ndlp's iocbs from driver fabric iocb list
11747 * @ndlp: pointer to a node-list data structure.
11748 *
11749 * This routine aborts all the IOCBs associated with an @ndlp from the
11750 * driver internal fabric IOCB list.
The list contains fabric IOCBs to be 11751 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB 11752 * list, removes each IOCB associated with the @ndlp off the list, set the 11753 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function 11754 * associated with the IOCB. 11755 **/ 11756 void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp) 11757 { 11758 LIST_HEAD(completions); 11759 struct lpfc_hba *phba = ndlp->phba; 11760 struct lpfc_iocbq *tmp_iocb, *piocb; 11761 struct lpfc_sli_ring *pring; 11762 11763 pring = lpfc_phba_elsring(phba); 11764 11765 if (unlikely(!pring)) 11766 return; 11767 11768 spin_lock_irq(&phba->hbalock); 11769 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, 11770 list) { 11771 if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) { 11772 11773 list_move_tail(&piocb->list, &completions); 11774 } 11775 } 11776 spin_unlock_irq(&phba->hbalock); 11777 11778 /* Cancel all the IOCBs from the completions list */ 11779 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 11780 IOERR_SLI_ABORTED); 11781 } 11782 11783 /** 11784 * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list 11785 * @phba: pointer to lpfc hba data structure. 11786 * 11787 * This routine aborts all the IOCBs currently on the driver internal 11788 * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS 11789 * IOCB ring. This function takes the entire IOCB list off the fabric IOCB 11790 * list, removes IOCBs off the list, set the status field to 11791 * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with 11792 * the IOCB. 11793 **/ 11794 void lpfc_fabric_abort_hba(struct lpfc_hba *phba) 11795 { 11796 LIST_HEAD(completions); 11797 11798 spin_lock_irq(&phba->hbalock); 11799 list_splice_init(&phba->fabric_iocb_list, &completions); 11800 spin_unlock_irq(&phba->hbalock); 11801 11802 /* Cancel all the IOCBs from the completions list */ 11803 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 11804 IOERR_SLI_ABORTED); 11805 } 11806 11807 /** 11808 * lpfc_sli4_vport_delete_els_xri_aborted -Remove all ndlp references for vport 11809 * @vport: pointer to lpfc vport data structure. 11810 * 11811 * This routine is invoked by the vport cleanup for deletions and the cleanup 11812 * for an ndlp on removal. 11813 **/ 11814 void 11815 lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport) 11816 { 11817 struct lpfc_hba *phba = vport->phba; 11818 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 11819 struct lpfc_nodelist *ndlp = NULL; 11820 unsigned long iflag = 0; 11821 11822 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag); 11823 list_for_each_entry_safe(sglq_entry, sglq_next, 11824 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { 11825 if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport) { 11826 lpfc_nlp_put(sglq_entry->ndlp); 11827 ndlp = sglq_entry->ndlp; 11828 sglq_entry->ndlp = NULL; 11829 11830 /* If the xri on the abts_els_sgl list is for the Fport 11831 * node and the vport is unloading, the xri aborted wcqe 11832 * likely isn't coming back. Just release the sgl. 
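* Moving it straight back to the lpfc_els_sgl_list (marked SGL_FREED) below
* avoids leaving the sgl stranded on the aborted list while the port is
* torn down.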

/**
 * lpfc_sli4_vport_delete_els_xri_aborted - Remove all ndlp references for vport
 * @vport: pointer to lpfc vport data structure.
 *
 * This routine is invoked by the vport cleanup for deletions and the cleanup
 * for an ndlp on removal.
 **/
void
lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
	struct lpfc_nodelist *ndlp = NULL;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag);
	list_for_each_entry_safe(sglq_entry, sglq_next,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
		if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport) {
			lpfc_nlp_put(sglq_entry->ndlp);
			ndlp = sglq_entry->ndlp;
			sglq_entry->ndlp = NULL;

			/* If the xri on the abts_els_sgl list is for the Fport
			 * node and the vport is unloading, the xri aborted wcqe
			 * likely isn't coming back. Just release the sgl.
			 */
			if ((vport->load_flag & FC_UNLOADING) &&
			    ndlp->nlp_DID == Fabric_DID) {
				list_del(&sglq_entry->list);
				sglq_entry->state = SGL_FREED;
				list_add_tail(&sglq_entry->list,
					      &phba->sli4_hba.lpfc_els_sgl_list);
			}
		}
	}
	spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag);
}

/**
 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the els xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 slow-path
 * ELS aborted xri.
 **/
void
lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
			  struct sli4_wcqe_xri_aborted *axri)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	uint16_t lxri = 0;
	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
	unsigned long iflag = 0;
	struct lpfc_nodelist *ndlp;
	struct lpfc_sli_ring *pring;

	pring = lpfc_phba_elsring(phba);

	spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag);
	list_for_each_entry_safe(sglq_entry, sglq_next,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
		if (sglq_entry->sli4_xritag == xri) {
			list_del(&sglq_entry->list);
			ndlp = sglq_entry->ndlp;
			sglq_entry->ndlp = NULL;
			list_add_tail(&sglq_entry->list,
				      &phba->sli4_hba.lpfc_els_sgl_list);
			sglq_entry->state = SGL_FREED;
			spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock,
					       iflag);

			if (ndlp) {
				lpfc_set_rrq_active(phba, ndlp,
						    sglq_entry->sli4_lxritag,
						    rxid, 1);
				lpfc_nlp_put(ndlp);
			}

			/* Check if TXQ queue needs to be serviced */
			if (pring && !list_empty(&pring->txq))
				lpfc_worker_wake_up(phba);
			return;
		}
	}
	spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag);
	lxri = lpfc_sli4_xri_inrange(phba, xri);
	if (lxri == NO_XRI)
		return;

	spin_lock_irqsave(&phba->hbalock, iflag);
	sglq_entry = __lpfc_get_active_sglq(phba, lxri);
	if (!sglq_entry || sglq_entry->sli4_xritag != xri) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return;
	}
	sglq_entry->state = SGL_XRI_ABORTED;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}

/* lpfc_sli_abts_recover_port - Recover a port that failed a BLS_ABORT req.
 * @vport: pointer to virtual port object.
 * @ndlp: nodelist pointer for the impacted node.
 *
 * The driver calls this routine in response to an SLI4 XRI ABORT CQE
 * or an SLI3 ASYNC_STATUS_CN event from the port. For either event, the
 * driver is required to send a LOGO to the remote node before it
 * attempts to recover its login to the remote node.
 */
void
lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
			   struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost;
	struct lpfc_hba *phba;
	unsigned long flags = 0;

	shost = lpfc_shost_from_vport(vport);
	phba = vport->phba;
	if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3093 No rport recovery needed. "
				"rport in state 0x%x\n", ndlp->nlp_state);
		return;
	}
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"3094 Start rport recovery on shost id 0x%x "
			"fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x "
			"flags 0x%x\n",
			shost->host_no, ndlp->nlp_DID,
			vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state,
			ndlp->nlp_flag);
	/*
	 * The rport is not responding. Remove the FCP-2 flag to prevent
	 * an ADISC in the follow-up recovery code.
	 */
	spin_lock_irqsave(&ndlp->lock, flags);
	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
	ndlp->nlp_flag |= NLP_ISSUE_LOGO;
	spin_unlock_irqrestore(&ndlp->lock, flags);
	lpfc_unreg_rpi(vport, ndlp);
}

static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport)
{
	bitmap_zero(vport->vmid_priority_range, LPFC_VMID_MAX_PRIORITY_RANGE);
}

static void
lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max)
{
	u32 i;

	if (min > max || max > LPFC_VMID_MAX_PRIORITY_RANGE)
		return;

	for (i = min; i <= max; i++)
		set_bit(i, vport->vmid_priority_range);
}

static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid)
{
	set_bit(ctcl_vmid, vport->vmid_priority_range);
}

u32 lpfc_vmid_get_cs_ctl(struct lpfc_vport *vport)
{
	u32 i;

	i = find_first_bit(vport->vmid_priority_range,
			   LPFC_VMID_MAX_PRIORITY_RANGE);

	if (i == LPFC_VMID_MAX_PRIORITY_RANGE)
		return 0;

	clear_bit(i, vport->vmid_priority_range);
	return i;
}

#define MAX_PRIORITY_DESC	255

static void
lpfc_cmpl_els_qfpa(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		   struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct priority_range_desc *desc;
	struct lpfc_dmabuf *prsp = NULL;
	struct lpfc_vmid_priority_range *vmid_range = NULL;
	u32 *data;
	struct lpfc_dmabuf *dmabuf = cmdiocb->cmd_dmabuf;
	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
	u32 ulp_word4 = get_job_word4(phba, rspiocb);
	u8 *pcmd, max_desc;
	u32 len, i;
	struct lpfc_nodelist *ndlp = cmdiocb->ndlp;

	prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list);
	if (!prsp)
		goto out;

	pcmd = prsp->virt;
	data = (u32 *)pcmd;
	if (data[0] == ELS_CMD_LS_RJT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
				 "3277 QFPA LS_RJT x%x x%x\n",
				 data[0], data[1]);
		goto out;
	}
	if (ulp_status) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
				 "6529 QFPA failed with status x%x x%x\n",
				 ulp_status, ulp_word4);
		goto out;
	}

	if (!vport->qfpa_res) {
		max_desc = FCELSSIZE / sizeof(*vport->qfpa_res);
		vport->qfpa_res = kcalloc(max_desc, sizeof(*vport->qfpa_res),
					  GFP_KERNEL);
		if (!vport->qfpa_res)
			goto out;
	}

	len = *((u32 *)(pcmd + 4));
	len = be32_to_cpu(len);
	memcpy(vport->qfpa_res, pcmd, len + 8);
	len = len / LPFC_PRIORITY_RANGE_DESC_SIZE;

	desc = (struct priority_range_desc *)(pcmd + 8);
	vmid_range = vport->vmid_priority.vmid_range;
	if (!vmid_range) {
		vmid_range = kcalloc(MAX_PRIORITY_DESC, sizeof(*vmid_range),
				     GFP_KERNEL);
		if (!vmid_range) {
			kfree(vport->qfpa_res);
			goto out;
		}
		vport->vmid_priority.vmid_range = vmid_range;
	}
	vport->vmid_priority.num_descriptors = len;

	for (i = 0; i < len; i++, vmid_range++, desc++) {
		lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS,
				 "6539 vmid values low=%d, high=%d, qos=%d, "
				 "local ve id=%d\n", desc->lo_range,
				 desc->hi_range, desc->qos_priority,
				 desc->local_ve_id);

		vmid_range->low = desc->lo_range << 1;
		if (desc->local_ve_id == QFPA_ODD_ONLY)
			vmid_range->low++;
		if (desc->qos_priority)
			vport->vmid_flag |= LPFC_VMID_QOS_ENABLED;
		vmid_range->qos = desc->qos_priority;

		vmid_range->high = desc->hi_range << 1;
		if (desc->local_ve_id == QFPA_ODD_ONLY ||
		    desc->local_ve_id == QFPA_EVEN_ODD)
			vmid_range->high++;
	}
	lpfc_init_cs_ctl_bitmap(vport);
	for (i = 0; i < vport->vmid_priority.num_descriptors; i++) {
		lpfc_vmid_set_cs_ctl_range(vport,
				vport->vmid_priority.vmid_range[i].low,
				vport->vmid_priority.vmid_range[i].high);
	}

	vport->vmid_flag |= LPFC_VMID_QFPA_CMPL;
out:
	lpfc_els_free_iocb(phba, cmdiocb);
	lpfc_nlp_put(ndlp);
}

int lpfc_issue_els_qfpa(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *elsiocb;
	u8 *pcmd;
	int ret;

	ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
	if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
		return -ENXIO;

	elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_QFPA_SIZE, 2, ndlp,
				     ndlp->nlp_DID, ELS_CMD_QFPA);
	if (!elsiocb)
		return -ENOMEM;

	pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;

	*((u32 *)(pcmd)) = ELS_CMD_QFPA;
	pcmd += 4;

	elsiocb->cmd_cmpl = lpfc_cmpl_els_qfpa;

	elsiocb->ndlp = lpfc_nlp_get(ndlp);
	if (!elsiocb->ndlp) {
		lpfc_els_free_iocb(vport->phba, elsiocb);
		return -ENXIO;
	}

	ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 2);
	if (ret != IOCB_SUCCESS) {
		lpfc_els_free_iocb(phba, elsiocb);
		lpfc_nlp_put(ndlp);
		return -EIO;
	}
	vport->vmid_flag &= ~LPFC_VMID_QOS_ENABLED;
	return 0;
}
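
/*
 * Illustrative sketch (not part of the driver): lpfc_cmpl_els_qfpa() above
 * widens each returned priority range descriptor into CS_CTL values by
 * shifting the range left one bit and then biasing the ends for odd-only
 * or even/odd assignment; the result is seeded into the vport's priority
 * bitmap, from which lpfc_vmid_get_cs_ctl() later hands out the lowest
 * free value.  For example, a descriptor with lo_range = 1, hi_range = 3
 * and local_ve_id = QFPA_ODD_ONLY yields the CS_CTL range 3..7
 * (low = (1 << 1) + 1, high = (3 << 1) + 1).  The stand-alone helper below
 * repeats that conversion and the bitmap seeding on a local bitmap;
 * EXAMPLE_RANGE is a made-up size, not a driver constant.
 */
#if 0	/* example only, never built */
#define EXAMPLE_RANGE	64

static void example_qfpa_desc_to_bitmap(u8 lo_range, u8 hi_range,
					u8 local_ve_id)
{
	DECLARE_BITMAP(range, EXAMPLE_RANGE);
	u32 low, high, id;

	/* Same widening rules as lpfc_cmpl_els_qfpa() */
	low = lo_range << 1;
	if (local_ve_id == QFPA_ODD_ONLY)
		low++;

	high = hi_range << 1;
	if (local_ve_id == QFPA_ODD_ONLY || local_ve_id == QFPA_EVEN_ODD)
		high++;

	/* Seed the bitmap, as lpfc_vmid_set_cs_ctl_range() does */
	bitmap_zero(range, EXAMPLE_RANGE);
	if (low <= high && high < EXAMPLE_RANGE)
		bitmap_set(range, low, high - low + 1);

	/* Allocate the lowest free value, as lpfc_vmid_get_cs_ctl() does */
	id = find_first_bit(range, EXAMPLE_RANGE);
	if (id < EXAMPLE_RANGE)
		clear_bit(id, range);
}
#endif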

int
lpfc_vmid_uvem(struct lpfc_vport *vport,
	       struct lpfc_vmid *vmid, bool instantiated)
{
	struct lpfc_vem_id_desc *vem_id_desc;
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *elsiocb;
	struct instantiated_ve_desc *inst_desc;
	struct lpfc_vmid_context *vmid_context;
	u8 *pcmd;
	u32 *len;
	int ret = 0;

	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
		return -ENXIO;

	vmid_context = kmalloc(sizeof(*vmid_context), GFP_KERNEL);
	if (!vmid_context)
		return -ENOMEM;
	elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_UVEM_SIZE, 2,
				     ndlp, Fabric_DID, ELS_CMD_UVEM);
	if (!elsiocb)
		goto out;

	lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS,
			 "3427 Host vmid %s %d\n",
			 vmid->host_vmid, instantiated);
	vmid_context->vmp = vmid;
	vmid_context->nlp = ndlp;
	vmid_context->instantiated = instantiated;
	elsiocb->vmid_tag.vmid_context = vmid_context;
	pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;

	if (uuid_is_null((uuid_t *)vport->lpfc_vmid_host_uuid))
		memcpy(vport->lpfc_vmid_host_uuid, vmid->host_vmid,
		       LPFC_COMPRESS_VMID_SIZE);

	*((u32 *)(pcmd)) = ELS_CMD_UVEM;
	len = (u32 *)(pcmd + 4);
	*len = cpu_to_be32(LPFC_UVEM_SIZE - 8);

	vem_id_desc = (struct lpfc_vem_id_desc *)(pcmd + 8);
	vem_id_desc->tag = be32_to_cpu(VEM_ID_DESC_TAG);
	vem_id_desc->length = be32_to_cpu(LPFC_UVEM_VEM_ID_DESC_SIZE);
	memcpy(vem_id_desc->vem_id, vport->lpfc_vmid_host_uuid,
	       LPFC_COMPRESS_VMID_SIZE);

	inst_desc = (struct instantiated_ve_desc *)(pcmd + 32);
	inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG);
	inst_desc->length = be32_to_cpu(LPFC_UVEM_VE_MAP_DESC_SIZE);
	memcpy(inst_desc->global_vem_id, vmid->host_vmid,
	       LPFC_COMPRESS_VMID_SIZE);

	bf_set(lpfc_instantiated_nport_id, inst_desc, vport->fc_myDID);
	bf_set(lpfc_instantiated_local_id, inst_desc,
	       vmid->un.cs_ctl_vmid);
	if (instantiated) {
		inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG);
	} else {
		inst_desc->tag = be32_to_cpu(DEINSTANTIATED_VE_DESC_TAG);
		lpfc_vmid_put_cs_ctl(vport, vmid->un.cs_ctl_vmid);
	}
	inst_desc->word6 = cpu_to_be32(inst_desc->word6);

	elsiocb->cmd_cmpl = lpfc_cmpl_els_uvem;

	elsiocb->ndlp = lpfc_nlp_get(ndlp);
	if (!elsiocb->ndlp) {
		lpfc_els_free_iocb(vport->phba, elsiocb);
		goto out;
	}

	ret = lpfc_sli_issue_iocb(vport->phba, LPFC_ELS_RING, elsiocb, 0);
	if (ret != IOCB_SUCCESS) {
		lpfc_els_free_iocb(vport->phba, elsiocb);
		lpfc_nlp_put(ndlp);
		goto out;
	}

	return 0;
out:
	kfree(vmid_context);
	return -EIO;
}

static void
lpfc_cmpl_els_uvem(struct lpfc_hba *phba, struct lpfc_iocbq *icmdiocb,
		   struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = icmdiocb->vport;
	struct lpfc_dmabuf *prsp = NULL;
	struct lpfc_vmid_context *vmid_context =
	    icmdiocb->vmid_tag.vmid_context;
	struct lpfc_nodelist *ndlp = icmdiocb->ndlp;
	u8 *pcmd;
	u32 *data;
	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
	u32 ulp_word4 = get_job_word4(phba, rspiocb);
	struct lpfc_dmabuf *dmabuf = icmdiocb->cmd_dmabuf;
	struct lpfc_vmid *vmid;

	vmid = vmid_context->vmp;
	if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
		ndlp = NULL;

	prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list);
	if (!prsp)
		goto out;
	pcmd = prsp->virt;
	data = (u32 *)pcmd;
	if (data[0] == ELS_CMD_LS_RJT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
				 "4532 UVEM LS_RJT %x %x\n", data[0], data[1]);
		goto out;
	}
	if (ulp_status) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
				 "4533 UVEM error status %x: %x\n",
				 ulp_status, ulp_word4);
		goto out;
	}
	spin_lock(&phba->hbalock);
	/* Set IN USE flag */
	vport->vmid_flag |= LPFC_VMID_IN_USE;
	phba->pport->vmid_flag |= LPFC_VMID_IN_USE;
	spin_unlock(&phba->hbalock);

	if (vmid_context->instantiated) {
		write_lock(&vport->vmid_lock);
		vmid->flag |= LPFC_VMID_REGISTERED;
		vmid->flag &= ~LPFC_VMID_REQ_REGISTER;
		write_unlock(&vport->vmid_lock);
	}

out:
	kfree(vmid_context);
	lpfc_els_free_iocb(phba, icmdiocb);
	lpfc_nlp_put(ndlp);
}
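
/*
 * Illustrative sketch (not part of the driver): the UVEM payload built in
 * lpfc_vmid_uvem() above is a sequence of (tag, length, value) descriptors
 * whose tag and length words are carried big-endian on the wire.  The
 * hypothetical helper below shows that layout for a generic descriptor;
 * the struct, tag value, and field sizes are made up for the example.
 */
#if 0	/* example only, never built */
struct example_desc {
	__be32 tag;
	__be32 length;
	u8 value[16];
};

#define EXAMPLE_DESC_TAG	0x000A0000	/* made-up tag value */

static void example_build_desc(struct example_desc *d,
			       const u8 *val, size_t len)
{
	/* Tag and length go out big-endian, value bytes are copied as-is */
	d->tag = cpu_to_be32(EXAMPLE_DESC_TAG);
	d->length = cpu_to_be32(sizeof(d->value));
	memcpy(d->value, val, min(len, sizeof(d->value)));
}
#endif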