/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
/* See Fibre Channel protocol T11 FC-LS for details */
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
			  struct lpfc_iocbq *);
static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
				  struct lpfc_iocbq *);
static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
				struct lpfc_nodelist *ndlp, uint8_t retry);
static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
				  struct lpfc_iocbq *iocb);
static void lpfc_cmpl_els_edc(struct lpfc_hba *phba,
			      struct lpfc_iocbq *cmdiocb,
			      struct lpfc_iocbq *rspiocb);
static void lpfc_cmpl_els_uvem(struct lpfc_hba *, struct lpfc_iocbq *,
			       struct lpfc_iocbq *);

static int lpfc_max_els_tries = 3;

static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport);
static void lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max);
static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid);

/**
 * lpfc_els_chk_latt - Check host link attention event for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine checks whether there is an outstanding host link
 * attention event during the discovery process with the @vport. It is done
 * by reading the HBA's Host Attention (HA) register.
 * If there are any host link attention events during this @vport's discovery
 * process, the @vport shall be marked FC_ABORT_DISCOVERY, a host link
 * attention clear shall be issued if the link state is not already in the
 * host link cleared state, and the return code shall indicate whether a host
 * link attention event happened.
 *
 * Note that, if either the host link is in state LPFC_LINK_DOWN or the @vport
 * state is LPFC_VPORT_READY, the request for checking the host link attention
 * event will be ignored and the return code shall indicate that no host link
 * attention event happened.
 *
 * Return codes
 *   0 - no host link attention event happened
 *   1 - host link attention event happened
 **/
int
lpfc_els_chk_latt(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	uint32_t ha_copy;

	if (vport->port_state >= LPFC_VPORT_READY ||
	    phba->link_state == LPFC_LINK_DOWN ||
	    phba->sli_rev > LPFC_SLI_REV3)
		return 0;

	/* Read the HBA Host Attention Register */
	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		return 1;

	if (!(ha_copy & HA_LATT))
		return 0;

	/* Pending Link Event during Discovery */
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0237 Pending Link Event during "
			 "Discovery: State x%x\n",
			 phba->pport->port_state);

	/* CLEAR_LA should re-enable link attention events and
	 * we should then immediately take a LATT event. The
	 * LATT processing should call lpfc_linkdown() which
	 * will cleanup any left over in-progress discovery
	 * events.
	 */
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_ABORT_DISCOVERY;
	spin_unlock_irq(shost->host_lock);

	if (phba->link_state != LPFC_CLEAR_LA)
		lpfc_issue_clear_la(phba, vport);

	return 1;
}

/**
 * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
 * @vport: pointer to a host virtual N_Port data structure.
 * @expect_rsp: flag indicating whether response is expected.
 * @cmd_size: size of the ELS command.
 * @retry: number of retries to the command when it fails.
 * @ndlp: pointer to a node-list data structure.
 * @did: destination identifier.
 * @elscmd: the ELS command code.
 *
 * This routine allocates an lpfc IOCB data structure from the driver's
 * lpfc IOCB free-list and prepares the IOCB with the parameters passed in,
 * for the discovery state machine to issue Extended Link Service (ELS)
 * commands. It is a generic lpfc IOCB allocation and preparation routine
 * used by all the discovery state machine routines; the ELS command-specific
 * fields are set up later by the individual discovery state machine routines
 * after calling this routine to allocate and prepare a generic IOCB data
 * structure. It fills in the Buffer Descriptor Entries (BDEs) and allocates
 * buffers for both the command payload and the response payload (if one is
 * expected). The reference count on the ndlp is incremented by 1 and the
 * ndlp pointer is stored in the ndlp field of the IOCB so that the command's
 * completion callback function can access it later.
 *
 * Return code
 *   Pointer to the newly allocated/prepared els iocb data structure
 *   NULL - when els iocb data structure allocation/preparation failed
 **/
struct lpfc_iocbq *
lpfc_prep_els_iocb(struct lpfc_vport *vport, u8 expect_rsp,
		   u16 cmd_size, u8 retry,
		   struct lpfc_nodelist *ndlp, u32 did,
		   u32 elscmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_dmabuf *pcmd, *prsp, *pbuflist, *bmp;
	struct ulp_bde64_le *bpl;
	u32 timeout = 0;

	if (!lpfc_is_link_up(phba))
		return NULL;

	/* Allocate buffer for command iocb */
	elsiocb = lpfc_sli_get_iocbq(phba);
	if (!elsiocb)
		return NULL;

	/*
	 * If this command is for fabric controller and HBA running
	 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
	 */
	if ((did == Fabric_DID) &&
	    (phba->hba_flag & HBA_FIP_SUPPORT) &&
	    ((elscmd == ELS_CMD_FLOGI) ||
	     (elscmd == ELS_CMD_FDISC) ||
	     (elscmd == ELS_CMD_LOGO)))
		switch (elscmd) {
		case ELS_CMD_FLOGI:
			elsiocb->cmd_flag |=
				((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		case ELS_CMD_FDISC:
			elsiocb->cmd_flag |=
				((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		case ELS_CMD_LOGO:
			elsiocb->cmd_flag |=
				((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		}
	else
		elsiocb->cmd_flag &= ~LPFC_FIP_ELS_ID_MASK;

	/* fill in BDEs for command */
	/* Allocate buffer for command payload */
	pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL);
	if (pcmd)
		pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
	if (!pcmd || !pcmd->virt)
		goto els_iocb_free_pcmb_exit;

	INIT_LIST_HEAD(&pcmd->list);

	/* Allocate buffer for response payload */
	if (expect_rsp) {
		prsp = kmalloc(sizeof(*prsp), GFP_KERNEL);
		if (prsp)
			prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
						     &prsp->phys);
		if (!prsp || !prsp->virt)
			goto els_iocb_free_prsp_exit;
		INIT_LIST_HEAD(&prsp->list);
	} else {
		prsp = NULL;
	}

	/* Allocate buffer for Buffer ptr list */
	pbuflist = kmalloc(sizeof(*pbuflist), GFP_KERNEL);
	if (pbuflist)
		pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
						 &pbuflist->phys);
	if (!pbuflist || !pbuflist->virt)
		goto els_iocb_free_pbuf_exit;

	INIT_LIST_HEAD(&pbuflist->list);

	if (expect_rsp) {
		switch (elscmd) {
		case ELS_CMD_FLOGI:
			timeout = FF_DEF_RATOV * 2;
			break;
		case ELS_CMD_LOGO:
			timeout = phba->fc_ratov;
			break;
		default:
			timeout = phba->fc_ratov * 2;
		}

		/* Fill SGE for the num bde count */
		elsiocb->num_bdes = 2;
	}

	if (phba->sli_rev == LPFC_SLI_REV4)
		bmp = pcmd;
	else
		bmp = pbuflist;

	lpfc_sli_prep_els_req_rsp(phba, elsiocb, vport, bmp, cmd_size, did,
				  elscmd, timeout, expect_rsp);

	bpl = (struct ulp_bde64_le *)pbuflist->virt;
	bpl->addr_low = cpu_to_le32(putPaddrLow(pcmd->phys));
	bpl->addr_high = cpu_to_le32(putPaddrHigh(pcmd->phys));
	bpl->type_size = cpu_to_le32(cmd_size);
	bpl->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);

	if (expect_rsp) {
		bpl++;
		bpl->addr_low = cpu_to_le32(putPaddrLow(prsp->phys));
		bpl->addr_high = cpu_to_le32(putPaddrHigh(prsp->phys));
		bpl->type_size = cpu_to_le32(FCELSSIZE);
		bpl->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
	}

	elsiocb->cmd_dmabuf = pcmd;
	elsiocb->bpl_dmabuf = pbuflist;
	elsiocb->retry = retry;
	elsiocb->vport = vport;
	elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;

	if (prsp)
		list_add(&prsp->list, &pcmd->list);
	if (expect_rsp) {
		/* Xmit ELS command <elsCmd> to remote NPORT <did> */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "0116 Xmit ELS command x%x to remote "
				 "NPORT x%x I/O tag: x%x, port state:x%x "
				 "rpi x%x fc_flag:x%x\n",
				 elscmd, did, elsiocb->iotag,
				 vport->port_state, ndlp->nlp_rpi,
				 vport->fc_flag);
	} else {
		/* Xmit ELS response <elsCmd> to remote NPORT <did> */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "0117 Xmit ELS response x%x to remote "
				 "NPORT x%x I/O tag: x%x, size: x%x "
				 "port_state x%x rpi x%x fc_flag x%x\n",
				 elscmd, ndlp->nlp_DID, elsiocb->iotag,
				 cmd_size, vport->port_state,
				 ndlp->nlp_rpi, vport->fc_flag);
	}

	return elsiocb;

els_iocb_free_pbuf_exit:
	if (expect_rsp)
		lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
	kfree(pbuflist);

els_iocb_free_prsp_exit:
	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
	kfree(prsp);

els_iocb_free_pcmb_exit:
	kfree(pcmd);
	lpfc_sli_release_iocbq(phba, elsiocb);
	return NULL;
}

/**
 * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a fabric registration login for a @vport. An
 * active ndlp node with Fabric_DID must already exist for this @vport.
 * The routine invokes two mailbox commands to carry out fabric registration
 * login through the HBA firmware: the first mailbox command requests the
 * HBA to perform link configuration for the @vport; and the second mailbox
 * command requests the HBA to perform the actual fabric registration login
 * with the @vport.
 *
 * Return code
 *   0 - successfully issued fabric registration login for @vport
 *   -ENXIO - failed to issue fabric registration login for @vport
 **/
int
lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_nodelist *ndlp;
	struct serv_parm *sp;
	int rc;
	int err = 0;

	sp = &phba->fc_fabparam;
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		err = 1;
		goto fail;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		err = 2;
		goto fail;
	}

	vport->port_state = LPFC_FABRIC_CFG_LINK;
	lpfc_config_link(phba, mbox);
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->vport = vport;

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		err = 3;
		goto fail_free_mbox;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		err = 4;
		goto fail;
	}
	rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
			  ndlp->nlp_rpi);
	if (rc) {
		err = 5;
		goto fail_free_mbox;
	}

	mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
	mbox->vport = vport;
	/* increment the reference count on ndlp to hold reference
	 * for the callback routine.
	 */
	mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
	if (!mbox->ctx_ndlp) {
		err = 6;
		goto fail_free_mbox;
	}

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		err = 7;
		goto fail_issue_reg_login;
	}

	return 0;

fail_issue_reg_login:
	/* decrement the reference count on ndlp just incremented
	 * for the failed mbox command.
	 */
	lpfc_nlp_put(ndlp);
fail_free_mbox:
	lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
fail:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0249 Cannot issue Register Fabric login: Err %d\n",
			 err);
	return -ENXIO;
}

/**
 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
 * the @vport. This mailbox command is necessary for SLI4 ports only.
 *
 * Return code
 *   0 - successfully issued REG_VFI for @vport
 *   A failure code otherwise.
 **/
int
lpfc_issue_reg_vfi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mboxq = NULL;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *dmabuf = NULL;
	int rc = 0;

	/* move forward in case of SLI4 FC port loopback test and pt2pt mode */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    !(phba->link_flag & LS_LOOPBACK_MODE) &&
	    !(vport->fc_flag & FC_PT2PT)) {
		ndlp = lpfc_findnode_did(vport, Fabric_DID);
		if (!ndlp) {
			rc = -ENODEV;
			goto fail;
		}
	}

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		rc = -ENOMEM;
		goto fail;
	}

	/* Supply CSP's only if we are fabric connect or pt-to-pt connect */
	if ((vport->fc_flag & FC_FABRIC) || (vport->fc_flag & FC_PT2PT)) {
		rc = lpfc_mbox_rsrc_prep(phba, mboxq);
		if (rc) {
			rc = -ENOMEM;
			goto fail_mbox;
		}
		dmabuf = mboxq->ctx_buf;
		memcpy(dmabuf->virt, &phba->fc_fabparam,
		       sizeof(struct serv_parm));
	}

	vport->port_state = LPFC_FABRIC_CFG_LINK;
	if (dmabuf) {
		lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
		/* lpfc_reg_vfi memsets the mailbox. Restore the ctx_buf. */
		mboxq->ctx_buf = dmabuf;
	} else {
		lpfc_reg_vfi(mboxq, vport, 0);
	}

	mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
	mboxq->vport = vport;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = -ENXIO;
		goto fail_mbox;
	}
	return 0;

fail_mbox:
	lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
fail:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0289 Issue Register VFI failed: Err %d\n", rc);
	return rc;
}

/**
 * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues an UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for
 * the @vport. This mailbox command is necessary for SLI4 ports only.
 *
 * Return code
 *   0 - successfully issued UNREG_VFI for @vport
 *   A failure code otherwise.
 **/
int
lpfc_issue_unreg_vfi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct Scsi_Host *shost;
	LPFC_MBOXQ_t *mboxq;
	int rc;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2556 UNREG_VFI mbox allocation failed "
				"HBA state x%x\n", phba->pport->port_state);
		return -ENOMEM;
	}

	lpfc_unreg_vfi(mboxq, vport);
	mboxq->vport = vport;
	mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl;

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2557 UNREG_VFI issue mbox failed rc x%x "
				"HBA state x%x\n",
				rc, phba->pport->port_state);
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EIO;
	}

	shost = lpfc_shost_from_vport(vport);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VFI_REGISTERED;
	spin_unlock_irq(shost->host_lock);
	return 0;
}

/**
 * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
 * @vport: pointer to a host virtual N_Port data structure.
 * @sp: pointer to service parameter data structure.
 *
 * This routine is called from the FLOGI/FDISC completion handler functions.
 * lpfc_check_clean_addr_bit returns 1 when the FCID, Fabric portname or
 * Fabric nodename has changed in the completion service parameters;
 * otherwise it returns 0. This function also sets a flag in the vport data
 * structure to delay N_Port discovery after the FLOGI/FDISC completion if
 * the Clean Address bit in the FLOGI/FDISC response is cleared and the FCID,
 * Fabric portname or Fabric nodename has changed in the completion service
 * parameters.
 *
 * Return code
 *   0 - FCID, Fabric Nodename and Fabric portname are not changed.
 *   1 - FCID, Fabric Nodename or Fabric portname is changed.
 *
 **/
static uint8_t
lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
			  struct serv_parm *sp)
{
	struct lpfc_hba *phba = vport->phba;
	uint8_t fabric_param_changed = 0;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if ((vport->fc_prevDID != vport->fc_myDID) ||
	    memcmp(&vport->fabric_portname, &sp->portName,
		   sizeof(struct lpfc_name)) ||
	    memcmp(&vport->fabric_nodename, &sp->nodeName,
		   sizeof(struct lpfc_name)) ||
	    (vport->vport_flag & FAWWPN_PARAM_CHG)) {
		fabric_param_changed = 1;
		vport->vport_flag &= ~FAWWPN_PARAM_CHG;
	}
	/*
	 * Word 1 Bit 31 in common service parameter is overloaded.
	 * Word 1 Bit 31 in FLOGI request is multiple NPort request
	 * Word 1 Bit 31 in FLOGI response is clean address bit
	 *
	 * If fabric parameter is changed and clean address bit is
	 * cleared delay nport discovery if
	 * - vport->fc_prevDID != 0 (not initial discovery) OR
	 * - lpfc_delay_discovery module parameter is set.
	 */
	if (fabric_param_changed && !sp->cmn.clean_address_bit &&
	    (vport->fc_prevDID || phba->cfg_delay_discovery)) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_DISC_DELAYED;
		spin_unlock_irq(shost->host_lock);
	}

	return fabric_param_changed;
}


/**
 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 * @ulp_word4: command response value
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into a fabric
 * port in a fabric topology. It properly sets up the parameters to the @ndlp
 * from the IOCB response. It also checks the newly assigned N_Port ID of the
 * @vport against the previously assigned N_Port ID. If it is different from
 * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine
 * is invoked on all the remaining nodes with the @vport to unregister the
 * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin()
 * routine is invoked to register login to the fabric.
 *
 * Return code
 *   0 - Success (currently, always returns 0)
 **/
static int
lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   struct serv_parm *sp, uint32_t ulp_word4)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *np;
	struct lpfc_nodelist *next_np;
	uint8_t fabric_param_changed;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_FABRIC;
	spin_unlock_irq(shost->host_lock);

	phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
	if (sp->cmn.edtovResolution)	/* E_D_TOV ticks are in nanoseconds */
		phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;

	phba->fc_edtovResol = sp->cmn.edtovResolution;
	phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;

	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_PUBLIC_LOOP;
		spin_unlock_irq(shost->host_lock);
	}

	vport->fc_myDID = ulp_word4 & Mask_DID;
	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
	ndlp->nlp_class_sup = 0;
	if (sp->cls1.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS1;
	if (sp->cls2.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS2;
	if (sp->cls3.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS3;
	if (sp->cls4.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS4;
	ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
			     sp->cmn.bbRcvSizeLsb;

	fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
	if (fabric_param_changed) {
		/* Reset FDMI attribute masks based on config parameter */
		if (phba->cfg_enable_SmartSAN ||
		    (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
			/* Setup appropriate attribute masks */
			vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
			if (phba->cfg_enable_SmartSAN)
				vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
			else
				vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
		} else {
			vport->fdmi_hba_mask = 0;
			vport->fdmi_port_mask = 0;
		}

	}
	memcpy(&vport->fabric_portname, &sp->portName,
	       sizeof(struct lpfc_name));
	memcpy(&vport->fabric_nodename, &sp->nodeName,
	       sizeof(struct lpfc_name));
	memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));

	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
		if (sp->cmn.response_multiple_NPort) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_ELS | LOG_VPORT,
					 "1816 FLOGI NPIV supported, "
					 "response data 0x%x\n",
					 sp->cmn.response_multiple_NPort);
			spin_lock_irq(&phba->hbalock);
			phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
			spin_unlock_irq(&phba->hbalock);
		} else {
			/* Because we asked f/w for NPIV it still expects us
			 * to call reg_vnpid at least for the physical host
			 */
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_ELS | LOG_VPORT,
					 "1817 Fabric does not support NPIV "
					 "- configuring single port mode.\n");
			spin_lock_irq(&phba->hbalock);
			phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
			spin_unlock_irq(&phba->hbalock);
		}
	}

	/*
	 * For FC we need to do some special processing because of the SLI
	 * Port's default settings of the Common Service Parameters.
	 */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)) {
		/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
		if (fabric_param_changed)
			lpfc_unregister_fcf_prep(phba);

		/* This should just update the VFI CSPs */
		if (vport->fc_flag & FC_VFI_REGISTERED)
			lpfc_issue_reg_vfi(vport);
	}

	if (fabric_param_changed &&
	    !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {

		/* If our NportID changed, we need to ensure all
		 * remaining NPORTs get unreg_login'ed.
		 */
		list_for_each_entry_safe(np, next_np,
					 &vport->fc_nodes, nlp_listp) {
			if ((np->nlp_state != NLP_STE_NPR_NODE) ||
			    !(np->nlp_flag & NLP_NPR_ADISC))
				continue;
			spin_lock_irq(&np->lock);
			np->nlp_flag &= ~NLP_NPR_ADISC;
			spin_unlock_irq(&np->lock);
			lpfc_unreg_rpi(vport, np);
		}
		lpfc_cleanup_pending_mbox(vport);

		if (phba->sli_rev == LPFC_SLI_REV4) {
			lpfc_sli4_unreg_all_rpis(vport);
			lpfc_mbx_unreg_vpi(vport);
			spin_lock_irq(shost->host_lock);
			vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
			spin_unlock_irq(shost->host_lock);
		}

		/*
		 * For SLI3 and SLI4, the VPI needs to be reregistered in
		 * response to this fabric parameter change event.
		 */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
		   !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
		/*
		 * Driver needs to re-reg VPI in order for f/w
		 * to update the MAC address.
		 */
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		lpfc_register_new_vport(phba, vport, ndlp);
		return 0;
	}

	if (phba->sli_rev < LPFC_SLI_REV4) {
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
		    vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
			lpfc_register_new_vport(phba, vport, ndlp);
		else
			lpfc_issue_fabric_reglogin(vport);
	} else {
		ndlp->nlp_type |= NLP_FABRIC;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
		    (vport->vpi_state & LPFC_VPI_REGISTERED)) {
			lpfc_start_fdiscs(phba);
			lpfc_do_scr_ns_plogi(phba, vport);
		} else if (vport->fc_flag & FC_VFI_REGISTERED)
			lpfc_issue_init_vpi(vport);
		else {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
					 "3135 Need register VFI: (x%x/%x)\n",
					 vport->fc_prevDID, vport->fc_myDID);
			lpfc_issue_reg_vfi(vport);
		}
	}
	return 0;
}

/**
 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into an N_Port
 * in a point-to-point topology. First, the @vport's N_Port Name is compared
 * with the received N_Port Name: if the @vport's N_Port Name is greater than
 * the received N_Port Name lexicographically, this node shall assign local
 * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and
 * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise,
 * this node shall just wait for the remote node to issue PLOGI and assign
 * N_Port IDs.
 *
 * Return code
 *   0 - Success
 *   -ENXIO - Fail
 **/
static int
lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  struct serv_parm *sp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
	vport->fc_flag |= FC_PT2PT;
	spin_unlock_irq(shost->host_lock);

	/* If we are pt2pt with another NPort, force NPIV off! */
	phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

	/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
	if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
		lpfc_unregister_fcf_prep(phba);

		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_VFI_REGISTERED;
		spin_unlock_irq(shost->host_lock);
		phba->fc_topology_changed = 0;
	}

	rc = memcmp(&vport->fc_portname, &sp->portName,
		    sizeof(vport->fc_portname));

	if (rc >= 0) {
		/* This side will initiate the PLOGI */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_PT2PT_PLOGI;
		spin_unlock_irq(shost->host_lock);

		/*
		 * N_Port ID cannot be 0, set our Id to LocalID
		 * the other side will be RemoteID.
		 */

		/* not equal */
		if (rc)
			vport->fc_myDID = PT2PT_LocalID;

		/* If not registered with a transport, decrement ndlp reference
		 * count indicating that ndlp can be safely released when other
		 * references are removed.
		 */
		if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)))
			lpfc_nlp_put(ndlp);

		ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
		if (!ndlp) {
			/*
			 * Cannot find existing Fabric ndlp, so allocate a
			 * new one
			 */
			ndlp = lpfc_nlp_init(vport, PT2PT_RemoteID);
			if (!ndlp)
				goto fail;
		}

		memcpy(&ndlp->nlp_portname, &sp->portName,
		       sizeof(struct lpfc_name));
		memcpy(&ndlp->nlp_nodename, &sp->nodeName,
		       sizeof(struct lpfc_name));
		/* Set state will put ndlp onto node list if not already done */
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(&ndlp->lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(&ndlp->lock);

		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!mbox)
			goto fail;

		lpfc_config_link(phba, mbox);

		mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
		mbox->vport = vport;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
			goto fail;
		}
	} else {
		/* This side will wait for the PLOGI. If not registered with
		 * a transport, decrement node reference count indicating that
		 * ndlp can be released when other references are removed.
		 */
		if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)))
			lpfc_nlp_put(ndlp);

		/* Start discovery - this should just do CLEAR_LA */
		lpfc_disc_start(vport);
	}

	return 0;
fail:
	return -ENXIO;
}

/**
 * lpfc_cmpl_els_flogi - Completion callback function for flogi
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is the top-level completion callback function for issuing
 * a Fabric Login (FLOGI) command. If the response IOCB reported an error,
 * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If
 * a retry has been made (either immediately or delayed, with lpfc_els_retry()
 * returning 1), the command IOCB will be released and the function returns.
 * If the retry attempt has been given up (possibly after reaching the maximum
 * number of retries), one additional decrement of the ndlp reference shall be
 * made before going out after releasing the command IOCB. This will
 * actually release the remote node (note that lpfc_els_free_iocb() will also
 * decrement the ndlp reference count once). If no error is reported in
 * the IOCB status, the command Port ID field is used to determine whether
 * this is a point-to-point topology or a fabric topology: if the Port ID
 * field is assigned, it is a fabric topology; otherwise, it is a
 * point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or
 * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the
 * topology-specific completion conditions.
 **/
static void
lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		    struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
	IOCB_t *irsp;
	struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf, *prsp;
	struct serv_parm *sp;
	uint16_t fcf_index;
	int rc;
	u32 ulp_status, ulp_word4, tmo;

	/* Check to see if link went down during discovery */
	if (lpfc_els_chk_latt(vport)) {
		/* One additional decrement on node reference count to
		 * trigger the release of the node
		 */
		if (!(ndlp->fc4_xpt_flags & SCSI_XPT_REGD))
			lpfc_nlp_put(ndlp);
		goto out;
	}

	ulp_status = get_job_ulpstatus(phba, rspiocb);
	ulp_word4 = get_job_word4(phba, rspiocb);

	if (phba->sli_rev == LPFC_SLI_REV4) {
		tmo = get_wqe_tmo(cmdiocb);
	} else {
		irsp = &rspiocb->iocb;
		tmo = irsp->ulpTimeout;
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			      "FLOGI cmpl: status:x%x/x%x state:x%x",
			      ulp_status, ulp_word4,
			      vport->port_state);

	if (ulp_status) {
		/*
		 * In case of FIP mode, perform roundrobin FCF failover
		 * due to new FCF discovery
		 */
		if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
		    (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
			if (phba->link_state < LPFC_LINK_UP)
				goto stop_rr_fcf_flogi;
			if ((phba->fcoe_cvl_eventtag_attn ==
			     phba->fcoe_cvl_eventtag) &&
			    (ulp_status == IOSTAT_LOCAL_REJECT) &&
			    ((ulp_word4 & IOERR_PARAM_MASK) ==
			     IOERR_SLI_ABORTED))
				goto stop_rr_fcf_flogi;
			else
				phba->fcoe_cvl_eventtag_attn =
					phba->fcoe_cvl_eventtag;
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
					"2611 FLOGI failed on FCF (x%x), "
					"status:x%x/x%x, tmo:x%x, perform "
					"roundrobin FCF failover\n",
					phba->fcf.current_rec.fcf_indx,
					ulp_status, ulp_word4, tmo);
			lpfc_sli4_set_fcf_flogi_fail(phba,
					phba->fcf.current_rec.fcf_indx);
			fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
			rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
			if (rc)
				goto out;
		}

stop_rr_fcf_flogi:
		/* FLOGI failure */
		if (!(ulp_status == IOSTAT_LOCAL_REJECT &&
		      ((ulp_word4 & IOERR_PARAM_MASK) ==
		       IOERR_LOOP_OPEN_FAILURE)))
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "2858 FLOGI failure Status:x%x/x%x TMO"
					 ":x%x Data x%x x%x\n",
					 ulp_status, ulp_word4, tmo,
					 phba->hba_flag, phba->fcf.fcf_flag);

		/* Check for retry */
		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
			goto out;

		lpfc_printf_vlog(vport, KERN_WARNING, LOG_TRACE_EVENT,
				 "0150 FLOGI failure Status:x%x/x%x "
				 "xri x%x TMO:x%x refcnt %d\n",
				 ulp_status, ulp_word4, cmdiocb->sli4_xritag,
				 tmo, kref_read(&ndlp->kref));

		/* If this is not a loop open failure, bail out */
		if (!(ulp_status == IOSTAT_LOCAL_REJECT &&
		      ((ulp_word4 & IOERR_PARAM_MASK) ==
		       IOERR_LOOP_OPEN_FAILURE))) {
			/* FLOGI failure */
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "0100 FLOGI failure Status:x%x/x%x "
					 "TMO:x%x\n",
					 ulp_status, ulp_word4, tmo);
			goto flogifail;
		}

		/* FLOGI failed, so there is no fabric */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP |
				    FC_PT2PT_NO_NVME);
		spin_unlock_irq(shost->host_lock);

		/* If private loop, then allow max outstanding els to be
		 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
		 * alpa map would take too long otherwise.
		 */
		if (phba->alpa_map[0] == 0)
			vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
		if ((phba->sli_rev == LPFC_SLI_REV4) &&
		    (!(vport->fc_flag & FC_VFI_REGISTERED) ||
		     (vport->fc_prevDID != vport->fc_myDID) ||
		     phba->fc_topology_changed)) {
			if (vport->fc_flag & FC_VFI_REGISTERED) {
				if (phba->fc_topology_changed) {
					lpfc_unregister_fcf_prep(phba);
					spin_lock_irq(shost->host_lock);
					vport->fc_flag &= ~FC_VFI_REGISTERED;
					spin_unlock_irq(shost->host_lock);
					phba->fc_topology_changed = 0;
				} else {
					lpfc_sli4_unreg_all_rpis(vport);
				}
			}

			/* Do not register VFI if the driver aborted FLOGI */
			if (!lpfc_error_lost_link(ulp_status, ulp_word4))
				lpfc_issue_reg_vfi(vport);

			lpfc_nlp_put(ndlp);
			goto out;
		}
		goto flogifail;
	}
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
	vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
	spin_unlock_irq(shost->host_lock);

	/*
	 * The FLogI succeeded.  Sync the data for the CPU before
	 * accessing it.
	 */
	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
	if (!prsp)
		goto out;
	sp = prsp->virt + sizeof(uint32_t);

	/* FLOGI completes successfully */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0101 FLOGI completes successfully, I/O tag:x%x "
			 "xri x%x Data: x%x x%x x%x x%x x%x x%x x%x %d\n",
			 cmdiocb->iotag, cmdiocb->sli4_xritag,
			 ulp_word4, sp->cmn.e_d_tov,
			 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
			 vport->port_state, vport->fc_flag,
			 sp->cmn.priority_tagging, kref_read(&ndlp->kref));

	if (sp->cmn.priority_tagging)
		vport->phba->pport->vmid_flag |= (LPFC_VMID_ISSUE_QFPA |
						  LPFC_VMID_TYPE_PRIO);

	if (vport->port_state == LPFC_FLOGI) {
		/*
		 * If Common Service Parameters indicate Nport
		 * we are point to point, if Fport we are Fabric.
		 */
		if (sp->cmn.fPort)
			rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp,
							ulp_word4);
		else if (!(phba->hba_flag & HBA_FCOE_MODE))
			rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
		else {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				"2831 FLOGI response with cleared Fabric "
				"bit fcf_index 0x%x "
				"Switch Name %02x%02x%02x%02x%02x%02x%02x%02x "
				"Fabric Name "
				"%02x%02x%02x%02x%02x%02x%02x%02x\n",
				phba->fcf.current_rec.fcf_indx,
				phba->fcf.current_rec.switch_name[0],
				phba->fcf.current_rec.switch_name[1],
				phba->fcf.current_rec.switch_name[2],
				phba->fcf.current_rec.switch_name[3],
				phba->fcf.current_rec.switch_name[4],
				phba->fcf.current_rec.switch_name[5],
				phba->fcf.current_rec.switch_name[6],
				phba->fcf.current_rec.switch_name[7],
				phba->fcf.current_rec.fabric_name[0],
				phba->fcf.current_rec.fabric_name[1],
				phba->fcf.current_rec.fabric_name[2],
				phba->fcf.current_rec.fabric_name[3],
				phba->fcf.current_rec.fabric_name[4],
				phba->fcf.current_rec.fabric_name[5],
				phba->fcf.current_rec.fabric_name[6],
				phba->fcf.current_rec.fabric_name[7]);

			lpfc_nlp_put(ndlp);
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
			spin_unlock_irq(&phba->hbalock);
			phba->fcf.fcf_redisc_attempted = 0; /* reset */
			goto out;
		}
		if (!rc) {
			/* Mark the FCF discovery process done */
			if (phba->hba_flag & HBA_FIP_SUPPORT)
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
						 LOG_ELS,
						 "2769 FLOGI to FCF (x%x) "
						 "completed successfully\n",
						 phba->fcf.current_rec.fcf_indx);
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
			spin_unlock_irq(&phba->hbalock);
			phba->fcf.fcf_redisc_attempted = 0; /* reset */
			goto out;
		}
	} else if (vport->port_state > LPFC_FLOGI &&
		   vport->fc_flag & FC_PT2PT) {
		/*
		 * In a p2p topology, it is possible that discovery has
		 * already progressed, and this completion can be ignored.
		 * Recheck the indicated topology.
		 */
		if (!sp->cmn.fPort)
			goto out;
	}

flogifail:
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
	spin_unlock_irq(&phba->hbalock);

	if (!lpfc_error_lost_link(ulp_status, ulp_word4)) {
		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(vport);

		/* Start discovery */
		lpfc_disc_start(vport);
	} else if (((ulp_status != IOSTAT_LOCAL_REJECT) ||
		    (((ulp_word4 & IOERR_PARAM_MASK) !=
		      IOERR_SLI_ABORTED) &&
		     ((ulp_word4 & IOERR_PARAM_MASK) !=
		      IOERR_SLI_DOWN))) &&
		   (phba->link_state != LPFC_CLEAR_LA)) {
		/* If FLOGI failed enable link interrupt. */
		lpfc_issue_clear_la(phba, vport);
	}
out:
	phba->hba_flag &= ~HBA_FLOGI_OUTSTANDING;
	lpfc_els_free_iocb(phba, cmdiocb);
	lpfc_nlp_put(ndlp);
}

/**
 * lpfc_cmpl_els_link_down - Completion callback function for ELS command
 *                           aborted during a link down
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 */
static void
lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	uint32_t *pcmd;
	uint32_t cmd;
	u32 ulp_status, ulp_word4;

	pcmd = (uint32_t *)cmdiocb->cmd_dmabuf->virt;
	cmd = *pcmd;

	ulp_status = get_job_ulpstatus(phba, rspiocb);
	ulp_word4 = get_job_word4(phba, rspiocb);

	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"6445 ELS completes after LINK_DOWN: "
			" Status %x/%x cmd x%x flg x%x\n",
			ulp_status, ulp_word4, cmd,
			cmdiocb->cmd_flag);

	if (cmdiocb->cmd_flag & LPFC_IO_FABRIC) {
		cmdiocb->cmd_flag &= ~LPFC_IO_FABRIC;
		atomic_dec(&phba->fabric_iocb_count);
	}
	lpfc_els_free_iocb(phba, cmdiocb);
}

/**
 * lpfc_issue_els_flogi - Issue a flogi iocb command for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @retry: number of retries to the command IOCB.
 *
 * This routine issues a Fabric Login (FLOGI) Request ELS command
 * for a @vport. The initiator service parameters are put into the payload
 * of the FLOGI Request IOCB, and the top-level callback function pointer
 * to the lpfc_cmpl_els_flogi() routine is put into the IOCB completion
 * callback function field. The lpfc_issue_fabric_iocb routine is invoked to
 * send out the FLOGI ELS command with one outstanding fabric IOCB at a time.
 *
 * Note that the ndlp reference count will be incremented by 1 for holding the
 * ndlp and the reference to ndlp will be stored into the ndlp field of
 * the IOCB for the completion callback function to the FLOGI ELS command.
 *
 * Return code
 *   0 - successfully issued flogi iocb for @vport
 *   1 - failed to issue flogi iocb for @vport
 **/
static int
lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		     uint8_t retry)
{
	struct lpfc_hba *phba = vport->phba;
	struct serv_parm *sp;
	union lpfc_wqe128 *wqe = NULL;
	IOCB_t *icmd = NULL;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_iocbq defer_flogi_acc;
	u8 *pcmd, ct;
	uint16_t cmdsize;
	uint32_t tmo, did;
	int rc;

	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
				     ndlp->nlp_DID, ELS_CMD_FLOGI);

	if (!elsiocb)
		return 1;

	wqe = &elsiocb->wqe;
	pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
	icmd = &elsiocb->iocb;

	/* For FLOGI request, remainder of payload is service parameters */
	*((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
	pcmd += sizeof(uint32_t);
	memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
	sp = (struct serv_parm *) pcmd;

	/* Setup CSPs accordingly for Fabric */
	sp->cmn.e_d_tov = 0;
	sp->cmn.w2.r_a_tov = 0;
	sp->cmn.virtual_fabric_support = 0;
	sp->cls1.classValid = 0;
	if (sp->cmn.fcphLow < FC_PH3)
		sp->cmn.fcphLow = FC_PH3;
	if (sp->cmn.fcphHigh < FC_PH3)
		sp->cmn.fcphHigh = FC_PH3;

	/* Determine if switch supports priority tagging */
	if (phba->cfg_vmid_priority_tagging) {
		sp->cmn.priority_tagging = 1;
		/* lpfc_vmid_host_uuid is combination of wwpn and wwnn */
		if (uuid_is_null((uuid_t *)vport->lpfc_vmid_host_uuid)) {
			memcpy(vport->lpfc_vmid_host_uuid, phba->wwpn,
			       sizeof(phba->wwpn));
			memcpy(&vport->lpfc_vmid_host_uuid[8], phba->wwnn,
			       sizeof(phba->wwnn));
		}
	}

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
		    LPFC_SLI_INTF_IF_TYPE_0) {
			/* FLOGI needs to be 3 for WQE FCFI */
			ct = ((SLI4_CT_FCFI >> 1) & 1) | (SLI4_CT_FCFI & 1);
			bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);

			/* Set the fcfi to the fcfi we registered with */
			bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
			       phba->fcf.fcfi);
		}

		/* Can't do SLI4 class2 without support sequence coalescing */
		sp->cls2.classValid = 0;
		sp->cls2.seqDelivery = 0;
	} else {
		/* Historical, setting sequential-delivery bit for SLI3 */
		sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0;
		sp->cls3.seqDelivery = (sp->cls3.classValid) ? 1 : 0;
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
			sp->cmn.request_multiple_Nport = 1;
			/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
			icmd->ulpCt_h = 1;
			icmd->ulpCt_l = 0;
		} else {
			sp->cmn.request_multiple_Nport = 0;
		}

		if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
			icmd->un.elsreq64.myID = 0;
			icmd->un.elsreq64.fl = 1;
		}
	}

	tmo = phba->fc_ratov;
	phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
	lpfc_set_disctmo(vport);
	phba->fc_ratov = tmo;

	phba->fc_stat.elsXmitFLOGI++;
	elsiocb->cmd_cmpl = lpfc_cmpl_els_flogi;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			      "Issue FLOGI: opt:x%x",
			      phba->sli3_options, 0, 0);

	elsiocb->ndlp = lpfc_nlp_get(ndlp);
	if (!elsiocb->ndlp) {
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
	}

	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
	if (rc == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
		lpfc_nlp_put(ndlp);
		return 1;
	}

	phba->hba_flag |= (HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING);

	/* Clear external loopback plug detected flag */
	phba->link_flag &= ~LS_EXTERNAL_LOOPBACK;

	/* Check for a deferred FLOGI ACC condition */
	if (phba->defer_flogi_acc_flag) {
		/* lookup ndlp for received FLOGI */
		ndlp = lpfc_findnode_did(vport, 0);
		if (!ndlp)
			return 0;

		did = vport->fc_myDID;
		vport->fc_myDID = Fabric_DID;

		memset(&defer_flogi_acc, 0, sizeof(struct lpfc_iocbq));

		if (phba->sli_rev == LPFC_SLI_REV4) {
			bf_set(wqe_ctxt_tag,
			       &defer_flogi_acc.wqe.xmit_els_rsp.wqe_com,
			       phba->defer_flogi_acc_rx_id);
			bf_set(wqe_rcvoxid,
			       &defer_flogi_acc.wqe.xmit_els_rsp.wqe_com,
			       phba->defer_flogi_acc_ox_id);
		} else {
			icmd = &defer_flogi_acc.iocb;
			icmd->ulpContext = phba->defer_flogi_acc_rx_id;
			icmd->unsli3.rcvsli3.ox_id =
				phba->defer_flogi_acc_ox_id;
		}

		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "3354 Xmit deferred FLOGI ACC: rx_id: x%x,"
				 " ox_id: x%x, hba_flag x%x\n",
				 phba->defer_flogi_acc_rx_id,
				 phba->defer_flogi_acc_ox_id, phba->hba_flag);

		/* Send deferred FLOGI ACC */
		lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, &defer_flogi_acc,
				 ndlp, NULL);

		phba->defer_flogi_acc_flag = false;
		vport->fc_myDID = did;

		/* Decrement ndlp reference count to indicate the node can be
		 * released when other references are removed.
		 */
		lpfc_nlp_put(ndlp);
	}

	return 0;
}

/**
 * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs
 * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq
 * list and issues an abort IOCB command on each outstanding IOCB that
 * contains an active Fabric_DID ndlp. Note that this function only issues
 * the abort IOCB command on all the outstanding IOCBs; thus, when this
 * function returns, it does not guarantee all the IOCBs are actually aborted.
 *
 * Return code
 *   0 - Successfully issued abort iocb on all outstanding flogis (Always 0)
 **/
int
lpfc_els_abort_flogi(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	struct lpfc_nodelist *ndlp;
	u32 ulp_command;

	/* Abort outstanding I/O on NPort <nlp_DID> */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"0201 Abort outstanding I/O on NPort x%x\n",
			Fabric_DID);

	pring = lpfc_phba_elsring(phba);
	if (unlikely(!pring))
		return -EIO;

	/*
	 * Check the txcmplq for an iocb that matches the nport the driver is
	 * searching for.
	 */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		ulp_command = get_job_cmnd(phba, iocb);
		if (ulp_command == CMD_ELS_REQUEST64_CR) {
			ndlp = iocb->ndlp;
			if (ndlp && ndlp->nlp_DID == Fabric_DID) {
				if ((phba->pport->fc_flag & FC_PT2PT) &&
				    !(phba->pport->fc_flag & FC_PT2PT_PLOGI))
					iocb->fabric_cmd_cmpl =
						lpfc_ignore_els_cmpl;
				lpfc_sli_issue_abort_iotag(phba, pring, iocb,
							   NULL);
			}
		}
	}
	/* Make sure HBA is alive */
	lpfc_issue_hb_tmo(phba);

	spin_unlock_irq(&phba->hbalock);

	return 0;
}

/**
 * lpfc_initial_flogi - Issue an initial fabric login for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues an initial Fabric Login (FLOGI) for the @vport
 * specified. It first searches the @vport's ndlp list for an ndlp with the
 * Fabric_DID (0xfffffe). If no such ndlp is found, it will create an ndlp and
 * put it into the @vport's ndlp list. If an inactive ndlp is found on the
 * list, it will just be enabled and made active. The lpfc_issue_els_flogi()
 * routine is then invoked with the @vport and the ndlp to perform the FLOGI
 * for the @vport.
 *
 * Return code
 *   0 - failed to issue initial flogi for @vport
 *   1 - successfully issued initial flogi for @vport
 **/
int
lpfc_initial_flogi(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	vport->port_state = LPFC_FLOGI;
	lpfc_set_disctmo(vport);

	/* First look for the Fabric ndlp */
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = lpfc_nlp_init(vport, Fabric_DID);
		if (!ndlp)
			return 0;
		/* Set the node type */
		ndlp->nlp_type |= NLP_FABRIC;

		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	}

	/* Reset the Fabric flag, topology change may have happened */
	vport->fc_flag &= ~FC_FABRIC;
	if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
		/* A node reference should be retained while registered with a
		 * transport or dev-loss-evt work is pending.
		 * Otherwise, decrement node reference to trigger release.
		 */
		if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) &&
		    !(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
			lpfc_nlp_put(ndlp);
		return 0;
	}
	return 1;
}

/**
 * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues an initial Fabric Discover (FDISC) for the @vport
 * specified.
 * It first searches the @vport's ndlp list for an ndlp with the Fabric_DID
 * (0xfffffe). If no such ndlp is found, it will create an ndlp and put it
 * into the @vport's ndlp list. If an inactive ndlp is found on the list,
 * it will just be enabled and made active. The lpfc_issue_els_fdisc() routine
 * is then invoked with the @vport and the ndlp to perform the FDISC for the
 * @vport.
 *
 * Return code
 *   0 - failed to issue initial fdisc for @vport
 *   1 - successfully issued initial fdisc for @vport
 **/
int
lpfc_initial_fdisc(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	/* First look for the Fabric ndlp */
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = lpfc_nlp_init(vport, Fabric_DID);
		if (!ndlp)
			return 0;

		/* NPIV is only supported in Fabrics. */
		ndlp->nlp_type |= NLP_FABRIC;

		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	}

	if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
		/* A node reference should be retained while registered with a
		 * transport or dev-loss-evt work is pending.
		 * Otherwise, decrement node reference to trigger release.
		 */
		if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) &&
		    !(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
			lpfc_nlp_put(ndlp);
		return 0;
	}
	return 1;
}

/**
 * lpfc_more_plogi - Check and issue remaining plogis for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine checks whether there are more remaining Port Logins
 * (PLOGI) to be issued for the @vport. If so, it will invoke the routine
 * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes
 * to issue ELS PLOGIs up to the configured discover threads with the
 * @vport (@vport->cfg_discovery_threads). The function also decrements
 * the @vport's num_disc_nodes by 1 if it is not already 0.
 **/
void
lpfc_more_plogi(struct lpfc_vport *vport)
{
	if (vport->num_disc_nodes)
		vport->num_disc_nodes--;

	/* Continue discovery with <num_disc_nodes> PLOGIs to go */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0232 Continue discovery with %d PLOGIs to go "
			 "Data: x%x x%x x%x\n",
			 vport->num_disc_nodes, vport->fc_plogi_cnt,
			 vport->fc_flag, vport->port_state);
	/* Check to see if there are more PLOGIs to be sent */
	if (vport->fc_flag & FC_NLP_MORE)
		/* go thru NPR nodes and issue any remaining ELS PLOGIs */
		lpfc_els_disc_plogi(vport);

	return;
}

/**
 * lpfc_plogi_confirm_nport - Confirm plogi wwpn matches stored ndlp
 * @phba: pointer to lpfc hba data structure.
 * @prsp: pointer to response IOCB payload.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine checks and indicates whether the WWPN of an N_Port, retrieved
 * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_Port.
 * The following cases are considered N_Port confirmed:
 * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on the vport list and
 * matches the WWPN of the N_Port logged into; 3) The @ndlp is not on the
 * vport list but does not have a WWPN assigned either. If the WWPN is
 * confirmed, the pointer to the @ndlp will be returned.
 * If the WWPN is not confirmed:
 * 1) if there is a node on the vport list other than the @ndlp with the same
 * WWPN as the N_Port PLOGI logged into, lpfc_unreg_rpi() will be invoked
 * on that node to release the RPI associated with the node; 2) if there is
 * no node found on the vport list with the same WWPN as the N_Port PLOGI
 * logged into, a new node shall be allocated (or activated). In either case,
 * the parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp
 * shall be released, and the new_ndlp shall be put on to the vport node list
 * and its pointer returned as the confirmed node.
 *
 * Note that before the @ndlp gets "released", the keepDID from the
 * not-matching or inactive "new_ndlp" on the vport node list is assigned to
 * the nlp_DID of the @ndlp. This is because the release of @ndlp actually
 * puts it into an inactive state on the vport node list, and the vport node
 * list management algorithm does not allow two nodes with the same DID.
 *
 * Return code
 *   pointer to the PLOGI N_Port @ndlp
 **/
static struct lpfc_nodelist *
lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
			 struct lpfc_nodelist *ndlp)
{
	struct lpfc_vport *vport = ndlp->vport;
	struct lpfc_nodelist *new_ndlp;
	struct serv_parm *sp;
	uint8_t name[sizeof(struct lpfc_name)];
	uint32_t keepDID = 0, keep_nlp_flag = 0;
	uint32_t keep_new_nlp_flag = 0;
	uint16_t keep_nlp_state;
	u32 keep_nlp_fc4_type = 0;
	struct lpfc_nvme_rport *keep_nrport = NULL;
	unsigned long *active_rrqs_xri_bitmap = NULL;

	/* Fabric nodes can have the same WWPN so we don't bother searching
	 * by WWPN. Just return the ndlp that was given to us.
	 */
	if (ndlp->nlp_type & NLP_FABRIC)
		return ndlp;

	sp = (struct serv_parm *)((uint8_t *)prsp + sizeof(uint32_t));
	memset(name, 0, sizeof(struct lpfc_name));

	/* Now we find out if the NPort we are logging into, matches the WWPN
	 * we have for that ndlp. If not, we have some work to do.
	 */
	new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);

	/* return immediately if the WWPN matches ndlp */
	if (!new_ndlp || (new_ndlp == ndlp))
		return ndlp;

	/*
	 * Unregister from backend if not done yet. Could have been skipped
	 * due to ADISC
	 */
	lpfc_nlp_unreg_node(vport, new_ndlp);

	if (phba->sli_rev == LPFC_SLI_REV4) {
		active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool,
						       GFP_KERNEL);
		if (active_rrqs_xri_bitmap)
			memset(active_rrqs_xri_bitmap, 0,
			       phba->cfg_rrq_xri_bitmap_sz);
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE,
			 "3178 PLOGI confirm: ndlp x%x x%x x%x: "
			 "new_ndlp x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_fc4_type,
			 (new_ndlp ? new_ndlp->nlp_DID : 0),
			 (new_ndlp ? new_ndlp->nlp_flag : 0),
			 (new_ndlp ? new_ndlp->nlp_fc4_type : 0));

	keepDID = new_ndlp->nlp_DID;

	if (phba->sli_rev == LPFC_SLI_REV4 && active_rrqs_xri_bitmap)
		memcpy(active_rrqs_xri_bitmap, new_ndlp->active_rrqs_xri_bitmap,
		       phba->cfg_rrq_xri_bitmap_sz);

	/* At this point in this routine, we know new_ndlp will be
	 * returned. however, any previous GID_FTs that were done
	 * would have updated nlp_fc4_type in ndlp, so we must ensure
	 * new_ndlp has the right value.
1715 */ 1716 if (vport->fc_flag & FC_FABRIC) { 1717 keep_nlp_fc4_type = new_ndlp->nlp_fc4_type; 1718 new_ndlp->nlp_fc4_type = ndlp->nlp_fc4_type; 1719 } 1720 1721 lpfc_unreg_rpi(vport, new_ndlp); 1722 new_ndlp->nlp_DID = ndlp->nlp_DID; 1723 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state; 1724 if (phba->sli_rev == LPFC_SLI_REV4) 1725 memcpy(new_ndlp->active_rrqs_xri_bitmap, 1726 ndlp->active_rrqs_xri_bitmap, 1727 phba->cfg_rrq_xri_bitmap_sz); 1728 1729 /* Lock both ndlps */ 1730 spin_lock_irq(&ndlp->lock); 1731 spin_lock_irq(&new_ndlp->lock); 1732 keep_new_nlp_flag = new_ndlp->nlp_flag; 1733 keep_nlp_flag = ndlp->nlp_flag; 1734 new_ndlp->nlp_flag = ndlp->nlp_flag; 1735 1736 /* if new_ndlp had NLP_UNREG_INP set, keep it */ 1737 if (keep_new_nlp_flag & NLP_UNREG_INP) 1738 new_ndlp->nlp_flag |= NLP_UNREG_INP; 1739 else 1740 new_ndlp->nlp_flag &= ~NLP_UNREG_INP; 1741 1742 /* if new_ndlp had NLP_RPI_REGISTERED set, keep it */ 1743 if (keep_new_nlp_flag & NLP_RPI_REGISTERED) 1744 new_ndlp->nlp_flag |= NLP_RPI_REGISTERED; 1745 else 1746 new_ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; 1747 1748 /* 1749 * Retain the DROPPED flag. This will take care of the init 1750 * refcount when affecting the state change 1751 */ 1752 if (keep_new_nlp_flag & NLP_DROPPED) 1753 new_ndlp->nlp_flag |= NLP_DROPPED; 1754 else 1755 new_ndlp->nlp_flag &= ~NLP_DROPPED; 1756 1757 ndlp->nlp_flag = keep_new_nlp_flag; 1758 1759 /* if ndlp had NLP_UNREG_INP set, keep it */ 1760 if (keep_nlp_flag & NLP_UNREG_INP) 1761 ndlp->nlp_flag |= NLP_UNREG_INP; 1762 else 1763 ndlp->nlp_flag &= ~NLP_UNREG_INP; 1764 1765 /* if ndlp had NLP_RPI_REGISTERED set, keep it */ 1766 if (keep_nlp_flag & NLP_RPI_REGISTERED) 1767 ndlp->nlp_flag |= NLP_RPI_REGISTERED; 1768 else 1769 ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; 1770 1771 /* 1772 * Retain the DROPPED flag. This will take care of the init 1773 * refcount when affecting the state change 1774 */ 1775 if (keep_nlp_flag & NLP_DROPPED) 1776 ndlp->nlp_flag |= NLP_DROPPED; 1777 else 1778 ndlp->nlp_flag &= ~NLP_DROPPED; 1779 1780 spin_unlock_irq(&new_ndlp->lock); 1781 spin_unlock_irq(&ndlp->lock); 1782 1783 /* Set nlp_states accordingly */ 1784 keep_nlp_state = new_ndlp->nlp_state; 1785 lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state); 1786 1787 /* interchange the nvme remoteport structs */ 1788 keep_nrport = new_ndlp->nrport; 1789 new_ndlp->nrport = ndlp->nrport; 1790 1791 /* Move this back to NPR state */ 1792 if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) { 1793 /* The new_ndlp is replacing ndlp totally, so we need 1794 * to put ndlp on UNUSED list and try to free it. 1795 */ 1796 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1797 "3179 PLOGI confirm NEW: %x %x\n", 1798 new_ndlp->nlp_DID, keepDID); 1799 1800 /* Two ndlps cannot have the same did on the nodelist. 1801 * Note: for this case, ndlp has a NULL WWPN so setting 1802 * the nlp_fc4_type isn't required. 1803 */ 1804 ndlp->nlp_DID = keepDID; 1805 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state); 1806 if (phba->sli_rev == LPFC_SLI_REV4 && 1807 active_rrqs_xri_bitmap) 1808 memcpy(ndlp->active_rrqs_xri_bitmap, 1809 active_rrqs_xri_bitmap, 1810 phba->cfg_rrq_xri_bitmap_sz); 1811 1812 } else { 1813 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1814 "3180 PLOGI confirm SWAP: %x %x\n", 1815 new_ndlp->nlp_DID, keepDID); 1816 1817 lpfc_unreg_rpi(vport, ndlp); 1818 1819 /* Two ndlps cannot have the same did and the fc4 1820 * type must be transferred because the ndlp is in 1821 * flight. 
1822 */ 1823 ndlp->nlp_DID = keepDID; 1824 ndlp->nlp_fc4_type = keep_nlp_fc4_type; 1825 1826 if (phba->sli_rev == LPFC_SLI_REV4 && 1827 active_rrqs_xri_bitmap) 1828 memcpy(ndlp->active_rrqs_xri_bitmap, 1829 active_rrqs_xri_bitmap, 1830 phba->cfg_rrq_xri_bitmap_sz); 1831 1832 /* Since we are switching over to the new_ndlp, 1833 * reset the old ndlp state 1834 */ 1835 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || 1836 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) 1837 keep_nlp_state = NLP_STE_NPR_NODE; 1838 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state); 1839 ndlp->nrport = keep_nrport; 1840 } 1841 1842 /* 1843 * If ndlp is not associated with any rport we can drop it here else 1844 * let dev_loss_tmo_callbk trigger DEVICE_RM event 1845 */ 1846 if (!ndlp->rport && (ndlp->nlp_state == NLP_STE_NPR_NODE)) 1847 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 1848 1849 if (phba->sli_rev == LPFC_SLI_REV4 && 1850 active_rrqs_xri_bitmap) 1851 mempool_free(active_rrqs_xri_bitmap, 1852 phba->active_rrq_pool); 1853 1854 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE, 1855 "3173 PLOGI confirm exit: new_ndlp x%x x%x x%x\n", 1856 new_ndlp->nlp_DID, new_ndlp->nlp_flag, 1857 new_ndlp->nlp_fc4_type); 1858 1859 return new_ndlp; 1860 } 1861 1862 /** 1863 * lpfc_end_rscn - Check and handle more rscn for a vport 1864 * @vport: pointer to a host virtual N_Port data structure. 1865 * 1866 * This routine checks whether more Registration State Change 1867 * Notifications (RSCNs) came in while the discovery state machine was in 1868 * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be 1869 * invoked to handle the additional RSCNs for the @vport. Otherwise, the 1870 * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of 1871 * handling the RSCNs. 1872 **/ 1873 void 1874 lpfc_end_rscn(struct lpfc_vport *vport) 1875 { 1876 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1877 1878 if (vport->fc_flag & FC_RSCN_MODE) { 1879 /* 1880 * Check to see if more RSCNs came in while we were 1881 * processing this one. 1882 */ 1883 if (vport->fc_rscn_id_cnt || 1884 (vport->fc_flag & FC_RSCN_DISCOVERY) != 0) 1885 lpfc_els_handle_rscn(vport); 1886 else { 1887 spin_lock_irq(shost->host_lock); 1888 vport->fc_flag &= ~FC_RSCN_MODE; 1889 vport->fc_flag |= FC_RSCN_MEMENTO; 1890 spin_unlock_irq(shost->host_lock); 1891 } 1892 } 1893 } 1894 1895 /** 1896 * lpfc_cmpl_els_rrq - Completion handled for els RRQs. 1897 * @phba: pointer to lpfc hba data structure. 1898 * @cmdiocb: pointer to lpfc command iocb data structure. 1899 * @rspiocb: pointer to lpfc response iocb data structure. 1900 * 1901 * This routine will call the clear rrq function to free the rrq and 1902 * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not 1903 * exist then the clear_rrq is still called because the rrq needs to 1904 * be freed. 
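*
* In all cases the command IOCB is freed with lpfc_els_free_iocb() and the
* ndlp reference taken when the RRQ was issued is dropped.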
1905 **/
1906
1907 static void
1908 lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1909 struct lpfc_iocbq *rspiocb)
1910 {
1911 struct lpfc_vport *vport = cmdiocb->vport;
1912 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
1913 struct lpfc_node_rrq *rrq;
1914 u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
1915 u32 ulp_word4 = get_job_word4(phba, rspiocb);
1916
1917 /* we pass cmdiocb to state machine which needs rspiocb as well */
1918 rrq = cmdiocb->context_un.rrq;
1919 cmdiocb->rsp_iocb = rspiocb;
1920
1921 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1922 "RRQ cmpl: status:x%x/x%x did:x%x",
1923 ulp_status, ulp_word4,
1924 get_job_els_rsp64_did(phba, cmdiocb));
1925
1926
1927 /* rrq completes to NPort <nlp_DID> */
1928 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1929 "2880 RRQ completes to DID x%x "
1930 "Data: x%x x%x x%x x%x x%x\n",
1931 ndlp->nlp_DID, ulp_status, ulp_word4,
1932 get_wqe_tmo(cmdiocb), rrq->xritag, rrq->rxid);
1933
1934 if (ulp_status) {
1935 /* Check for retry */
1936 /* RRQ failed. Don't print the vport to vport rjts */
1937 if (ulp_status != IOSTAT_LS_RJT ||
1938 (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) &&
1939 ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) ||
1940 (phba)->pport->cfg_log_verbose & LOG_ELS)
1941 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1942 "2881 RRQ failure DID:%06X Status:"
1943 "x%x/x%x\n",
1944 ndlp->nlp_DID, ulp_status,
1945 ulp_word4);
1946 }
1947
1948 lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
1949 lpfc_els_free_iocb(phba, cmdiocb);
1950 lpfc_nlp_put(ndlp);
1951 return;
1952 }
1953 /**
1954 * lpfc_cmpl_els_plogi - Completion callback function for plogi
1955 * @phba: pointer to lpfc hba data structure.
1956 * @cmdiocb: pointer to lpfc command iocb data structure.
1957 * @rspiocb: pointer to lpfc response iocb data structure.
1958 *
1959 * This routine is the completion callback function for issuing the Port
1960 * Login (PLOGI) command. For PLOGI completion, there must be an active
1961 * ndlp on the vport node list that matches the remote node ID from the
1962 * PLOGI response IOCB. If no such ndlp exists, the PLOGI is simply
1963 * ignored and the command IOCB is released. The PLOGI response IOCB status is
1964 * checked for error conditions. If there is error status reported, PLOGI
1965 * retry shall be attempted by invoking the lpfc_els_retry() routine.
1966 * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on
1967 * the ndlp and the NLP_EVT_CMPL_PLOGI event is posted to the Discovery
1968 * State Machine (DSM) for this PLOGI completion. Finally, it checks whether
1969 * there are additional N_Port nodes on the vport that need to perform
1970 * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue additional
1971 * PLOGIs.
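*
* Regardless of the completion path, the command IOCB is freed and the
* ndlp reference taken when the PLOGI was issued is released (see the
* out_freeiocb label below).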
1972 **/ 1973 static void 1974 lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1975 struct lpfc_iocbq *rspiocb) 1976 { 1977 struct lpfc_vport *vport = cmdiocb->vport; 1978 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1979 IOCB_t *irsp; 1980 struct lpfc_nodelist *ndlp, *free_ndlp; 1981 struct lpfc_dmabuf *prsp; 1982 int disc; 1983 struct serv_parm *sp = NULL; 1984 u32 ulp_status, ulp_word4, did, iotag; 1985 bool release_node = false; 1986 1987 /* we pass cmdiocb to state machine which needs rspiocb as well */ 1988 cmdiocb->rsp_iocb = rspiocb; 1989 1990 ulp_status = get_job_ulpstatus(phba, rspiocb); 1991 ulp_word4 = get_job_word4(phba, rspiocb); 1992 did = get_job_els_rsp64_did(phba, cmdiocb); 1993 1994 if (phba->sli_rev == LPFC_SLI_REV4) { 1995 iotag = get_wqe_reqtag(cmdiocb); 1996 } else { 1997 irsp = &rspiocb->iocb; 1998 iotag = irsp->ulpIoTag; 1999 } 2000 2001 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2002 "PLOGI cmpl: status:x%x/x%x did:x%x", 2003 ulp_status, ulp_word4, did); 2004 2005 ndlp = lpfc_findnode_did(vport, did); 2006 if (!ndlp) { 2007 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2008 "0136 PLOGI completes to NPort x%x " 2009 "with no ndlp. Data: x%x x%x x%x\n", 2010 did, ulp_status, ulp_word4, iotag); 2011 goto out_freeiocb; 2012 } 2013 2014 /* Since ndlp can be freed in the disc state machine, note if this node 2015 * is being used during discovery. 2016 */ 2017 spin_lock_irq(&ndlp->lock); 2018 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 2019 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2020 spin_unlock_irq(&ndlp->lock); 2021 2022 /* PLOGI completes to NPort <nlp_DID> */ 2023 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2024 "0102 PLOGI completes to NPort x%06x " 2025 "Data: x%x x%x x%x x%x x%x\n", 2026 ndlp->nlp_DID, ndlp->nlp_fc4_type, 2027 ulp_status, ulp_word4, 2028 disc, vport->num_disc_nodes); 2029 2030 /* Check to see if link went down during discovery */ 2031 if (lpfc_els_chk_latt(vport)) { 2032 spin_lock_irq(&ndlp->lock); 2033 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2034 spin_unlock_irq(&ndlp->lock); 2035 goto out; 2036 } 2037 2038 if (ulp_status) { 2039 /* Check for retry */ 2040 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2041 /* ELS command is being retried */ 2042 if (disc) { 2043 spin_lock_irq(&ndlp->lock); 2044 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2045 spin_unlock_irq(&ndlp->lock); 2046 } 2047 goto out; 2048 } 2049 /* PLOGI failed Don't print the vport to vport rjts */ 2050 if (ulp_status != IOSTAT_LS_RJT || 2051 (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) && 2052 ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) || 2053 (phba)->pport->cfg_log_verbose & LOG_ELS) 2054 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2055 "2753 PLOGI failure DID:%06X " 2056 "Status:x%x/x%x\n", 2057 ndlp->nlp_DID, ulp_status, 2058 ulp_word4); 2059 2060 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2061 if (!lpfc_error_lost_link(ulp_status, ulp_word4)) 2062 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2063 NLP_EVT_CMPL_PLOGI); 2064 2065 /* If a PLOGI collision occurred, the node needs to continue 2066 * with the reglogin process. 2067 */ 2068 spin_lock_irq(&ndlp->lock); 2069 if ((ndlp->nlp_flag & (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI)) && 2070 ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) { 2071 spin_unlock_irq(&ndlp->lock); 2072 goto out; 2073 } 2074 2075 /* No PLOGI collision and the node is not registered with the 2076 * scsi or nvme transport. It is no longer an active node. Just 2077 * start the device remove process. 
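* The node is released only when no dev-loss event is pending
* (NLP_IN_DEV_LOSS); otherwise dev-loss handling performs the final
* cleanup.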
2078 */ 2079 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { 2080 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2081 if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) 2082 release_node = true; 2083 } 2084 spin_unlock_irq(&ndlp->lock); 2085 2086 if (release_node) 2087 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2088 NLP_EVT_DEVICE_RM); 2089 } else { 2090 /* Good status, call state machine */ 2091 prsp = list_entry(cmdiocb->cmd_dmabuf->list.next, 2092 struct lpfc_dmabuf, list); 2093 ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp); 2094 2095 sp = (struct serv_parm *)((u8 *)prsp->virt + 2096 sizeof(u32)); 2097 2098 ndlp->vmid_support = 0; 2099 if ((phba->cfg_vmid_app_header && sp->cmn.app_hdr_support) || 2100 (phba->cfg_vmid_priority_tagging && 2101 sp->cmn.priority_tagging)) { 2102 lpfc_printf_log(phba, KERN_DEBUG, LOG_ELS, 2103 "4018 app_hdr_support %d tagging %d DID x%x\n", 2104 sp->cmn.app_hdr_support, 2105 sp->cmn.priority_tagging, 2106 ndlp->nlp_DID); 2107 /* if the dest port supports VMID, mark it in ndlp */ 2108 ndlp->vmid_support = 1; 2109 } 2110 2111 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2112 NLP_EVT_CMPL_PLOGI); 2113 } 2114 2115 if (disc && vport->num_disc_nodes) { 2116 /* Check to see if there are more PLOGIs to be sent */ 2117 lpfc_more_plogi(vport); 2118 2119 if (vport->num_disc_nodes == 0) { 2120 spin_lock_irq(shost->host_lock); 2121 vport->fc_flag &= ~FC_NDISC_ACTIVE; 2122 spin_unlock_irq(shost->host_lock); 2123 2124 lpfc_can_disctmo(vport); 2125 lpfc_end_rscn(vport); 2126 } 2127 } 2128 2129 out: 2130 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, 2131 "PLOGI Cmpl PUT: did:x%x refcnt %d", 2132 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2133 2134 out_freeiocb: 2135 /* Release the reference on the original I/O request. */ 2136 free_ndlp = cmdiocb->ndlp; 2137 2138 lpfc_els_free_iocb(phba, cmdiocb); 2139 lpfc_nlp_put(free_ndlp); 2140 return; 2141 } 2142 2143 /** 2144 * lpfc_issue_els_plogi - Issue an plogi iocb command for a vport 2145 * @vport: pointer to a host virtual N_Port data structure. 2146 * @did: destination port identifier. 2147 * @retry: number of retries to the command IOCB. 2148 * 2149 * This routine issues a Port Login (PLOGI) command to a remote N_Port 2150 * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port, 2151 * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list. 2152 * This routine constructs the proper fields of the PLOGI IOCB and invokes 2153 * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command. 2154 * 2155 * Note that the ndlp reference count will be incremented by 1 for holding 2156 * the ndlp and the reference to ndlp will be stored into the ndlp field 2157 * of the IOCB for the completion callback function to the PLOGI ELS command. 2158 * 2159 * Return code 2160 * 0 - Successfully issued a plogi for @vport 2161 * 1 - failed to issue a plogi for @vport 2162 **/ 2163 int 2164 lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) 2165 { 2166 struct lpfc_hba *phba = vport->phba; 2167 struct serv_parm *sp; 2168 struct lpfc_nodelist *ndlp; 2169 struct lpfc_iocbq *elsiocb; 2170 uint8_t *pcmd; 2171 uint16_t cmdsize; 2172 int ret; 2173 2174 ndlp = lpfc_findnode_did(vport, did); 2175 if (!ndlp) 2176 return 1; 2177 2178 /* Defer the processing of the issue PLOGI until after the 2179 * outstanding UNREG_RPI mbox command completes, unless we 2180 * are going offline. 
This logic does not apply for Fabric DIDs 2181 */ 2182 if ((ndlp->nlp_flag & NLP_UNREG_INP) && 2183 ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) && 2184 !(vport->fc_flag & FC_OFFLINE_MODE)) { 2185 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2186 "4110 Issue PLOGI x%x deferred " 2187 "on NPort x%x rpi x%x Data: x%px\n", 2188 ndlp->nlp_defer_did, ndlp->nlp_DID, 2189 ndlp->nlp_rpi, ndlp); 2190 2191 /* We can only defer 1st PLOGI */ 2192 if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING) 2193 ndlp->nlp_defer_did = did; 2194 return 0; 2195 } 2196 2197 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 2198 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 2199 ELS_CMD_PLOGI); 2200 if (!elsiocb) 2201 return 1; 2202 2203 spin_lock_irq(&ndlp->lock); 2204 ndlp->nlp_flag &= ~NLP_FCP_PRLI_RJT; 2205 spin_unlock_irq(&ndlp->lock); 2206 2207 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 2208 2209 /* For PLOGI request, remainder of payload is service parameters */ 2210 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI; 2211 pcmd += sizeof(uint32_t); 2212 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); 2213 sp = (struct serv_parm *) pcmd; 2214 2215 /* 2216 * If we are a N-port connected to a Fabric, fix-up paramm's so logins 2217 * to device on remote loops work. 2218 */ 2219 if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP)) 2220 sp->cmn.altBbCredit = 1; 2221 2222 if (sp->cmn.fcphLow < FC_PH_4_3) 2223 sp->cmn.fcphLow = FC_PH_4_3; 2224 2225 if (sp->cmn.fcphHigh < FC_PH3) 2226 sp->cmn.fcphHigh = FC_PH3; 2227 2228 sp->cmn.valid_vendor_ver_level = 0; 2229 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); 2230 sp->cmn.bbRcvSizeMsb &= 0xF; 2231 2232 /* Check if the destination port supports VMID */ 2233 ndlp->vmid_support = 0; 2234 if (vport->vmid_priority_tagging) 2235 sp->cmn.priority_tagging = 1; 2236 else if (phba->cfg_vmid_app_header && 2237 bf_get(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags)) 2238 sp->cmn.app_hdr_support = 1; 2239 2240 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2241 "Issue PLOGI: did:x%x", 2242 did, 0, 0); 2243 2244 /* If our firmware supports this feature, convey that 2245 * information to the target using the vendor specific field. 2246 */ 2247 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) { 2248 sp->cmn.valid_vendor_ver_level = 1; 2249 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID); 2250 sp->un.vv.flags = cpu_to_be32(LPFC_VV_SUPPRESS_RSP); 2251 } 2252 2253 phba->fc_stat.elsXmitPLOGI++; 2254 elsiocb->cmd_cmpl = lpfc_cmpl_els_plogi; 2255 2256 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2257 "Issue PLOGI: did:x%x refcnt %d", 2258 did, kref_read(&ndlp->kref), 0); 2259 elsiocb->ndlp = lpfc_nlp_get(ndlp); 2260 if (!elsiocb->ndlp) { 2261 lpfc_els_free_iocb(phba, elsiocb); 2262 return 1; 2263 } 2264 2265 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2266 if (ret) { 2267 lpfc_els_free_iocb(phba, elsiocb); 2268 lpfc_nlp_put(ndlp); 2269 return 1; 2270 } 2271 2272 return 0; 2273 } 2274 2275 /** 2276 * lpfc_cmpl_els_prli - Completion callback function for prli 2277 * @phba: pointer to lpfc hba data structure. 2278 * @cmdiocb: pointer to lpfc command iocb data structure. 2279 * @rspiocb: pointer to lpfc response iocb data structure. 2280 * 2281 * This routine is the completion callback function for a Process Login 2282 * (PRLI) ELS command. The PRLI response IOCB status is checked for error 2283 * status. 
If there is error status reported, PRLI retry shall be attempted 2284 * by invoking the lpfc_els_retry() routine. Otherwise, the state 2285 * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this 2286 * ndlp to mark the PRLI completion. 2287 **/ 2288 static void 2289 lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2290 struct lpfc_iocbq *rspiocb) 2291 { 2292 struct lpfc_vport *vport = cmdiocb->vport; 2293 struct lpfc_nodelist *ndlp; 2294 char *mode; 2295 u32 loglevel; 2296 u32 ulp_status; 2297 u32 ulp_word4; 2298 bool release_node = false; 2299 2300 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2301 cmdiocb->rsp_iocb = rspiocb; 2302 2303 ndlp = cmdiocb->ndlp; 2304 2305 ulp_status = get_job_ulpstatus(phba, rspiocb); 2306 ulp_word4 = get_job_word4(phba, rspiocb); 2307 2308 spin_lock_irq(&ndlp->lock); 2309 ndlp->nlp_flag &= ~NLP_PRLI_SND; 2310 2311 /* Driver supports multiple FC4 types. Counters matter. */ 2312 vport->fc_prli_sent--; 2313 ndlp->fc4_prli_sent--; 2314 spin_unlock_irq(&ndlp->lock); 2315 2316 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2317 "PRLI cmpl: status:x%x/x%x did:x%x", 2318 ulp_status, ulp_word4, 2319 ndlp->nlp_DID); 2320 2321 /* PRLI completes to NPort <nlp_DID> */ 2322 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2323 "0103 PRLI completes to NPort x%06x " 2324 "Data: x%x x%x x%x x%x\n", 2325 ndlp->nlp_DID, ulp_status, ulp_word4, 2326 vport->num_disc_nodes, ndlp->fc4_prli_sent); 2327 2328 /* Check to see if link went down during discovery */ 2329 if (lpfc_els_chk_latt(vport)) 2330 goto out; 2331 2332 if (ulp_status) { 2333 /* Check for retry */ 2334 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2335 /* ELS command is being retried */ 2336 goto out; 2337 } 2338 2339 /* If we don't send GFT_ID to Fabric, a PRLI error 2340 * could be expected. 2341 */ 2342 if ((vport->fc_flag & FC_FABRIC) || 2343 (vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH)) { 2344 mode = KERN_ERR; 2345 loglevel = LOG_TRACE_EVENT; 2346 } else { 2347 mode = KERN_INFO; 2348 loglevel = LOG_ELS; 2349 } 2350 2351 /* PRLI failed */ 2352 lpfc_printf_vlog(vport, mode, loglevel, 2353 "2754 PRLI failure DID:%06X Status:x%x/x%x, " 2354 "data: x%x\n", 2355 ndlp->nlp_DID, ulp_status, 2356 ulp_word4, ndlp->fc4_prli_sent); 2357 2358 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2359 if (!lpfc_error_lost_link(ulp_status, ulp_word4)) 2360 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2361 NLP_EVT_CMPL_PRLI); 2362 2363 /* 2364 * For P2P topology, retain the node so that PLOGI can be 2365 * attempted on it again. 2366 */ 2367 if (vport->fc_flag & FC_PT2PT) 2368 goto out; 2369 2370 /* As long as this node is not registered with the SCSI 2371 * or NVMe transport and no other PRLIs are outstanding, 2372 * it is no longer an active node. Otherwise devloss 2373 * handles the final cleanup. 2374 */ 2375 spin_lock_irq(&ndlp->lock); 2376 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && 2377 !ndlp->fc4_prli_sent) { 2378 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2379 if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) 2380 release_node = true; 2381 } 2382 spin_unlock_irq(&ndlp->lock); 2383 2384 if (release_node) 2385 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2386 NLP_EVT_DEVICE_RM); 2387 } else { 2388 /* Good status, call state machine. However, if another 2389 * PRLI is outstanding, don't call the state machine 2390 * because final disposition to Mapped or Unmapped is 2391 * completed there. 
2392 */ 2393 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2394 NLP_EVT_CMPL_PRLI); 2395 } 2396 2397 out: 2398 lpfc_els_free_iocb(phba, cmdiocb); 2399 lpfc_nlp_put(ndlp); 2400 return; 2401 } 2402 2403 /** 2404 * lpfc_issue_els_prli - Issue a prli iocb command for a vport 2405 * @vport: pointer to a host virtual N_Port data structure. 2406 * @ndlp: pointer to a node-list data structure. 2407 * @retry: number of retries to the command IOCB. 2408 * 2409 * This routine issues a Process Login (PRLI) ELS command for the 2410 * @vport. The PRLI service parameters are set up in the payload of the 2411 * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine 2412 * is put to the IOCB completion callback func field before invoking the 2413 * routine lpfc_sli_issue_iocb() to send out PRLI command. 2414 * 2415 * Note that the ndlp reference count will be incremented by 1 for holding the 2416 * ndlp and the reference to ndlp will be stored into the ndlp field of 2417 * the IOCB for the completion callback function to the PRLI ELS command. 2418 * 2419 * Return code 2420 * 0 - successfully issued prli iocb command for @vport 2421 * 1 - failed to issue prli iocb command for @vport 2422 **/ 2423 int 2424 lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2425 uint8_t retry) 2426 { 2427 int rc = 0; 2428 struct lpfc_hba *phba = vport->phba; 2429 PRLI *npr; 2430 struct lpfc_nvme_prli *npr_nvme; 2431 struct lpfc_iocbq *elsiocb; 2432 uint8_t *pcmd; 2433 uint16_t cmdsize; 2434 u32 local_nlp_type, elscmd; 2435 2436 /* 2437 * If discovery was kicked off from RSCN mode, 2438 * the FC4 types supported from a 2439 * previous GFT_ID command may not be accurate. So, if we 2440 * are a NVME Initiator, always look for the possibility of 2441 * the remote NPort beng a NVME Target. 2442 */ 2443 if (phba->sli_rev == LPFC_SLI_REV4 && 2444 vport->fc_flag & (FC_RSCN_MODE | FC_RSCN_MEMENTO) && 2445 vport->nvmei_support) 2446 ndlp->nlp_fc4_type |= NLP_FC4_NVME; 2447 local_nlp_type = ndlp->nlp_fc4_type; 2448 2449 /* This routine will issue 1 or 2 PRLIs, so zero all the ndlp 2450 * fields here before any of them can complete. 2451 */ 2452 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); 2453 ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR); 2454 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 2455 ndlp->nlp_flag &= ~(NLP_FIRSTBURST | NLP_NPR_2B_DISC); 2456 ndlp->nvme_fb_size = 0; 2457 2458 send_next_prli: 2459 if (local_nlp_type & NLP_FC4_FCP) { 2460 /* Payload is 4 + 16 = 20 x14 bytes. */ 2461 cmdsize = (sizeof(uint32_t) + sizeof(PRLI)); 2462 elscmd = ELS_CMD_PRLI; 2463 } else if (local_nlp_type & NLP_FC4_NVME) { 2464 /* Payload is 4 + 20 = 24 x18 bytes. */ 2465 cmdsize = (sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli)); 2466 elscmd = ELS_CMD_NVMEPRLI; 2467 } else { 2468 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2469 "3083 Unknown FC_TYPE x%x ndlp x%06x\n", 2470 ndlp->nlp_fc4_type, ndlp->nlp_DID); 2471 return 1; 2472 } 2473 2474 /* SLI3 ports don't support NVME. If this rport is a strict NVME 2475 * FC4 type, implicitly LOGO. 
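* This is done by posting NLP_EVT_DEVICE_RM to the discovery state
* machine below.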
2476 */ 2477 if (phba->sli_rev == LPFC_SLI_REV3 && 2478 ndlp->nlp_fc4_type == NLP_FC4_NVME) { 2479 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2480 "3088 Rport fc4 type 0x%x not supported by SLI3 adapter\n", 2481 ndlp->nlp_type); 2482 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 2483 return 1; 2484 } 2485 2486 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2487 ndlp->nlp_DID, elscmd); 2488 if (!elsiocb) 2489 return 1; 2490 2491 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 2492 2493 /* For PRLI request, remainder of payload is service parameters */ 2494 memset(pcmd, 0, cmdsize); 2495 2496 if (local_nlp_type & NLP_FC4_FCP) { 2497 /* Remainder of payload is FCP PRLI parameter page. 2498 * Note: this data structure is defined as 2499 * BE/LE in the structure definition so no 2500 * byte swap call is made. 2501 */ 2502 *((uint32_t *)(pcmd)) = ELS_CMD_PRLI; 2503 pcmd += sizeof(uint32_t); 2504 npr = (PRLI *)pcmd; 2505 2506 /* 2507 * If our firmware version is 3.20 or later, 2508 * set the following bits for FC-TAPE support. 2509 */ 2510 if (phba->vpd.rev.feaLevelHigh >= 0x02) { 2511 npr->ConfmComplAllowed = 1; 2512 npr->Retry = 1; 2513 npr->TaskRetryIdReq = 1; 2514 } 2515 npr->estabImagePair = 1; 2516 npr->readXferRdyDis = 1; 2517 if (vport->cfg_first_burst_size) 2518 npr->writeXferRdyDis = 1; 2519 2520 /* For FCP support */ 2521 npr->prliType = PRLI_FCP_TYPE; 2522 npr->initiatorFunc = 1; 2523 elsiocb->cmd_flag |= LPFC_PRLI_FCP_REQ; 2524 2525 /* Remove FCP type - processed. */ 2526 local_nlp_type &= ~NLP_FC4_FCP; 2527 } else if (local_nlp_type & NLP_FC4_NVME) { 2528 /* Remainder of payload is NVME PRLI parameter page. 2529 * This data structure is the newer definition that 2530 * uses bf macros so a byte swap is required. 2531 */ 2532 *((uint32_t *)(pcmd)) = ELS_CMD_NVMEPRLI; 2533 pcmd += sizeof(uint32_t); 2534 npr_nvme = (struct lpfc_nvme_prli *)pcmd; 2535 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); 2536 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ 2537 if (phba->nsler) { 2538 bf_set(prli_nsler, npr_nvme, 1); 2539 bf_set(prli_conf, npr_nvme, 1); 2540 } 2541 2542 /* Only initiators request first burst. */ 2543 if ((phba->cfg_nvme_enable_fb) && 2544 !phba->nvmet_support) 2545 bf_set(prli_fba, npr_nvme, 1); 2546 2547 if (phba->nvmet_support) { 2548 bf_set(prli_tgt, npr_nvme, 1); 2549 bf_set(prli_disc, npr_nvme, 1); 2550 } else { 2551 bf_set(prli_init, npr_nvme, 1); 2552 bf_set(prli_conf, npr_nvme, 1); 2553 } 2554 2555 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); 2556 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); 2557 elsiocb->cmd_flag |= LPFC_PRLI_NVME_REQ; 2558 2559 /* Remove NVME type - processed. */ 2560 local_nlp_type &= ~NLP_FC4_NVME; 2561 } 2562 2563 phba->fc_stat.elsXmitPRLI++; 2564 elsiocb->cmd_cmpl = lpfc_cmpl_els_prli; 2565 2566 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2567 "Issue PRLI: did:x%x refcnt %d", 2568 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2569 elsiocb->ndlp = lpfc_nlp_get(ndlp); 2570 if (!elsiocb->ndlp) { 2571 lpfc_els_free_iocb(phba, elsiocb); 2572 return 1; 2573 } 2574 2575 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2576 if (rc == IOCB_ERROR) { 2577 lpfc_els_free_iocb(phba, elsiocb); 2578 lpfc_nlp_put(ndlp); 2579 return 1; 2580 } 2581 2582 /* The vport counters are used for lpfc_scan_finished, but 2583 * the ndlp is used to track outstanding PRLIs for different 2584 * FC4 types. 
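* Both counters are incremented under the ndlp lock below and are
* decremented again in lpfc_cmpl_els_prli().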
2585 */ 2586 spin_lock_irq(&ndlp->lock); 2587 ndlp->nlp_flag |= NLP_PRLI_SND; 2588 vport->fc_prli_sent++; 2589 ndlp->fc4_prli_sent++; 2590 spin_unlock_irq(&ndlp->lock); 2591 2592 /* The driver supports 2 FC4 types. Make sure 2593 * a PRLI is issued for all types before exiting. 2594 */ 2595 if (phba->sli_rev == LPFC_SLI_REV4 && 2596 local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME)) 2597 goto send_next_prli; 2598 else 2599 return 0; 2600 } 2601 2602 /** 2603 * lpfc_rscn_disc - Perform rscn discovery for a vport 2604 * @vport: pointer to a host virtual N_Port data structure. 2605 * 2606 * This routine performs Registration State Change Notification (RSCN) 2607 * discovery for a @vport. If the @vport's node port recovery count is not 2608 * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all 2609 * the nodes that need recovery. If none of the PLOGI were needed through 2610 * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be 2611 * invoked to check and handle possible more RSCN came in during the period 2612 * of processing the current ones. 2613 **/ 2614 static void 2615 lpfc_rscn_disc(struct lpfc_vport *vport) 2616 { 2617 lpfc_can_disctmo(vport); 2618 2619 /* RSCN discovery */ 2620 /* go thru NPR nodes and issue ELS PLOGIs */ 2621 if (vport->fc_npr_cnt) 2622 if (lpfc_els_disc_plogi(vport)) 2623 return; 2624 2625 lpfc_end_rscn(vport); 2626 } 2627 2628 /** 2629 * lpfc_adisc_done - Complete the adisc phase of discovery 2630 * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs. 2631 * 2632 * This function is called when the final ADISC is completed during discovery. 2633 * This function handles clearing link attention or issuing reg_vpi depending 2634 * on whether npiv is enabled. This function also kicks off the PLOGI phase of 2635 * discovery. 2636 * This function is called with no locks held. 2637 **/ 2638 static void 2639 lpfc_adisc_done(struct lpfc_vport *vport) 2640 { 2641 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2642 struct lpfc_hba *phba = vport->phba; 2643 2644 /* 2645 * For NPIV, cmpl_reg_vpi will set port_state to READY, 2646 * and continue discovery. 2647 */ 2648 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 2649 !(vport->fc_flag & FC_RSCN_MODE) && 2650 (phba->sli_rev < LPFC_SLI_REV4)) { 2651 2652 /* 2653 * If link is down, clear_la and reg_vpi will be done after 2654 * flogi following a link up event 2655 */ 2656 if (!lpfc_is_link_up(phba)) 2657 return; 2658 2659 /* The ADISCs are complete. Doesn't matter if they 2660 * succeeded or failed because the ADISC completion 2661 * routine guarantees to call the state machine and 2662 * the RPI is either unregistered (failed ADISC response) 2663 * or the RPI is still valid and the node is marked 2664 * mapped for a target. The exchanges should be in the 2665 * correct state. This code is specific to SLI3. 2666 */ 2667 lpfc_issue_clear_la(phba, vport); 2668 lpfc_issue_reg_vpi(phba, vport); 2669 return; 2670 } 2671 /* 2672 * For SLI2, we need to set port_state to READY 2673 * and continue discovery. 
2674 */ 2675 if (vport->port_state < LPFC_VPORT_READY) { 2676 /* If we get here, there is nothing to ADISC */ 2677 lpfc_issue_clear_la(phba, vport); 2678 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) { 2679 vport->num_disc_nodes = 0; 2680 /* go thru NPR list, issue ELS PLOGIs */ 2681 if (vport->fc_npr_cnt) 2682 lpfc_els_disc_plogi(vport); 2683 if (!vport->num_disc_nodes) { 2684 spin_lock_irq(shost->host_lock); 2685 vport->fc_flag &= ~FC_NDISC_ACTIVE; 2686 spin_unlock_irq(shost->host_lock); 2687 lpfc_can_disctmo(vport); 2688 lpfc_end_rscn(vport); 2689 } 2690 } 2691 vport->port_state = LPFC_VPORT_READY; 2692 } else 2693 lpfc_rscn_disc(vport); 2694 } 2695 2696 /** 2697 * lpfc_more_adisc - Issue more adisc as needed 2698 * @vport: pointer to a host virtual N_Port data structure. 2699 * 2700 * This routine determines whether there are more ndlps on a @vport 2701 * node list need to have Address Discover (ADISC) issued. If so, it will 2702 * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's 2703 * remaining nodes which need to have ADISC sent. 2704 **/ 2705 void 2706 lpfc_more_adisc(struct lpfc_vport *vport) 2707 { 2708 if (vport->num_disc_nodes) 2709 vport->num_disc_nodes--; 2710 /* Continue discovery with <num_disc_nodes> ADISCs to go */ 2711 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2712 "0210 Continue discovery with %d ADISCs to go " 2713 "Data: x%x x%x x%x\n", 2714 vport->num_disc_nodes, vport->fc_adisc_cnt, 2715 vport->fc_flag, vport->port_state); 2716 /* Check to see if there are more ADISCs to be sent */ 2717 if (vport->fc_flag & FC_NLP_MORE) { 2718 lpfc_set_disctmo(vport); 2719 /* go thru NPR nodes and issue any remaining ELS ADISCs */ 2720 lpfc_els_disc_adisc(vport); 2721 } 2722 if (!vport->num_disc_nodes) 2723 lpfc_adisc_done(vport); 2724 return; 2725 } 2726 2727 /** 2728 * lpfc_cmpl_els_adisc - Completion callback function for adisc 2729 * @phba: pointer to lpfc hba data structure. 2730 * @cmdiocb: pointer to lpfc command iocb data structure. 2731 * @rspiocb: pointer to lpfc response iocb data structure. 2732 * 2733 * This routine is the completion function for issuing the Address Discover 2734 * (ADISC) command. It first checks to see whether link went down during 2735 * the discovery process. If so, the node will be marked as node port 2736 * recovery for issuing discover IOCB by the link attention handler and 2737 * exit. Otherwise, the response status is checked. If error was reported 2738 * in the response status, the ADISC command shall be retried by invoking 2739 * the lpfc_els_retry() routine. Otherwise, if no error was reported in 2740 * the response status, the state machine is invoked to set transition 2741 * with respect to NLP_EVT_CMPL_ADISC event. 
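*
* If this node was part of discovery and more ADISCs remain outstanding,
* lpfc_more_adisc() is invoked before the command IOCB and the ndlp
* reference are released.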
2742 **/ 2743 static void 2744 lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2745 struct lpfc_iocbq *rspiocb) 2746 { 2747 struct lpfc_vport *vport = cmdiocb->vport; 2748 IOCB_t *irsp; 2749 struct lpfc_nodelist *ndlp; 2750 int disc; 2751 u32 ulp_status, ulp_word4, tmo; 2752 bool release_node = false; 2753 2754 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2755 cmdiocb->rsp_iocb = rspiocb; 2756 2757 ndlp = cmdiocb->ndlp; 2758 2759 ulp_status = get_job_ulpstatus(phba, rspiocb); 2760 ulp_word4 = get_job_word4(phba, rspiocb); 2761 2762 if (phba->sli_rev == LPFC_SLI_REV4) { 2763 tmo = get_wqe_tmo(cmdiocb); 2764 } else { 2765 irsp = &rspiocb->iocb; 2766 tmo = irsp->ulpTimeout; 2767 } 2768 2769 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2770 "ADISC cmpl: status:x%x/x%x did:x%x", 2771 ulp_status, ulp_word4, 2772 ndlp->nlp_DID); 2773 2774 /* Since ndlp can be freed in the disc state machine, note if this node 2775 * is being used during discovery. 2776 */ 2777 spin_lock_irq(&ndlp->lock); 2778 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 2779 ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC); 2780 spin_unlock_irq(&ndlp->lock); 2781 /* ADISC completes to NPort <nlp_DID> */ 2782 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2783 "0104 ADISC completes to NPort x%x " 2784 "Data: x%x x%x x%x x%x x%x\n", 2785 ndlp->nlp_DID, ulp_status, ulp_word4, 2786 tmo, disc, vport->num_disc_nodes); 2787 /* Check to see if link went down during discovery */ 2788 if (lpfc_els_chk_latt(vport)) { 2789 spin_lock_irq(&ndlp->lock); 2790 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2791 spin_unlock_irq(&ndlp->lock); 2792 goto out; 2793 } 2794 2795 if (ulp_status) { 2796 /* Check for retry */ 2797 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2798 /* ELS command is being retried */ 2799 if (disc) { 2800 spin_lock_irq(&ndlp->lock); 2801 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2802 spin_unlock_irq(&ndlp->lock); 2803 lpfc_set_disctmo(vport); 2804 } 2805 goto out; 2806 } 2807 /* ADISC failed */ 2808 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2809 "2755 ADISC failure DID:%06X Status:x%x/x%x\n", 2810 ndlp->nlp_DID, ulp_status, 2811 ulp_word4); 2812 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2813 NLP_EVT_CMPL_ADISC); 2814 2815 /* As long as this node is not registered with the SCSI or NVMe 2816 * transport, it is no longer an active node. Otherwise 2817 * devloss handles the final cleanup. 2818 */ 2819 spin_lock_irq(&ndlp->lock); 2820 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { 2821 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2822 if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) 2823 release_node = true; 2824 } 2825 spin_unlock_irq(&ndlp->lock); 2826 2827 if (release_node) 2828 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2829 NLP_EVT_DEVICE_RM); 2830 } else 2831 /* Good status, call state machine */ 2832 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2833 NLP_EVT_CMPL_ADISC); 2834 2835 /* Check to see if there are more ADISCs to be sent */ 2836 if (disc && vport->num_disc_nodes) 2837 lpfc_more_adisc(vport); 2838 out: 2839 lpfc_els_free_iocb(phba, cmdiocb); 2840 lpfc_nlp_put(ndlp); 2841 return; 2842 } 2843 2844 /** 2845 * lpfc_issue_els_adisc - Issue an address discover iocb to an node on a vport 2846 * @vport: pointer to a virtual N_Port data structure. 2847 * @ndlp: pointer to a node-list data structure. 2848 * @retry: number of retries to the command IOCB. 2849 * 2850 * This routine issues an Address Discover (ADISC) for an @ndlp on a 2851 * @vport. 
It prepares the payload of the ADISC ELS command, updates the
2852 * nlp_flag of the ndlp, and invokes the lpfc_sli_issue_iocb() routine
2853 * to issue the ADISC ELS command.
2854 *
2855 * Note that the ndlp reference count will be incremented by 1 for holding the
2856 * ndlp and the reference to ndlp will be stored into the ndlp field of
2857 * the IOCB for the completion callback function to the ADISC ELS command.
2858 *
2859 * Return code
2860 * 0 - successfully issued adisc
2861 * 1 - failed to issue adisc
2862 **/
2863 int
2864 lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2865 uint8_t retry)
2866 {
2867 int rc = 0;
2868 struct lpfc_hba *phba = vport->phba;
2869 ADISC *ap;
2870 struct lpfc_iocbq *elsiocb;
2871 uint8_t *pcmd;
2872 uint16_t cmdsize;
2873
2874 cmdsize = (sizeof(uint32_t) + sizeof(ADISC));
2875 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2876 ndlp->nlp_DID, ELS_CMD_ADISC);
2877 if (!elsiocb)
2878 return 1;
2879
2880 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
2881
2882 /* For ADISC request, remainder of payload is service parameters */
2883 *((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
2884 pcmd += sizeof(uint32_t);
2885
2886 /* Fill in ADISC payload */
2887 ap = (ADISC *) pcmd;
2888 ap->hardAL_PA = phba->fc_pref_ALPA;
2889 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
2890 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2891 ap->DID = be32_to_cpu(vport->fc_myDID);
2892
2893 phba->fc_stat.elsXmitADISC++;
2894 elsiocb->cmd_cmpl = lpfc_cmpl_els_adisc;
2895 spin_lock_irq(&ndlp->lock);
2896 ndlp->nlp_flag |= NLP_ADISC_SND;
2897 spin_unlock_irq(&ndlp->lock);
2898 elsiocb->ndlp = lpfc_nlp_get(ndlp);
2899 if (!elsiocb->ndlp) {
2900 lpfc_els_free_iocb(phba, elsiocb);
2901 goto err;
2902 }
2903
2904 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2905 "Issue ADISC: did:x%x refcnt %d",
2906 ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
2907
2908 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
2909 if (rc == IOCB_ERROR) {
2910 lpfc_els_free_iocb(phba, elsiocb);
2911 lpfc_nlp_put(ndlp);
2912 goto err;
2913 }
2914
2915 return 0;
2916
2917 err:
2918 spin_lock_irq(&ndlp->lock);
2919 ndlp->nlp_flag &= ~NLP_ADISC_SND;
2920 spin_unlock_irq(&ndlp->lock);
2921 return 1;
2922 }
2923
2924 /**
2925 * lpfc_cmpl_els_logo - Completion callback function for logo
2926 * @phba: pointer to lpfc hba data structure.
2927 * @cmdiocb: pointer to lpfc command iocb data structure.
2928 * @rspiocb: pointer to lpfc response iocb data structure.
2929 *
2930 * This routine is the completion function for issuing the ELS Logout (LOGO)
2931 * command. If no error status was reported from the LOGO response, the
2932 * state machine of the associated ndlp shall be invoked for transition with
2933 * respect to the NLP_EVT_CMPL_LOGO event.
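*
* Note that the LOGO is never retried on failure. For FCP or NVMe target
* nodes that have not skipped recovery, discovery is restarted so that a
* subsequent PLOGI can re-establish the login.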
2934 **/ 2935 static void 2936 lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2937 struct lpfc_iocbq *rspiocb) 2938 { 2939 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 2940 struct lpfc_vport *vport = ndlp->vport; 2941 IOCB_t *irsp; 2942 unsigned long flags; 2943 uint32_t skip_recovery = 0; 2944 int wake_up_waiter = 0; 2945 u32 ulp_status; 2946 u32 ulp_word4; 2947 u32 tmo; 2948 2949 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2950 cmdiocb->rsp_iocb = rspiocb; 2951 2952 ulp_status = get_job_ulpstatus(phba, rspiocb); 2953 ulp_word4 = get_job_word4(phba, rspiocb); 2954 2955 if (phba->sli_rev == LPFC_SLI_REV4) { 2956 tmo = get_wqe_tmo(cmdiocb); 2957 } else { 2958 irsp = &rspiocb->iocb; 2959 tmo = irsp->ulpTimeout; 2960 } 2961 2962 spin_lock_irq(&ndlp->lock); 2963 ndlp->nlp_flag &= ~NLP_LOGO_SND; 2964 if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) { 2965 wake_up_waiter = 1; 2966 ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO; 2967 } 2968 spin_unlock_irq(&ndlp->lock); 2969 2970 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2971 "LOGO cmpl: status:x%x/x%x did:x%x", 2972 ulp_status, ulp_word4, 2973 ndlp->nlp_DID); 2974 2975 /* LOGO completes to NPort <nlp_DID> */ 2976 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2977 "0105 LOGO completes to NPort x%x " 2978 "refcnt %d nflags x%x Data: x%x x%x x%x x%x\n", 2979 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag, 2980 ulp_status, ulp_word4, 2981 tmo, vport->num_disc_nodes); 2982 2983 if (lpfc_els_chk_latt(vport)) { 2984 skip_recovery = 1; 2985 goto out; 2986 } 2987 2988 /* The LOGO will not be retried on failure. A LOGO was 2989 * issued to the remote rport and a ACC or RJT or no Answer are 2990 * all acceptable. Note the failure and move forward with 2991 * discovery. The PLOGI will retry. 2992 */ 2993 if (ulp_status) { 2994 /* LOGO failed */ 2995 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2996 "2756 LOGO failure, No Retry DID:%06X " 2997 "Status:x%x/x%x\n", 2998 ndlp->nlp_DID, ulp_status, 2999 ulp_word4); 3000 3001 /* Call NLP_EVT_DEVICE_RM if link is down or LOGO is aborted */ 3002 if (lpfc_error_lost_link(ulp_status, ulp_word4)) { 3003 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 3004 NLP_EVT_DEVICE_RM); 3005 skip_recovery = 1; 3006 goto out; 3007 } 3008 } 3009 3010 /* Call state machine. This will unregister the rpi if needed. */ 3011 lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO); 3012 3013 /* The driver sets this flag for an NPIV instance that doesn't want to 3014 * log into the remote port. 3015 */ 3016 if (ndlp->nlp_flag & NLP_TARGET_REMOVE) { 3017 spin_lock_irq(&ndlp->lock); 3018 if (phba->sli_rev == LPFC_SLI_REV4) 3019 ndlp->nlp_flag |= NLP_RELEASE_RPI; 3020 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 3021 spin_unlock_irq(&ndlp->lock); 3022 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 3023 NLP_EVT_DEVICE_RM); 3024 lpfc_els_free_iocb(phba, cmdiocb); 3025 lpfc_nlp_put(ndlp); 3026 3027 /* Presume the node was released. */ 3028 return; 3029 } 3030 3031 out: 3032 /* Driver is done with the IO. */ 3033 lpfc_els_free_iocb(phba, cmdiocb); 3034 lpfc_nlp_put(ndlp); 3035 3036 /* At this point, the LOGO processing is complete. NOTE: For a 3037 * pt2pt topology, we are assuming the NPortID will only change 3038 * on link up processing. For a LOGO / PLOGI initiated by the 3039 * Initiator, we are assuming the NPortID is not going to change. 
3040 */
3041
3042 if (wake_up_waiter && ndlp->logo_waitq)
3043 wake_up(ndlp->logo_waitq);
3044 /*
3045 * If the node is a target, the handling attempts to recover the port.
3046 * For any other port type, the rpi is unregistered as an implicit
3047 * LOGO.
3048 */
3049 if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) &&
3050 skip_recovery == 0) {
3051 lpfc_cancel_retry_delay_tmo(vport, ndlp);
3052 spin_lock_irqsave(&ndlp->lock, flags);
3053 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
3054 spin_unlock_irqrestore(&ndlp->lock, flags);
3055
3056 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3057 "3187 LOGO completes to NPort x%x: Start "
3058 "Recovery Data: x%x x%x x%x x%x\n",
3059 ndlp->nlp_DID, ulp_status,
3060 ulp_word4, tmo,
3061 vport->num_disc_nodes);
3062 lpfc_disc_start(vport);
3063 return;
3064 }
3065
3066 /* Cleanup path for failed REG_RPI handling. If REG_RPI fails, the
3067 * driver sends a LOGO to the rport to clean up. For fabric and
3068 * initiator ports, clean up the node as long as the node is not
3069 * registered with the transport.
3070 */
3071 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
3072 spin_lock_irq(&ndlp->lock);
3073 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
3074 spin_unlock_irq(&ndlp->lock);
3075 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
3076 NLP_EVT_DEVICE_RM);
3077 }
3078 }
3079
3080 /**
3081 * lpfc_issue_els_logo - Issue a logo to a node on a vport
3082 * @vport: pointer to a virtual N_Port data structure.
3083 * @ndlp: pointer to a node-list data structure.
3084 * @retry: number of retries to the command IOCB.
3085 *
3086 * This routine constructs and issues an ELS Logout (LOGO) iocb command
3087 * to a remote node, referred to by an @ndlp on a @vport. It constructs the
3088 * payload of the IOCB, properly sets up the @ndlp state, and invokes the
3089 * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command.
3090 *
3091 * Note that the ndlp reference count will be incremented by 1 for holding the
3092 * ndlp and the reference to ndlp will be stored into the ndlp field of
3093 * the IOCB for the completion callback function to the LOGO ELS command.
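*
* If a LOGO is already pending on the @ndlp (NLP_LOGO_SND is set), the
* routine returns 0 without issuing another one.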
3094 * 3095 * Callers of this routine are expected to unregister the RPI first 3096 * 3097 * Return code 3098 * 0 - successfully issued logo 3099 * 1 - failed to issue logo 3100 **/ 3101 int 3102 lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 3103 uint8_t retry) 3104 { 3105 struct lpfc_hba *phba = vport->phba; 3106 struct lpfc_iocbq *elsiocb; 3107 uint8_t *pcmd; 3108 uint16_t cmdsize; 3109 int rc; 3110 3111 spin_lock_irq(&ndlp->lock); 3112 if (ndlp->nlp_flag & NLP_LOGO_SND) { 3113 spin_unlock_irq(&ndlp->lock); 3114 return 0; 3115 } 3116 spin_unlock_irq(&ndlp->lock); 3117 3118 cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name); 3119 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3120 ndlp->nlp_DID, ELS_CMD_LOGO); 3121 if (!elsiocb) 3122 return 1; 3123 3124 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 3125 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 3126 pcmd += sizeof(uint32_t); 3127 3128 /* Fill in LOGO payload */ 3129 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 3130 pcmd += sizeof(uint32_t); 3131 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 3132 3133 phba->fc_stat.elsXmitLOGO++; 3134 elsiocb->cmd_cmpl = lpfc_cmpl_els_logo; 3135 spin_lock_irq(&ndlp->lock); 3136 ndlp->nlp_flag |= NLP_LOGO_SND; 3137 ndlp->nlp_flag &= ~NLP_ISSUE_LOGO; 3138 spin_unlock_irq(&ndlp->lock); 3139 elsiocb->ndlp = lpfc_nlp_get(ndlp); 3140 if (!elsiocb->ndlp) { 3141 lpfc_els_free_iocb(phba, elsiocb); 3142 goto err; 3143 } 3144 3145 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3146 "Issue LOGO: did:x%x refcnt %d", 3147 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3148 3149 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3150 if (rc == IOCB_ERROR) { 3151 lpfc_els_free_iocb(phba, elsiocb); 3152 lpfc_nlp_put(ndlp); 3153 goto err; 3154 } 3155 3156 spin_lock_irq(&ndlp->lock); 3157 ndlp->nlp_prev_state = ndlp->nlp_state; 3158 spin_unlock_irq(&ndlp->lock); 3159 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 3160 return 0; 3161 3162 err: 3163 spin_lock_irq(&ndlp->lock); 3164 ndlp->nlp_flag &= ~NLP_LOGO_SND; 3165 spin_unlock_irq(&ndlp->lock); 3166 return 1; 3167 } 3168 3169 /** 3170 * lpfc_cmpl_els_cmd - Completion callback function for generic els command 3171 * @phba: pointer to lpfc hba data structure. 3172 * @cmdiocb: pointer to lpfc command iocb data structure. 3173 * @rspiocb: pointer to lpfc response iocb data structure. 3174 * 3175 * This routine is a generic completion callback function for ELS commands. 3176 * Specifically, it is the callback function which does not need to perform 3177 * any command specific operations. It is currently used by the ELS command 3178 * issuing routines for RSCN, lpfc_issue_els_rscn, and the ELS Fibre Channel 3179 * Address Resolution Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). 3180 * Other than certain debug loggings, this callback function simply invokes the 3181 * lpfc_els_chk_latt() routine to check whether link went down during the 3182 * discovery process. 
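*
* A minimal sketch of how an issuing routine typically wires up this
* callback, mirroring lpfc_issue_els_rscn() later in this file:
*
*	elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd;
*	elsiocb->ndlp = lpfc_nlp_get(ndlp);
*	if (!elsiocb->ndlp) {
*		lpfc_els_free_iocb(phba, elsiocb);
*		return 1;
*	}
*	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
*	if (rc == IOCB_ERROR) {
*		lpfc_els_free_iocb(phba, elsiocb);
*		lpfc_nlp_put(ndlp);
*		return 1;
*	}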
3183 **/ 3184 static void 3185 lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3186 struct lpfc_iocbq *rspiocb) 3187 { 3188 struct lpfc_vport *vport = cmdiocb->vport; 3189 struct lpfc_nodelist *free_ndlp; 3190 IOCB_t *irsp; 3191 u32 ulp_status, ulp_word4, tmo, did, iotag; 3192 3193 ulp_status = get_job_ulpstatus(phba, rspiocb); 3194 ulp_word4 = get_job_word4(phba, rspiocb); 3195 did = get_job_els_rsp64_did(phba, cmdiocb); 3196 3197 if (phba->sli_rev == LPFC_SLI_REV4) { 3198 tmo = get_wqe_tmo(cmdiocb); 3199 iotag = get_wqe_reqtag(cmdiocb); 3200 } else { 3201 irsp = &rspiocb->iocb; 3202 tmo = irsp->ulpTimeout; 3203 iotag = irsp->ulpIoTag; 3204 } 3205 3206 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3207 "ELS cmd cmpl: status:x%x/x%x did:x%x", 3208 ulp_status, ulp_word4, did); 3209 3210 /* ELS cmd tag <ulpIoTag> completes */ 3211 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3212 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n", 3213 iotag, ulp_status, ulp_word4, tmo); 3214 3215 /* Check to see if link went down during discovery */ 3216 lpfc_els_chk_latt(vport); 3217 3218 free_ndlp = cmdiocb->ndlp; 3219 3220 lpfc_els_free_iocb(phba, cmdiocb); 3221 lpfc_nlp_put(free_ndlp); 3222 } 3223 3224 /** 3225 * lpfc_reg_fab_ctrl_node - RPI register the fabric controller node. 3226 * @vport: pointer to lpfc_vport data structure. 3227 * @fc_ndlp: pointer to the fabric controller (0xfffffd) node. 3228 * 3229 * This routine registers the rpi assigned to the fabric controller 3230 * NPort_ID (0xfffffd) with the port and moves the node to UNMAPPED 3231 * state triggering a registration with the SCSI transport. 3232 * 3233 * This routine is single out because the fabric controller node 3234 * does not receive a PLOGI. This routine is consumed by the 3235 * SCR and RDF ELS commands. Callers are expected to qualify 3236 * with SLI4 first. 3237 **/ 3238 static int 3239 lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp) 3240 { 3241 int rc = 0; 3242 struct lpfc_hba *phba = vport->phba; 3243 struct lpfc_nodelist *ns_ndlp; 3244 LPFC_MBOXQ_t *mbox; 3245 3246 if (fc_ndlp->nlp_flag & NLP_RPI_REGISTERED) 3247 return rc; 3248 3249 ns_ndlp = lpfc_findnode_did(vport, NameServer_DID); 3250 if (!ns_ndlp) 3251 return -ENODEV; 3252 3253 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 3254 "0935 %s: Reg FC RPI x%x on FC DID x%x NSSte: x%x\n", 3255 __func__, fc_ndlp->nlp_rpi, fc_ndlp->nlp_DID, 3256 ns_ndlp->nlp_state); 3257 if (ns_ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 3258 return -ENODEV; 3259 3260 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3261 if (!mbox) { 3262 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3263 "0936 %s: no memory for reg_login " 3264 "Data: x%x x%x x%x x%x\n", __func__, 3265 fc_ndlp->nlp_DID, fc_ndlp->nlp_state, 3266 fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi); 3267 return -ENOMEM; 3268 } 3269 rc = lpfc_reg_rpi(phba, vport->vpi, fc_ndlp->nlp_DID, 3270 (u8 *)&vport->fc_sparam, mbox, fc_ndlp->nlp_rpi); 3271 if (rc) { 3272 rc = -EACCES; 3273 goto out; 3274 } 3275 3276 fc_ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; 3277 mbox->mbox_cmpl = lpfc_mbx_cmpl_fc_reg_login; 3278 mbox->ctx_ndlp = lpfc_nlp_get(fc_ndlp); 3279 if (!mbox->ctx_ndlp) { 3280 rc = -ENOMEM; 3281 goto out; 3282 } 3283 3284 mbox->vport = vport; 3285 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 3286 if (rc == MBX_NOT_FINISHED) { 3287 rc = -ENODEV; 3288 lpfc_nlp_put(fc_ndlp); 3289 goto out; 3290 } 3291 /* Success path. Exit. 
*/ 3292 lpfc_nlp_set_state(vport, fc_ndlp, 3293 NLP_STE_REG_LOGIN_ISSUE); 3294 return 0; 3295 3296 out: 3297 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 3298 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3299 "0938 %s: failed to format reg_login " 3300 "Data: x%x x%x x%x x%x\n", __func__, 3301 fc_ndlp->nlp_DID, fc_ndlp->nlp_state, 3302 fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi); 3303 return rc; 3304 } 3305 3306 /** 3307 * lpfc_cmpl_els_disc_cmd - Completion callback function for Discovery ELS cmd 3308 * @phba: pointer to lpfc hba data structure. 3309 * @cmdiocb: pointer to lpfc command iocb data structure. 3310 * @rspiocb: pointer to lpfc response iocb data structure. 3311 * 3312 * This routine is a generic completion callback function for Discovery ELS cmd. 3313 * Currently used by the ELS command issuing routines for the ELS State Change 3314 * Request (SCR), lpfc_issue_els_scr() and the ELS RDF, lpfc_issue_els_rdf(). 3315 * These commands will be retried once only for ELS timeout errors. 3316 **/ 3317 static void 3318 lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3319 struct lpfc_iocbq *rspiocb) 3320 { 3321 struct lpfc_vport *vport = cmdiocb->vport; 3322 IOCB_t *irsp; 3323 struct lpfc_els_rdf_rsp *prdf; 3324 struct lpfc_dmabuf *pcmd, *prsp; 3325 u32 *pdata; 3326 u32 cmd; 3327 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 3328 u32 ulp_status, ulp_word4, tmo, did, iotag; 3329 3330 ulp_status = get_job_ulpstatus(phba, rspiocb); 3331 ulp_word4 = get_job_word4(phba, rspiocb); 3332 did = get_job_els_rsp64_did(phba, cmdiocb); 3333 3334 if (phba->sli_rev == LPFC_SLI_REV4) { 3335 tmo = get_wqe_tmo(cmdiocb); 3336 iotag = get_wqe_reqtag(cmdiocb); 3337 } else { 3338 irsp = &rspiocb->iocb; 3339 tmo = irsp->ulpTimeout; 3340 iotag = irsp->ulpIoTag; 3341 } 3342 3343 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3344 "ELS cmd cmpl: status:x%x/x%x did:x%x", 3345 ulp_status, ulp_word4, did); 3346 3347 /* ELS cmd tag <ulpIoTag> completes */ 3348 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3349 "0217 ELS cmd tag x%x completes Data: x%x x%x x%x x%x\n", 3350 iotag, ulp_status, ulp_word4, tmo, cmdiocb->retry); 3351 3352 pcmd = cmdiocb->cmd_dmabuf; 3353 if (!pcmd) 3354 goto out; 3355 3356 pdata = (u32 *)pcmd->virt; 3357 if (!pdata) 3358 goto out; 3359 cmd = *pdata; 3360 3361 /* Only 1 retry for ELS Timeout only */ 3362 if (ulp_status == IOSTAT_LOCAL_REJECT && 3363 ((ulp_word4 & IOERR_PARAM_MASK) == 3364 IOERR_SEQUENCE_TIMEOUT)) { 3365 cmdiocb->retry++; 3366 if (cmdiocb->retry <= 1) { 3367 switch (cmd) { 3368 case ELS_CMD_SCR: 3369 lpfc_issue_els_scr(vport, cmdiocb->retry); 3370 break; 3371 case ELS_CMD_EDC: 3372 lpfc_issue_els_edc(vport, cmdiocb->retry); 3373 break; 3374 case ELS_CMD_RDF: 3375 lpfc_issue_els_rdf(vport, cmdiocb->retry); 3376 break; 3377 } 3378 goto out; 3379 } 3380 phba->fc_stat.elsRetryExceeded++; 3381 } 3382 if (cmd == ELS_CMD_EDC) { 3383 /* must be called before checking uplStatus and returning */ 3384 lpfc_cmpl_els_edc(phba, cmdiocb, rspiocb); 3385 return; 3386 } 3387 if (ulp_status) { 3388 /* ELS discovery cmd completes with error */ 3389 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT, 3390 "4203 ELS cmd x%x error: x%x x%X\n", cmd, 3391 ulp_status, ulp_word4); 3392 goto out; 3393 } 3394 3395 /* The RDF response doesn't have any impact on the running driver 3396 * but the notification descriptors are dumped here for support. 
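* At most ELS_RDF_REG_TAG_CNT descriptor tags are logged.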
3397 */ 3398 if (cmd == ELS_CMD_RDF) { 3399 int i; 3400 3401 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 3402 if (!prsp) 3403 goto out; 3404 3405 prdf = (struct lpfc_els_rdf_rsp *)prsp->virt; 3406 if (!prdf) 3407 goto out; 3408 3409 for (i = 0; i < ELS_RDF_REG_TAG_CNT && 3410 i < be32_to_cpu(prdf->reg_d1.reg_desc.count); i++) 3411 lpfc_printf_vlog(vport, KERN_INFO, 3412 LOG_ELS | LOG_CGN_MGMT, 3413 "4677 Fabric RDF Notification Grant " 3414 "Data: 0x%08x Reg: %x %x\n", 3415 be32_to_cpu( 3416 prdf->reg_d1.desc_tags[i]), 3417 phba->cgn_reg_signal, 3418 phba->cgn_reg_fpin); 3419 } 3420 3421 out: 3422 /* Check to see if link went down during discovery */ 3423 lpfc_els_chk_latt(vport); 3424 lpfc_els_free_iocb(phba, cmdiocb); 3425 lpfc_nlp_put(ndlp); 3426 return; 3427 } 3428 3429 /** 3430 * lpfc_issue_els_scr - Issue a scr to an node on a vport 3431 * @vport: pointer to a host virtual N_Port data structure. 3432 * @retry: retry counter for the command IOCB. 3433 * 3434 * This routine issues a State Change Request (SCR) to a fabric node 3435 * on a @vport. The remote node is Fabric Controller (0xfffffd). It 3436 * first search the @vport node list to find the matching ndlp. If no such 3437 * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An 3438 * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb() 3439 * routine is invoked to send the SCR IOCB. 3440 * 3441 * Note that the ndlp reference count will be incremented by 1 for holding the 3442 * ndlp and the reference to ndlp will be stored into the ndlp field of 3443 * the IOCB for the completion callback function to the SCR ELS command. 3444 * 3445 * Return code 3446 * 0 - Successfully issued scr command 3447 * 1 - Failed to issue scr command 3448 **/ 3449 int 3450 lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry) 3451 { 3452 int rc = 0; 3453 struct lpfc_hba *phba = vport->phba; 3454 struct lpfc_iocbq *elsiocb; 3455 uint8_t *pcmd; 3456 uint16_t cmdsize; 3457 struct lpfc_nodelist *ndlp; 3458 3459 cmdsize = (sizeof(uint32_t) + sizeof(SCR)); 3460 3461 ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID); 3462 if (!ndlp) { 3463 ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID); 3464 if (!ndlp) 3465 return 1; 3466 lpfc_enqueue_node(vport, ndlp); 3467 } 3468 3469 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3470 ndlp->nlp_DID, ELS_CMD_SCR); 3471 if (!elsiocb) 3472 return 1; 3473 3474 if (phba->sli_rev == LPFC_SLI_REV4) { 3475 rc = lpfc_reg_fab_ctrl_node(vport, ndlp); 3476 if (rc) { 3477 lpfc_els_free_iocb(phba, elsiocb); 3478 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3479 "0937 %s: Failed to reg fc node, rc %d\n", 3480 __func__, rc); 3481 return 1; 3482 } 3483 } 3484 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 3485 3486 *((uint32_t *) (pcmd)) = ELS_CMD_SCR; 3487 pcmd += sizeof(uint32_t); 3488 3489 /* For SCR, remainder of payload is SCR parameter page */ 3490 memset(pcmd, 0, sizeof(SCR)); 3491 ((SCR *) pcmd)->Function = SCR_FUNC_FULL; 3492 3493 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3494 "Issue SCR: did:x%x", 3495 ndlp->nlp_DID, 0, 0); 3496 3497 phba->fc_stat.elsXmitSCR++; 3498 elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd; 3499 elsiocb->ndlp = lpfc_nlp_get(ndlp); 3500 if (!elsiocb->ndlp) { 3501 lpfc_els_free_iocb(phba, elsiocb); 3502 return 1; 3503 } 3504 3505 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3506 "Issue SCR: did:x%x refcnt %d", 3507 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3508 3509 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3510 if 
(rc == IOCB_ERROR) { 3511 lpfc_els_free_iocb(phba, elsiocb); 3512 lpfc_nlp_put(ndlp); 3513 return 1; 3514 } 3515 3516 return 0; 3517 } 3518 3519 /** 3520 * lpfc_issue_els_rscn - Issue an RSCN to the Fabric Controller (Fabric) 3521 * or the other nport (pt2pt). 3522 * @vport: pointer to a host virtual N_Port data structure. 3523 * @retry: number of retries to the command IOCB. 3524 * 3525 * This routine issues a RSCN to the Fabric Controller (DID 0xFFFFFD) 3526 * when connected to a fabric, or to the remote port when connected 3527 * in point-to-point mode. When sent to the Fabric Controller, it will 3528 * replay the RSCN to registered recipients. 3529 * 3530 * Note that the ndlp reference count will be incremented by 1 for holding the 3531 * ndlp and the reference to ndlp will be stored into the ndlp field of 3532 * the IOCB for the completion callback function to the RSCN ELS command. 3533 * 3534 * Return code 3535 * 0 - Successfully issued RSCN command 3536 * 1 - Failed to issue RSCN command 3537 **/ 3538 int 3539 lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry) 3540 { 3541 int rc = 0; 3542 struct lpfc_hba *phba = vport->phba; 3543 struct lpfc_iocbq *elsiocb; 3544 struct lpfc_nodelist *ndlp; 3545 struct { 3546 struct fc_els_rscn rscn; 3547 struct fc_els_rscn_page portid; 3548 } *event; 3549 uint32_t nportid; 3550 uint16_t cmdsize = sizeof(*event); 3551 3552 /* Not supported for private loop */ 3553 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP && 3554 !(vport->fc_flag & FC_PUBLIC_LOOP)) 3555 return 1; 3556 3557 if (vport->fc_flag & FC_PT2PT) { 3558 /* find any mapped nport - that would be the other nport */ 3559 ndlp = lpfc_findnode_mapped(vport); 3560 if (!ndlp) 3561 return 1; 3562 } else { 3563 nportid = FC_FID_FCTRL; 3564 /* find the fabric controller node */ 3565 ndlp = lpfc_findnode_did(vport, nportid); 3566 if (!ndlp) { 3567 /* if one didn't exist, make one */ 3568 ndlp = lpfc_nlp_init(vport, nportid); 3569 if (!ndlp) 3570 return 1; 3571 lpfc_enqueue_node(vport, ndlp); 3572 } 3573 } 3574 3575 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3576 ndlp->nlp_DID, ELS_CMD_RSCN_XMT); 3577 3578 if (!elsiocb) 3579 return 1; 3580 3581 event = elsiocb->cmd_dmabuf->virt; 3582 3583 event->rscn.rscn_cmd = ELS_RSCN; 3584 event->rscn.rscn_page_len = sizeof(struct fc_els_rscn_page); 3585 event->rscn.rscn_plen = cpu_to_be16(cmdsize); 3586 3587 nportid = vport->fc_myDID; 3588 /* appears that page flags must be 0 for fabric to broadcast RSCN */ 3589 event->portid.rscn_page_flags = 0; 3590 event->portid.rscn_fid[0] = (nportid & 0x00FF0000) >> 16; 3591 event->portid.rscn_fid[1] = (nportid & 0x0000FF00) >> 8; 3592 event->portid.rscn_fid[2] = nportid & 0x000000FF; 3593 3594 phba->fc_stat.elsXmitRSCN++; 3595 elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd; 3596 elsiocb->ndlp = lpfc_nlp_get(ndlp); 3597 if (!elsiocb->ndlp) { 3598 lpfc_els_free_iocb(phba, elsiocb); 3599 return 1; 3600 } 3601 3602 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3603 "Issue RSCN: did:x%x", 3604 ndlp->nlp_DID, 0, 0); 3605 3606 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3607 if (rc == IOCB_ERROR) { 3608 lpfc_els_free_iocb(phba, elsiocb); 3609 lpfc_nlp_put(ndlp); 3610 return 1; 3611 } 3612 3613 return 0; 3614 } 3615 3616 /** 3617 * lpfc_issue_els_farpr - Issue a farp to an node on a vport 3618 * @vport: pointer to a host virtual N_Port data structure. 3619 * @nportid: N_Port identifier to the remote node. 3620 * @retry: number of retries to the command IOCB. 
3621 * 3622 * This routine issues a Fibre Channel Address Resolution Response 3623 * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid) 3624 * is passed into the function. It first search the @vport node list to find 3625 * the matching ndlp. If no such ndlp is found, a new ndlp shall be created 3626 * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the 3627 * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command. 3628 * 3629 * Note that the ndlp reference count will be incremented by 1 for holding the 3630 * ndlp and the reference to ndlp will be stored into the ndlp field of 3631 * the IOCB for the completion callback function to the FARPR ELS command. 3632 * 3633 * Return code 3634 * 0 - Successfully issued farpr command 3635 * 1 - Failed to issue farpr command 3636 **/ 3637 static int 3638 lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) 3639 { 3640 int rc = 0; 3641 struct lpfc_hba *phba = vport->phba; 3642 struct lpfc_iocbq *elsiocb; 3643 FARP *fp; 3644 uint8_t *pcmd; 3645 uint32_t *lp; 3646 uint16_t cmdsize; 3647 struct lpfc_nodelist *ondlp; 3648 struct lpfc_nodelist *ndlp; 3649 3650 cmdsize = (sizeof(uint32_t) + sizeof(FARP)); 3651 3652 ndlp = lpfc_findnode_did(vport, nportid); 3653 if (!ndlp) { 3654 ndlp = lpfc_nlp_init(vport, nportid); 3655 if (!ndlp) 3656 return 1; 3657 lpfc_enqueue_node(vport, ndlp); 3658 } 3659 3660 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3661 ndlp->nlp_DID, ELS_CMD_FARPR); 3662 if (!elsiocb) 3663 return 1; 3664 3665 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 3666 3667 *((uint32_t *) (pcmd)) = ELS_CMD_FARPR; 3668 pcmd += sizeof(uint32_t); 3669 3670 /* Fill in FARPR payload */ 3671 fp = (FARP *) (pcmd); 3672 memset(fp, 0, sizeof(FARP)); 3673 lp = (uint32_t *) pcmd; 3674 *lp++ = be32_to_cpu(nportid); 3675 *lp++ = be32_to_cpu(vport->fc_myDID); 3676 fp->Rflags = 0; 3677 fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE); 3678 3679 memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name)); 3680 memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 3681 ondlp = lpfc_findnode_did(vport, nportid); 3682 if (ondlp) { 3683 memcpy(&fp->OportName, &ondlp->nlp_portname, 3684 sizeof(struct lpfc_name)); 3685 memcpy(&fp->OnodeName, &ondlp->nlp_nodename, 3686 sizeof(struct lpfc_name)); 3687 } 3688 3689 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3690 "Issue FARPR: did:x%x", 3691 ndlp->nlp_DID, 0, 0); 3692 3693 phba->fc_stat.elsXmitFARPR++; 3694 elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd; 3695 elsiocb->ndlp = lpfc_nlp_get(ndlp); 3696 if (!elsiocb->ndlp) { 3697 lpfc_els_free_iocb(phba, elsiocb); 3698 return 1; 3699 } 3700 3701 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3702 if (rc == IOCB_ERROR) { 3703 /* The additional lpfc_nlp_put will cause the following 3704 * lpfc_els_free_iocb routine to trigger the release of 3705 * the node. 3706 */ 3707 lpfc_els_free_iocb(phba, elsiocb); 3708 lpfc_nlp_put(ndlp); 3709 return 1; 3710 } 3711 /* This will cause the callback-function lpfc_cmpl_els_cmd to 3712 * trigger the release of the node. 3713 */ 3714 /* Don't release reference count as RDF is likely outstanding */ 3715 return 0; 3716 } 3717 3718 /** 3719 * lpfc_issue_els_rdf - Register for diagnostic functions from the fabric. 3720 * @vport: pointer to a host virtual N_Port data structure. 3721 * @retry: retry counter for the command IOCB. 
3722 * 3723 * This routine issues an ELS RDF to the Fabric Controller to register 3724 * for diagnostic functions. 3725 * 3726 * Note that the ndlp reference count will be incremented by 1 for holding the 3727 * ndlp and the reference to ndlp will be stored into the ndlp field of 3728 * the IOCB for the completion callback function to the RDF ELS command. 3729 * 3730 * Return code 3731 * 0 - Successfully issued rdf command 3732 * 1 - Failed to issue rdf command 3733 **/ 3734 int 3735 lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry) 3736 { 3737 struct lpfc_hba *phba = vport->phba; 3738 struct lpfc_iocbq *elsiocb; 3739 struct lpfc_els_rdf_req *prdf; 3740 struct lpfc_nodelist *ndlp; 3741 uint16_t cmdsize; 3742 int rc; 3743 3744 cmdsize = sizeof(*prdf); 3745 3746 ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID); 3747 if (!ndlp) { 3748 ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID); 3749 if (!ndlp) 3750 return -ENODEV; 3751 lpfc_enqueue_node(vport, ndlp); 3752 } 3753 3754 /* RDF ELS is not required on an NPIV VN_Port. */ 3755 if (vport->port_type == LPFC_NPIV_PORT) 3756 return -EACCES; 3757 3758 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3759 ndlp->nlp_DID, ELS_CMD_RDF); 3760 if (!elsiocb) 3761 return -ENOMEM; 3762 3763 /* Configure the payload for the supported FPIN events. */ 3764 prdf = (struct lpfc_els_rdf_req *)elsiocb->cmd_dmabuf->virt; 3765 memset(prdf, 0, cmdsize); 3766 prdf->rdf.fpin_cmd = ELS_RDF; 3767 prdf->rdf.desc_len = cpu_to_be32(sizeof(struct lpfc_els_rdf_req) - 3768 sizeof(struct fc_els_rdf)); 3769 prdf->reg_d1.reg_desc.desc_tag = cpu_to_be32(ELS_DTAG_FPIN_REGISTER); 3770 prdf->reg_d1.reg_desc.desc_len = cpu_to_be32( 3771 FC_TLV_DESC_LENGTH_FROM_SZ(prdf->reg_d1)); 3772 prdf->reg_d1.reg_desc.count = cpu_to_be32(ELS_RDF_REG_TAG_CNT); 3773 prdf->reg_d1.desc_tags[0] = cpu_to_be32(ELS_DTAG_LNK_INTEGRITY); 3774 prdf->reg_d1.desc_tags[1] = cpu_to_be32(ELS_DTAG_DELIVERY); 3775 prdf->reg_d1.desc_tags[2] = cpu_to_be32(ELS_DTAG_PEER_CONGEST); 3776 prdf->reg_d1.desc_tags[3] = cpu_to_be32(ELS_DTAG_CONGESTION); 3777 3778 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3779 "6444 Xmit RDF to remote NPORT x%x Reg: %x %x\n", 3780 ndlp->nlp_DID, phba->cgn_reg_signal, 3781 phba->cgn_reg_fpin); 3782 3783 phba->cgn_fpin_frequency = LPFC_FPIN_INIT_FREQ; 3784 elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd; 3785 elsiocb->ndlp = lpfc_nlp_get(ndlp); 3786 if (!elsiocb->ndlp) { 3787 lpfc_els_free_iocb(phba, elsiocb); 3788 return -EIO; 3789 } 3790 3791 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3792 "Issue RDF: did:x%x refcnt %d", 3793 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3794 3795 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3796 if (rc == IOCB_ERROR) { 3797 lpfc_els_free_iocb(phba, elsiocb); 3798 lpfc_nlp_put(ndlp); 3799 return -EIO; 3800 } 3801 return 0; 3802 } 3803 3804 /** 3805 * lpfc_els_rcv_rdf - Receive RDF ELS request from the fabric. 3806 * @vport: pointer to a host virtual N_Port data structure. 3807 * @cmdiocb: pointer to lpfc command iocb data structure. 3808 * @ndlp: pointer to a node-list data structure. 3809 * 3810 * A received RDF implies a possible change to fabric supported diagnostic 3811 * functions. This routine sends LS_ACC and then has the Nx_Port issue a new 3812 * RDF request to reregister for supported diagnostic functions. 
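 *
 * Illustrative sketch (not part of the driver): the registration payload
 * that lpfc_issue_els_rdf() builds above is one TLV descriptor - a
 * big-endian tag, a big-endian length that excludes the 8-byte tag+length
 * header, a tag count, then the registered FPIN descriptor tags.  A
 * stand-alone, user-space view with hypothetical names:
 *
 *   #include <stdint.h>
 *   #include <arpa/inet.h>                  // htonl() for big-endian fields
 *
 *   struct fpin_reg_sk {                    // mirrors the wire layout only
 *           uint32_t desc_tag;              // FPIN registration descriptor
 *           uint32_t desc_len;              // bytes after the 8-byte header
 *           uint32_t count;                 // number of tags that follow
 *           uint32_t tags[4];               // e.g. link integrity, delivery
 *   };
 *
 *   static void fill_fpin_reg_sk(struct fpin_reg_sk *d, uint32_t reg_tag,
 *                                const uint32_t *tags, uint32_t n)
 *   {
 *           d->desc_tag = htonl(reg_tag);
 *           d->desc_len = htonl(sizeof(*d) - 2 * sizeof(uint32_t));
 *           d->count = htonl(n);
 *           for (uint32_t i = 0; i < n && i < 4; i++)
 *                   d->tags[i] = htonl(tags[i]);
 *   }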
3813 * 3814 * Return code 3815 * 0 - Success 3816 * -EIO - Failed to process received RDF 3817 **/ 3818 static int 3819 lpfc_els_rcv_rdf(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 3820 struct lpfc_nodelist *ndlp) 3821 { 3822 /* Send LS_ACC */ 3823 if (lpfc_els_rsp_acc(vport, ELS_CMD_RDF, cmdiocb, ndlp, NULL)) { 3824 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3825 "1623 Failed to RDF_ACC from x%x for x%x\n", 3826 ndlp->nlp_DID, vport->fc_myDID); 3827 return -EIO; 3828 } 3829 3830 /* Issue new RDF for reregistering */ 3831 if (lpfc_issue_els_rdf(vport, 0)) { 3832 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3833 "2623 Failed to re register RDF for x%x\n", 3834 vport->fc_myDID); 3835 return -EIO; 3836 } 3837 3838 return 0; 3839 } 3840 3841 /** 3842 * lpfc_least_capable_settings - helper function for EDC rsp processing 3843 * @phba: pointer to lpfc hba data structure. 3844 * @pcgd: pointer to congestion detection descriptor in EDC rsp. 3845 * 3846 * This helper routine determines the least capable setting for 3847 * congestion signals, signal freq, including scale, from the 3848 * congestion detection descriptor in the EDC rsp. The routine 3849 * sets @phba values in preparation for a set_featues mailbox. 3850 **/ 3851 static void 3852 lpfc_least_capable_settings(struct lpfc_hba *phba, 3853 struct fc_diag_cg_sig_desc *pcgd) 3854 { 3855 u32 rsp_sig_cap = 0, drv_sig_cap = 0; 3856 u32 rsp_sig_freq_cyc = 0, rsp_sig_freq_scale = 0; 3857 3858 /* Get rsp signal and frequency capabilities. */ 3859 rsp_sig_cap = be32_to_cpu(pcgd->xmt_signal_capability); 3860 rsp_sig_freq_cyc = be16_to_cpu(pcgd->xmt_signal_frequency.count); 3861 rsp_sig_freq_scale = be16_to_cpu(pcgd->xmt_signal_frequency.units); 3862 3863 /* If the Fport does not support signals. Set FPIN only */ 3864 if (rsp_sig_cap == EDC_CG_SIG_NOTSUPPORTED) 3865 goto out_no_support; 3866 3867 /* Apply the xmt scale to the xmt cycle to get the correct frequency. 3868 * Adapter default is 100 millisSeconds. Convert all xmt cycle values 3869 * to milliSeconds. 3870 */ 3871 switch (rsp_sig_freq_scale) { 3872 case EDC_CG_SIGFREQ_SEC: 3873 rsp_sig_freq_cyc *= MSEC_PER_SEC; 3874 break; 3875 case EDC_CG_SIGFREQ_MSEC: 3876 rsp_sig_freq_cyc = 1; 3877 break; 3878 default: 3879 goto out_no_support; 3880 } 3881 3882 /* Convenient shorthand. */ 3883 drv_sig_cap = phba->cgn_reg_signal; 3884 3885 /* Choose the least capable frequency. */ 3886 if (rsp_sig_freq_cyc > phba->cgn_sig_freq) 3887 phba->cgn_sig_freq = rsp_sig_freq_cyc; 3888 3889 /* Should be some common signals support. Settle on least capable 3890 * signal and adjust FPIN values. Initialize defaults to ease the 3891 * decision. 
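 *
 * Illustrative sketch (not part of the driver): both halves of this
 * negotiation reduce to "take the weaker side".  For the signal interval
 * the larger (slower) value wins, and for the warning/alarm capability
 * the lower level wins, with FPIN left to cover whatever the agreed
 * signal level cannot.  Hypothetical stand-alone form:
 *
 *   #include <stdint.h>
 *
 *   enum sig_level_sk { SIG_NONE_SK, SIG_WARN_SK, SIG_WARN_ALARM_SK };
 *
 *   static uint32_t least_interval_ms_sk(uint32_t local, uint32_t remote)
 *   {
 *           return remote > local ? remote : local;   // slower side wins
 *   }
 *
 *   static enum sig_level_sk least_level_sk(enum sig_level_sk local,
 *                                           enum sig_level_sk remote)
 *   {
 *           return local < remote ? local : remote;   // weaker side wins
 *   }
 *
 * In the code below, warnings stay FPIN-reported unless at least a
 * warning-only signal is agreed, and alarms stay FPIN-reported unless
 * both sides support warning plus alarm signals.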
3892 */ 3893 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM; 3894 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 3895 if (rsp_sig_cap == EDC_CG_SIG_WARN_ONLY && 3896 (drv_sig_cap == EDC_CG_SIG_WARN_ONLY || 3897 drv_sig_cap == EDC_CG_SIG_WARN_ALARM)) { 3898 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY; 3899 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN; 3900 } 3901 if (rsp_sig_cap == EDC_CG_SIG_WARN_ALARM) { 3902 if (drv_sig_cap == EDC_CG_SIG_WARN_ALARM) { 3903 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ALARM; 3904 phba->cgn_reg_fpin = LPFC_CGN_FPIN_NONE; 3905 } 3906 if (drv_sig_cap == EDC_CG_SIG_WARN_ONLY) { 3907 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY; 3908 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN; 3909 } 3910 } 3911 3912 /* We are NOT recording signal frequency in congestion info buffer */ 3913 return; 3914 3915 out_no_support: 3916 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 3917 phba->cgn_sig_freq = 0; 3918 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; 3919 } 3920 3921 DECLARE_ENUM2STR_LOOKUP(lpfc_get_tlv_dtag_nm, fc_ls_tlv_dtag, 3922 FC_LS_TLV_DTAG_INIT); 3923 3924 /** 3925 * lpfc_cmpl_els_edc - Completion callback function for EDC 3926 * @phba: pointer to lpfc hba data structure. 3927 * @cmdiocb: pointer to lpfc command iocb data structure. 3928 * @rspiocb: pointer to lpfc response iocb data structure. 3929 * 3930 * This routine is the completion callback function for issuing the Exchange 3931 * Diagnostic Capabilities (EDC) command. The driver issues an EDC to 3932 * notify the FPort of its Congestion and Link Fault capabilities. This 3933 * routine parses the FPort's response and decides on the least common 3934 * values applicable to both FPort and NPort for Warnings and Alarms that 3935 * are communicated via hardware signals. 3936 **/ 3937 static void 3938 lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3939 struct lpfc_iocbq *rspiocb) 3940 { 3941 IOCB_t *irsp_iocb; 3942 struct fc_els_edc_resp *edc_rsp; 3943 struct fc_tlv_desc *tlv; 3944 struct fc_diag_cg_sig_desc *pcgd; 3945 struct fc_diag_lnkflt_desc *plnkflt; 3946 struct lpfc_dmabuf *pcmd, *prsp; 3947 const char *dtag_nm; 3948 u32 *pdata, dtag; 3949 int desc_cnt = 0, bytes_remain; 3950 bool rcv_cap_desc = false; 3951 struct lpfc_nodelist *ndlp; 3952 u32 ulp_status, ulp_word4, tmo, did, iotag; 3953 3954 ndlp = cmdiocb->ndlp; 3955 3956 ulp_status = get_job_ulpstatus(phba, rspiocb); 3957 ulp_word4 = get_job_word4(phba, rspiocb); 3958 did = get_job_els_rsp64_did(phba, rspiocb); 3959 3960 if (phba->sli_rev == LPFC_SLI_REV4) { 3961 tmo = get_wqe_tmo(rspiocb); 3962 iotag = get_wqe_reqtag(rspiocb); 3963 } else { 3964 irsp_iocb = &rspiocb->iocb; 3965 tmo = irsp_iocb->ulpTimeout; 3966 iotag = irsp_iocb->ulpIoTag; 3967 } 3968 3969 lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD, 3970 "EDC cmpl: status:x%x/x%x did:x%x", 3971 ulp_status, ulp_word4, did); 3972 3973 /* ELS cmd tag <ulpIoTag> completes */ 3974 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3975 "4201 EDC cmd tag x%x completes Data: x%x x%x x%x\n", 3976 iotag, ulp_status, ulp_word4, tmo); 3977 3978 pcmd = cmdiocb->cmd_dmabuf; 3979 if (!pcmd) 3980 goto out; 3981 3982 pdata = (u32 *)pcmd->virt; 3983 if (!pdata) 3984 goto out; 3985 3986 /* Need to clear signal values, send features MB and RDF with FPIN. 
*/ 3987 if (ulp_status) 3988 goto out; 3989 3990 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 3991 if (!prsp) 3992 goto out; 3993 3994 edc_rsp = prsp->virt; 3995 if (!edc_rsp) 3996 goto out; 3997 3998 /* ELS cmd tag <ulpIoTag> completes */ 3999 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 4000 "4676 Fabric EDC Rsp: " 4001 "0x%02x, 0x%08x\n", 4002 edc_rsp->acc_hdr.la_cmd, 4003 be32_to_cpu(edc_rsp->desc_list_len)); 4004 4005 /* 4006 * Payload length in bytes is the response descriptor list 4007 * length minus the 12 bytes of Link Service Request 4008 * Information descriptor in the reply. 4009 */ 4010 bytes_remain = be32_to_cpu(edc_rsp->desc_list_len) - 4011 sizeof(struct fc_els_lsri_desc); 4012 if (bytes_remain <= 0) 4013 goto out; 4014 4015 tlv = edc_rsp->desc; 4016 4017 /* 4018 * cycle through EDC diagnostic descriptors to find the 4019 * congestion signaling capability descriptor 4020 */ 4021 while (bytes_remain) { 4022 if (bytes_remain < FC_TLV_DESC_HDR_SZ) { 4023 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 4024 "6461 Truncated TLV hdr on " 4025 "Diagnostic descriptor[%d]\n", 4026 desc_cnt); 4027 goto out; 4028 } 4029 4030 dtag = be32_to_cpu(tlv->desc_tag); 4031 switch (dtag) { 4032 case ELS_DTAG_LNK_FAULT_CAP: 4033 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 4034 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 4035 sizeof(struct fc_diag_lnkflt_desc)) { 4036 lpfc_printf_log( 4037 phba, KERN_WARNING, LOG_CGN_MGMT, 4038 "6462 Truncated Link Fault Diagnostic " 4039 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 4040 desc_cnt, bytes_remain, 4041 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 4042 sizeof(struct fc_diag_cg_sig_desc)); 4043 goto out; 4044 } 4045 plnkflt = (struct fc_diag_lnkflt_desc *)tlv; 4046 lpfc_printf_log( 4047 phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 4048 "4617 Link Fault Desc Data: 0x%08x 0x%08x " 4049 "0x%08x 0x%08x 0x%08x\n", 4050 be32_to_cpu(plnkflt->desc_tag), 4051 be32_to_cpu(plnkflt->desc_len), 4052 be32_to_cpu( 4053 plnkflt->degrade_activate_threshold), 4054 be32_to_cpu( 4055 plnkflt->degrade_deactivate_threshold), 4056 be32_to_cpu(plnkflt->fec_degrade_interval)); 4057 break; 4058 case ELS_DTAG_CG_SIGNAL_CAP: 4059 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 4060 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 4061 sizeof(struct fc_diag_cg_sig_desc)) { 4062 lpfc_printf_log( 4063 phba, KERN_WARNING, LOG_CGN_MGMT, 4064 "6463 Truncated Cgn Signal Diagnostic " 4065 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 4066 desc_cnt, bytes_remain, 4067 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 4068 sizeof(struct fc_diag_cg_sig_desc)); 4069 goto out; 4070 } 4071 4072 pcgd = (struct fc_diag_cg_sig_desc *)tlv; 4073 lpfc_printf_log( 4074 phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 4075 "4616 CGN Desc Data: 0x%08x 0x%08x " 4076 "0x%08x 0x%04x 0x%04x 0x%08x 0x%04x 0x%04x\n", 4077 be32_to_cpu(pcgd->desc_tag), 4078 be32_to_cpu(pcgd->desc_len), 4079 be32_to_cpu(pcgd->xmt_signal_capability), 4080 be16_to_cpu(pcgd->xmt_signal_frequency.count), 4081 be16_to_cpu(pcgd->xmt_signal_frequency.units), 4082 be32_to_cpu(pcgd->rcv_signal_capability), 4083 be16_to_cpu(pcgd->rcv_signal_frequency.count), 4084 be16_to_cpu(pcgd->rcv_signal_frequency.units)); 4085 4086 /* Compare driver and Fport capabilities and choose 4087 * least common. 
4088 */ 4089 lpfc_least_capable_settings(phba, pcgd); 4090 rcv_cap_desc = true; 4091 break; 4092 default: 4093 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 4094 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 4095 "4919 unknown Diagnostic " 4096 "Descriptor[%d]: tag x%x (%s)\n", 4097 desc_cnt, dtag, dtag_nm); 4098 } 4099 4100 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); 4101 tlv = fc_tlv_next_desc(tlv); 4102 desc_cnt++; 4103 } 4104 4105 out: 4106 if (!rcv_cap_desc) { 4107 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; 4108 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 4109 phba->cgn_sig_freq = 0; 4110 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT, 4111 "4202 EDC rsp error - sending RDF " 4112 "for FPIN only.\n"); 4113 } 4114 4115 lpfc_config_cgn_signal(phba); 4116 4117 /* Check to see if link went down during discovery */ 4118 lpfc_els_chk_latt(phba->pport); 4119 lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD, 4120 "EDC Cmpl: did:x%x refcnt %d", 4121 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 4122 lpfc_els_free_iocb(phba, cmdiocb); 4123 lpfc_nlp_put(ndlp); 4124 } 4125 4126 static void 4127 lpfc_format_edc_cgn_desc(struct lpfc_hba *phba, struct fc_diag_cg_sig_desc *cgd) 4128 { 4129 /* We are assuming cgd was zero'ed before calling this routine */ 4130 4131 /* Configure the congestion detection capability */ 4132 cgd->desc_tag = cpu_to_be32(ELS_DTAG_CG_SIGNAL_CAP); 4133 4134 /* Descriptor len doesn't include the tag or len fields. */ 4135 cgd->desc_len = cpu_to_be32( 4136 FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_diag_cg_sig_desc)); 4137 4138 /* xmt_signal_capability already set to EDC_CG_SIG_NOTSUPPORTED. 4139 * xmt_signal_frequency.count already set to 0. 4140 * xmt_signal_frequency.units already set to 0. 4141 */ 4142 4143 if (phba->cmf_active_mode == LPFC_CFG_OFF) { 4144 /* rcv_signal_capability already set to EDC_CG_SIG_NOTSUPPORTED. 4145 * rcv_signal_frequency.count already set to 0. 4146 * rcv_signal_frequency.units already set to 0. 4147 */ 4148 phba->cgn_sig_freq = 0; 4149 return; 4150 } 4151 switch (phba->cgn_reg_signal) { 4152 case EDC_CG_SIG_WARN_ONLY: 4153 cgd->rcv_signal_capability = cpu_to_be32(EDC_CG_SIG_WARN_ONLY); 4154 break; 4155 case EDC_CG_SIG_WARN_ALARM: 4156 cgd->rcv_signal_capability = cpu_to_be32(EDC_CG_SIG_WARN_ALARM); 4157 break; 4158 default: 4159 /* rcv_signal_capability left 0 thus no support */ 4160 break; 4161 } 4162 4163 /* We start negotiation with lpfc_fabric_cgn_frequency, after 4164 * the completion we settle on the higher frequency. 4165 */ 4166 cgd->rcv_signal_frequency.count = 4167 cpu_to_be16(lpfc_fabric_cgn_frequency); 4168 cgd->rcv_signal_frequency.units = 4169 cpu_to_be16(EDC_CG_SIGFREQ_MSEC); 4170 } 4171 4172 /** 4173 * lpfc_issue_els_edc - Exchange Diagnostic Capabilities with the fabric. 4174 * @vport: pointer to a host virtual N_Port data structure. 4175 * @retry: retry counter for the command iocb. 4176 * 4177 * This routine issues an ELS EDC to the F-Port Controller to communicate 4178 * this N_Port's support of hardware signals in its Congestion 4179 * Capabilities Descriptor. 4180 * 4181 * Note: This routine does not check if one or more signals are 4182 * set in the cgn_reg_signal parameter. The caller makes the 4183 * decision to enforce cgn_reg_signal as nonzero or zero depending 4184 * on the conditions. During Fabric requests, the driver 4185 * requires cgn_reg_signals to be nonzero. 
But a dynamic request 4186 * to set the congestion mode to OFF from Monitor or Manage 4187 * would correctly issue an EDC with no signals enabled to 4188 * turn off switch functionality and then update the FW. 4189 * 4190 * Return code 4191 * 0 - Successfully issued edc command 4192 * 1 - Failed to issue edc command 4193 **/ 4194 int 4195 lpfc_issue_els_edc(struct lpfc_vport *vport, uint8_t retry) 4196 { 4197 struct lpfc_hba *phba = vport->phba; 4198 struct lpfc_iocbq *elsiocb; 4199 struct lpfc_els_edc_req *edc_req; 4200 struct fc_diag_cg_sig_desc *cgn_desc; 4201 u16 cmdsize; 4202 struct lpfc_nodelist *ndlp; 4203 u8 *pcmd = NULL; 4204 u32 edc_req_size, cgn_desc_size; 4205 int rc; 4206 4207 if (vport->port_type == LPFC_NPIV_PORT) 4208 return -EACCES; 4209 4210 ndlp = lpfc_findnode_did(vport, Fabric_DID); 4211 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 4212 return -ENODEV; 4213 4214 /* If HBA doesn't support signals, drop into RDF */ 4215 if (!phba->cgn_init_reg_signal) 4216 goto try_rdf; 4217 4218 edc_req_size = sizeof(struct fc_els_edc); 4219 cgn_desc_size = sizeof(struct fc_diag_cg_sig_desc); 4220 cmdsize = edc_req_size + cgn_desc_size; 4221 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 4222 ndlp->nlp_DID, ELS_CMD_EDC); 4223 if (!elsiocb) 4224 goto try_rdf; 4225 4226 /* Configure the payload for the supported Diagnostics capabilities. */ 4227 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt; 4228 memset(pcmd, 0, cmdsize); 4229 edc_req = (struct lpfc_els_edc_req *)pcmd; 4230 edc_req->edc.desc_len = cpu_to_be32(cgn_desc_size); 4231 edc_req->edc.edc_cmd = ELS_EDC; 4232 4233 cgn_desc = &edc_req->cgn_desc; 4234 4235 lpfc_format_edc_cgn_desc(phba, cgn_desc); 4236 4237 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency; 4238 4239 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 4240 "4623 Xmit EDC to remote " 4241 "NPORT x%x reg_sig x%x reg_fpin:x%x\n", 4242 ndlp->nlp_DID, phba->cgn_reg_signal, 4243 phba->cgn_reg_fpin); 4244 4245 elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd; 4246 elsiocb->ndlp = lpfc_nlp_get(ndlp); 4247 if (!elsiocb->ndlp) { 4248 lpfc_els_free_iocb(phba, elsiocb); 4249 return -EIO; 4250 } 4251 4252 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 4253 "Issue EDC: did:x%x refcnt %d", 4254 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 4255 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 4256 if (rc == IOCB_ERROR) { 4257 /* The additional lpfc_nlp_put will cause the following 4258 * lpfc_els_free_iocb routine to trigger the rlease of 4259 * the node. 4260 */ 4261 lpfc_els_free_iocb(phba, elsiocb); 4262 lpfc_nlp_put(ndlp); 4263 goto try_rdf; 4264 } 4265 return 0; 4266 try_rdf: 4267 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM; 4268 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 4269 rc = lpfc_issue_els_rdf(vport, 0); 4270 return rc; 4271 } 4272 4273 /** 4274 * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry 4275 * @vport: pointer to a host virtual N_Port data structure. 4276 * @nlp: pointer to a node-list data structure. 4277 * 4278 * This routine cancels the timer with a delayed IOCB-command retry for 4279 * a @vport's @ndlp. It stops the timer for the delayed function retrial and 4280 * removes the ELS retry event if it presents. In addition, if the 4281 * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB 4282 * commands are sent for the @vport's nodes that require issuing discovery 4283 * ADISC. 
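 *
 * Illustrative sketch (not part of the driver): canceling a delayed
 * retry safely means clearing the "retry pending" state under the node
 * lock, stopping the timer, and then dropping the reference that was
 * taken when the retry event was queued.  Hypothetical stand-alone
 * shape (struct, flag and helper names are made up; assumes
 * linux/spinlock.h, linux/timer.h and linux/list.h):
 *
 *   #define RETRY_ARMED_SK  0x1
 *
 *   struct node_sk {
 *           spinlock_t lock;
 *           unsigned int flags;
 *           struct timer_list retry_timer;
 *           struct list_head retry_evt;     // queued event holds a node ref
 *   };
 *
 *   void node_put_sk(struct node_sk *n);    // hypothetical reference drop
 *
 *   static void cancel_delayed_retry_sk(struct node_sk *n)
 *   {
 *           spin_lock_irq(&n->lock);
 *           n->flags &= ~RETRY_ARMED_SK;
 *           spin_unlock_irq(&n->lock);
 *
 *           del_timer_sync(&n->retry_timer);
 *
 *           if (!list_empty(&n->retry_evt)) {
 *                   list_del_init(&n->retry_evt);
 *                   node_put_sk(n);         // drop the event's reference
 *           }
 *   }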
4284 **/ 4285 void 4286 lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) 4287 { 4288 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4289 struct lpfc_work_evt *evtp; 4290 4291 if (!(nlp->nlp_flag & NLP_DELAY_TMO)) 4292 return; 4293 spin_lock_irq(&nlp->lock); 4294 nlp->nlp_flag &= ~NLP_DELAY_TMO; 4295 spin_unlock_irq(&nlp->lock); 4296 del_timer_sync(&nlp->nlp_delayfunc); 4297 nlp->nlp_last_elscmd = 0; 4298 if (!list_empty(&nlp->els_retry_evt.evt_listp)) { 4299 list_del_init(&nlp->els_retry_evt.evt_listp); 4300 /* Decrement nlp reference count held for the delayed retry */ 4301 evtp = &nlp->els_retry_evt; 4302 lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1); 4303 } 4304 if (nlp->nlp_flag & NLP_NPR_2B_DISC) { 4305 spin_lock_irq(&nlp->lock); 4306 nlp->nlp_flag &= ~NLP_NPR_2B_DISC; 4307 spin_unlock_irq(&nlp->lock); 4308 if (vport->num_disc_nodes) { 4309 if (vport->port_state < LPFC_VPORT_READY) { 4310 /* Check if there are more ADISCs to be sent */ 4311 lpfc_more_adisc(vport); 4312 } else { 4313 /* Check if there are more PLOGIs to be sent */ 4314 lpfc_more_plogi(vport); 4315 if (vport->num_disc_nodes == 0) { 4316 spin_lock_irq(shost->host_lock); 4317 vport->fc_flag &= ~FC_NDISC_ACTIVE; 4318 spin_unlock_irq(shost->host_lock); 4319 lpfc_can_disctmo(vport); 4320 lpfc_end_rscn(vport); 4321 } 4322 } 4323 } 4324 } 4325 return; 4326 } 4327 4328 /** 4329 * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer 4330 * @t: pointer to the timer function associated data (ndlp). 4331 * 4332 * This routine is invoked by the ndlp delayed-function timer to check 4333 * whether there is any pending ELS retry event(s) with the node. If not, it 4334 * simply returns. Otherwise, if there is at least one ELS delayed event, it 4335 * adds the delayed events to the HBA work list and invokes the 4336 * lpfc_worker_wake_up() routine to wake up worker thread to process the 4337 * event. Note that lpfc_nlp_get() is called before posting the event to 4338 * the work list to hold reference count of ndlp so that it guarantees the 4339 * reference to ndlp will still be available when the worker thread gets 4340 * to the event associated with the ndlp. 4341 **/ 4342 void 4343 lpfc_els_retry_delay(struct timer_list *t) 4344 { 4345 struct lpfc_nodelist *ndlp = from_timer(ndlp, t, nlp_delayfunc); 4346 struct lpfc_vport *vport = ndlp->vport; 4347 struct lpfc_hba *phba = vport->phba; 4348 unsigned long flags; 4349 struct lpfc_work_evt *evtp = &ndlp->els_retry_evt; 4350 4351 spin_lock_irqsave(&phba->hbalock, flags); 4352 if (!list_empty(&evtp->evt_listp)) { 4353 spin_unlock_irqrestore(&phba->hbalock, flags); 4354 return; 4355 } 4356 4357 /* We need to hold the node by incrementing the reference 4358 * count until the queued work is done 4359 */ 4360 evtp->evt_arg1 = lpfc_nlp_get(ndlp); 4361 if (evtp->evt_arg1) { 4362 evtp->evt = LPFC_EVT_ELS_RETRY; 4363 list_add_tail(&evtp->evt_listp, &phba->work_list); 4364 lpfc_worker_wake_up(phba); 4365 } 4366 spin_unlock_irqrestore(&phba->hbalock, flags); 4367 return; 4368 } 4369 4370 /** 4371 * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function 4372 * @ndlp: pointer to a node-list data structure. 4373 * 4374 * This routine is the worker-thread handler for processing the @ndlp delayed 4375 * event(s), posted by the lpfc_els_retry_delay() routine. 
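 *
 * Illustrative sketch (not part of the driver): lpfc_els_retry_delay()
 * above is the usual timer-to-worker handoff - the timer callback only
 * queues an event, taking a reference so the node outlives the queue,
 * and the worker thread (this handler) does the actual ELS retry and
 * drops that reference.  Hypothetical shape:
 *
 *   struct node_sk;
 *   struct node_sk *node_get_sk(struct node_sk *n);   // NULL if going away
 *   void node_put_sk(struct node_sk *n);
 *   int queue_retry_event_sk(struct node_sk *n);       // 0 on success
 *
 *   static void retry_timer_fired_sk(struct node_sk *n)
 *   {
 *           if (!node_get_sk(n))            // node already being removed
 *                   return;
 *           if (queue_retry_event_sk(n))    // could not queue the event
 *                   node_put_sk(n);         // drop the ref ourselves
 *           // otherwise the worker drops it after retrying the ELS cmd
 *   }
 *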
It simply retrieves 4376 * the last ELS command from the associated ndlp and invokes the proper ELS 4377 * function according to the delayed ELS command to retry the command. 4378 **/ 4379 void 4380 lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) 4381 { 4382 struct lpfc_vport *vport = ndlp->vport; 4383 uint32_t cmd, retry; 4384 4385 spin_lock_irq(&ndlp->lock); 4386 cmd = ndlp->nlp_last_elscmd; 4387 ndlp->nlp_last_elscmd = 0; 4388 4389 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { 4390 spin_unlock_irq(&ndlp->lock); 4391 return; 4392 } 4393 4394 ndlp->nlp_flag &= ~NLP_DELAY_TMO; 4395 spin_unlock_irq(&ndlp->lock); 4396 /* 4397 * If a discovery event readded nlp_delayfunc after timer 4398 * firing and before processing the timer, cancel the 4399 * nlp_delayfunc. 4400 */ 4401 del_timer_sync(&ndlp->nlp_delayfunc); 4402 retry = ndlp->nlp_retry; 4403 ndlp->nlp_retry = 0; 4404 4405 switch (cmd) { 4406 case ELS_CMD_FLOGI: 4407 lpfc_issue_els_flogi(vport, ndlp, retry); 4408 break; 4409 case ELS_CMD_PLOGI: 4410 if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) { 4411 ndlp->nlp_prev_state = ndlp->nlp_state; 4412 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 4413 } 4414 break; 4415 case ELS_CMD_ADISC: 4416 if (!lpfc_issue_els_adisc(vport, ndlp, retry)) { 4417 ndlp->nlp_prev_state = ndlp->nlp_state; 4418 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 4419 } 4420 break; 4421 case ELS_CMD_PRLI: 4422 case ELS_CMD_NVMEPRLI: 4423 if (!lpfc_issue_els_prli(vport, ndlp, retry)) { 4424 ndlp->nlp_prev_state = ndlp->nlp_state; 4425 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 4426 } 4427 break; 4428 case ELS_CMD_LOGO: 4429 if (!lpfc_issue_els_logo(vport, ndlp, retry)) { 4430 ndlp->nlp_prev_state = ndlp->nlp_state; 4431 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 4432 } 4433 break; 4434 case ELS_CMD_FDISC: 4435 if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)) 4436 lpfc_issue_els_fdisc(vport, ndlp, retry); 4437 break; 4438 } 4439 return; 4440 } 4441 4442 /** 4443 * lpfc_link_reset - Issue link reset 4444 * @vport: pointer to a virtual N_Port data structure. 4445 * 4446 * This routine performs link reset by sending INIT_LINK mailbox command. 4447 * For SLI-3 adapter, link attention interrupt is enabled before issuing 4448 * INIT_LINK mailbox command. 
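 *
 * Illustrative sketch (not part of the driver): the SLI-3 branch below
 * re-enables link attention with a classic MMIO read-modify-write,
 * followed by a read back so the posted write is flushed before the
 * mailbox is issued.  Reduced to a helper (hypothetical name):
 *
 *   static inline void mmio_set_bits_sk(void __iomem *reg, u32 bits)
 *   {
 *           u32 val = readl(reg);
 *
 *           writel(val | bits, reg);
 *           readl(reg);                     // flush the posted write
 *   }
 *
 * The HC register update in lpfc_link_reset() is exactly this pattern,
 * performed under the hba lock with HC_LAINT_ENA as the bit.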
4449 * 4450 * Return code 4451 * 0 - Link reset initiated successfully 4452 * 1 - Failed to initiate link reset 4453 **/ 4454 int 4455 lpfc_link_reset(struct lpfc_vport *vport) 4456 { 4457 struct lpfc_hba *phba = vport->phba; 4458 LPFC_MBOXQ_t *mbox; 4459 uint32_t control; 4460 int rc; 4461 4462 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 4463 "2851 Attempt link reset\n"); 4464 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4465 if (!mbox) { 4466 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4467 "2852 Failed to allocate mbox memory"); 4468 return 1; 4469 } 4470 4471 /* Enable Link attention interrupts */ 4472 if (phba->sli_rev <= LPFC_SLI_REV3) { 4473 spin_lock_irq(&phba->hbalock); 4474 phba->sli.sli_flag |= LPFC_PROCESS_LA; 4475 control = readl(phba->HCregaddr); 4476 control |= HC_LAINT_ENA; 4477 writel(control, phba->HCregaddr); 4478 readl(phba->HCregaddr); /* flush */ 4479 spin_unlock_irq(&phba->hbalock); 4480 } 4481 4482 lpfc_init_link(phba, mbox, phba->cfg_topology, 4483 phba->cfg_link_speed); 4484 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4485 mbox->vport = vport; 4486 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 4487 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 4488 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4489 "2853 Failed to issue INIT_LINK " 4490 "mbox command, rc:x%x\n", rc); 4491 mempool_free(mbox, phba->mbox_mem_pool); 4492 return 1; 4493 } 4494 4495 return 0; 4496 } 4497 4498 /** 4499 * lpfc_els_retry - Make retry decision on an els command iocb 4500 * @phba: pointer to lpfc hba data structure. 4501 * @cmdiocb: pointer to lpfc command iocb data structure. 4502 * @rspiocb: pointer to lpfc response iocb data structure. 4503 * 4504 * This routine makes a retry decision on an ELS command IOCB, which has 4505 * failed. The following ELS IOCBs use this function for retrying the command 4506 * when previously issued command responsed with error status: FLOGI, PLOGI, 4507 * PRLI, ADISC and FDISC. Based on the ELS command type and the 4508 * returned error status, it makes the decision whether a retry shall be 4509 * issued for the command, and whether a retry shall be made immediately or 4510 * delayed. In the former case, the corresponding ELS command issuing-function 4511 * is called to retry the command. In the later case, the ELS command shall 4512 * be posted to the ndlp delayed event and delayed function timer set to the 4513 * ndlp for the delayed command issusing. 4514 * 4515 * Return code 4516 * 0 - No retry of els command is made 4517 * 1 - Immediate or delayed retry of els command is made 4518 **/ 4519 static int 4520 lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 4521 struct lpfc_iocbq *rspiocb) 4522 { 4523 struct lpfc_vport *vport = cmdiocb->vport; 4524 union lpfc_wqe128 *irsp = &rspiocb->wqe; 4525 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 4526 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf; 4527 uint32_t *elscmd; 4528 struct ls_rjt stat; 4529 int retry = 0, maxretry = lpfc_max_els_tries, delay = 0; 4530 int logerr = 0; 4531 uint32_t cmd = 0; 4532 uint32_t did; 4533 int link_reset = 0, rc; 4534 u32 ulp_status = get_job_ulpstatus(phba, rspiocb); 4535 u32 ulp_word4 = get_job_word4(phba, rspiocb); 4536 4537 4538 /* Note: cmd_dmabuf may be 0 for internal driver abort 4539 * of delays ELS command. 
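 *
 * Illustrative sketch (not part of the driver): the large switch below
 * is, in effect, a per-error retry policy.  A condensed, hypothetical
 * table form of the IOSTAT_LOCAL_REJECT cases (delays are the values the
 * code eventually applies; several only kick in after a few attempts):
 *
 *   struct els_retry_policy_sk {
 *           int max_tries;                  // 0 means "no specific cap"
 *           int delay_ms;
 *   };
 *
 *   static const struct els_retry_policy_sk local_rej_policy_sk[] = {
 *           {   8, 1000 },                  // illegal command
 *           { 250,  100 },                  // HBA out of resources
 *           {   0,  100 },                  // illegal frame
 *           {   0,  100 },                  // sequence timeout
 *   };
 *
 * The real code also layers on command-specific rules, e.g. FLOGI retries
 * indefinitely outside loopback and FDISC retries once a second up to the
 * devloss timeout.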
4540 */ 4541 4542 if (pcmd && pcmd->virt) { 4543 elscmd = (uint32_t *) (pcmd->virt); 4544 cmd = *elscmd++; 4545 } 4546 4547 if (ndlp) 4548 did = ndlp->nlp_DID; 4549 else { 4550 /* We should only hit this case for retrying PLOGI */ 4551 did = get_job_els_rsp64_did(phba, rspiocb); 4552 ndlp = lpfc_findnode_did(vport, did); 4553 if (!ndlp && (cmd != ELS_CMD_PLOGI)) 4554 return 0; 4555 } 4556 4557 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 4558 "Retry ELS: wd7:x%x wd4:x%x did:x%x", 4559 *(((uint32_t *)irsp) + 7), ulp_word4, did); 4560 4561 switch (ulp_status) { 4562 case IOSTAT_FCP_RSP_ERROR: 4563 break; 4564 case IOSTAT_REMOTE_STOP: 4565 if (phba->sli_rev == LPFC_SLI_REV4) { 4566 /* This IO was aborted by the target, we don't 4567 * know the rxid and because we did not send the 4568 * ABTS we cannot generate and RRQ. 4569 */ 4570 lpfc_set_rrq_active(phba, ndlp, 4571 cmdiocb->sli4_lxritag, 0, 0); 4572 } 4573 break; 4574 case IOSTAT_LOCAL_REJECT: 4575 switch ((ulp_word4 & IOERR_PARAM_MASK)) { 4576 case IOERR_LOOP_OPEN_FAILURE: 4577 if (cmd == ELS_CMD_FLOGI) { 4578 if (PCI_DEVICE_ID_HORNET == 4579 phba->pcidev->device) { 4580 phba->fc_topology = LPFC_TOPOLOGY_LOOP; 4581 phba->pport->fc_myDID = 0; 4582 phba->alpa_map[0] = 0; 4583 phba->alpa_map[1] = 0; 4584 } 4585 } 4586 if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0) 4587 delay = 1000; 4588 retry = 1; 4589 break; 4590 4591 case IOERR_ILLEGAL_COMMAND: 4592 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4593 "0124 Retry illegal cmd x%x " 4594 "retry:x%x delay:x%x\n", 4595 cmd, cmdiocb->retry, delay); 4596 retry = 1; 4597 /* All command's retry policy */ 4598 maxretry = 8; 4599 if (cmdiocb->retry > 2) 4600 delay = 1000; 4601 break; 4602 4603 case IOERR_NO_RESOURCES: 4604 logerr = 1; /* HBA out of resources */ 4605 retry = 1; 4606 if (cmdiocb->retry > 100) 4607 delay = 100; 4608 maxretry = 250; 4609 break; 4610 4611 case IOERR_ILLEGAL_FRAME: 4612 delay = 100; 4613 retry = 1; 4614 break; 4615 4616 case IOERR_INVALID_RPI: 4617 if (cmd == ELS_CMD_PLOGI && 4618 did == NameServer_DID) { 4619 /* Continue forever if plogi to */ 4620 /* the nameserver fails */ 4621 maxretry = 0; 4622 delay = 100; 4623 } 4624 retry = 1; 4625 break; 4626 4627 case IOERR_SEQUENCE_TIMEOUT: 4628 if (cmd == ELS_CMD_PLOGI && 4629 did == NameServer_DID && 4630 (cmdiocb->retry + 1) == maxretry) { 4631 /* Reset the Link */ 4632 link_reset = 1; 4633 break; 4634 } 4635 retry = 1; 4636 delay = 100; 4637 break; 4638 case IOERR_SLI_ABORTED: 4639 /* Retry ELS PLOGI command? 4640 * Possibly the rport just wasn't ready. 
4641 */ 4642 if (cmd == ELS_CMD_PLOGI) { 4643 /* No retry if state change */ 4644 if (ndlp && 4645 ndlp->nlp_state != NLP_STE_PLOGI_ISSUE) 4646 goto out_retry; 4647 retry = 1; 4648 maxretry = 2; 4649 } 4650 break; 4651 } 4652 break; 4653 4654 case IOSTAT_NPORT_RJT: 4655 case IOSTAT_FABRIC_RJT: 4656 if (ulp_word4 & RJT_UNAVAIL_TEMP) { 4657 retry = 1; 4658 break; 4659 } 4660 break; 4661 4662 case IOSTAT_NPORT_BSY: 4663 case IOSTAT_FABRIC_BSY: 4664 logerr = 1; /* Fabric / Remote NPort out of resources */ 4665 retry = 1; 4666 break; 4667 4668 case IOSTAT_LS_RJT: 4669 stat.un.ls_rjt_error_be = cpu_to_be32(ulp_word4); 4670 /* Added for Vendor specifc support 4671 * Just keep retrying for these Rsn / Exp codes 4672 */ 4673 if ((vport->fc_flag & FC_PT2PT) && 4674 cmd == ELS_CMD_NVMEPRLI) { 4675 switch (stat.un.b.lsRjtRsnCode) { 4676 case LSRJT_UNABLE_TPC: 4677 case LSRJT_INVALID_CMD: 4678 case LSRJT_LOGICAL_ERR: 4679 case LSRJT_CMD_UNSUPPORTED: 4680 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 4681 "0168 NVME PRLI LS_RJT " 4682 "reason %x port doesn't " 4683 "support NVME, disabling NVME\n", 4684 stat.un.b.lsRjtRsnCode); 4685 retry = 0; 4686 vport->fc_flag |= FC_PT2PT_NO_NVME; 4687 goto out_retry; 4688 } 4689 } 4690 switch (stat.un.b.lsRjtRsnCode) { 4691 case LSRJT_UNABLE_TPC: 4692 /* The driver has a VALID PLOGI but the rport has 4693 * rejected the PRLI - can't do it now. Delay 4694 * for 1 second and try again. 4695 * 4696 * However, if explanation is REQ_UNSUPPORTED there's 4697 * no point to retry PRLI. 4698 */ 4699 if ((cmd == ELS_CMD_PRLI || cmd == ELS_CMD_NVMEPRLI) && 4700 stat.un.b.lsRjtRsnCodeExp != 4701 LSEXP_REQ_UNSUPPORTED) { 4702 delay = 1000; 4703 maxretry = lpfc_max_els_tries + 1; 4704 retry = 1; 4705 break; 4706 } 4707 4708 /* Legacy bug fix code for targets with PLOGI delays. */ 4709 if (stat.un.b.lsRjtRsnCodeExp == 4710 LSEXP_CMD_IN_PROGRESS) { 4711 if (cmd == ELS_CMD_PLOGI) { 4712 delay = 1000; 4713 maxretry = 48; 4714 } 4715 retry = 1; 4716 break; 4717 } 4718 if (stat.un.b.lsRjtRsnCodeExp == 4719 LSEXP_CANT_GIVE_DATA) { 4720 if (cmd == ELS_CMD_PLOGI) { 4721 delay = 1000; 4722 maxretry = 48; 4723 } 4724 retry = 1; 4725 break; 4726 } 4727 if (cmd == ELS_CMD_PLOGI) { 4728 delay = 1000; 4729 maxretry = lpfc_max_els_tries + 1; 4730 retry = 1; 4731 break; 4732 } 4733 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 4734 (cmd == ELS_CMD_FDISC) && 4735 (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){ 4736 lpfc_printf_vlog(vport, KERN_ERR, 4737 LOG_TRACE_EVENT, 4738 "0125 FDISC Failed (x%x). " 4739 "Fabric out of resources\n", 4740 stat.un.lsRjtError); 4741 lpfc_vport_set_state(vport, 4742 FC_VPORT_NO_FABRIC_RSCS); 4743 } 4744 break; 4745 4746 case LSRJT_LOGICAL_BSY: 4747 if ((cmd == ELS_CMD_PLOGI) || 4748 (cmd == ELS_CMD_PRLI) || 4749 (cmd == ELS_CMD_NVMEPRLI)) { 4750 delay = 1000; 4751 maxretry = 48; 4752 } else if (cmd == ELS_CMD_FDISC) { 4753 /* FDISC retry policy */ 4754 maxretry = 48; 4755 if (cmdiocb->retry >= 32) 4756 delay = 1000; 4757 } 4758 retry = 1; 4759 break; 4760 4761 case LSRJT_LOGICAL_ERR: 4762 /* There are some cases where switches return this 4763 * error when they are not ready and should be returning 4764 * Logical Busy. We should delay every time. 
4765 */ 4766 if (cmd == ELS_CMD_FDISC && 4767 stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) { 4768 maxretry = 3; 4769 delay = 1000; 4770 retry = 1; 4771 } else if (cmd == ELS_CMD_FLOGI && 4772 stat.un.b.lsRjtRsnCodeExp == 4773 LSEXP_NOTHING_MORE) { 4774 vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf; 4775 retry = 1; 4776 lpfc_printf_vlog(vport, KERN_ERR, 4777 LOG_TRACE_EVENT, 4778 "0820 FLOGI Failed (x%x). " 4779 "BBCredit Not Supported\n", 4780 stat.un.lsRjtError); 4781 } 4782 break; 4783 4784 case LSRJT_PROTOCOL_ERR: 4785 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 4786 (cmd == ELS_CMD_FDISC) && 4787 ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) || 4788 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID)) 4789 ) { 4790 lpfc_printf_vlog(vport, KERN_ERR, 4791 LOG_TRACE_EVENT, 4792 "0122 FDISC Failed (x%x). " 4793 "Fabric Detected Bad WWN\n", 4794 stat.un.lsRjtError); 4795 lpfc_vport_set_state(vport, 4796 FC_VPORT_FABRIC_REJ_WWN); 4797 } 4798 break; 4799 case LSRJT_VENDOR_UNIQUE: 4800 if ((stat.un.b.vendorUnique == 0x45) && 4801 (cmd == ELS_CMD_FLOGI)) { 4802 goto out_retry; 4803 } 4804 break; 4805 case LSRJT_CMD_UNSUPPORTED: 4806 /* lpfc nvmet returns this type of LS_RJT when it 4807 * receives an FCP PRLI because lpfc nvmet only 4808 * support NVME. ELS request is terminated for FCP4 4809 * on this rport. 4810 */ 4811 if (stat.un.b.lsRjtRsnCodeExp == 4812 LSEXP_REQ_UNSUPPORTED) { 4813 if (cmd == ELS_CMD_PRLI) { 4814 spin_lock_irq(&ndlp->lock); 4815 ndlp->nlp_flag |= NLP_FCP_PRLI_RJT; 4816 spin_unlock_irq(&ndlp->lock); 4817 retry = 0; 4818 goto out_retry; 4819 } 4820 } 4821 break; 4822 } 4823 break; 4824 4825 case IOSTAT_INTERMED_RSP: 4826 case IOSTAT_BA_RJT: 4827 break; 4828 4829 default: 4830 break; 4831 } 4832 4833 if (link_reset) { 4834 rc = lpfc_link_reset(vport); 4835 if (rc) { 4836 /* Do not give up. Retry PLOGI one more time and attempt 4837 * link reset if PLOGI fails again. 
4838 */ 4839 retry = 1; 4840 delay = 100; 4841 goto out_retry; 4842 } 4843 return 1; 4844 } 4845 4846 if (did == FDMI_DID) 4847 retry = 1; 4848 4849 if ((cmd == ELS_CMD_FLOGI) && 4850 (phba->fc_topology != LPFC_TOPOLOGY_LOOP) && 4851 !lpfc_error_lost_link(ulp_status, ulp_word4)) { 4852 /* FLOGI retry policy */ 4853 retry = 1; 4854 /* retry FLOGI forever */ 4855 if (phba->link_flag != LS_LOOPBACK_MODE) 4856 maxretry = 0; 4857 else 4858 maxretry = 2; 4859 4860 if (cmdiocb->retry >= 100) 4861 delay = 5000; 4862 else if (cmdiocb->retry >= 32) 4863 delay = 1000; 4864 } else if ((cmd == ELS_CMD_FDISC) && 4865 !lpfc_error_lost_link(ulp_status, ulp_word4)) { 4866 /* retry FDISCs every second up to devloss */ 4867 retry = 1; 4868 maxretry = vport->cfg_devloss_tmo; 4869 delay = 1000; 4870 } 4871 4872 cmdiocb->retry++; 4873 if (maxretry && (cmdiocb->retry >= maxretry)) { 4874 phba->fc_stat.elsRetryExceeded++; 4875 retry = 0; 4876 } 4877 4878 if ((vport->load_flag & FC_UNLOADING) != 0) 4879 retry = 0; 4880 4881 out_retry: 4882 if (retry) { 4883 if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) { 4884 /* Stop retrying PLOGI and FDISC if in FCF discovery */ 4885 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 4886 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4887 "2849 Stop retry ELS command " 4888 "x%x to remote NPORT x%x, " 4889 "Data: x%x x%x\n", cmd, did, 4890 cmdiocb->retry, delay); 4891 return 0; 4892 } 4893 } 4894 4895 /* Retry ELS command <elsCmd> to remote NPORT <did> */ 4896 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4897 "0107 Retry ELS command x%x to remote " 4898 "NPORT x%x Data: x%x x%x\n", 4899 cmd, did, cmdiocb->retry, delay); 4900 4901 if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) && 4902 ((ulp_status != IOSTAT_LOCAL_REJECT) || 4903 ((ulp_word4 & IOERR_PARAM_MASK) != 4904 IOERR_NO_RESOURCES))) { 4905 /* Don't reset timer for no resources */ 4906 4907 /* If discovery / RSCN timer is running, reset it */ 4908 if (timer_pending(&vport->fc_disctmo) || 4909 (vport->fc_flag & FC_RSCN_MODE)) 4910 lpfc_set_disctmo(vport); 4911 } 4912 4913 phba->fc_stat.elsXmitRetry++; 4914 if (ndlp && delay) { 4915 phba->fc_stat.elsDelayRetry++; 4916 ndlp->nlp_retry = cmdiocb->retry; 4917 4918 /* delay is specified in milliseconds */ 4919 mod_timer(&ndlp->nlp_delayfunc, 4920 jiffies + msecs_to_jiffies(delay)); 4921 spin_lock_irq(&ndlp->lock); 4922 ndlp->nlp_flag |= NLP_DELAY_TMO; 4923 spin_unlock_irq(&ndlp->lock); 4924 4925 ndlp->nlp_prev_state = ndlp->nlp_state; 4926 if ((cmd == ELS_CMD_PRLI) || 4927 (cmd == ELS_CMD_NVMEPRLI)) 4928 lpfc_nlp_set_state(vport, ndlp, 4929 NLP_STE_PRLI_ISSUE); 4930 else if (cmd != ELS_CMD_ADISC) 4931 lpfc_nlp_set_state(vport, ndlp, 4932 NLP_STE_NPR_NODE); 4933 ndlp->nlp_last_elscmd = cmd; 4934 4935 return 1; 4936 } 4937 switch (cmd) { 4938 case ELS_CMD_FLOGI: 4939 lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry); 4940 return 1; 4941 case ELS_CMD_FDISC: 4942 lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry); 4943 return 1; 4944 case ELS_CMD_PLOGI: 4945 if (ndlp) { 4946 ndlp->nlp_prev_state = ndlp->nlp_state; 4947 lpfc_nlp_set_state(vport, ndlp, 4948 NLP_STE_PLOGI_ISSUE); 4949 } 4950 lpfc_issue_els_plogi(vport, did, cmdiocb->retry); 4951 return 1; 4952 case ELS_CMD_ADISC: 4953 ndlp->nlp_prev_state = ndlp->nlp_state; 4954 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 4955 lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry); 4956 return 1; 4957 case ELS_CMD_PRLI: 4958 case ELS_CMD_NVMEPRLI: 4959 ndlp->nlp_prev_state = ndlp->nlp_state; 4960 lpfc_nlp_set_state(vport, ndlp, 
NLP_STE_PRLI_ISSUE); 4961 lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry); 4962 return 1; 4963 case ELS_CMD_LOGO: 4964 ndlp->nlp_prev_state = ndlp->nlp_state; 4965 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 4966 lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry); 4967 return 1; 4968 } 4969 } 4970 /* No retry ELS command <elsCmd> to remote NPORT <did> */ 4971 if (logerr) { 4972 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4973 "0137 No retry ELS command x%x to remote " 4974 "NPORT x%x: Out of Resources: Error:x%x/%x\n", 4975 cmd, did, ulp_status, 4976 ulp_word4); 4977 } 4978 else { 4979 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4980 "0108 No retry ELS command x%x to remote " 4981 "NPORT x%x Retried:%d Error:x%x/%x\n", 4982 cmd, did, cmdiocb->retry, ulp_status, 4983 ulp_word4); 4984 } 4985 return 0; 4986 } 4987 4988 /** 4989 * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb 4990 * @phba: pointer to lpfc hba data structure. 4991 * @buf_ptr1: pointer to the lpfc DMA buffer data structure. 4992 * 4993 * This routine releases the lpfc DMA (Direct Memory Access) buffer(s) 4994 * associated with a command IOCB back to the lpfc DMA buffer pool. It first 4995 * checks to see whether there is a lpfc DMA buffer associated with the 4996 * response of the command IOCB. If so, it will be released before releasing 4997 * the lpfc DMA buffer associated with the IOCB itself. 4998 * 4999 * Return code 5000 * 0 - Successfully released lpfc DMA buffer (currently, always return 0) 5001 **/ 5002 static int 5003 lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1) 5004 { 5005 struct lpfc_dmabuf *buf_ptr; 5006 5007 /* Free the response before processing the command. */ 5008 if (!list_empty(&buf_ptr1->list)) { 5009 list_remove_head(&buf_ptr1->list, buf_ptr, 5010 struct lpfc_dmabuf, 5011 list); 5012 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 5013 kfree(buf_ptr); 5014 } 5015 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys); 5016 kfree(buf_ptr1); 5017 return 0; 5018 } 5019 5020 /** 5021 * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl 5022 * @phba: pointer to lpfc hba data structure. 5023 * @buf_ptr: pointer to the lpfc dma buffer data structure. 5024 * 5025 * This routine releases the lpfc Direct Memory Access (DMA) buffer 5026 * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer 5027 * pool. 5028 * 5029 * Return code 5030 * 0 - Successfully released lpfc DMA buffer (currently, always return 0) 5031 **/ 5032 static int 5033 lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr) 5034 { 5035 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 5036 kfree(buf_ptr); 5037 return 0; 5038 } 5039 5040 /** 5041 * lpfc_els_free_iocb - Free a command iocb and its associated resources 5042 * @phba: pointer to lpfc hba data structure. 5043 * @elsiocb: pointer to lpfc els command iocb data structure. 5044 * 5045 * This routine frees a command IOCB and its associated resources. The 5046 * command IOCB data structure contains the reference to various associated 5047 * resources, these fields must be set to NULL if the associated reference 5048 * not present: 5049 * cmd_dmabuf - reference to cmd. 5050 * cmd_dmabuf->next - reference to rsp 5051 * rsp_dmabuf - unused 5052 * bpl_dmabuf - reference to bpl 5053 * 5054 * It first properly decrements the reference count held on ndlp for the 5055 * IOCB completion callback function. 
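 *
 * Illustrative sketch (not part of the driver): lpfc_els_free_data()
 * above releases a two-buffer chain - the response dmabuf is linked on
 * the command dmabuf's list and is freed first, then the command buffer
 * itself.  Hypothetical stand-alone shape (the real code returns the
 * memory to the lpfc mbuf pool):
 *
 *   struct dmabuf_sk {
 *           struct list_head list;          // rsp hangs off cmd->list
 *           void *virt;
 *           dma_addr_t phys;
 *   };
 *
 *   void dmabuf_release_sk(struct dmabuf_sk *b);   // pool free + kfree
 *
 *   static void free_cmd_rsp_pair_sk(struct dmabuf_sk *cmd)
 *   {
 *           if (!list_empty(&cmd->list)) {
 *                   struct dmabuf_sk *rsp =
 *                           list_first_entry(&cmd->list, struct dmabuf_sk,
 *                                            list);
 *
 *                   list_del(&rsp->list);
 *                   dmabuf_release_sk(rsp);
 *           }
 *           dmabuf_release_sk(cmd);
 *   }
 *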
If LPFC_DELAY_MEM_FREE flag is not 5056 * set, it invokes the lpfc_els_free_data() routine to release the Direct 5057 * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it 5058 * adds the DMA buffer the @phba data structure for the delayed release. 5059 * If reference to the Buffer Pointer List (BPL) is present, the 5060 * lpfc_els_free_bpl() routine is invoked to release the DMA memory 5061 * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is 5062 * invoked to release the IOCB data structure back to @phba IOCBQ list. 5063 * 5064 * Return code 5065 * 0 - Success (currently, always return 0) 5066 **/ 5067 int 5068 lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb) 5069 { 5070 struct lpfc_dmabuf *buf_ptr, *buf_ptr1; 5071 5072 /* The I/O iocb is complete. Clear the node and first dmbuf */ 5073 elsiocb->ndlp = NULL; 5074 5075 /* cmd_dmabuf = cmd, cmd_dmabuf->next = rsp, bpl_dmabuf = bpl */ 5076 if (elsiocb->cmd_dmabuf) { 5077 if (elsiocb->cmd_flag & LPFC_DELAY_MEM_FREE) { 5078 /* Firmware could still be in progress of DMAing 5079 * payload, so don't free data buffer till after 5080 * a hbeat. 5081 */ 5082 elsiocb->cmd_flag &= ~LPFC_DELAY_MEM_FREE; 5083 buf_ptr = elsiocb->cmd_dmabuf; 5084 elsiocb->cmd_dmabuf = NULL; 5085 if (buf_ptr) { 5086 buf_ptr1 = NULL; 5087 spin_lock_irq(&phba->hbalock); 5088 if (!list_empty(&buf_ptr->list)) { 5089 list_remove_head(&buf_ptr->list, 5090 buf_ptr1, struct lpfc_dmabuf, 5091 list); 5092 INIT_LIST_HEAD(&buf_ptr1->list); 5093 list_add_tail(&buf_ptr1->list, 5094 &phba->elsbuf); 5095 phba->elsbuf_cnt++; 5096 } 5097 INIT_LIST_HEAD(&buf_ptr->list); 5098 list_add_tail(&buf_ptr->list, &phba->elsbuf); 5099 phba->elsbuf_cnt++; 5100 spin_unlock_irq(&phba->hbalock); 5101 } 5102 } else { 5103 buf_ptr1 = elsiocb->cmd_dmabuf; 5104 lpfc_els_free_data(phba, buf_ptr1); 5105 elsiocb->cmd_dmabuf = NULL; 5106 } 5107 } 5108 5109 if (elsiocb->bpl_dmabuf) { 5110 buf_ptr = elsiocb->bpl_dmabuf; 5111 lpfc_els_free_bpl(phba, buf_ptr); 5112 elsiocb->bpl_dmabuf = NULL; 5113 } 5114 lpfc_sli_release_iocbq(phba, elsiocb); 5115 return 0; 5116 } 5117 5118 /** 5119 * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response 5120 * @phba: pointer to lpfc hba data structure. 5121 * @cmdiocb: pointer to lpfc command iocb data structure. 5122 * @rspiocb: pointer to lpfc response iocb data structure. 5123 * 5124 * This routine is the completion callback function to the Logout (LOGO) 5125 * Accept (ACC) Response ELS command. This routine is invoked to indicate 5126 * the completion of the LOGO process. It invokes the lpfc_nlp_not_used() to 5127 * release the ndlp if it has the last reference remaining (reference count 5128 * is 1). If succeeded (meaning ndlp released), it sets the iocb ndlp 5129 * field to NULL to inform the following lpfc_els_free_iocb() routine no 5130 * ndlp reference count needs to be decremented. Otherwise, the ndlp 5131 * reference use-count shall be decremented by the lpfc_els_free_iocb() 5132 * routine. Finally, the lpfc_els_free_iocb() is invoked to release the 5133 * IOCB data structure. 
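 *
 * Illustrative sketch (not part of the driver): the LPFC_DELAY_MEM_FREE
 * handling in lpfc_els_free_iocb() above is a deferred-free pattern -
 * a buffer the hardware may still be DMAing into is not freed, it is
 * parked on a driver-private list and reclaimed later from a safe
 * context (after a heartbeat).  Hypothetical shape; the caller holds the
 * lock that protects the parked list:
 *
 *   static void park_dmabuf_sk(struct list_head *parked,
 *                              struct list_head *buf, unsigned int *count)
 *   {
 *           list_del_init(buf);             // detach from any old list
 *           list_add_tail(buf, parked);     // reclaim on a later pass
 *           (*count)++;
 *   }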
5134 **/ 5135 static void 5136 lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 5137 struct lpfc_iocbq *rspiocb) 5138 { 5139 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 5140 struct lpfc_vport *vport = cmdiocb->vport; 5141 u32 ulp_status, ulp_word4; 5142 5143 ulp_status = get_job_ulpstatus(phba, rspiocb); 5144 ulp_word4 = get_job_word4(phba, rspiocb); 5145 5146 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5147 "ACC LOGO cmpl: status:x%x/x%x did:x%x", 5148 ulp_status, ulp_word4, ndlp->nlp_DID); 5149 /* ACC to LOGO completes to NPort <nlp_DID> */ 5150 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5151 "0109 ACC to LOGO completes to NPort x%x refcnt %d " 5152 "Data: x%x x%x x%x\n", 5153 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag, 5154 ndlp->nlp_state, ndlp->nlp_rpi); 5155 5156 /* This clause allows the LOGO ACC to complete and free resources 5157 * for the Fabric Domain Controller. It does deliberately skip 5158 * the unreg_rpi and release rpi because some fabrics send RDP 5159 * requests after logging out from the initiator. 5160 */ 5161 if (ndlp->nlp_type & NLP_FABRIC && 5162 ((ndlp->nlp_DID & WELL_KNOWN_DID_MASK) != WELL_KNOWN_DID_MASK)) 5163 goto out; 5164 5165 if (ndlp->nlp_state == NLP_STE_NPR_NODE) { 5166 /* If PLOGI is being retried, PLOGI completion will cleanup the 5167 * node. The NLP_NPR_2B_DISC flag needs to be retained to make 5168 * progress on nodes discovered from last RSCN. 5169 */ 5170 if ((ndlp->nlp_flag & NLP_DELAY_TMO) && 5171 (ndlp->nlp_last_elscmd == ELS_CMD_PLOGI)) 5172 goto out; 5173 5174 /* NPort Recovery mode or node is just allocated */ 5175 if (!lpfc_nlp_not_used(ndlp)) { 5176 /* A LOGO is completing and the node is in NPR state. 5177 * Just unregister the RPI because the node is still 5178 * required. 5179 */ 5180 lpfc_unreg_rpi(vport, ndlp); 5181 } else { 5182 /* Indicate the node has already released, should 5183 * not reference to it from within lpfc_els_free_iocb. 5184 */ 5185 cmdiocb->ndlp = NULL; 5186 } 5187 } 5188 out: 5189 /* 5190 * The driver received a LOGO from the rport and has ACK'd it. 5191 * At this point, the driver is done so release the IOCB 5192 */ 5193 lpfc_els_free_iocb(phba, cmdiocb); 5194 lpfc_nlp_put(ndlp); 5195 } 5196 5197 /** 5198 * lpfc_mbx_cmpl_dflt_rpi - Completion callbk func for unreg dflt rpi mbox cmd 5199 * @phba: pointer to lpfc hba data structure. 5200 * @pmb: pointer to the driver internal queue element for mailbox command. 5201 * 5202 * This routine is the completion callback function for unregister default 5203 * RPI (Remote Port Index) mailbox command to the @phba. It simply releases 5204 * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and 5205 * decrements the ndlp reference count held for this completion callback 5206 * function. After that, it invokes the lpfc_nlp_not_used() to check 5207 * whether there is only one reference left on the ndlp. If so, it will 5208 * perform one more decrement and trigger the release of the ndlp. 
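 *
 * In the current implementation the final node references are dropped with
 * lpfc_nlp_put() and lpfc_drop_node() once NLP_REG_LOGIN_SEND (and, for an
 * MBX_UNREG_LOGIN completion, NLP_UNREG_INP) has been cleared under the
 * node lock, and the mailbox resources are then returned with
 * lpfc_mbox_rsrc_cleanup().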
5209 **/ 5210 void 5211 lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 5212 { 5213 struct lpfc_nodelist *ndlp = pmb->ctx_ndlp; 5214 u32 mbx_flag = pmb->mbox_flag; 5215 u32 mbx_cmd = pmb->u.mb.mbxCommand; 5216 5217 if (ndlp) { 5218 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, 5219 "0006 rpi x%x DID:%x flg:%x %d x%px " 5220 "mbx_cmd x%x mbx_flag x%x x%px\n", 5221 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, 5222 kref_read(&ndlp->kref), ndlp, mbx_cmd, 5223 mbx_flag, pmb); 5224 5225 /* This ends the default/temporary RPI cleanup logic for this 5226 * ndlp and the node and rpi needs to be released. Free the rpi 5227 * first on an UNREG_LOGIN and then release the final 5228 * references. 5229 */ 5230 spin_lock_irq(&ndlp->lock); 5231 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 5232 if (mbx_cmd == MBX_UNREG_LOGIN) 5233 ndlp->nlp_flag &= ~NLP_UNREG_INP; 5234 spin_unlock_irq(&ndlp->lock); 5235 lpfc_nlp_put(ndlp); 5236 lpfc_drop_node(ndlp->vport, ndlp); 5237 } 5238 5239 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); 5240 } 5241 5242 /** 5243 * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd 5244 * @phba: pointer to lpfc hba data structure. 5245 * @cmdiocb: pointer to lpfc command iocb data structure. 5246 * @rspiocb: pointer to lpfc response iocb data structure. 5247 * 5248 * This routine is the completion callback function for ELS Response IOCB 5249 * command. In normal case, this callback function just properly sets the 5250 * nlp_flag bitmap in the ndlp data structure, if the mbox command reference 5251 * field in the command IOCB is not NULL, the referred mailbox command will 5252 * be send out, and then invokes the lpfc_els_free_iocb() routine to release 5253 * the IOCB. 5254 **/ 5255 static void 5256 lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 5257 struct lpfc_iocbq *rspiocb) 5258 { 5259 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 5260 struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL; 5261 struct Scsi_Host *shost = vport ? 
lpfc_shost_from_vport(vport) : NULL; 5262 IOCB_t *irsp; 5263 LPFC_MBOXQ_t *mbox = NULL; 5264 u32 ulp_status, ulp_word4, tmo, did, iotag; 5265 5266 if (!vport) { 5267 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5268 "3177 ELS response failed\n"); 5269 goto out; 5270 } 5271 if (cmdiocb->context_un.mbox) 5272 mbox = cmdiocb->context_un.mbox; 5273 5274 ulp_status = get_job_ulpstatus(phba, rspiocb); 5275 ulp_word4 = get_job_word4(phba, rspiocb); 5276 did = get_job_els_rsp64_did(phba, cmdiocb); 5277 5278 if (phba->sli_rev == LPFC_SLI_REV4) { 5279 tmo = get_wqe_tmo(cmdiocb); 5280 iotag = get_wqe_reqtag(cmdiocb); 5281 } else { 5282 irsp = &rspiocb->iocb; 5283 tmo = irsp->ulpTimeout; 5284 iotag = irsp->ulpIoTag; 5285 } 5286 5287 /* Check to see if link went down during discovery */ 5288 if (!ndlp || lpfc_els_chk_latt(vport)) { 5289 if (mbox) 5290 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 5291 goto out; 5292 } 5293 5294 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5295 "ELS rsp cmpl: status:x%x/x%x did:x%x", 5296 ulp_status, ulp_word4, did); 5297 /* ELS response tag <ulpIoTag> completes */ 5298 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5299 "0110 ELS response tag x%x completes " 5300 "Data: x%x x%x x%x x%x x%x x%x x%x x%x %p %p\n", 5301 iotag, ulp_status, ulp_word4, tmo, 5302 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5303 ndlp->nlp_rpi, kref_read(&ndlp->kref), mbox, ndlp); 5304 if (mbox) { 5305 if (ulp_status == 0 5306 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { 5307 if (!lpfc_unreg_rpi(vport, ndlp) && 5308 (!(vport->fc_flag & FC_PT2PT))) { 5309 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || 5310 ndlp->nlp_state == 5311 NLP_STE_REG_LOGIN_ISSUE) { 5312 lpfc_printf_vlog(vport, KERN_INFO, 5313 LOG_DISCOVERY, 5314 "0314 PLOGI recov " 5315 "DID x%x " 5316 "Data: x%x x%x x%x\n", 5317 ndlp->nlp_DID, 5318 ndlp->nlp_state, 5319 ndlp->nlp_rpi, 5320 ndlp->nlp_flag); 5321 goto out_free_mbox; 5322 } 5323 } 5324 5325 /* Increment reference count to ndlp to hold the 5326 * reference to ndlp for the callback function. 5327 */ 5328 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 5329 if (!mbox->ctx_ndlp) 5330 goto out_free_mbox; 5331 5332 mbox->vport = vport; 5333 if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) { 5334 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG; 5335 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 5336 } 5337 else { 5338 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; 5339 ndlp->nlp_prev_state = ndlp->nlp_state; 5340 lpfc_nlp_set_state(vport, ndlp, 5341 NLP_STE_REG_LOGIN_ISSUE); 5342 } 5343 5344 ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; 5345 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 5346 != MBX_NOT_FINISHED) 5347 goto out; 5348 5349 /* Decrement the ndlp reference count we 5350 * set for this failed mailbox command. 5351 */ 5352 lpfc_nlp_put(ndlp); 5353 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 5354 5355 /* ELS rsp: Cannot issue reg_login for <NPortid> */ 5356 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5357 "0138 ELS rsp: Cannot issue reg_login for x%x " 5358 "Data: x%x x%x x%x\n", 5359 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5360 ndlp->nlp_rpi); 5361 } 5362 out_free_mbox: 5363 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 5364 } 5365 out: 5366 if (ndlp && shost) { 5367 spin_lock_irq(&ndlp->lock); 5368 if (mbox) 5369 ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN; 5370 ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI; 5371 spin_unlock_irq(&ndlp->lock); 5372 } 5373 5374 /* An SLI4 NPIV instance wants to drop the node at this point under 5375 * these conditions and release the RPI. 
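	 * The conditions are: SLI4 mode on an NPIV vport, the node is not
	 * registered with the SCSI transport (SCSI_XPT_REGD clear),
	 * NLP_RELEASE_RPI is set, and the node is not in PLOGI_ISSUE or
	 * REG_LOGIN_ISSUE state.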
5376 */ 5377 if (phba->sli_rev == LPFC_SLI_REV4 && 5378 (vport && vport->port_type == LPFC_NPIV_PORT) && 5379 !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD) && 5380 ndlp->nlp_flag & NLP_RELEASE_RPI) { 5381 if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE && 5382 ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) { 5383 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); 5384 spin_lock_irq(&ndlp->lock); 5385 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; 5386 ndlp->nlp_flag &= ~NLP_RELEASE_RPI; 5387 spin_unlock_irq(&ndlp->lock); 5388 lpfc_drop_node(vport, ndlp); 5389 } 5390 } 5391 5392 /* Release the originating I/O reference. */ 5393 lpfc_els_free_iocb(phba, cmdiocb); 5394 lpfc_nlp_put(ndlp); 5395 return; 5396 } 5397 5398 /** 5399 * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command 5400 * @vport: pointer to a host virtual N_Port data structure. 5401 * @flag: the els command code to be accepted. 5402 * @oldiocb: pointer to the original lpfc command iocb data structure. 5403 * @ndlp: pointer to a node-list data structure. 5404 * @mbox: pointer to the driver internal queue element for mailbox command. 5405 * 5406 * This routine prepares and issues an Accept (ACC) response IOCB 5407 * command. It uses the @flag to properly set up the IOCB field for the 5408 * specific ACC response command to be issued and invokes the 5409 * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a 5410 * @mbox pointer is passed in, it will be put into the context_un.mbox 5411 * field of the IOCB for the completion callback function to issue the 5412 * mailbox command to the HBA later when callback is invoked. 5413 * 5414 * Note that the ndlp reference count will be incremented by 1 for holding the 5415 * ndlp and the reference to ndlp will be stored into the ndlp field of 5416 * the IOCB for the completion callback function to the corresponding 5417 * response ELS IOCB command. 
5418 * 5419 * Return code 5420 * 0 - Successfully issued acc response 5421 * 1 - Failed to issue acc response 5422 **/ 5423 int 5424 lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, 5425 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 5426 LPFC_MBOXQ_t *mbox) 5427 { 5428 struct lpfc_hba *phba = vport->phba; 5429 IOCB_t *icmd; 5430 IOCB_t *oldcmd; 5431 union lpfc_wqe128 *wqe; 5432 union lpfc_wqe128 *oldwqe = &oldiocb->wqe; 5433 struct lpfc_iocbq *elsiocb; 5434 uint8_t *pcmd; 5435 struct serv_parm *sp; 5436 uint16_t cmdsize; 5437 int rc; 5438 ELS_PKT *els_pkt_ptr; 5439 struct fc_els_rdf_resp *rdf_resp; 5440 5441 switch (flag) { 5442 case ELS_CMD_ACC: 5443 cmdsize = sizeof(uint32_t); 5444 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5445 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5446 if (!elsiocb) { 5447 spin_lock_irq(&ndlp->lock); 5448 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 5449 spin_unlock_irq(&ndlp->lock); 5450 return 1; 5451 } 5452 5453 if (phba->sli_rev == LPFC_SLI_REV4) { 5454 wqe = &elsiocb->wqe; 5455 /* XRI / rx_id */ 5456 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 5457 bf_get(wqe_ctxt_tag, 5458 &oldwqe->xmit_els_rsp.wqe_com)); 5459 5460 /* oxid */ 5461 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5462 bf_get(wqe_rcvoxid, 5463 &oldwqe->xmit_els_rsp.wqe_com)); 5464 } else { 5465 icmd = &elsiocb->iocb; 5466 oldcmd = &oldiocb->iocb; 5467 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5468 icmd->unsli3.rcvsli3.ox_id = 5469 oldcmd->unsli3.rcvsli3.ox_id; 5470 } 5471 5472 pcmd = elsiocb->cmd_dmabuf->virt; 5473 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5474 pcmd += sizeof(uint32_t); 5475 5476 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5477 "Issue ACC: did:x%x flg:x%x", 5478 ndlp->nlp_DID, ndlp->nlp_flag, 0); 5479 break; 5480 case ELS_CMD_FLOGI: 5481 case ELS_CMD_PLOGI: 5482 cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t)); 5483 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5484 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5485 if (!elsiocb) 5486 return 1; 5487 5488 if (phba->sli_rev == LPFC_SLI_REV4) { 5489 wqe = &elsiocb->wqe; 5490 /* XRI / rx_id */ 5491 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 5492 bf_get(wqe_ctxt_tag, 5493 &oldwqe->xmit_els_rsp.wqe_com)); 5494 5495 /* oxid */ 5496 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5497 bf_get(wqe_rcvoxid, 5498 &oldwqe->xmit_els_rsp.wqe_com)); 5499 } else { 5500 icmd = &elsiocb->iocb; 5501 oldcmd = &oldiocb->iocb; 5502 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5503 icmd->unsli3.rcvsli3.ox_id = 5504 oldcmd->unsli3.rcvsli3.ox_id; 5505 } 5506 5507 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt; 5508 5509 if (mbox) 5510 elsiocb->context_un.mbox = mbox; 5511 5512 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5513 pcmd += sizeof(uint32_t); 5514 sp = (struct serv_parm *)pcmd; 5515 5516 if (flag == ELS_CMD_FLOGI) { 5517 /* Copy the received service parameters back */ 5518 memcpy(sp, &phba->fc_fabparam, 5519 sizeof(struct serv_parm)); 5520 5521 /* Clear the F_Port bit */ 5522 sp->cmn.fPort = 0; 5523 5524 /* Mark all class service parameters as invalid */ 5525 sp->cls1.classValid = 0; 5526 sp->cls2.classValid = 0; 5527 sp->cls3.classValid = 0; 5528 sp->cls4.classValid = 0; 5529 5530 /* Copy our worldwide names */ 5531 memcpy(&sp->portName, &vport->fc_sparam.portName, 5532 sizeof(struct lpfc_name)); 5533 memcpy(&sp->nodeName, &vport->fc_sparam.nodeName, 5534 sizeof(struct lpfc_name)); 5535 } else { 5536 memcpy(pcmd, &vport->fc_sparam, 5537 sizeof(struct serv_parm)); 5538 5539 
sp->cmn.valid_vendor_ver_level = 0; 5540 memset(sp->un.vendorVersion, 0, 5541 sizeof(sp->un.vendorVersion)); 5542 sp->cmn.bbRcvSizeMsb &= 0xF; 5543 5544 /* If our firmware supports this feature, convey that 5545 * info to the target using the vendor specific field. 5546 */ 5547 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) { 5548 sp->cmn.valid_vendor_ver_level = 1; 5549 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID); 5550 sp->un.vv.flags = 5551 cpu_to_be32(LPFC_VV_SUPPRESS_RSP); 5552 } 5553 } 5554 5555 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5556 "Issue ACC FLOGI/PLOGI: did:x%x flg:x%x", 5557 ndlp->nlp_DID, ndlp->nlp_flag, 0); 5558 break; 5559 case ELS_CMD_PRLO: 5560 cmdsize = sizeof(uint32_t) + sizeof(PRLO); 5561 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5562 ndlp, ndlp->nlp_DID, ELS_CMD_PRLO); 5563 if (!elsiocb) 5564 return 1; 5565 5566 if (phba->sli_rev == LPFC_SLI_REV4) { 5567 wqe = &elsiocb->wqe; 5568 /* XRI / rx_id */ 5569 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 5570 bf_get(wqe_ctxt_tag, 5571 &oldwqe->xmit_els_rsp.wqe_com)); 5572 5573 /* oxid */ 5574 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5575 bf_get(wqe_rcvoxid, 5576 &oldwqe->xmit_els_rsp.wqe_com)); 5577 } else { 5578 icmd = &elsiocb->iocb; 5579 oldcmd = &oldiocb->iocb; 5580 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5581 icmd->unsli3.rcvsli3.ox_id = 5582 oldcmd->unsli3.rcvsli3.ox_id; 5583 } 5584 5585 pcmd = (u8 *) elsiocb->cmd_dmabuf->virt; 5586 5587 memcpy(pcmd, oldiocb->cmd_dmabuf->virt, 5588 sizeof(uint32_t) + sizeof(PRLO)); 5589 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC; 5590 els_pkt_ptr = (ELS_PKT *) pcmd; 5591 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED; 5592 5593 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5594 "Issue ACC PRLO: did:x%x flg:x%x", 5595 ndlp->nlp_DID, ndlp->nlp_flag, 0); 5596 break; 5597 case ELS_CMD_RDF: 5598 cmdsize = sizeof(*rdf_resp); 5599 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5600 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5601 if (!elsiocb) 5602 return 1; 5603 5604 if (phba->sli_rev == LPFC_SLI_REV4) { 5605 wqe = &elsiocb->wqe; 5606 /* XRI / rx_id */ 5607 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 5608 bf_get(wqe_ctxt_tag, 5609 &oldwqe->xmit_els_rsp.wqe_com)); 5610 5611 /* oxid */ 5612 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5613 bf_get(wqe_rcvoxid, 5614 &oldwqe->xmit_els_rsp.wqe_com)); 5615 } else { 5616 icmd = &elsiocb->iocb; 5617 oldcmd = &oldiocb->iocb; 5618 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5619 icmd->unsli3.rcvsli3.ox_id = 5620 oldcmd->unsli3.rcvsli3.ox_id; 5621 } 5622 5623 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt; 5624 rdf_resp = (struct fc_els_rdf_resp *)pcmd; 5625 memset(rdf_resp, 0, sizeof(*rdf_resp)); 5626 rdf_resp->acc_hdr.la_cmd = ELS_LS_ACC; 5627 5628 /* FC-LS-5 specifies desc_list_len shall be set to 12 */ 5629 rdf_resp->desc_list_len = cpu_to_be32(12); 5630 5631 /* FC-LS-5 specifies LS REQ Information descriptor */ 5632 rdf_resp->lsri.desc_tag = cpu_to_be32(1); 5633 rdf_resp->lsri.desc_len = cpu_to_be32(sizeof(u32)); 5634 rdf_resp->lsri.rqst_w0.cmd = ELS_RDF; 5635 break; 5636 default: 5637 return 1; 5638 } 5639 if (ndlp->nlp_flag & NLP_LOGO_ACC) { 5640 spin_lock_irq(&ndlp->lock); 5641 if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED || 5642 ndlp->nlp_flag & NLP_REG_LOGIN_SEND)) 5643 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 5644 spin_unlock_irq(&ndlp->lock); 5645 elsiocb->cmd_cmpl = lpfc_cmpl_els_logo_acc; 5646 } else { 5647 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 5648 } 
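	/* From here the flow is common to all ACC variants: take a node
	 * reference for the completion handler, post the response on the
	 * ELS ring, and undo the reference if the submit fails.
	 */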
5649 5650 phba->fc_stat.elsXmitACC++; 5651 elsiocb->ndlp = lpfc_nlp_get(ndlp); 5652 if (!elsiocb->ndlp) { 5653 lpfc_els_free_iocb(phba, elsiocb); 5654 return 1; 5655 } 5656 5657 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5658 if (rc == IOCB_ERROR) { 5659 lpfc_els_free_iocb(phba, elsiocb); 5660 lpfc_nlp_put(ndlp); 5661 return 1; 5662 } 5663 5664 /* Xmit ELS ACC response tag <ulpIoTag> */ 5665 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5666 "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, " 5667 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " 5668 "RPI: x%x, fc_flag x%x refcnt %d\n", 5669 rc, elsiocb->iotag, elsiocb->sli4_xritag, 5670 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5671 ndlp->nlp_rpi, vport->fc_flag, kref_read(&ndlp->kref)); 5672 return 0; 5673 } 5674 5675 /** 5676 * lpfc_els_rsp_reject - Prepare and issue a rjt response iocb command 5677 * @vport: pointer to a virtual N_Port data structure. 5678 * @rejectError: reject response to issue 5679 * @oldiocb: pointer to the original lpfc command iocb data structure. 5680 * @ndlp: pointer to a node-list data structure. 5681 * @mbox: pointer to the driver internal queue element for mailbox command. 5682 * 5683 * This routine prepares and issue an Reject (RJT) response IOCB 5684 * command. If a @mbox pointer is passed in, it will be put into the 5685 * context_un.mbox field of the IOCB for the completion callback function 5686 * to issue to the HBA later. 5687 * 5688 * Note that the ndlp reference count will be incremented by 1 for holding the 5689 * ndlp and the reference to ndlp will be stored into the ndlp field of 5690 * the IOCB for the completion callback function to the reject response 5691 * ELS IOCB command. 5692 * 5693 * Return code 5694 * 0 - Successfully issued reject response 5695 * 1 - Failed to issue reject response 5696 **/ 5697 int 5698 lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, 5699 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 5700 LPFC_MBOXQ_t *mbox) 5701 { 5702 int rc; 5703 struct lpfc_hba *phba = vport->phba; 5704 IOCB_t *icmd; 5705 IOCB_t *oldcmd; 5706 union lpfc_wqe128 *wqe; 5707 struct lpfc_iocbq *elsiocb; 5708 uint8_t *pcmd; 5709 uint16_t cmdsize; 5710 5711 cmdsize = 2 * sizeof(uint32_t); 5712 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 5713 ndlp->nlp_DID, ELS_CMD_LS_RJT); 5714 if (!elsiocb) 5715 return 1; 5716 5717 if (phba->sli_rev == LPFC_SLI_REV4) { 5718 wqe = &elsiocb->wqe; 5719 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 5720 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */ 5721 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5722 get_job_rcvoxid(phba, oldiocb)); 5723 } else { 5724 icmd = &elsiocb->iocb; 5725 oldcmd = &oldiocb->iocb; 5726 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5727 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 5728 } 5729 5730 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 5731 5732 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; 5733 pcmd += sizeof(uint32_t); 5734 *((uint32_t *) (pcmd)) = rejectError; 5735 5736 if (mbox) 5737 elsiocb->context_un.mbox = mbox; 5738 5739 /* Xmit ELS RJT <err> response tag <ulpIoTag> */ 5740 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5741 "0129 Xmit ELS RJT x%x response tag x%x " 5742 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " 5743 "rpi x%x\n", 5744 rejectError, elsiocb->iotag, 5745 get_job_ulpcontext(phba, elsiocb), ndlp->nlp_DID, 5746 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); 5747 lpfc_debugfs_disc_trc(vport, 
LPFC_DISC_TRC_ELS_RSP, 5748 "Issue LS_RJT: did:x%x flg:x%x err:x%x", 5749 ndlp->nlp_DID, ndlp->nlp_flag, rejectError); 5750 5751 phba->fc_stat.elsXmitLSRJT++; 5752 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 5753 elsiocb->ndlp = lpfc_nlp_get(ndlp); 5754 if (!elsiocb->ndlp) { 5755 lpfc_els_free_iocb(phba, elsiocb); 5756 return 1; 5757 } 5758 5759 /* The NPIV instance is rejecting this unsolicited ELS. Make sure the 5760 * node's assigned RPI gets released provided this node is not already 5761 * registered with the transport. 5762 */ 5763 if (phba->sli_rev == LPFC_SLI_REV4 && 5764 vport->port_type == LPFC_NPIV_PORT && 5765 !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) { 5766 spin_lock_irq(&ndlp->lock); 5767 ndlp->nlp_flag |= NLP_RELEASE_RPI; 5768 spin_unlock_irq(&ndlp->lock); 5769 } 5770 5771 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5772 if (rc == IOCB_ERROR) { 5773 lpfc_els_free_iocb(phba, elsiocb); 5774 lpfc_nlp_put(ndlp); 5775 return 1; 5776 } 5777 5778 return 0; 5779 } 5780 5781 /** 5782 * lpfc_issue_els_edc_rsp - Exchange Diagnostic Capabilities with the fabric. 5783 * @vport: pointer to a host virtual N_Port data structure. 5784 * @cmdiocb: pointer to the original lpfc command iocb data structure. 5785 * @ndlp: NPort to where rsp is directed 5786 * 5787 * This routine issues an EDC ACC RSP to the F-Port Controller to communicate 5788 * this N_Port's support of hardware signals in its Congestion 5789 * Capabilities Descriptor. 5790 * 5791 * Return code 5792 * 0 - Successfully issued edc rsp command 5793 * 1 - Failed to issue edc rsp command 5794 **/ 5795 static int 5796 lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5797 struct lpfc_nodelist *ndlp) 5798 { 5799 struct lpfc_hba *phba = vport->phba; 5800 struct lpfc_els_edc_rsp *edc_rsp; 5801 struct lpfc_iocbq *elsiocb; 5802 IOCB_t *icmd, *cmd; 5803 union lpfc_wqe128 *wqe; 5804 uint8_t *pcmd; 5805 int cmdsize, rc; 5806 5807 cmdsize = sizeof(struct lpfc_els_edc_rsp); 5808 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, cmdiocb->retry, 5809 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5810 if (!elsiocb) 5811 return 1; 5812 5813 if (phba->sli_rev == LPFC_SLI_REV4) { 5814 wqe = &elsiocb->wqe; 5815 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 5816 get_job_ulpcontext(phba, cmdiocb)); /* Xri / rx_id */ 5817 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5818 get_job_rcvoxid(phba, cmdiocb)); 5819 } else { 5820 icmd = &elsiocb->iocb; 5821 cmd = &cmdiocb->iocb; 5822 icmd->ulpContext = cmd->ulpContext; /* Xri / rx_id */ 5823 icmd->unsli3.rcvsli3.ox_id = cmd->unsli3.rcvsli3.ox_id; 5824 } 5825 5826 pcmd = elsiocb->cmd_dmabuf->virt; 5827 memset(pcmd, 0, cmdsize); 5828 5829 edc_rsp = (struct lpfc_els_edc_rsp *)pcmd; 5830 edc_rsp->edc_rsp.acc_hdr.la_cmd = ELS_LS_ACC; 5831 edc_rsp->edc_rsp.desc_list_len = cpu_to_be32( 5832 FC_TLV_DESC_LENGTH_FROM_SZ(struct lpfc_els_edc_rsp)); 5833 edc_rsp->edc_rsp.lsri.desc_tag = cpu_to_be32(ELS_DTAG_LS_REQ_INFO); 5834 edc_rsp->edc_rsp.lsri.desc_len = cpu_to_be32( 5835 FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_els_lsri_desc)); 5836 edc_rsp->edc_rsp.lsri.rqst_w0.cmd = ELS_EDC; 5837 lpfc_format_edc_cgn_desc(phba, &edc_rsp->cgn_desc); 5838 5839 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5840 "Issue EDC ACC: did:x%x flg:x%x refcnt %d", 5841 ndlp->nlp_DID, ndlp->nlp_flag, 5842 kref_read(&ndlp->kref)); 5843 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 5844 5845 phba->fc_stat.elsXmitACC++; 5846 elsiocb->ndlp = lpfc_nlp_get(ndlp); 5847 if (!elsiocb->ndlp) { 5848 lpfc_els_free_iocb(phba, elsiocb); 5849 
return 1; 5850 } 5851 5852 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5853 if (rc == IOCB_ERROR) { 5854 lpfc_els_free_iocb(phba, elsiocb); 5855 lpfc_nlp_put(ndlp); 5856 return 1; 5857 } 5858 5859 /* Xmit ELS ACC response tag <ulpIoTag> */ 5860 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5861 "0152 Xmit EDC ACC response Status: x%x, IoTag: x%x, " 5862 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " 5863 "RPI: x%x, fc_flag x%x\n", 5864 rc, elsiocb->iotag, elsiocb->sli4_xritag, 5865 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5866 ndlp->nlp_rpi, vport->fc_flag); 5867 5868 return 0; 5869 } 5870 5871 /** 5872 * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd 5873 * @vport: pointer to a virtual N_Port data structure. 5874 * @oldiocb: pointer to the original lpfc command iocb data structure. 5875 * @ndlp: pointer to a node-list data structure. 5876 * 5877 * This routine prepares and issues an Accept (ACC) response to Address 5878 * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB 5879 * and invokes the lpfc_sli_issue_iocb() routine to send out the command. 5880 * 5881 * Note that the ndlp reference count will be incremented by 1 for holding the 5882 * ndlp and the reference to ndlp will be stored into the ndlp field of 5883 * the IOCB for the completion callback function to the ADISC Accept response 5884 * ELS IOCB command. 5885 * 5886 * Return code 5887 * 0 - Successfully issued acc adisc response 5888 * 1 - Failed to issue adisc acc response 5889 **/ 5890 int 5891 lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 5892 struct lpfc_nodelist *ndlp) 5893 { 5894 struct lpfc_hba *phba = vport->phba; 5895 ADISC *ap; 5896 IOCB_t *icmd, *oldcmd; 5897 union lpfc_wqe128 *wqe; 5898 struct lpfc_iocbq *elsiocb; 5899 uint8_t *pcmd; 5900 uint16_t cmdsize; 5901 int rc; 5902 u32 ulp_context; 5903 5904 cmdsize = sizeof(uint32_t) + sizeof(ADISC); 5905 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 5906 ndlp->nlp_DID, ELS_CMD_ACC); 5907 if (!elsiocb) 5908 return 1; 5909 5910 if (phba->sli_rev == LPFC_SLI_REV4) { 5911 wqe = &elsiocb->wqe; 5912 /* XRI / rx_id */ 5913 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 5914 get_job_ulpcontext(phba, oldiocb)); 5915 ulp_context = get_job_ulpcontext(phba, elsiocb); 5916 /* oxid */ 5917 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5918 get_job_rcvoxid(phba, oldiocb)); 5919 } else { 5920 icmd = &elsiocb->iocb; 5921 oldcmd = &oldiocb->iocb; 5922 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5923 ulp_context = elsiocb->iocb.ulpContext; 5924 icmd->unsli3.rcvsli3.ox_id = 5925 oldcmd->unsli3.rcvsli3.ox_id; 5926 } 5927 5928 /* Xmit ADISC ACC response tag <ulpIoTag> */ 5929 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5930 "0130 Xmit ADISC ACC response iotag x%x xri: " 5931 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n", 5932 elsiocb->iotag, ulp_context, 5933 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5934 ndlp->nlp_rpi); 5935 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 5936 5937 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5938 pcmd += sizeof(uint32_t); 5939 5940 ap = (ADISC *) (pcmd); 5941 ap->hardAL_PA = phba->fc_pref_ALPA; 5942 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 5943 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 5944 ap->DID = be32_to_cpu(vport->fc_myDID); 5945 5946 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5947 "Issue ACC ADISC: did:x%x flg:x%x refcnt %d", 5948 
ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 5949 5950 phba->fc_stat.elsXmitACC++; 5951 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 5952 elsiocb->ndlp = lpfc_nlp_get(ndlp); 5953 if (!elsiocb->ndlp) { 5954 lpfc_els_free_iocb(phba, elsiocb); 5955 return 1; 5956 } 5957 5958 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5959 if (rc == IOCB_ERROR) { 5960 lpfc_els_free_iocb(phba, elsiocb); 5961 lpfc_nlp_put(ndlp); 5962 return 1; 5963 } 5964 5965 return 0; 5966 } 5967 5968 /** 5969 * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd 5970 * @vport: pointer to a virtual N_Port data structure. 5971 * @oldiocb: pointer to the original lpfc command iocb data structure. 5972 * @ndlp: pointer to a node-list data structure. 5973 * 5974 * This routine prepares and issues an Accept (ACC) response to Process 5975 * Login (PRLI) ELS command. It simply prepares the payload of the IOCB 5976 * and invokes the lpfc_sli_issue_iocb() routine to send out the command. 5977 * 5978 * Note that the ndlp reference count will be incremented by 1 for holding the 5979 * ndlp and the reference to ndlp will be stored into the ndlp field of 5980 * the IOCB for the completion callback function to the PRLI Accept response 5981 * ELS IOCB command. 5982 * 5983 * Return code 5984 * 0 - Successfully issued acc prli response 5985 * 1 - Failed to issue acc prli response 5986 **/ 5987 int 5988 lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 5989 struct lpfc_nodelist *ndlp) 5990 { 5991 struct lpfc_hba *phba = vport->phba; 5992 PRLI *npr; 5993 struct lpfc_nvme_prli *npr_nvme; 5994 lpfc_vpd_t *vpd; 5995 IOCB_t *icmd; 5996 IOCB_t *oldcmd; 5997 union lpfc_wqe128 *wqe; 5998 struct lpfc_iocbq *elsiocb; 5999 uint8_t *pcmd; 6000 uint16_t cmdsize; 6001 uint32_t prli_fc4_req, *req_payload; 6002 struct lpfc_dmabuf *req_buf; 6003 int rc; 6004 u32 elsrspcmd, ulp_context; 6005 6006 /* Need the incoming PRLI payload to determine if the ACC is for an 6007 * FC4 or NVME PRLI type. The PRLI type is at word 1. 6008 */ 6009 req_buf = oldiocb->cmd_dmabuf; 6010 req_payload = (((uint32_t *)req_buf->virt) + 1); 6011 6012 /* PRLI type payload is at byte 3 for FCP or NVME. 
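	 * After byte-swapping word 1, the FC-4 type is taken from the most
	 * significant byte (bits 31:24). FCP is matched by an exact compare
	 * while NVME is matched by a bit test, so the order of the checks
	 * below matters.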
*/ 6013 prli_fc4_req = be32_to_cpu(*req_payload); 6014 prli_fc4_req = (prli_fc4_req >> 24) & 0xff; 6015 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6016 "6127 PRLI_ACC: Req Type x%x, Word1 x%08x\n", 6017 prli_fc4_req, *((uint32_t *)req_payload)); 6018 6019 if (prli_fc4_req == PRLI_FCP_TYPE) { 6020 cmdsize = sizeof(uint32_t) + sizeof(PRLI); 6021 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)); 6022 } else if (prli_fc4_req & PRLI_NVME_TYPE) { 6023 cmdsize = sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli); 6024 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_NVMEPRLI & ~ELS_RSP_MASK)); 6025 } else { 6026 return 1; 6027 } 6028 6029 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 6030 ndlp->nlp_DID, elsrspcmd); 6031 if (!elsiocb) 6032 return 1; 6033 6034 if (phba->sli_rev == LPFC_SLI_REV4) { 6035 wqe = &elsiocb->wqe; 6036 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 6037 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */ 6038 ulp_context = get_job_ulpcontext(phba, elsiocb); 6039 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 6040 get_job_rcvoxid(phba, oldiocb)); 6041 } else { 6042 icmd = &elsiocb->iocb; 6043 oldcmd = &oldiocb->iocb; 6044 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 6045 ulp_context = elsiocb->iocb.ulpContext; 6046 icmd->unsli3.rcvsli3.ox_id = 6047 oldcmd->unsli3.rcvsli3.ox_id; 6048 } 6049 6050 /* Xmit PRLI ACC response tag <ulpIoTag> */ 6051 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6052 "0131 Xmit PRLI ACC response tag x%x xri x%x, " 6053 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 6054 elsiocb->iotag, ulp_context, 6055 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 6056 ndlp->nlp_rpi); 6057 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 6058 memset(pcmd, 0, cmdsize); 6059 6060 *((uint32_t *)(pcmd)) = elsrspcmd; 6061 pcmd += sizeof(uint32_t); 6062 6063 /* For PRLI, remainder of payload is PRLI parameter page */ 6064 vpd = &phba->vpd; 6065 6066 if (prli_fc4_req == PRLI_FCP_TYPE) { 6067 /* 6068 * If the remote port is a target and our firmware version 6069 * is 3.20 or later, set the following bits for FC-TAPE 6070 * support. 6071 */ 6072 npr = (PRLI *) pcmd; 6073 if ((ndlp->nlp_type & NLP_FCP_TARGET) && 6074 (vpd->rev.feaLevelHigh >= 0x02)) { 6075 npr->ConfmComplAllowed = 1; 6076 npr->Retry = 1; 6077 npr->TaskRetryIdReq = 1; 6078 } 6079 npr->acceptRspCode = PRLI_REQ_EXECUTED; 6080 npr->estabImagePair = 1; 6081 npr->readXferRdyDis = 1; 6082 npr->ConfmComplAllowed = 1; 6083 npr->prliType = PRLI_FCP_TYPE; 6084 npr->initiatorFunc = 1; 6085 } else if (prli_fc4_req & PRLI_NVME_TYPE) { 6086 /* Respond with an NVME PRLI Type */ 6087 npr_nvme = (struct lpfc_nvme_prli *) pcmd; 6088 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); 6089 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ 6090 bf_set(prli_acc_rsp_code, npr_nvme, PRLI_REQ_EXECUTED); 6091 if (phba->nvmet_support) { 6092 bf_set(prli_tgt, npr_nvme, 1); 6093 bf_set(prli_disc, npr_nvme, 1); 6094 if (phba->cfg_nvme_enable_fb) { 6095 bf_set(prli_fba, npr_nvme, 1); 6096 6097 /* TBD. Target mode needs to post buffers 6098 * that support the configured first burst 6099 * byte size. 
6100 */ 6101 bf_set(prli_fb_sz, npr_nvme, 6102 phba->cfg_nvmet_fb_size); 6103 } 6104 } else { 6105 bf_set(prli_init, npr_nvme, 1); 6106 } 6107 6108 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 6109 "6015 NVME issue PRLI ACC word1 x%08x " 6110 "word4 x%08x word5 x%08x flag x%x, " 6111 "fcp_info x%x nlp_type x%x\n", 6112 npr_nvme->word1, npr_nvme->word4, 6113 npr_nvme->word5, ndlp->nlp_flag, 6114 ndlp->nlp_fcp_info, ndlp->nlp_type); 6115 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); 6116 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); 6117 npr_nvme->word5 = cpu_to_be32(npr_nvme->word5); 6118 } else 6119 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6120 "6128 Unknown FC_TYPE x%x x%x ndlp x%06x\n", 6121 prli_fc4_req, ndlp->nlp_fc4_type, 6122 ndlp->nlp_DID); 6123 6124 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6125 "Issue ACC PRLI: did:x%x flg:x%x", 6126 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 6127 6128 phba->fc_stat.elsXmitACC++; 6129 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 6130 elsiocb->ndlp = lpfc_nlp_get(ndlp); 6131 if (!elsiocb->ndlp) { 6132 lpfc_els_free_iocb(phba, elsiocb); 6133 return 1; 6134 } 6135 6136 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6137 if (rc == IOCB_ERROR) { 6138 lpfc_els_free_iocb(phba, elsiocb); 6139 lpfc_nlp_put(ndlp); 6140 return 1; 6141 } 6142 6143 return 0; 6144 } 6145 6146 /** 6147 * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command 6148 * @vport: pointer to a virtual N_Port data structure. 6149 * @format: rnid command format. 6150 * @oldiocb: pointer to the original lpfc command iocb data structure. 6151 * @ndlp: pointer to a node-list data structure. 6152 * 6153 * This routine issues a Request Node Identification Data (RNID) Accept 6154 * (ACC) response. It constructs the RNID ACC response command according to 6155 * the proper @format and then calls the lpfc_sli_issue_iocb() routine to 6156 * issue the response. 6157 * 6158 * Note that the ndlp reference count will be incremented by 1 for holding the 6159 * ndlp and the reference to ndlp will be stored into the ndlp field of 6160 * the IOCB for the completion callback function. 
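 *
 * A @format of zero returns only the common identification data (port and
 * node names); RNID_TOPOLOGY_DISC additionally returns the topology
 * discovery specific data. Any other format is answered with zero-length
 * common and specific identification blocks.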
6161 * 6162 * Return code 6163 * 0 - Successfully issued acc rnid response 6164 * 1 - Failed to issue acc rnid response 6165 **/ 6166 static int 6167 lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, 6168 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 6169 { 6170 struct lpfc_hba *phba = vport->phba; 6171 RNID *rn; 6172 IOCB_t *icmd, *oldcmd; 6173 union lpfc_wqe128 *wqe; 6174 struct lpfc_iocbq *elsiocb; 6175 uint8_t *pcmd; 6176 uint16_t cmdsize; 6177 int rc; 6178 u32 ulp_context; 6179 6180 cmdsize = sizeof(uint32_t) + sizeof(uint32_t) 6181 + (2 * sizeof(struct lpfc_name)); 6182 if (format) 6183 cmdsize += sizeof(RNID_TOP_DISC); 6184 6185 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 6186 ndlp->nlp_DID, ELS_CMD_ACC); 6187 if (!elsiocb) 6188 return 1; 6189 6190 if (phba->sli_rev == LPFC_SLI_REV4) { 6191 wqe = &elsiocb->wqe; 6192 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 6193 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */ 6194 ulp_context = get_job_ulpcontext(phba, elsiocb); 6195 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 6196 get_job_rcvoxid(phba, oldiocb)); 6197 } else { 6198 icmd = &elsiocb->iocb; 6199 oldcmd = &oldiocb->iocb; 6200 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 6201 ulp_context = elsiocb->iocb.ulpContext; 6202 icmd->unsli3.rcvsli3.ox_id = 6203 oldcmd->unsli3.rcvsli3.ox_id; 6204 } 6205 6206 /* Xmit RNID ACC response tag <ulpIoTag> */ 6207 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6208 "0132 Xmit RNID ACC response tag x%x xri x%x\n", 6209 elsiocb->iotag, ulp_context); 6210 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 6211 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 6212 pcmd += sizeof(uint32_t); 6213 6214 memset(pcmd, 0, sizeof(RNID)); 6215 rn = (RNID *) (pcmd); 6216 rn->Format = format; 6217 rn->CommonLen = (2 * sizeof(struct lpfc_name)); 6218 memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 6219 memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 6220 switch (format) { 6221 case 0: 6222 rn->SpecificLen = 0; 6223 break; 6224 case RNID_TOPOLOGY_DISC: 6225 rn->SpecificLen = sizeof(RNID_TOP_DISC); 6226 memcpy(&rn->un.topologyDisc.portName, 6227 &vport->fc_portname, sizeof(struct lpfc_name)); 6228 rn->un.topologyDisc.unitType = RNID_HBA; 6229 rn->un.topologyDisc.physPort = 0; 6230 rn->un.topologyDisc.attachedNodes = 0; 6231 break; 6232 default: 6233 rn->CommonLen = 0; 6234 rn->SpecificLen = 0; 6235 break; 6236 } 6237 6238 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6239 "Issue ACC RNID: did:x%x flg:x%x refcnt %d", 6240 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 6241 6242 phba->fc_stat.elsXmitACC++; 6243 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 6244 elsiocb->ndlp = lpfc_nlp_get(ndlp); 6245 if (!elsiocb->ndlp) { 6246 lpfc_els_free_iocb(phba, elsiocb); 6247 return 1; 6248 } 6249 6250 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6251 if (rc == IOCB_ERROR) { 6252 lpfc_els_free_iocb(phba, elsiocb); 6253 lpfc_nlp_put(ndlp); 6254 return 1; 6255 } 6256 6257 return 0; 6258 } 6259 6260 /** 6261 * lpfc_els_clear_rrq - Clear the rq that this rrq describes. 6262 * @vport: pointer to a virtual N_Port data structure. 6263 * @iocb: pointer to the lpfc command iocb data structure. 6264 * @ndlp: pointer to a node-list data structure. 
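 *
 * This routine parses the RRQ payload from the received ELS command and
 * clears the matching active RRQ for the node. If the DID carried in the
 * RRQ matches this port's DID, the OX_ID identifies the exchange to clear;
 * otherwise the RX_ID is used.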
6265 * 6266 * Return 6267 **/ 6268 static void 6269 lpfc_els_clear_rrq(struct lpfc_vport *vport, 6270 struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp) 6271 { 6272 struct lpfc_hba *phba = vport->phba; 6273 uint8_t *pcmd; 6274 struct RRQ *rrq; 6275 uint16_t rxid; 6276 uint16_t xri; 6277 struct lpfc_node_rrq *prrq; 6278 6279 6280 pcmd = (uint8_t *)iocb->cmd_dmabuf->virt; 6281 pcmd += sizeof(uint32_t); 6282 rrq = (struct RRQ *)pcmd; 6283 rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg); 6284 rxid = bf_get(rrq_rxid, rrq); 6285 6286 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6287 "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x" 6288 " x%x x%x\n", 6289 be32_to_cpu(bf_get(rrq_did, rrq)), 6290 bf_get(rrq_oxid, rrq), 6291 rxid, 6292 get_wqe_reqtag(iocb), 6293 get_job_ulpcontext(phba, iocb)); 6294 6295 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6296 "Clear RRQ: did:x%x flg:x%x exchg:x%.08x", 6297 ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg); 6298 if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq))) 6299 xri = bf_get(rrq_oxid, rrq); 6300 else 6301 xri = rxid; 6302 prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID); 6303 if (prrq) 6304 lpfc_clr_rrq_active(phba, xri, prrq); 6305 return; 6306 } 6307 6308 /** 6309 * lpfc_els_rsp_echo_acc - Issue echo acc response 6310 * @vport: pointer to a virtual N_Port data structure. 6311 * @data: pointer to echo data to return in the accept. 6312 * @oldiocb: pointer to the original lpfc command iocb data structure. 6313 * @ndlp: pointer to a node-list data structure. 6314 * 6315 * Return code 6316 * 0 - Successfully issued acc echo response 6317 * 1 - Failed to issue acc echo response 6318 **/ 6319 static int 6320 lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data, 6321 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 6322 { 6323 struct lpfc_hba *phba = vport->phba; 6324 IOCB_t *icmd, *oldcmd; 6325 union lpfc_wqe128 *wqe; 6326 struct lpfc_iocbq *elsiocb; 6327 uint8_t *pcmd; 6328 uint16_t cmdsize; 6329 int rc; 6330 u32 ulp_context; 6331 6332 if (phba->sli_rev == LPFC_SLI_REV4) 6333 cmdsize = oldiocb->wcqe_cmpl.total_data_placed; 6334 else 6335 cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len; 6336 6337 /* The accumulated length can exceed the BPL_SIZE. 
For 6338 * now, use this as the limit 6339 */ 6340 if (cmdsize > LPFC_BPL_SIZE) 6341 cmdsize = LPFC_BPL_SIZE; 6342 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 6343 ndlp->nlp_DID, ELS_CMD_ACC); 6344 if (!elsiocb) 6345 return 1; 6346 6347 if (phba->sli_rev == LPFC_SLI_REV4) { 6348 wqe = &elsiocb->wqe; 6349 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 6350 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */ 6351 ulp_context = get_job_ulpcontext(phba, elsiocb); 6352 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 6353 get_job_rcvoxid(phba, oldiocb)); 6354 } else { 6355 icmd = &elsiocb->iocb; 6356 oldcmd = &oldiocb->iocb; 6357 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 6358 ulp_context = elsiocb->iocb.ulpContext; 6359 icmd->unsli3.rcvsli3.ox_id = 6360 oldcmd->unsli3.rcvsli3.ox_id; 6361 } 6362 6363 /* Xmit ECHO ACC response tag <ulpIoTag> */ 6364 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6365 "2876 Xmit ECHO ACC response tag x%x xri x%x\n", 6366 elsiocb->iotag, ulp_context); 6367 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 6368 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 6369 pcmd += sizeof(uint32_t); 6370 memcpy(pcmd, data, cmdsize - sizeof(uint32_t)); 6371 6372 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6373 "Issue ACC ECHO: did:x%x flg:x%x refcnt %d", 6374 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 6375 6376 phba->fc_stat.elsXmitACC++; 6377 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 6378 elsiocb->ndlp = lpfc_nlp_get(ndlp); 6379 if (!elsiocb->ndlp) { 6380 lpfc_els_free_iocb(phba, elsiocb); 6381 return 1; 6382 } 6383 6384 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6385 if (rc == IOCB_ERROR) { 6386 lpfc_els_free_iocb(phba, elsiocb); 6387 lpfc_nlp_put(ndlp); 6388 return 1; 6389 } 6390 6391 return 0; 6392 } 6393 6394 /** 6395 * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport 6396 * @vport: pointer to a host virtual N_Port data structure. 6397 * 6398 * This routine issues Address Discover (ADISC) ELS commands to those 6399 * N_Ports which are in node port recovery state and ADISC has not been issued 6400 * for the @vport. Each time an ELS ADISC IOCB is issued by invoking the 6401 * lpfc_issue_els_adisc() routine, the per @vport number of discover count 6402 * (num_disc_nodes) shall be incremented. If the num_disc_nodes reaches a 6403 * pre-configured threshold (cfg_discovery_threads), the @vport fc_flag will 6404 * be marked with FC_NLP_MORE bit and the process of issuing remaining ADISC 6405 * IOCBs quit for later pick up. On the other hand, after walking through 6406 * all the ndlps with the @vport and there is none ADISC IOCB issued, the 6407 * @vport fc_flag shall be cleared with FC_NLP_MORE bit indicating there is 6408 * no more ADISC need to be sent. 6409 * 6410 * Return code 6411 * The number of N_Ports with adisc issued. 
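 *
 * Note that nodes that were marked for ADISC but not selected for
 * discovery (NLP_NPR_2B_DISC not set) are instead unregistered from the
 * backend and have their RPI released rather than being sent an ADISC.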
6412 **/ 6413 int 6414 lpfc_els_disc_adisc(struct lpfc_vport *vport) 6415 { 6416 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6417 struct lpfc_nodelist *ndlp, *next_ndlp; 6418 int sentadisc = 0; 6419 6420 /* go thru NPR nodes and issue any remaining ELS ADISCs */ 6421 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 6422 6423 if (ndlp->nlp_state != NLP_STE_NPR_NODE || 6424 !(ndlp->nlp_flag & NLP_NPR_ADISC)) 6425 continue; 6426 6427 spin_lock_irq(&ndlp->lock); 6428 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 6429 spin_unlock_irq(&ndlp->lock); 6430 6431 if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) { 6432 /* This node was marked for ADISC but was not picked 6433 * for discovery. This is possible if the node was 6434 * missing in gidft response. 6435 * 6436 * At time of marking node for ADISC, we skipped unreg 6437 * from backend 6438 */ 6439 lpfc_nlp_unreg_node(vport, ndlp); 6440 lpfc_unreg_rpi(vport, ndlp); 6441 continue; 6442 } 6443 6444 ndlp->nlp_prev_state = ndlp->nlp_state; 6445 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 6446 lpfc_issue_els_adisc(vport, ndlp, 0); 6447 sentadisc++; 6448 vport->num_disc_nodes++; 6449 if (vport->num_disc_nodes >= 6450 vport->cfg_discovery_threads) { 6451 spin_lock_irq(shost->host_lock); 6452 vport->fc_flag |= FC_NLP_MORE; 6453 spin_unlock_irq(shost->host_lock); 6454 break; 6455 } 6456 6457 } 6458 if (sentadisc == 0) { 6459 spin_lock_irq(shost->host_lock); 6460 vport->fc_flag &= ~FC_NLP_MORE; 6461 spin_unlock_irq(shost->host_lock); 6462 } 6463 return sentadisc; 6464 } 6465 6466 /** 6467 * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc 6468 * @vport: pointer to a host virtual N_Port data structure. 6469 * 6470 * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports 6471 * which are in node port recovery state, with a @vport. Each time an ELS 6472 * ADISC PLOGI IOCB is issued by invoking the lpfc_issue_els_plogi() routine, 6473 * the per @vport number of discover count (num_disc_nodes) shall be 6474 * incremented. If the num_disc_nodes reaches a pre-configured threshold 6475 * (cfg_discovery_threads), the @vport fc_flag will be marked with FC_NLP_MORE 6476 * bit set and quit the process of issuing remaining ADISC PLOGIN IOCBs for 6477 * later pick up. On the other hand, after walking through all the ndlps with 6478 * the @vport and there is none ADISC PLOGI IOCB issued, the @vport fc_flag 6479 * shall be cleared with the FC_NLP_MORE bit indicating there is no more ADISC 6480 * PLOGI need to be sent. 6481 * 6482 * Return code 6483 * The number of N_Ports with plogi issued. 
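 *
 * Note that PLOGI is only issued to NPR nodes that are marked for
 * discovery (NLP_NPR_2B_DISC) and have neither a delayed retry
 * (NLP_DELAY_TMO) nor an ADISC (NLP_NPR_ADISC) pending; if any PLOGIs were
 * sent, the discovery timeout is restarted via lpfc_set_disctmo().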
6484 **/ 6485 int 6486 lpfc_els_disc_plogi(struct lpfc_vport *vport) 6487 { 6488 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6489 struct lpfc_nodelist *ndlp, *next_ndlp; 6490 int sentplogi = 0; 6491 6492 /* go thru NPR nodes and issue any remaining ELS PLOGIs */ 6493 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 6494 if (ndlp->nlp_state == NLP_STE_NPR_NODE && 6495 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && 6496 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 && 6497 (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) { 6498 ndlp->nlp_prev_state = ndlp->nlp_state; 6499 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 6500 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 6501 sentplogi++; 6502 vport->num_disc_nodes++; 6503 if (vport->num_disc_nodes >= 6504 vport->cfg_discovery_threads) { 6505 spin_lock_irq(shost->host_lock); 6506 vport->fc_flag |= FC_NLP_MORE; 6507 spin_unlock_irq(shost->host_lock); 6508 break; 6509 } 6510 } 6511 } 6512 6513 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6514 "6452 Discover PLOGI %d flag x%x\n", 6515 sentplogi, vport->fc_flag); 6516 6517 if (sentplogi) { 6518 lpfc_set_disctmo(vport); 6519 } 6520 else { 6521 spin_lock_irq(shost->host_lock); 6522 vport->fc_flag &= ~FC_NLP_MORE; 6523 spin_unlock_irq(shost->host_lock); 6524 } 6525 return sentplogi; 6526 } 6527 6528 static uint32_t 6529 lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc, 6530 uint32_t word0) 6531 { 6532 6533 desc->tag = cpu_to_be32(RDP_LINK_SERVICE_DESC_TAG); 6534 desc->payload.els_req = word0; 6535 desc->length = cpu_to_be32(sizeof(desc->payload)); 6536 6537 return sizeof(struct fc_rdp_link_service_desc); 6538 } 6539 6540 static uint32_t 6541 lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc, 6542 uint8_t *page_a0, uint8_t *page_a2) 6543 { 6544 uint16_t wavelength; 6545 uint16_t temperature; 6546 uint16_t rx_power; 6547 uint16_t tx_bias; 6548 uint16_t tx_power; 6549 uint16_t vcc; 6550 uint16_t flag = 0; 6551 struct sff_trasnceiver_codes_byte4 *trasn_code_byte4; 6552 struct sff_trasnceiver_codes_byte5 *trasn_code_byte5; 6553 6554 desc->tag = cpu_to_be32(RDP_SFP_DESC_TAG); 6555 6556 trasn_code_byte4 = (struct sff_trasnceiver_codes_byte4 *) 6557 &page_a0[SSF_TRANSCEIVER_CODE_B4]; 6558 trasn_code_byte5 = (struct sff_trasnceiver_codes_byte5 *) 6559 &page_a0[SSF_TRANSCEIVER_CODE_B5]; 6560 6561 if ((trasn_code_byte4->fc_sw_laser) || 6562 (trasn_code_byte5->fc_sw_laser_sl) || 6563 (trasn_code_byte5->fc_sw_laser_sn)) { /* check if its short WL */ 6564 flag |= (SFP_FLAG_PT_SWLASER << SFP_FLAG_PT_SHIFT); 6565 } else if (trasn_code_byte4->fc_lw_laser) { 6566 wavelength = (page_a0[SSF_WAVELENGTH_B1] << 8) | 6567 page_a0[SSF_WAVELENGTH_B0]; 6568 if (wavelength == SFP_WAVELENGTH_LC1310) 6569 flag |= SFP_FLAG_PT_LWLASER_LC1310 << SFP_FLAG_PT_SHIFT; 6570 if (wavelength == SFP_WAVELENGTH_LL1550) 6571 flag |= SFP_FLAG_PT_LWLASER_LL1550 << SFP_FLAG_PT_SHIFT; 6572 } 6573 /* check if its SFP+ */ 6574 flag |= ((page_a0[SSF_IDENTIFIER] == SFF_PG0_IDENT_SFP) ? 6575 SFP_FLAG_CT_SFP_PLUS : SFP_FLAG_CT_UNKNOWN) 6576 << SFP_FLAG_CT_SHIFT; 6577 6578 /* check if its OPTICAL */ 6579 flag |= ((page_a0[SSF_CONNECTOR] == SFF_PG0_CONNECTOR_LC) ? 
6580 SFP_FLAG_IS_OPTICAL_PORT : 0) 6581 << SFP_FLAG_IS_OPTICAL_SHIFT; 6582 6583 temperature = (page_a2[SFF_TEMPERATURE_B1] << 8 | 6584 page_a2[SFF_TEMPERATURE_B0]); 6585 vcc = (page_a2[SFF_VCC_B1] << 8 | 6586 page_a2[SFF_VCC_B0]); 6587 tx_power = (page_a2[SFF_TXPOWER_B1] << 8 | 6588 page_a2[SFF_TXPOWER_B0]); 6589 tx_bias = (page_a2[SFF_TX_BIAS_CURRENT_B1] << 8 | 6590 page_a2[SFF_TX_BIAS_CURRENT_B0]); 6591 rx_power = (page_a2[SFF_RXPOWER_B1] << 8 | 6592 page_a2[SFF_RXPOWER_B0]); 6593 desc->sfp_info.temperature = cpu_to_be16(temperature); 6594 desc->sfp_info.rx_power = cpu_to_be16(rx_power); 6595 desc->sfp_info.tx_bias = cpu_to_be16(tx_bias); 6596 desc->sfp_info.tx_power = cpu_to_be16(tx_power); 6597 desc->sfp_info.vcc = cpu_to_be16(vcc); 6598 6599 desc->sfp_info.flags = cpu_to_be16(flag); 6600 desc->length = cpu_to_be32(sizeof(desc->sfp_info)); 6601 6602 return sizeof(struct fc_rdp_sfp_desc); 6603 } 6604 6605 static uint32_t 6606 lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc, 6607 READ_LNK_VAR *stat) 6608 { 6609 uint32_t type; 6610 6611 desc->tag = cpu_to_be32(RDP_LINK_ERROR_STATUS_DESC_TAG); 6612 6613 type = VN_PT_PHY_PF_PORT << VN_PT_PHY_SHIFT; 6614 6615 desc->info.port_type = cpu_to_be32(type); 6616 6617 desc->info.link_status.link_failure_cnt = 6618 cpu_to_be32(stat->linkFailureCnt); 6619 desc->info.link_status.loss_of_synch_cnt = 6620 cpu_to_be32(stat->lossSyncCnt); 6621 desc->info.link_status.loss_of_signal_cnt = 6622 cpu_to_be32(stat->lossSignalCnt); 6623 desc->info.link_status.primitive_seq_proto_err = 6624 cpu_to_be32(stat->primSeqErrCnt); 6625 desc->info.link_status.invalid_trans_word = 6626 cpu_to_be32(stat->invalidXmitWord); 6627 desc->info.link_status.invalid_crc_cnt = cpu_to_be32(stat->crcCnt); 6628 6629 desc->length = cpu_to_be32(sizeof(desc->info)); 6630 6631 return sizeof(struct fc_rdp_link_error_status_desc); 6632 } 6633 6634 static uint32_t 6635 lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat, 6636 struct lpfc_vport *vport) 6637 { 6638 uint32_t bbCredit; 6639 6640 desc->tag = cpu_to_be32(RDP_BBC_DESC_TAG); 6641 6642 bbCredit = vport->fc_sparam.cmn.bbCreditLsb | 6643 (vport->fc_sparam.cmn.bbCreditMsb << 8); 6644 desc->bbc_info.port_bbc = cpu_to_be32(bbCredit); 6645 if (vport->phba->fc_topology != LPFC_TOPOLOGY_LOOP) { 6646 bbCredit = vport->phba->fc_fabparam.cmn.bbCreditLsb | 6647 (vport->phba->fc_fabparam.cmn.bbCreditMsb << 8); 6648 desc->bbc_info.attached_port_bbc = cpu_to_be32(bbCredit); 6649 } else { 6650 desc->bbc_info.attached_port_bbc = 0; 6651 } 6652 6653 desc->bbc_info.rtt = 0; 6654 desc->length = cpu_to_be32(sizeof(desc->bbc_info)); 6655 6656 return sizeof(struct fc_rdp_bbc_desc); 6657 } 6658 6659 static uint32_t 6660 lpfc_rdp_res_oed_temp_desc(struct lpfc_hba *phba, 6661 struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2) 6662 { 6663 uint32_t flags = 0; 6664 6665 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6666 6667 desc->oed_info.hi_alarm = page_a2[SSF_TEMP_HIGH_ALARM]; 6668 desc->oed_info.lo_alarm = page_a2[SSF_TEMP_LOW_ALARM]; 6669 desc->oed_info.hi_warning = page_a2[SSF_TEMP_HIGH_WARNING]; 6670 desc->oed_info.lo_warning = page_a2[SSF_TEMP_LOW_WARNING]; 6671 6672 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TEMPERATURE) 6673 flags |= RDP_OET_HIGH_ALARM; 6674 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TEMPERATURE) 6675 flags |= RDP_OET_LOW_ALARM; 6676 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TEMPERATURE) 6677 flags |= RDP_OET_HIGH_WARNING; 6678 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TEMPERATURE) 6679 
flags |= RDP_OET_LOW_WARNING; 6680 6681 flags |= ((0xf & RDP_OED_TEMPERATURE) << RDP_OED_TYPE_SHIFT); 6682 desc->oed_info.function_flags = cpu_to_be32(flags); 6683 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6684 return sizeof(struct fc_rdp_oed_sfp_desc); 6685 } 6686 6687 static uint32_t 6688 lpfc_rdp_res_oed_voltage_desc(struct lpfc_hba *phba, 6689 struct fc_rdp_oed_sfp_desc *desc, 6690 uint8_t *page_a2) 6691 { 6692 uint32_t flags = 0; 6693 6694 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6695 6696 desc->oed_info.hi_alarm = page_a2[SSF_VOLTAGE_HIGH_ALARM]; 6697 desc->oed_info.lo_alarm = page_a2[SSF_VOLTAGE_LOW_ALARM]; 6698 desc->oed_info.hi_warning = page_a2[SSF_VOLTAGE_HIGH_WARNING]; 6699 desc->oed_info.lo_warning = page_a2[SSF_VOLTAGE_LOW_WARNING]; 6700 6701 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_VOLTAGE) 6702 flags |= RDP_OET_HIGH_ALARM; 6703 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_VOLTAGE) 6704 flags |= RDP_OET_LOW_ALARM; 6705 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_VOLTAGE) 6706 flags |= RDP_OET_HIGH_WARNING; 6707 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_VOLTAGE) 6708 flags |= RDP_OET_LOW_WARNING; 6709 6710 flags |= ((0xf & RDP_OED_VOLTAGE) << RDP_OED_TYPE_SHIFT); 6711 desc->oed_info.function_flags = cpu_to_be32(flags); 6712 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6713 return sizeof(struct fc_rdp_oed_sfp_desc); 6714 } 6715 6716 static uint32_t 6717 lpfc_rdp_res_oed_txbias_desc(struct lpfc_hba *phba, 6718 struct fc_rdp_oed_sfp_desc *desc, 6719 uint8_t *page_a2) 6720 { 6721 uint32_t flags = 0; 6722 6723 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6724 6725 desc->oed_info.hi_alarm = page_a2[SSF_BIAS_HIGH_ALARM]; 6726 desc->oed_info.lo_alarm = page_a2[SSF_BIAS_LOW_ALARM]; 6727 desc->oed_info.hi_warning = page_a2[SSF_BIAS_HIGH_WARNING]; 6728 desc->oed_info.lo_warning = page_a2[SSF_BIAS_LOW_WARNING]; 6729 6730 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXBIAS) 6731 flags |= RDP_OET_HIGH_ALARM; 6732 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXBIAS) 6733 flags |= RDP_OET_LOW_ALARM; 6734 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXBIAS) 6735 flags |= RDP_OET_HIGH_WARNING; 6736 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXBIAS) 6737 flags |= RDP_OET_LOW_WARNING; 6738 6739 flags |= ((0xf & RDP_OED_TXBIAS) << RDP_OED_TYPE_SHIFT); 6740 desc->oed_info.function_flags = cpu_to_be32(flags); 6741 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6742 return sizeof(struct fc_rdp_oed_sfp_desc); 6743 } 6744 6745 static uint32_t 6746 lpfc_rdp_res_oed_txpower_desc(struct lpfc_hba *phba, 6747 struct fc_rdp_oed_sfp_desc *desc, 6748 uint8_t *page_a2) 6749 { 6750 uint32_t flags = 0; 6751 6752 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6753 6754 desc->oed_info.hi_alarm = page_a2[SSF_TXPOWER_HIGH_ALARM]; 6755 desc->oed_info.lo_alarm = page_a2[SSF_TXPOWER_LOW_ALARM]; 6756 desc->oed_info.hi_warning = page_a2[SSF_TXPOWER_HIGH_WARNING]; 6757 desc->oed_info.lo_warning = page_a2[SSF_TXPOWER_LOW_WARNING]; 6758 6759 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXPOWER) 6760 flags |= RDP_OET_HIGH_ALARM; 6761 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXPOWER) 6762 flags |= RDP_OET_LOW_ALARM; 6763 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXPOWER) 6764 flags |= RDP_OET_HIGH_WARNING; 6765 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXPOWER) 6766 flags |= RDP_OET_LOW_WARNING; 6767 6768 flags |= ((0xf & RDP_OED_TXPOWER) << RDP_OED_TYPE_SHIFT); 6769 desc->oed_info.function_flags = cpu_to_be32(flags); 6770 desc->length = 
cpu_to_be32(sizeof(desc->oed_info)); 6771 return sizeof(struct fc_rdp_oed_sfp_desc); 6772 } 6773 6774 6775 static uint32_t 6776 lpfc_rdp_res_oed_rxpower_desc(struct lpfc_hba *phba, 6777 struct fc_rdp_oed_sfp_desc *desc, 6778 uint8_t *page_a2) 6779 { 6780 uint32_t flags = 0; 6781 6782 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6783 6784 desc->oed_info.hi_alarm = page_a2[SSF_RXPOWER_HIGH_ALARM]; 6785 desc->oed_info.lo_alarm = page_a2[SSF_RXPOWER_LOW_ALARM]; 6786 desc->oed_info.hi_warning = page_a2[SSF_RXPOWER_HIGH_WARNING]; 6787 desc->oed_info.lo_warning = page_a2[SSF_RXPOWER_LOW_WARNING]; 6788 6789 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_RXPOWER) 6790 flags |= RDP_OET_HIGH_ALARM; 6791 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_RXPOWER) 6792 flags |= RDP_OET_LOW_ALARM; 6793 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_RXPOWER) 6794 flags |= RDP_OET_HIGH_WARNING; 6795 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_RXPOWER) 6796 flags |= RDP_OET_LOW_WARNING; 6797 6798 flags |= ((0xf & RDP_OED_RXPOWER) << RDP_OED_TYPE_SHIFT); 6799 desc->oed_info.function_flags = cpu_to_be32(flags); 6800 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6801 return sizeof(struct fc_rdp_oed_sfp_desc); 6802 } 6803 6804 static uint32_t 6805 lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc, 6806 uint8_t *page_a0, struct lpfc_vport *vport) 6807 { 6808 desc->tag = cpu_to_be32(RDP_OPD_DESC_TAG); 6809 memcpy(desc->opd_info.vendor_name, &page_a0[SSF_VENDOR_NAME], 16); 6810 memcpy(desc->opd_info.model_number, &page_a0[SSF_VENDOR_PN], 16); 6811 memcpy(desc->opd_info.serial_number, &page_a0[SSF_VENDOR_SN], 16); 6812 memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 4); 6813 memcpy(desc->opd_info.date, &page_a0[SSF_DATE_CODE], 8); 6814 desc->length = cpu_to_be32(sizeof(desc->opd_info)); 6815 return sizeof(struct fc_rdp_opd_sfp_desc); 6816 } 6817 6818 static uint32_t 6819 lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat) 6820 { 6821 if (bf_get(lpfc_read_link_stat_gec2, stat) == 0) 6822 return 0; 6823 desc->tag = cpu_to_be32(RDP_FEC_DESC_TAG); 6824 6825 desc->info.CorrectedBlocks = 6826 cpu_to_be32(stat->fecCorrBlkCount); 6827 desc->info.UncorrectableBlocks = 6828 cpu_to_be32(stat->fecUncorrBlkCount); 6829 6830 desc->length = cpu_to_be32(sizeof(desc->info)); 6831 6832 return sizeof(struct fc_fec_rdp_desc); 6833 } 6834 6835 static uint32_t 6836 lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba) 6837 { 6838 uint16_t rdp_cap = 0; 6839 uint16_t rdp_speed; 6840 6841 desc->tag = cpu_to_be32(RDP_PORT_SPEED_DESC_TAG); 6842 6843 switch (phba->fc_linkspeed) { 6844 case LPFC_LINK_SPEED_1GHZ: 6845 rdp_speed = RDP_PS_1GB; 6846 break; 6847 case LPFC_LINK_SPEED_2GHZ: 6848 rdp_speed = RDP_PS_2GB; 6849 break; 6850 case LPFC_LINK_SPEED_4GHZ: 6851 rdp_speed = RDP_PS_4GB; 6852 break; 6853 case LPFC_LINK_SPEED_8GHZ: 6854 rdp_speed = RDP_PS_8GB; 6855 break; 6856 case LPFC_LINK_SPEED_10GHZ: 6857 rdp_speed = RDP_PS_10GB; 6858 break; 6859 case LPFC_LINK_SPEED_16GHZ: 6860 rdp_speed = RDP_PS_16GB; 6861 break; 6862 case LPFC_LINK_SPEED_32GHZ: 6863 rdp_speed = RDP_PS_32GB; 6864 break; 6865 case LPFC_LINK_SPEED_64GHZ: 6866 rdp_speed = RDP_PS_64GB; 6867 break; 6868 case LPFC_LINK_SPEED_128GHZ: 6869 rdp_speed = RDP_PS_128GB; 6870 break; 6871 case LPFC_LINK_SPEED_256GHZ: 6872 rdp_speed = RDP_PS_256GB; 6873 break; 6874 default: 6875 rdp_speed = RDP_PS_UNKNOWN; 6876 break; 6877 } 6878 6879 desc->info.port_speed.speed = cpu_to_be16(rdp_speed); 6880 6881 if (phba->lmt & LMT_256Gb) 6882 
rdp_cap |= RDP_PS_256GB; 6883 if (phba->lmt & LMT_128Gb) 6884 rdp_cap |= RDP_PS_128GB; 6885 if (phba->lmt & LMT_64Gb) 6886 rdp_cap |= RDP_PS_64GB; 6887 if (phba->lmt & LMT_32Gb) 6888 rdp_cap |= RDP_PS_32GB; 6889 if (phba->lmt & LMT_16Gb) 6890 rdp_cap |= RDP_PS_16GB; 6891 if (phba->lmt & LMT_10Gb) 6892 rdp_cap |= RDP_PS_10GB; 6893 if (phba->lmt & LMT_8Gb) 6894 rdp_cap |= RDP_PS_8GB; 6895 if (phba->lmt & LMT_4Gb) 6896 rdp_cap |= RDP_PS_4GB; 6897 if (phba->lmt & LMT_2Gb) 6898 rdp_cap |= RDP_PS_2GB; 6899 if (phba->lmt & LMT_1Gb) 6900 rdp_cap |= RDP_PS_1GB; 6901 6902 if (rdp_cap == 0) 6903 rdp_cap = RDP_CAP_UNKNOWN; 6904 if (phba->cfg_link_speed != LPFC_USER_LINK_SPEED_AUTO) 6905 rdp_cap |= RDP_CAP_USER_CONFIGURED; 6906 6907 desc->info.port_speed.capabilities = cpu_to_be16(rdp_cap); 6908 desc->length = cpu_to_be32(sizeof(desc->info)); 6909 return sizeof(struct fc_rdp_port_speed_desc); 6910 } 6911 6912 static uint32_t 6913 lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc, 6914 struct lpfc_vport *vport) 6915 { 6916 6917 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG); 6918 6919 memcpy(desc->port_names.wwnn, &vport->fc_nodename, 6920 sizeof(desc->port_names.wwnn)); 6921 6922 memcpy(desc->port_names.wwpn, &vport->fc_portname, 6923 sizeof(desc->port_names.wwpn)); 6924 6925 desc->length = cpu_to_be32(sizeof(desc->port_names)); 6926 return sizeof(struct fc_rdp_port_name_desc); 6927 } 6928 6929 static uint32_t 6930 lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc, 6931 struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 6932 { 6933 6934 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG); 6935 if (vport->fc_flag & FC_FABRIC) { 6936 memcpy(desc->port_names.wwnn, &vport->fabric_nodename, 6937 sizeof(desc->port_names.wwnn)); 6938 6939 memcpy(desc->port_names.wwpn, &vport->fabric_portname, 6940 sizeof(desc->port_names.wwpn)); 6941 } else { /* Point to Point */ 6942 memcpy(desc->port_names.wwnn, &ndlp->nlp_nodename, 6943 sizeof(desc->port_names.wwnn)); 6944 6945 memcpy(desc->port_names.wwpn, &ndlp->nlp_portname, 6946 sizeof(desc->port_names.wwpn)); 6947 } 6948 6949 desc->length = cpu_to_be32(sizeof(desc->port_names)); 6950 return sizeof(struct fc_rdp_port_name_desc); 6951 } 6952 6953 static void 6954 lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context, 6955 int status) 6956 { 6957 struct lpfc_nodelist *ndlp = rdp_context->ndlp; 6958 struct lpfc_vport *vport = ndlp->vport; 6959 struct lpfc_iocbq *elsiocb; 6960 struct ulp_bde64 *bpl; 6961 IOCB_t *icmd; 6962 union lpfc_wqe128 *wqe; 6963 uint8_t *pcmd; 6964 struct ls_rjt *stat; 6965 struct fc_rdp_res_frame *rdp_res; 6966 uint32_t cmdsize, len; 6967 uint16_t *flag_ptr; 6968 int rc; 6969 u32 ulp_context; 6970 6971 if (status != SUCCESS) 6972 goto error; 6973 6974 /* This will change once we know the true size of the RDP payload */ 6975 cmdsize = sizeof(struct fc_rdp_res_frame); 6976 6977 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, 6978 lpfc_max_els_tries, rdp_context->ndlp, 6979 rdp_context->ndlp->nlp_DID, ELS_CMD_ACC); 6980 if (!elsiocb) 6981 goto free_rdp_context; 6982 6983 ulp_context = get_job_ulpcontext(phba, elsiocb); 6984 if (phba->sli_rev == LPFC_SLI_REV4) { 6985 wqe = &elsiocb->wqe; 6986 /* ox-id of the frame */ 6987 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 6988 rdp_context->ox_id); 6989 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 6990 rdp_context->rx_id); 6991 } else { 6992 icmd = &elsiocb->iocb; 6993 icmd->ulpContext = rdp_context->rx_id; 6994 icmd->unsli3.rcvsli3.ox_id = 
rdp_context->ox_id; 6995 } 6996 6997 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6998 "2171 Xmit RDP response tag x%x xri x%x, " 6999 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x", 7000 elsiocb->iotag, ulp_context, 7001 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 7002 ndlp->nlp_rpi); 7003 rdp_res = (struct fc_rdp_res_frame *)elsiocb->cmd_dmabuf->virt; 7004 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 7005 memset(pcmd, 0, sizeof(struct fc_rdp_res_frame)); 7006 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 7007 7008 /* Update Alarm and Warning */ 7009 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_ALARM_FLAGS); 7010 phba->sfp_alarm |= *flag_ptr; 7011 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_WARNING_FLAGS); 7012 phba->sfp_warning |= *flag_ptr; 7013 7014 /* For RDP payload */ 7015 len = 8; 7016 len += lpfc_rdp_res_link_service((struct fc_rdp_link_service_desc *) 7017 (len + pcmd), ELS_CMD_RDP); 7018 7019 len += lpfc_rdp_res_sfp_desc((struct fc_rdp_sfp_desc *)(len + pcmd), 7020 rdp_context->page_a0, rdp_context->page_a2); 7021 len += lpfc_rdp_res_speed((struct fc_rdp_port_speed_desc *)(len + pcmd), 7022 phba); 7023 len += lpfc_rdp_res_link_error((struct fc_rdp_link_error_status_desc *) 7024 (len + pcmd), &rdp_context->link_stat); 7025 len += lpfc_rdp_res_diag_port_names((struct fc_rdp_port_name_desc *) 7026 (len + pcmd), vport); 7027 len += lpfc_rdp_res_attach_port_names((struct fc_rdp_port_name_desc *) 7028 (len + pcmd), vport, ndlp); 7029 len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd), 7030 &rdp_context->link_stat); 7031 len += lpfc_rdp_res_bbc_desc((struct fc_rdp_bbc_desc *)(len + pcmd), 7032 &rdp_context->link_stat, vport); 7033 len += lpfc_rdp_res_oed_temp_desc(phba, 7034 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7035 rdp_context->page_a2); 7036 len += lpfc_rdp_res_oed_voltage_desc(phba, 7037 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7038 rdp_context->page_a2); 7039 len += lpfc_rdp_res_oed_txbias_desc(phba, 7040 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7041 rdp_context->page_a2); 7042 len += lpfc_rdp_res_oed_txpower_desc(phba, 7043 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7044 rdp_context->page_a2); 7045 len += lpfc_rdp_res_oed_rxpower_desc(phba, 7046 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7047 rdp_context->page_a2); 7048 len += lpfc_rdp_res_opd_desc((struct fc_rdp_opd_sfp_desc *)(len + pcmd), 7049 rdp_context->page_a0, vport); 7050 7051 rdp_res->length = cpu_to_be32(len - 8); 7052 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 7053 7054 /* Now that we know the true size of the payload, update the BPL */ 7055 bpl = (struct ulp_bde64 *)elsiocb->bpl_dmabuf->virt; 7056 bpl->tus.f.bdeSize = len; 7057 bpl->tus.f.bdeFlags = 0; 7058 bpl->tus.w = le32_to_cpu(bpl->tus.w); 7059 7060 phba->fc_stat.elsXmitACC++; 7061 elsiocb->ndlp = lpfc_nlp_get(ndlp); 7062 if (!elsiocb->ndlp) { 7063 lpfc_els_free_iocb(phba, elsiocb); 7064 goto free_rdp_context; 7065 } 7066 7067 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7068 if (rc == IOCB_ERROR) { 7069 lpfc_els_free_iocb(phba, elsiocb); 7070 lpfc_nlp_put(ndlp); 7071 } 7072 7073 goto free_rdp_context; 7074 7075 error: 7076 cmdsize = 2 * sizeof(uint32_t); 7077 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, lpfc_max_els_tries, 7078 ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT); 7079 if (!elsiocb) 7080 goto free_rdp_context; 7081 7082 if (phba->sli_rev == LPFC_SLI_REV4) { 7083 wqe = &elsiocb->wqe; 7084 /* ox-id of the frame */ 7085 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 7086 rdp_context->ox_id); 7087 
bf_set(wqe_ctxt_tag, 7088 &wqe->xmit_els_rsp.wqe_com, 7089 rdp_context->rx_id); 7090 } else { 7091 icmd = &elsiocb->iocb; 7092 icmd->ulpContext = rdp_context->rx_id; 7093 icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id; 7094 } 7095 7096 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 7097 7098 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; 7099 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t)); 7100 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 7101 7102 phba->fc_stat.elsXmitLSRJT++; 7103 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 7104 elsiocb->ndlp = lpfc_nlp_get(ndlp); 7105 if (!elsiocb->ndlp) { 7106 lpfc_els_free_iocb(phba, elsiocb); 7107 goto free_rdp_context; 7108 } 7109 7110 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7111 if (rc == IOCB_ERROR) { 7112 lpfc_els_free_iocb(phba, elsiocb); 7113 lpfc_nlp_put(ndlp); 7114 } 7115 7116 free_rdp_context: 7117 /* This reference put is for the original unsolicited RDP. If the 7118 * prep failed, there is no reference to remove. 7119 */ 7120 lpfc_nlp_put(ndlp); 7121 kfree(rdp_context); 7122 } 7123 7124 static int 7125 lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context) 7126 { 7127 LPFC_MBOXQ_t *mbox = NULL; 7128 int rc; 7129 7130 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7131 if (!mbox) { 7132 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS, 7133 "7105 failed to allocate mailbox memory"); 7134 return 1; 7135 } 7136 7137 if (lpfc_sli4_dump_page_a0(phba, mbox)) 7138 goto rdp_fail; 7139 mbox->vport = rdp_context->ndlp->vport; 7140 mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0; 7141 mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context; 7142 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 7143 if (rc == MBX_NOT_FINISHED) { 7144 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 7145 return 1; 7146 } 7147 7148 return 0; 7149 7150 rdp_fail: 7151 mempool_free(mbox, phba->mbox_mem_pool); 7152 return 1; 7153 } 7154 7155 /* 7156 * lpfc_els_rcv_rdp - Process an unsolicited RDP ELS. 7157 * @vport: pointer to a host virtual N_Port data structure. 7158 * @cmdiocb: pointer to lpfc command iocb data structure. 7159 * @ndlp: pointer to a node-list data structure. 7160 * 7161 * This routine processes an unsolicited RDP(Read Diagnostic Parameters) 7162 * IOCB. First, the payload of the unsolicited RDP is checked. 7163 * Then it will (1) send MBX_DUMP_MEMORY, Embedded DMP_LMSD sub command TYPE-3 7164 * for Page A0, (2) send MBX_DUMP_MEMORY, DMP_LMSD for Page A2, 7165 * (3) send MBX_READ_LNK_STAT to get link stat, (4) Call lpfc_els_rdp_cmpl 7166 * gather all data and send RDP response. 7167 * 7168 * Return code 7169 * 0 - Sent the acc response 7170 * 1 - Sent the reject response. 
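 *
 * The accept frame is assembled by lpfc_els_rdp_cmpl(): it begins with the
 * ELS_CMD_ACC word and a payload length word (so the running length starts
 * at 8), after which each lpfc_rdp_res_*() helper appends one descriptor
 * (tag, length, payload) and returns the size it consumed. The frame-level
 * length field is finally set to the accumulated length minus the 8-byte
 * header.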
7171 */ 7172 static int 7173 lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7174 struct lpfc_nodelist *ndlp) 7175 { 7176 struct lpfc_hba *phba = vport->phba; 7177 struct lpfc_dmabuf *pcmd; 7178 uint8_t rjt_err, rjt_expl = LSEXP_NOTHING_MORE; 7179 struct fc_rdp_req_frame *rdp_req; 7180 struct lpfc_rdp_context *rdp_context; 7181 union lpfc_wqe128 *cmd = NULL; 7182 struct ls_rjt stat; 7183 7184 if (phba->sli_rev < LPFC_SLI_REV4 || 7185 bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 7186 LPFC_SLI_INTF_IF_TYPE_2) { 7187 rjt_err = LSRJT_UNABLE_TPC; 7188 rjt_expl = LSEXP_REQ_UNSUPPORTED; 7189 goto error; 7190 } 7191 7192 if (phba->sli_rev < LPFC_SLI_REV4 || (phba->hba_flag & HBA_FCOE_MODE)) { 7193 rjt_err = LSRJT_UNABLE_TPC; 7194 rjt_expl = LSEXP_REQ_UNSUPPORTED; 7195 goto error; 7196 } 7197 7198 pcmd = cmdiocb->cmd_dmabuf; 7199 rdp_req = (struct fc_rdp_req_frame *) pcmd->virt; 7200 7201 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7202 "2422 ELS RDP Request " 7203 "dec len %d tag x%x port_id %d len %d\n", 7204 be32_to_cpu(rdp_req->rdp_des_length), 7205 be32_to_cpu(rdp_req->nport_id_desc.tag), 7206 be32_to_cpu(rdp_req->nport_id_desc.nport_id), 7207 be32_to_cpu(rdp_req->nport_id_desc.length)); 7208 7209 if (sizeof(struct fc_rdp_nport_desc) != 7210 be32_to_cpu(rdp_req->rdp_des_length)) 7211 goto rjt_logerr; 7212 if (RDP_N_PORT_DESC_TAG != be32_to_cpu(rdp_req->nport_id_desc.tag)) 7213 goto rjt_logerr; 7214 if (RDP_NPORT_ID_SIZE != 7215 be32_to_cpu(rdp_req->nport_id_desc.length)) 7216 goto rjt_logerr; 7217 rdp_context = kzalloc(sizeof(struct lpfc_rdp_context), GFP_KERNEL); 7218 if (!rdp_context) { 7219 rjt_err = LSRJT_UNABLE_TPC; 7220 goto error; 7221 } 7222 7223 cmd = &cmdiocb->wqe; 7224 rdp_context->ndlp = lpfc_nlp_get(ndlp); 7225 if (!rdp_context->ndlp) { 7226 kfree(rdp_context); 7227 rjt_err = LSRJT_UNABLE_TPC; 7228 goto error; 7229 } 7230 rdp_context->ox_id = bf_get(wqe_rcvoxid, 7231 &cmd->xmit_els_rsp.wqe_com); 7232 rdp_context->rx_id = bf_get(wqe_ctxt_tag, 7233 &cmd->xmit_els_rsp.wqe_com); 7234 rdp_context->cmpl = lpfc_els_rdp_cmpl; 7235 if (lpfc_get_rdp_info(phba, rdp_context)) { 7236 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_ELS, 7237 "2423 Unable to send mailbox"); 7238 kfree(rdp_context); 7239 rjt_err = LSRJT_UNABLE_TPC; 7240 lpfc_nlp_put(ndlp); 7241 goto error; 7242 } 7243 7244 return 0; 7245 7246 rjt_logerr: 7247 rjt_err = LSRJT_LOGICAL_ERR; 7248 7249 error: 7250 memset(&stat, 0, sizeof(stat)); 7251 stat.un.b.lsRjtRsnCode = rjt_err; 7252 stat.un.b.lsRjtRsnCodeExp = rjt_expl; 7253 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 7254 return 1; 7255 } 7256 7257 7258 static void 7259 lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 7260 { 7261 MAILBOX_t *mb; 7262 IOCB_t *icmd; 7263 union lpfc_wqe128 *wqe; 7264 uint8_t *pcmd; 7265 struct lpfc_iocbq *elsiocb; 7266 struct lpfc_nodelist *ndlp; 7267 struct ls_rjt *stat; 7268 union lpfc_sli4_cfg_shdr *shdr; 7269 struct lpfc_lcb_context *lcb_context; 7270 struct fc_lcb_res_frame *lcb_res; 7271 uint32_t cmdsize, shdr_status, shdr_add_status; 7272 int rc; 7273 7274 mb = &pmb->u.mb; 7275 lcb_context = (struct lpfc_lcb_context *)pmb->ctx_ndlp; 7276 ndlp = lcb_context->ndlp; 7277 pmb->ctx_ndlp = NULL; 7278 pmb->ctx_buf = NULL; 7279 7280 shdr = (union lpfc_sli4_cfg_shdr *) 7281 &pmb->u.mqe.un.beacon_config.header.cfg_shdr; 7282 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 7283 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 7284 7285 
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX, 7286 "0194 SET_BEACON_CONFIG mailbox " 7287 "completed with status x%x add_status x%x," 7288 " mbx status x%x\n", 7289 shdr_status, shdr_add_status, mb->mbxStatus); 7290 7291 if ((mb->mbxStatus != MBX_SUCCESS) || shdr_status || 7292 (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) || 7293 (shdr_add_status == ADD_STATUS_INVALID_REQUEST)) { 7294 mempool_free(pmb, phba->mbox_mem_pool); 7295 goto error; 7296 } 7297 7298 mempool_free(pmb, phba->mbox_mem_pool); 7299 cmdsize = sizeof(struct fc_lcb_res_frame); 7300 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 7301 lpfc_max_els_tries, ndlp, 7302 ndlp->nlp_DID, ELS_CMD_ACC); 7303 7304 /* Decrement the ndlp reference count from previous mbox command */ 7305 lpfc_nlp_put(ndlp); 7306 7307 if (!elsiocb) 7308 goto free_lcb_context; 7309 7310 lcb_res = (struct fc_lcb_res_frame *)elsiocb->cmd_dmabuf->virt; 7311 7312 memset(lcb_res, 0, sizeof(struct fc_lcb_res_frame)); 7313 7314 if (phba->sli_rev == LPFC_SLI_REV4) { 7315 wqe = &elsiocb->wqe; 7316 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, lcb_context->rx_id); 7317 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 7318 lcb_context->ox_id); 7319 } else { 7320 icmd = &elsiocb->iocb; 7321 icmd->ulpContext = lcb_context->rx_id; 7322 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; 7323 } 7324 7325 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 7326 *((uint32_t *)(pcmd)) = ELS_CMD_ACC; 7327 lcb_res->lcb_sub_command = lcb_context->sub_command; 7328 lcb_res->lcb_type = lcb_context->type; 7329 lcb_res->capability = lcb_context->capability; 7330 lcb_res->lcb_frequency = lcb_context->frequency; 7331 lcb_res->lcb_duration = lcb_context->duration; 7332 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 7333 phba->fc_stat.elsXmitACC++; 7334 7335 elsiocb->ndlp = lpfc_nlp_get(ndlp); 7336 if (!elsiocb->ndlp) { 7337 lpfc_els_free_iocb(phba, elsiocb); 7338 goto out; 7339 } 7340 7341 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7342 if (rc == IOCB_ERROR) { 7343 lpfc_els_free_iocb(phba, elsiocb); 7344 lpfc_nlp_put(ndlp); 7345 } 7346 out: 7347 kfree(lcb_context); 7348 return; 7349 7350 error: 7351 cmdsize = sizeof(struct fc_lcb_res_frame); 7352 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 7353 lpfc_max_els_tries, ndlp, 7354 ndlp->nlp_DID, ELS_CMD_LS_RJT); 7355 lpfc_nlp_put(ndlp); 7356 if (!elsiocb) 7357 goto free_lcb_context; 7358 7359 if (phba->sli_rev == LPFC_SLI_REV4) { 7360 wqe = &elsiocb->wqe; 7361 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, lcb_context->rx_id); 7362 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 7363 lcb_context->ox_id); 7364 } else { 7365 icmd = &elsiocb->iocb; 7366 icmd->ulpContext = lcb_context->rx_id; 7367 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; 7368 } 7369 7370 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 7371 7372 *((uint32_t *)(pcmd)) = ELS_CMD_LS_RJT; 7373 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t)); 7374 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 7375 7376 if (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) 7377 stat->un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS; 7378 7379 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 7380 phba->fc_stat.elsXmitLSRJT++; 7381 elsiocb->ndlp = lpfc_nlp_get(ndlp); 7382 if (!elsiocb->ndlp) { 7383 lpfc_els_free_iocb(phba, elsiocb); 7384 goto free_lcb_context; 7385 } 7386 7387 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7388 if (rc == IOCB_ERROR) { 7389 lpfc_els_free_iocb(phba, elsiocb); 7390 lpfc_nlp_put(ndlp); 7391 } 7392 free_lcb_context: 7393 kfree(lcb_context); 7394 } 
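
/*
 * Illustrative sketch only; the helper below is hypothetical and not part
 * of the driver. It condenses the beacon-duration rule that
 * lpfc_sli4_set_beacon() applies: when the SLI4 parameters advertise bv1s,
 * the V1 SET_BEACON_CONFIG request may carry a non-zero LCB duration,
 * otherwise only a zero duration is valid and the V0 format is used.
 */
static inline bool lpfc_lcb_duration_supported(bool bv1s, __be16 duration)
{
	/* V1 accepts any duration; V0 only supports a duration of zero */
	return bv1s || be16_to_cpu(duration) == 0;
}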
7395 7396 static int 7397 lpfc_sli4_set_beacon(struct lpfc_vport *vport, 7398 struct lpfc_lcb_context *lcb_context, 7399 uint32_t beacon_state) 7400 { 7401 struct lpfc_hba *phba = vport->phba; 7402 union lpfc_sli4_cfg_shdr *cfg_shdr; 7403 LPFC_MBOXQ_t *mbox = NULL; 7404 uint32_t len; 7405 int rc; 7406 7407 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7408 if (!mbox) 7409 return 1; 7410 7411 cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr; 7412 len = sizeof(struct lpfc_mbx_set_beacon_config) - 7413 sizeof(struct lpfc_sli4_cfg_mhdr); 7414 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 7415 LPFC_MBOX_OPCODE_SET_BEACON_CONFIG, len, 7416 LPFC_SLI4_MBX_EMBED); 7417 mbox->ctx_ndlp = (void *)lcb_context; 7418 mbox->vport = phba->pport; 7419 mbox->mbox_cmpl = lpfc_els_lcb_rsp; 7420 bf_set(lpfc_mbx_set_beacon_port_num, &mbox->u.mqe.un.beacon_config, 7421 phba->sli4_hba.physical_port); 7422 bf_set(lpfc_mbx_set_beacon_state, &mbox->u.mqe.un.beacon_config, 7423 beacon_state); 7424 mbox->u.mqe.un.beacon_config.word5 = 0; /* Reserved */ 7425 7426 /* 7427 * Check bv1s bit before issuing the mailbox 7428 * if bv1s == 1, LCB V1 supported 7429 * else, LCB V0 supported 7430 */ 7431 7432 if (phba->sli4_hba.pc_sli4_params.bv1s) { 7433 /* COMMON_SET_BEACON_CONFIG_V1 */ 7434 cfg_shdr->request.word9 = BEACON_VERSION_V1; 7435 lcb_context->capability |= LCB_CAPABILITY_DURATION; 7436 bf_set(lpfc_mbx_set_beacon_port_type, 7437 &mbox->u.mqe.un.beacon_config, 0); 7438 bf_set(lpfc_mbx_set_beacon_duration_v1, 7439 &mbox->u.mqe.un.beacon_config, 7440 be16_to_cpu(lcb_context->duration)); 7441 } else { 7442 /* COMMON_SET_BEACON_CONFIG_V0 */ 7443 if (be16_to_cpu(lcb_context->duration) != 0) { 7444 mempool_free(mbox, phba->mbox_mem_pool); 7445 return 1; 7446 } 7447 cfg_shdr->request.word9 = BEACON_VERSION_V0; 7448 lcb_context->capability &= ~(LCB_CAPABILITY_DURATION); 7449 bf_set(lpfc_mbx_set_beacon_state, 7450 &mbox->u.mqe.un.beacon_config, beacon_state); 7451 bf_set(lpfc_mbx_set_beacon_port_type, 7452 &mbox->u.mqe.un.beacon_config, 1); 7453 bf_set(lpfc_mbx_set_beacon_duration, 7454 &mbox->u.mqe.un.beacon_config, 7455 be16_to_cpu(lcb_context->duration)); 7456 } 7457 7458 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 7459 if (rc == MBX_NOT_FINISHED) { 7460 mempool_free(mbox, phba->mbox_mem_pool); 7461 return 1; 7462 } 7463 7464 return 0; 7465 } 7466 7467 7468 /** 7469 * lpfc_els_rcv_lcb - Process an unsolicited LCB 7470 * @vport: pointer to a host virtual N_Port data structure. 7471 * @cmdiocb: pointer to lpfc command iocb data structure. 7472 * @ndlp: pointer to a node-list data structure. 7473 * 7474 * This routine processes an unsolicited LCB(LINK CABLE BEACON) IOCB. 7475 * First, the payload of the unsolicited LCB is checked. 7476 * Then based on Subcommand beacon will either turn on or off. 7477 * 7478 * Return code 7479 * 0 - Sent the acc response 7480 * 1 - Sent the reject response. 
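 *
 * Note that on the success path the ACC is not transmitted here directly:
 * lpfc_sli4_set_beacon() issues a SET_BEACON_CONFIG mailbox command and the
 * ELS response (ACC or LS_RJT) is sent from its completion handler,
 * lpfc_els_lcb_rsp().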
7481 **/ 7482 static int 7483 lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7484 struct lpfc_nodelist *ndlp) 7485 { 7486 struct lpfc_hba *phba = vport->phba; 7487 struct lpfc_dmabuf *pcmd; 7488 uint8_t *lp; 7489 struct fc_lcb_request_frame *beacon; 7490 struct lpfc_lcb_context *lcb_context; 7491 u8 state, rjt_err = 0; 7492 struct ls_rjt stat; 7493 7494 pcmd = cmdiocb->cmd_dmabuf; 7495 lp = (uint8_t *)pcmd->virt; 7496 beacon = (struct fc_lcb_request_frame *)pcmd->virt; 7497 7498 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7499 "0192 ELS LCB Data x%x x%x x%x x%x sub x%x " 7500 "type x%x frequency %x duration x%x\n", 7501 lp[0], lp[1], lp[2], 7502 beacon->lcb_command, 7503 beacon->lcb_sub_command, 7504 beacon->lcb_type, 7505 beacon->lcb_frequency, 7506 be16_to_cpu(beacon->lcb_duration)); 7507 7508 if (beacon->lcb_sub_command != LPFC_LCB_ON && 7509 beacon->lcb_sub_command != LPFC_LCB_OFF) { 7510 rjt_err = LSRJT_CMD_UNSUPPORTED; 7511 goto rjt; 7512 } 7513 7514 if (phba->sli_rev < LPFC_SLI_REV4 || 7515 phba->hba_flag & HBA_FCOE_MODE || 7516 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 7517 LPFC_SLI_INTF_IF_TYPE_2)) { 7518 rjt_err = LSRJT_CMD_UNSUPPORTED; 7519 goto rjt; 7520 } 7521 7522 lcb_context = kmalloc(sizeof(*lcb_context), GFP_KERNEL); 7523 if (!lcb_context) { 7524 rjt_err = LSRJT_UNABLE_TPC; 7525 goto rjt; 7526 } 7527 7528 state = (beacon->lcb_sub_command == LPFC_LCB_ON) ? 1 : 0; 7529 lcb_context->sub_command = beacon->lcb_sub_command; 7530 lcb_context->capability = 0; 7531 lcb_context->type = beacon->lcb_type; 7532 lcb_context->frequency = beacon->lcb_frequency; 7533 lcb_context->duration = beacon->lcb_duration; 7534 lcb_context->ox_id = get_job_rcvoxid(phba, cmdiocb); 7535 lcb_context->rx_id = get_job_ulpcontext(phba, cmdiocb); 7536 lcb_context->ndlp = lpfc_nlp_get(ndlp); 7537 if (!lcb_context->ndlp) { 7538 rjt_err = LSRJT_UNABLE_TPC; 7539 goto rjt_free; 7540 } 7541 7542 if (lpfc_sli4_set_beacon(vport, lcb_context, state)) { 7543 lpfc_printf_vlog(ndlp->vport, KERN_ERR, LOG_TRACE_EVENT, 7544 "0193 failed to send mail box"); 7545 lpfc_nlp_put(ndlp); 7546 rjt_err = LSRJT_UNABLE_TPC; 7547 goto rjt_free; 7548 } 7549 return 0; 7550 7551 rjt_free: 7552 kfree(lcb_context); 7553 rjt: 7554 memset(&stat, 0, sizeof(stat)); 7555 stat.un.b.lsRjtRsnCode = rjt_err; 7556 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 7557 return 1; 7558 } 7559 7560 7561 /** 7562 * lpfc_els_flush_rscn - Clean up any rscn activities with a vport 7563 * @vport: pointer to a host virtual N_Port data structure. 7564 * 7565 * This routine cleans up any Registration State Change Notification 7566 * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the 7567 * @vport together with the host_lock is used to prevent multiple thread 7568 * trying to access the RSCN array on a same @vport at the same time. 
7569 **/ 7570 void 7571 lpfc_els_flush_rscn(struct lpfc_vport *vport) 7572 { 7573 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7574 struct lpfc_hba *phba = vport->phba; 7575 int i; 7576 7577 spin_lock_irq(shost->host_lock); 7578 if (vport->fc_rscn_flush) { 7579 /* Another thread is walking fc_rscn_id_list on this vport */ 7580 spin_unlock_irq(shost->host_lock); 7581 return; 7582 } 7583 /* Indicate we are walking lpfc_els_flush_rscn on this vport */ 7584 vport->fc_rscn_flush = 1; 7585 spin_unlock_irq(shost->host_lock); 7586 7587 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 7588 lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]); 7589 vport->fc_rscn_id_list[i] = NULL; 7590 } 7591 spin_lock_irq(shost->host_lock); 7592 vport->fc_rscn_id_cnt = 0; 7593 vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY); 7594 spin_unlock_irq(shost->host_lock); 7595 lpfc_can_disctmo(vport); 7596 /* Indicate we are done walking this fc_rscn_id_list */ 7597 vport->fc_rscn_flush = 0; 7598 } 7599 7600 /** 7601 * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did 7602 * @vport: pointer to a host virtual N_Port data structure. 7603 * @did: remote destination port identifier. 7604 * 7605 * This routine checks whether there is any pending Registration State 7606 * Configuration Notification (RSCN) to a @did on @vport. 7607 * 7608 * Return code 7609 * None zero - The @did matched with a pending rscn 7610 * 0 - not able to match @did with a pending rscn 7611 **/ 7612 int 7613 lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did) 7614 { 7615 D_ID ns_did; 7616 D_ID rscn_did; 7617 uint32_t *lp; 7618 uint32_t payload_len, i; 7619 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7620 7621 ns_did.un.word = did; 7622 7623 /* Never match fabric nodes for RSCNs */ 7624 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 7625 return 0; 7626 7627 /* If we are doing a FULL RSCN rediscovery, match everything */ 7628 if (vport->fc_flag & FC_RSCN_DISCOVERY) 7629 return did; 7630 7631 spin_lock_irq(shost->host_lock); 7632 if (vport->fc_rscn_flush) { 7633 /* Another thread is walking fc_rscn_id_list on this vport */ 7634 spin_unlock_irq(shost->host_lock); 7635 return 0; 7636 } 7637 /* Indicate we are walking fc_rscn_id_list on this vport */ 7638 vport->fc_rscn_flush = 1; 7639 spin_unlock_irq(shost->host_lock); 7640 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 7641 lp = vport->fc_rscn_id_list[i]->virt; 7642 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); 7643 payload_len -= sizeof(uint32_t); /* take off word 0 */ 7644 while (payload_len) { 7645 rscn_did.un.word = be32_to_cpu(*lp++); 7646 payload_len -= sizeof(uint32_t); 7647 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) { 7648 case RSCN_ADDRESS_FORMAT_PORT: 7649 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 7650 && (ns_did.un.b.area == rscn_did.un.b.area) 7651 && (ns_did.un.b.id == rscn_did.un.b.id)) 7652 goto return_did_out; 7653 break; 7654 case RSCN_ADDRESS_FORMAT_AREA: 7655 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 7656 && (ns_did.un.b.area == rscn_did.un.b.area)) 7657 goto return_did_out; 7658 break; 7659 case RSCN_ADDRESS_FORMAT_DOMAIN: 7660 if (ns_did.un.b.domain == rscn_did.un.b.domain) 7661 goto return_did_out; 7662 break; 7663 case RSCN_ADDRESS_FORMAT_FABRIC: 7664 goto return_did_out; 7665 } 7666 } 7667 } 7668 /* Indicate we are done with walking fc_rscn_id_list on this vport */ 7669 vport->fc_rscn_flush = 0; 7670 return 0; 7671 return_did_out: 7672 /* Indicate we are done with walking fc_rscn_id_list on this 
vport */ 7673 vport->fc_rscn_flush = 0; 7674 return did; 7675 } 7676 7677 /** 7678 * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn 7679 * @vport: pointer to a host virtual N_Port data structure. 7680 * 7681 * This routine sends recovery (NLP_EVT_DEVICE_RECOVERY) event to the 7682 * state machine for a @vport's nodes that are with pending RSCN (Registration 7683 * State Change Notification). 7684 * 7685 * Return code 7686 * 0 - Successful (currently alway return 0) 7687 **/ 7688 static int 7689 lpfc_rscn_recovery_check(struct lpfc_vport *vport) 7690 { 7691 struct lpfc_nodelist *ndlp = NULL, *n; 7692 7693 /* Move all affected nodes by pending RSCNs to NPR state. */ 7694 list_for_each_entry_safe(ndlp, n, &vport->fc_nodes, nlp_listp) { 7695 if ((ndlp->nlp_state == NLP_STE_UNUSED_NODE) || 7696 !lpfc_rscn_payload_check(vport, ndlp->nlp_DID)) 7697 continue; 7698 7699 /* NVME Target mode does not do RSCN Recovery. */ 7700 if (vport->phba->nvmet_support) 7701 continue; 7702 7703 /* If we are in the process of doing discovery on this 7704 * NPort, let it continue on its own. 7705 */ 7706 switch (ndlp->nlp_state) { 7707 case NLP_STE_PLOGI_ISSUE: 7708 case NLP_STE_ADISC_ISSUE: 7709 case NLP_STE_REG_LOGIN_ISSUE: 7710 case NLP_STE_PRLI_ISSUE: 7711 case NLP_STE_LOGO_ISSUE: 7712 continue; 7713 } 7714 7715 lpfc_disc_state_machine(vport, ndlp, NULL, 7716 NLP_EVT_DEVICE_RECOVERY); 7717 lpfc_cancel_retry_delay_tmo(vport, ndlp); 7718 } 7719 return 0; 7720 } 7721 7722 /** 7723 * lpfc_send_rscn_event - Send an RSCN event to management application 7724 * @vport: pointer to a host virtual N_Port data structure. 7725 * @cmdiocb: pointer to lpfc command iocb data structure. 7726 * 7727 * lpfc_send_rscn_event sends an RSCN netlink event to management 7728 * applications. 7729 */ 7730 static void 7731 lpfc_send_rscn_event(struct lpfc_vport *vport, 7732 struct lpfc_iocbq *cmdiocb) 7733 { 7734 struct lpfc_dmabuf *pcmd; 7735 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7736 uint32_t *payload_ptr; 7737 uint32_t payload_len; 7738 struct lpfc_rscn_event_header *rscn_event_data; 7739 7740 pcmd = cmdiocb->cmd_dmabuf; 7741 payload_ptr = (uint32_t *) pcmd->virt; 7742 payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK); 7743 7744 rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) + 7745 payload_len, GFP_KERNEL); 7746 if (!rscn_event_data) { 7747 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 7748 "0147 Failed to allocate memory for RSCN event\n"); 7749 return; 7750 } 7751 rscn_event_data->event_type = FC_REG_RSCN_EVENT; 7752 rscn_event_data->payload_length = payload_len; 7753 memcpy(rscn_event_data->rscn_payload, payload_ptr, 7754 payload_len); 7755 7756 fc_host_post_vendor_event(shost, 7757 fc_get_event_number(), 7758 sizeof(struct lpfc_rscn_event_header) + payload_len, 7759 (char *)rscn_event_data, 7760 LPFC_NL_VENDOR_ID); 7761 7762 kfree(rscn_event_data); 7763 } 7764 7765 /** 7766 * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb 7767 * @vport: pointer to a host virtual N_Port data structure. 7768 * @cmdiocb: pointer to lpfc command iocb data structure. 7769 * @ndlp: pointer to a node-list data structure. 7770 * 7771 * This routine processes an unsolicited RSCN (Registration State Change 7772 * Notification) IOCB. First, the payload of the unsolicited RSCN is walked 7773 * to invoke fc_host_post_event() routine to the FC transport layer. 
If the 7774 * discover state machine is about to begin discovery, it just accepts the 7775 * RSCN and the discovery process will satisfy the RSCN. If this RSCN only 7776 * contains N_Port IDs for other vports on this HBA, it just accepts the 7777 * RSCN and ignore processing it. If the state machine is in the recovery 7778 * state, the fc_rscn_id_list of this @vport is walked and the 7779 * lpfc_rscn_recovery_check() routine is invoked to send recovery event for 7780 * all nodes that match RSCN payload. Otherwise, the lpfc_els_handle_rscn() 7781 * routine is invoked to handle the RSCN event. 7782 * 7783 * Return code 7784 * 0 - Just sent the acc response 7785 * 1 - Sent the acc response and waited for name server completion 7786 **/ 7787 static int 7788 lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7789 struct lpfc_nodelist *ndlp) 7790 { 7791 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7792 struct lpfc_hba *phba = vport->phba; 7793 struct lpfc_dmabuf *pcmd; 7794 uint32_t *lp, *datap; 7795 uint32_t payload_len, length, nportid, *cmd; 7796 int rscn_cnt; 7797 int rscn_id = 0, hba_id = 0; 7798 int i, tmo; 7799 7800 pcmd = cmdiocb->cmd_dmabuf; 7801 lp = (uint32_t *) pcmd->virt; 7802 7803 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); 7804 payload_len -= sizeof(uint32_t); /* take off word 0 */ 7805 /* RSCN received */ 7806 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 7807 "0214 RSCN received Data: x%x x%x x%x x%x\n", 7808 vport->fc_flag, payload_len, *lp, 7809 vport->fc_rscn_id_cnt); 7810 7811 /* Send an RSCN event to the management application */ 7812 lpfc_send_rscn_event(vport, cmdiocb); 7813 7814 for (i = 0; i < payload_len/sizeof(uint32_t); i++) 7815 fc_host_post_event(shost, fc_get_event_number(), 7816 FCH_EVT_RSCN, lp[i]); 7817 7818 /* Check if RSCN is coming from a direct-connected remote NPort */ 7819 if (vport->fc_flag & FC_PT2PT) { 7820 /* If so, just ACC it, no other action needed for now */ 7821 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7822 "2024 pt2pt RSCN %08x Data: x%x x%x\n", 7823 *lp, vport->fc_flag, payload_len); 7824 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 7825 7826 /* Check to see if we need to NVME rescan this target 7827 * remoteport. 7828 */ 7829 if (ndlp->nlp_fc4_type & NLP_FC4_NVME && 7830 ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY)) 7831 lpfc_nvme_rescan_port(vport, ndlp); 7832 return 0; 7833 } 7834 7835 /* If we are about to begin discovery, just ACC the RSCN. 7836 * Discovery processing will satisfy it. 7837 */ 7838 if (vport->port_state <= LPFC_NS_QRY) { 7839 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 7840 "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x", 7841 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 7842 7843 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 7844 return 0; 7845 } 7846 7847 /* If this RSCN just contains NPortIDs for other vports on this HBA, 7848 * just ACC and ignore it. 
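 * Each remaining payload word carries one affected address in its low
 * 24 bits, so the loop below masks each big-endian entry with Mask_DID
 * (e.g. 0x00651211 names domain 0x65, area 0x12, port 0x11) and counts
 * how many of them resolve to vports local to this HBA.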
7849 */ 7850 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 7851 !(vport->cfg_peer_port_login)) { 7852 i = payload_len; 7853 datap = lp; 7854 while (i > 0) { 7855 nportid = *datap++; 7856 nportid = ((be32_to_cpu(nportid)) & Mask_DID); 7857 i -= sizeof(uint32_t); 7858 rscn_id++; 7859 if (lpfc_find_vport_by_did(phba, nportid)) 7860 hba_id++; 7861 } 7862 if (rscn_id == hba_id) { 7863 /* ALL NPortIDs in RSCN are on HBA */ 7864 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 7865 "0219 Ignore RSCN " 7866 "Data: x%x x%x x%x x%x\n", 7867 vport->fc_flag, payload_len, 7868 *lp, vport->fc_rscn_id_cnt); 7869 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 7870 "RCV RSCN vport: did:x%x/ste:x%x flg:x%x", 7871 ndlp->nlp_DID, vport->port_state, 7872 ndlp->nlp_flag); 7873 7874 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, 7875 ndlp, NULL); 7876 /* Restart disctmo if its already running */ 7877 if (vport->fc_flag & FC_DISC_TMO) { 7878 tmo = ((phba->fc_ratov * 3) + 3); 7879 mod_timer(&vport->fc_disctmo, 7880 jiffies + 7881 msecs_to_jiffies(1000 * tmo)); 7882 } 7883 return 0; 7884 } 7885 } 7886 7887 spin_lock_irq(shost->host_lock); 7888 if (vport->fc_rscn_flush) { 7889 /* Another thread is walking fc_rscn_id_list on this vport */ 7890 vport->fc_flag |= FC_RSCN_DISCOVERY; 7891 spin_unlock_irq(shost->host_lock); 7892 /* Send back ACC */ 7893 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 7894 return 0; 7895 } 7896 /* Indicate we are walking fc_rscn_id_list on this vport */ 7897 vport->fc_rscn_flush = 1; 7898 spin_unlock_irq(shost->host_lock); 7899 /* Get the array count after successfully have the token */ 7900 rscn_cnt = vport->fc_rscn_id_cnt; 7901 /* If we are already processing an RSCN, save the received 7902 * RSCN payload buffer, cmdiocb->cmd_dmabuf to process later. 7903 */ 7904 if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) { 7905 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 7906 "RCV RSCN defer: did:x%x/ste:x%x flg:x%x", 7907 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 7908 7909 spin_lock_irq(shost->host_lock); 7910 vport->fc_flag |= FC_RSCN_DEFERRED; 7911 7912 /* Restart disctmo if its already running */ 7913 if (vport->fc_flag & FC_DISC_TMO) { 7914 tmo = ((phba->fc_ratov * 3) + 3); 7915 mod_timer(&vport->fc_disctmo, 7916 jiffies + msecs_to_jiffies(1000 * tmo)); 7917 } 7918 if ((rscn_cnt < FC_MAX_HOLD_RSCN) && 7919 !(vport->fc_flag & FC_RSCN_DISCOVERY)) { 7920 vport->fc_flag |= FC_RSCN_MODE; 7921 vport->fc_flag &= ~FC_RSCN_MEMENTO; 7922 spin_unlock_irq(shost->host_lock); 7923 if (rscn_cnt) { 7924 cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt; 7925 length = be32_to_cpu(*cmd & ~ELS_CMD_MASK); 7926 } 7927 if ((rscn_cnt) && 7928 (payload_len + length <= LPFC_BPL_SIZE)) { 7929 *cmd &= ELS_CMD_MASK; 7930 *cmd |= cpu_to_be32(payload_len + length); 7931 memcpy(((uint8_t *)cmd) + length, lp, 7932 payload_len); 7933 } else { 7934 vport->fc_rscn_id_list[rscn_cnt] = pcmd; 7935 vport->fc_rscn_id_cnt++; 7936 /* If we zero, cmdiocb->cmd_dmabuf, the calling 7937 * routine will not try to free it. 
7938 */ 7939 cmdiocb->cmd_dmabuf = NULL; 7940 } 7941 /* Deferred RSCN */ 7942 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 7943 "0235 Deferred RSCN " 7944 "Data: x%x x%x x%x\n", 7945 vport->fc_rscn_id_cnt, vport->fc_flag, 7946 vport->port_state); 7947 } else { 7948 vport->fc_flag |= FC_RSCN_DISCOVERY; 7949 spin_unlock_irq(shost->host_lock); 7950 /* ReDiscovery RSCN */ 7951 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 7952 "0234 ReDiscovery RSCN " 7953 "Data: x%x x%x x%x\n", 7954 vport->fc_rscn_id_cnt, vport->fc_flag, 7955 vport->port_state); 7956 } 7957 /* Indicate we are done walking fc_rscn_id_list on this vport */ 7958 vport->fc_rscn_flush = 0; 7959 /* Send back ACC */ 7960 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 7961 /* send RECOVERY event for ALL nodes that match RSCN payload */ 7962 lpfc_rscn_recovery_check(vport); 7963 return 0; 7964 } 7965 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 7966 "RCV RSCN: did:x%x/ste:x%x flg:x%x", 7967 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 7968 7969 spin_lock_irq(shost->host_lock); 7970 vport->fc_flag |= FC_RSCN_MODE; 7971 vport->fc_flag &= ~FC_RSCN_MEMENTO; 7972 spin_unlock_irq(shost->host_lock); 7973 vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd; 7974 /* Indicate we are done walking fc_rscn_id_list on this vport */ 7975 vport->fc_rscn_flush = 0; 7976 /* 7977 * If we zero, cmdiocb->cmd_dmabuf, the calling routine will 7978 * not try to free it. 7979 */ 7980 cmdiocb->cmd_dmabuf = NULL; 7981 lpfc_set_disctmo(vport); 7982 /* Send back ACC */ 7983 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 7984 /* send RECOVERY event for ALL nodes that match RSCN payload */ 7985 lpfc_rscn_recovery_check(vport); 7986 return lpfc_els_handle_rscn(vport); 7987 } 7988 7989 /** 7990 * lpfc_els_handle_rscn - Handle rscn for a vport 7991 * @vport: pointer to a host virtual N_Port data structure. 7992 * 7993 * This routine handles the Registration State Configuration Notification 7994 * (RSCN) for a @vport. If login to NameServer does not exist, a new ndlp shall 7995 * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise, 7996 * if the ndlp to NameServer exists, a Common Transport (CT) command to the 7997 * NameServer shall be issued. If CT command to the NameServer fails to be 7998 * issued, the lpfc_els_flush_rscn() routine shall be invoked to clean up any 7999 * RSCN activities with the @vport. 8000 * 8001 * Return code 8002 * 0 - Cleaned up rscn on the @vport 8003 * 1 - Wait for plogi to name server before proceed 8004 **/ 8005 int 8006 lpfc_els_handle_rscn(struct lpfc_vport *vport) 8007 { 8008 struct lpfc_nodelist *ndlp; 8009 struct lpfc_hba *phba = vport->phba; 8010 8011 /* Ignore RSCN if the port is being torn down. */ 8012 if (vport->load_flag & FC_UNLOADING) { 8013 lpfc_els_flush_rscn(vport); 8014 return 0; 8015 } 8016 8017 /* Start timer for RSCN processing */ 8018 lpfc_set_disctmo(vport); 8019 8020 /* RSCN processed */ 8021 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 8022 "0215 RSCN processed Data: x%x x%x x%x x%x x%x x%x\n", 8023 vport->fc_flag, 0, vport->fc_rscn_id_cnt, 8024 vport->port_state, vport->num_disc_nodes, 8025 vport->gidft_inp); 8026 8027 /* To process RSCN, first compare RSCN data with NameServer */ 8028 vport->fc_ns_retry = 0; 8029 vport->num_disc_nodes = 0; 8030 8031 ndlp = lpfc_findnode_did(vport, NameServer_DID); 8032 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { 8033 /* Good ndlp, issue CT Request to NameServer. 
Need to
8034          * know how many gidfts were issued. If none, then just
8035          * flush the RSCN. Otherwise, the outstanding requests
8036          * need to complete.
8037          */
8038         if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_FT) {
8039             if (lpfc_issue_gidft(vport) > 0)
8040                 return 1;
8041         } else if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_PT) {
8042             if (lpfc_issue_gidpt(vport) > 0)
8043                 return 1;
8044         } else {
8045             return 1;
8046         }
8047     } else {
8048         /* Nameserver login in question. Revalidate. */
8049         if (ndlp) {
8050             ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
8051             lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
8052         } else {
8053             ndlp = lpfc_nlp_init(vport, NameServer_DID);
8054             if (!ndlp) {
8055                 lpfc_els_flush_rscn(vport);
8056                 return 0;
8057             }
8058             ndlp->nlp_prev_state = ndlp->nlp_state;
8059             lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
8060         }
8061         ndlp->nlp_type |= NLP_FABRIC;
8062         lpfc_issue_els_plogi(vport, NameServer_DID, 0);
8063         /* Wait for NameServer login cmpl before we can
8064          * continue
8065          */
8066         return 1;
8067     }
8068 
8069     lpfc_els_flush_rscn(vport);
8070     return 0;
8071 }
8072 
8073 /**
8074  * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb
8075  * @vport: pointer to a host virtual N_Port data structure.
8076  * @cmdiocb: pointer to lpfc command iocb data structure.
8077  * @ndlp: pointer to a node-list data structure.
8078  *
8079  * This routine processes Fabric Login (FLOGI) IOCB received as an ELS
8080  * unsolicited event. An unsolicited FLOGI can be received in a point-to-
8081  * point topology. As an unsolicited FLOGI should not be received in a loop
8082  * mode, any unsolicited FLOGI received in loop mode shall be ignored. The
8083  * lpfc_check_sparm() routine is invoked to check the parameters in the
8084  * unsolicited FLOGI. If parameter validation fails, the routine
8085  * lpfc_els_rsp_reject() shall be called with reject reason code set to
8086  * LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the
8087  * FLOGI shall be compared with the Port WWN of the @vport to determine who
8088  * will initiate PLOGI. The party with the higher lexicographical value shall
8089  * have higher priority (as the winning port) and will initiate PLOGI and
8090  * communicate Port_IDs (Addresses) for both nodes in PLOGI. The result
8091  * of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI
8092  * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI.
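 * In practice the memcmp() of the two Port WWNs yields three cases: a
 * higher local WWPN sets FC_PT2PT_PLOGI and the local port assigns itself
 * PT2PT_LocalID, a lower local WWPN leaves PLOGI initiation to the remote
 * port and PT2PT_RemoteID is used, and an equal WWPN means the port is
 * seeing its own FLOGI, which is treated as an external loopback plug.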
8093 * 8094 * Return code 8095 * 0 - Successfully processed the unsolicited flogi 8096 * 1 - Failed to process the unsolicited flogi 8097 **/ 8098 static int 8099 lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8100 struct lpfc_nodelist *ndlp) 8101 { 8102 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 8103 struct lpfc_hba *phba = vport->phba; 8104 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf; 8105 uint32_t *lp = (uint32_t *) pcmd->virt; 8106 union lpfc_wqe128 *wqe = &cmdiocb->wqe; 8107 struct serv_parm *sp; 8108 LPFC_MBOXQ_t *mbox; 8109 uint32_t cmd, did; 8110 int rc; 8111 uint32_t fc_flag = 0; 8112 uint32_t port_state = 0; 8113 8114 /* Clear external loopback plug detected flag */ 8115 phba->link_flag &= ~LS_EXTERNAL_LOOPBACK; 8116 8117 cmd = *lp++; 8118 sp = (struct serv_parm *) lp; 8119 8120 /* FLOGI received */ 8121 8122 lpfc_set_disctmo(vport); 8123 8124 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 8125 /* We should never receive a FLOGI in loop mode, ignore it */ 8126 did = bf_get(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest); 8127 8128 /* An FLOGI ELS command <elsCmd> was received from DID <did> in 8129 Loop Mode */ 8130 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 8131 "0113 An FLOGI ELS command x%x was " 8132 "received from DID x%x in Loop Mode\n", 8133 cmd, did); 8134 return 1; 8135 } 8136 8137 (void) lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1); 8138 8139 /* 8140 * If our portname is greater than the remote portname, 8141 * then we initiate Nport login. 8142 */ 8143 8144 rc = memcmp(&vport->fc_portname, &sp->portName, 8145 sizeof(struct lpfc_name)); 8146 8147 if (!rc) { 8148 if (phba->sli_rev < LPFC_SLI_REV4) { 8149 mbox = mempool_alloc(phba->mbox_mem_pool, 8150 GFP_KERNEL); 8151 if (!mbox) 8152 return 1; 8153 lpfc_linkdown(phba); 8154 lpfc_init_link(phba, mbox, 8155 phba->cfg_topology, 8156 phba->cfg_link_speed); 8157 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; 8158 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 8159 mbox->vport = vport; 8160 rc = lpfc_sli_issue_mbox(phba, mbox, 8161 MBX_NOWAIT); 8162 lpfc_set_loopback_flag(phba); 8163 if (rc == MBX_NOT_FINISHED) 8164 mempool_free(mbox, phba->mbox_mem_pool); 8165 return 1; 8166 } 8167 8168 /* External loopback plug insertion detected */ 8169 phba->link_flag |= LS_EXTERNAL_LOOPBACK; 8170 8171 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_LIBDFC, 8172 "1119 External Loopback plug detected\n"); 8173 8174 /* abort the flogi coming back to ourselves 8175 * due to external loopback on the port. 8176 */ 8177 lpfc_els_abort_flogi(phba); 8178 return 0; 8179 8180 } else if (rc > 0) { /* greater than */ 8181 spin_lock_irq(shost->host_lock); 8182 vport->fc_flag |= FC_PT2PT_PLOGI; 8183 spin_unlock_irq(shost->host_lock); 8184 8185 /* If we have the high WWPN we can assign our own 8186 * myDID; otherwise, we have to WAIT for a PLOGI 8187 * from the remote NPort to find out what it 8188 * will be. 8189 */ 8190 vport->fc_myDID = PT2PT_LocalID; 8191 } else { 8192 vport->fc_myDID = PT2PT_RemoteID; 8193 } 8194 8195 /* 8196 * The vport state should go to LPFC_FLOGI only 8197 * AFTER we issue a FLOGI, not receive one. 8198 */ 8199 spin_lock_irq(shost->host_lock); 8200 fc_flag = vport->fc_flag; 8201 port_state = vport->port_state; 8202 vport->fc_flag |= FC_PT2PT; 8203 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 8204 8205 /* Acking an unsol FLOGI. Count 1 for link bounce 8206 * work-around. 
8207 */ 8208 vport->rcv_flogi_cnt++; 8209 spin_unlock_irq(shost->host_lock); 8210 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8211 "3311 Rcv Flogi PS x%x new PS x%x " 8212 "fc_flag x%x new fc_flag x%x\n", 8213 port_state, vport->port_state, 8214 fc_flag, vport->fc_flag); 8215 8216 /* 8217 * We temporarily set fc_myDID to make it look like we are 8218 * a Fabric. This is done just so we end up with the right 8219 * did / sid on the FLOGI ACC rsp. 8220 */ 8221 did = vport->fc_myDID; 8222 vport->fc_myDID = Fabric_DID; 8223 8224 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); 8225 8226 /* Defer ACC response until AFTER we issue a FLOGI */ 8227 if (!(phba->hba_flag & HBA_FLOGI_ISSUED)) { 8228 phba->defer_flogi_acc_rx_id = bf_get(wqe_ctxt_tag, 8229 &wqe->xmit_els_rsp.wqe_com); 8230 phba->defer_flogi_acc_ox_id = bf_get(wqe_rcvoxid, 8231 &wqe->xmit_els_rsp.wqe_com); 8232 8233 vport->fc_myDID = did; 8234 8235 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8236 "3344 Deferring FLOGI ACC: rx_id: x%x," 8237 " ox_id: x%x, hba_flag x%x\n", 8238 phba->defer_flogi_acc_rx_id, 8239 phba->defer_flogi_acc_ox_id, phba->hba_flag); 8240 8241 phba->defer_flogi_acc_flag = true; 8242 8243 return 0; 8244 } 8245 8246 /* Send back ACC */ 8247 lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, cmdiocb, ndlp, NULL); 8248 8249 /* Now lets put fc_myDID back to what its supposed to be */ 8250 vport->fc_myDID = did; 8251 8252 return 0; 8253 } 8254 8255 /** 8256 * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb 8257 * @vport: pointer to a host virtual N_Port data structure. 8258 * @cmdiocb: pointer to lpfc command iocb data structure. 8259 * @ndlp: pointer to a node-list data structure. 8260 * 8261 * This routine processes Request Node Identification Data (RNID) IOCB 8262 * received as an ELS unsolicited event. Only when the RNID specified format 8263 * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data) 8264 * present, this routine will invoke the lpfc_els_rsp_rnid_acc() routine to 8265 * Accept (ACC) the RNID ELS command. All the other RNID formats are 8266 * rejected by invoking the lpfc_els_rsp_reject() routine. 8267 * 8268 * Return code 8269 * 0 - Successfully processed rnid iocb (currently always return 0) 8270 **/ 8271 static int 8272 lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8273 struct lpfc_nodelist *ndlp) 8274 { 8275 struct lpfc_dmabuf *pcmd; 8276 uint32_t *lp; 8277 RNID *rn; 8278 struct ls_rjt stat; 8279 8280 pcmd = cmdiocb->cmd_dmabuf; 8281 lp = (uint32_t *) pcmd->virt; 8282 8283 lp++; 8284 rn = (RNID *) lp; 8285 8286 /* RNID received */ 8287 8288 switch (rn->Format) { 8289 case 0: 8290 case RNID_TOPOLOGY_DISC: 8291 /* Send back ACC */ 8292 lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp); 8293 break; 8294 default: 8295 /* Reject this request because format not supported */ 8296 stat.un.b.lsRjtRsvd0 = 0; 8297 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8298 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8299 stat.un.b.vendorUnique = 0; 8300 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 8301 NULL); 8302 } 8303 return 0; 8304 } 8305 8306 /** 8307 * lpfc_els_rcv_echo - Process an unsolicited echo iocb 8308 * @vport: pointer to a host virtual N_Port data structure. 8309 * @cmdiocb: pointer to lpfc command iocb data structure. 8310 * @ndlp: pointer to a node-list data structure. 
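 *
 * This routine processes an ECHO IOCB received as an ELS unsolicited event:
 * it skips the 4-byte ECHO command word and passes the remaining payload to
 * lpfc_els_rsp_echo_acc() so the same data is returned in the ACC response.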
8311 * 8312 * Return code 8313 * 0 - Successfully processed echo iocb (currently always return 0) 8314 **/ 8315 static int 8316 lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8317 struct lpfc_nodelist *ndlp) 8318 { 8319 uint8_t *pcmd; 8320 8321 pcmd = (uint8_t *)cmdiocb->cmd_dmabuf->virt; 8322 8323 /* skip over first word of echo command to find echo data */ 8324 pcmd += sizeof(uint32_t); 8325 8326 lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp); 8327 return 0; 8328 } 8329 8330 /** 8331 * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb 8332 * @vport: pointer to a host virtual N_Port data structure. 8333 * @cmdiocb: pointer to lpfc command iocb data structure. 8334 * @ndlp: pointer to a node-list data structure. 8335 * 8336 * This routine processes a Link Incident Report Registration(LIRR) IOCB 8337 * received as an ELS unsolicited event. Currently, this function just invokes 8338 * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally. 8339 * 8340 * Return code 8341 * 0 - Successfully processed lirr iocb (currently always return 0) 8342 **/ 8343 static int 8344 lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8345 struct lpfc_nodelist *ndlp) 8346 { 8347 struct ls_rjt stat; 8348 8349 /* For now, unconditionally reject this command */ 8350 stat.un.b.lsRjtRsvd0 = 0; 8351 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8352 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8353 stat.un.b.vendorUnique = 0; 8354 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 8355 return 0; 8356 } 8357 8358 /** 8359 * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb 8360 * @vport: pointer to a host virtual N_Port data structure. 8361 * @cmdiocb: pointer to lpfc command iocb data structure. 8362 * @ndlp: pointer to a node-list data structure. 8363 * 8364 * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB 8365 * received as an ELS unsolicited event. A request to RRQ shall only 8366 * be accepted if the Originator Nx_Port N_Port_ID or the Responder 8367 * Nx_Port N_Port_ID of the target Exchange is the same as the 8368 * N_Port_ID of the Nx_Port that makes the request. If the RRQ is 8369 * not accepted, an LS_RJT with reason code "Unable to perform 8370 * command request" and reason code explanation "Invalid Originator 8371 * S_ID" shall be returned. For now, we just unconditionally accept 8372 * RRQ from the target. 8373 **/ 8374 static void 8375 lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8376 struct lpfc_nodelist *ndlp) 8377 { 8378 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8379 if (vport->phba->sli_rev == LPFC_SLI_REV4) 8380 lpfc_els_clear_rrq(vport, cmdiocb, ndlp); 8381 } 8382 8383 /** 8384 * lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd 8385 * @phba: pointer to lpfc hba data structure. 8386 * @pmb: pointer to the driver internal queue element for mailbox command. 8387 * 8388 * This routine is the completion callback function for the MBX_READ_LNK_STAT 8389 * mailbox command. This callback function is to actually send the Accept 8390 * (ACC) response to a Read Link Status (RLS) unsolicited IOCB event. It 8391 * collects the link statistics from the completion of the MBX_READ_LNK_STAT 8392 * mailbox command, constructs the RLS response with the link statistics 8393 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC 8394 * response to the RLS. 
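 * The exchange identifiers are carried through the mailbox context: the
 * requester packs them as (ox_id << 16 | rx_id) into pmb->ctx_buf, and this
 * completion recovers rx_id from the low 16 bits and ox_id from the next 16
 * bits before stamping them into the ACC WQE or IOCB.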
8395 * 8396 * Note that the ndlp reference count will be incremented by 1 for holding the 8397 * ndlp and the reference to ndlp will be stored into the ndlp field of 8398 * the IOCB for the completion callback function to the RLS Accept Response 8399 * ELS IOCB command. 8400 * 8401 **/ 8402 static void 8403 lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 8404 { 8405 int rc = 0; 8406 MAILBOX_t *mb; 8407 IOCB_t *icmd; 8408 union lpfc_wqe128 *wqe; 8409 struct RLS_RSP *rls_rsp; 8410 uint8_t *pcmd; 8411 struct lpfc_iocbq *elsiocb; 8412 struct lpfc_nodelist *ndlp; 8413 uint16_t oxid; 8414 uint16_t rxid; 8415 uint32_t cmdsize; 8416 u32 ulp_context; 8417 8418 mb = &pmb->u.mb; 8419 8420 ndlp = pmb->ctx_ndlp; 8421 rxid = (uint16_t)((unsigned long)(pmb->ctx_buf) & 0xffff); 8422 oxid = (uint16_t)(((unsigned long)(pmb->ctx_buf) >> 16) & 0xffff); 8423 pmb->ctx_buf = NULL; 8424 pmb->ctx_ndlp = NULL; 8425 8426 if (mb->mbxStatus) { 8427 mempool_free(pmb, phba->mbox_mem_pool); 8428 return; 8429 } 8430 8431 cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t); 8432 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 8433 lpfc_max_els_tries, ndlp, 8434 ndlp->nlp_DID, ELS_CMD_ACC); 8435 8436 /* Decrement the ndlp reference count from previous mbox command */ 8437 lpfc_nlp_put(ndlp); 8438 8439 if (!elsiocb) { 8440 mempool_free(pmb, phba->mbox_mem_pool); 8441 return; 8442 } 8443 8444 ulp_context = get_job_ulpcontext(phba, elsiocb); 8445 if (phba->sli_rev == LPFC_SLI_REV4) { 8446 wqe = &elsiocb->wqe; 8447 /* Xri / rx_id */ 8448 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, rxid); 8449 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, oxid); 8450 } else { 8451 icmd = &elsiocb->iocb; 8452 icmd->ulpContext = rxid; 8453 icmd->unsli3.rcvsli3.ox_id = oxid; 8454 } 8455 8456 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 8457 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 8458 pcmd += sizeof(uint32_t); /* Skip past command */ 8459 rls_rsp = (struct RLS_RSP *)pcmd; 8460 8461 rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt); 8462 rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt); 8463 rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt); 8464 rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt); 8465 rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord); 8466 rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt); 8467 mempool_free(pmb, phba->mbox_mem_pool); 8468 /* Xmit ELS RLS ACC response tag <ulpIoTag> */ 8469 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 8470 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, " 8471 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 8472 elsiocb->iotag, ulp_context, 8473 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 8474 ndlp->nlp_rpi); 8475 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 8476 phba->fc_stat.elsXmitACC++; 8477 elsiocb->ndlp = lpfc_nlp_get(ndlp); 8478 if (!elsiocb->ndlp) { 8479 lpfc_els_free_iocb(phba, elsiocb); 8480 return; 8481 } 8482 8483 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 8484 if (rc == IOCB_ERROR) { 8485 lpfc_els_free_iocb(phba, elsiocb); 8486 lpfc_nlp_put(ndlp); 8487 } 8488 return; 8489 } 8490 8491 /** 8492 * lpfc_els_rcv_rls - Process an unsolicited rls iocb 8493 * @vport: pointer to a host virtual N_Port data structure. 8494 * @cmdiocb: pointer to lpfc command iocb data structure. 8495 * @ndlp: pointer to a node-list data structure. 8496 * 8497 * This routine processes Read Link Status (RLS) IOCB received as an 8498 * ELS unsolicited event. 
It first checks the remote port state. If the
8499 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
8500 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
8501 * response. Otherwise, it issues the MBX_READ_LNK_STAT mailbox command
8502 * for reading the HBA link statistics. The callback function,
8503 * lpfc_els_rsp_rls_acc(), set on the MBX_READ_LNK_STAT mailbox command,
8504 * actually sends out the RLS Accept (ACC) response.
8505 *
8506 * Return codes
8507 * 0 - Successfully processed rls iocb (currently always return 0)
8508 **/
8509 static int
8510 lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
8511 struct lpfc_nodelist *ndlp)
8512 {
8513 struct lpfc_hba *phba = vport->phba;
8514 LPFC_MBOXQ_t *mbox;
8515 struct ls_rjt stat;
8516 u32 ctx = get_job_ulpcontext(phba, cmdiocb);
8517 u32 ox_id = get_job_rcvoxid(phba, cmdiocb);
8518
8519 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
8520 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
8521 /* reject the unsolicited RLS request and done with it */
8522 goto reject_out;
8523
8524 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
8525 if (mbox) {
8526 lpfc_read_lnk_stat(phba, mbox);
8527 mbox->ctx_buf = (void *)((unsigned long)
8528 (ox_id << 16 | ctx));
8529 mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
8530 if (!mbox->ctx_ndlp)
8531 goto node_err;
8532 mbox->vport = vport;
8533 mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
8534 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
8535 != MBX_NOT_FINISHED)
8536 /* Mbox completion will send ELS Response */
8537 return 0;
8538 /* Decrement reference count used for the failed mbox
8539 * command.
8540 */
8541 lpfc_nlp_put(ndlp);
8542 node_err:
8543 mempool_free(mbox, phba->mbox_mem_pool);
8544 }
8545 reject_out:
8546 /* issue rejection response */
8547 stat.un.b.lsRjtRsvd0 = 0;
8548 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
8549 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
8550 stat.un.b.vendorUnique = 0;
8551 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
8552 return 0;
8553 }
8554
8555 /**
8556 * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb
8557 * @vport: pointer to a host virtual N_Port data structure.
8558 * @cmdiocb: pointer to lpfc command iocb data structure.
8559 * @ndlp: pointer to a node-list data structure.
8560 *
8561 * This routine processes Read Timeout Value (RTV) IOCB received as an
8562 * ELS unsolicited event. It first checks the remote port state. If the
8563 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
8564 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
8565 * response. Otherwise, it sends the Accept (ACC) response to a Read Timeout
8566 * Value (RTV) unsolicited IOCB event.
8567 *
8568 * Note that the ndlp reference count will be incremented by 1 for holding the
8569 * ndlp and the reference to ndlp will be stored into the ndlp field of
8570 * the IOCB for the completion callback function to the RTV Accept Response
8571 * ELS IOCB command.
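 * The returned R_A_TOV is reported in milliseconds (the driver's fc_ratov
 * is kept in seconds), and E_D_TOV is returned as configured, together
 * with its resolution bit, in the RTV payload.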
8572 *
8573 * Return codes
8574 * 0 - Successfully processed rtv iocb (currently always return 0)
8575 **/
8576 static int
8577 lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
8578 struct lpfc_nodelist *ndlp)
8579 {
8580 int rc = 0;
8581 IOCB_t *icmd;
8582 union lpfc_wqe128 *wqe;
8583 struct lpfc_hba *phba = vport->phba;
8584 struct ls_rjt stat;
8585 struct RTV_RSP *rtv_rsp;
8586 uint8_t *pcmd;
8587 struct lpfc_iocbq *elsiocb;
8588 uint32_t cmdsize;
8589 u32 ulp_context;
8590
8591 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
8592 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
8593 /* reject the unsolicited RTV request and done with it */
8594 goto reject_out;
8595
8596 cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t);
8597 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
8598 lpfc_max_els_tries, ndlp,
8599 ndlp->nlp_DID, ELS_CMD_ACC);
8600
8601 if (!elsiocb)
8602 return 1;
8603
8604 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
8605 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
8606 pcmd += sizeof(uint32_t); /* Skip past command */
8607
8608 ulp_context = get_job_ulpcontext(phba, elsiocb);
8609 /* use the command's xri in the response */
8610 if (phba->sli_rev == LPFC_SLI_REV4) {
8611 wqe = &elsiocb->wqe;
8612 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
8613 get_job_ulpcontext(phba, cmdiocb));
8614 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
8615 get_job_rcvoxid(phba, cmdiocb));
8616 } else {
8617 icmd = &elsiocb->iocb;
8618 icmd->ulpContext = get_job_ulpcontext(phba, cmdiocb);
8619 icmd->unsli3.rcvsli3.ox_id = get_job_rcvoxid(phba, cmdiocb);
8620 }
8621
8622 rtv_rsp = (struct RTV_RSP *)pcmd;
8623
8624 /* populate RTV payload */
8625 rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */
8626 rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov);
8627 bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0);
8628 bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */
8629 rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov);
8630
8631 /* Xmit ELS RTV ACC response tag <ulpIoTag> */
8632 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
8633 "2875 Xmit ELS RTV ACC response tag x%x xri x%x, "
8634 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, "
8635 "Data: x%x x%x x%x\n",
8636 elsiocb->iotag, ulp_context,
8637 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
8638 ndlp->nlp_rpi,
8639 rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov);
8640 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
8641 phba->fc_stat.elsXmitACC++;
8642 elsiocb->ndlp = lpfc_nlp_get(ndlp);
8643 if (!elsiocb->ndlp) {
8644 lpfc_els_free_iocb(phba, elsiocb);
8645 return 0;
8646 }
8647
8648 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
8649 if (rc == IOCB_ERROR) {
8650 lpfc_els_free_iocb(phba, elsiocb);
8651 lpfc_nlp_put(ndlp);
8652 }
8653 return 0;
8654
8655 reject_out:
8656 /* issue rejection response */
8657 stat.un.b.lsRjtRsvd0 = 0;
8658 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
8659 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
8660 stat.un.b.vendorUnique = 0;
8661 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
8662 return 0;
8663 }
8664
8665 /* lpfc_issue_els_rrq - Issue an ELS RRQ command
8666 * @vport: pointer to a host virtual N_Port data structure.
8667 * @ndlp: pointer to a node-list data structure.
8668 * @did: DID of the target.
8669 * @rrq: Pointer to the rrq struct.
8670 *
8671 * Build an ELS RRQ command and send it to the target. If the issue_iocb is
8672 * successful, the completion handler will clear the RRQ.
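 * The RRQ payload is built from the @rrq node: the ox_id is taken from
 * phba->sli4_hba.xri_ids[rrq->xritag], the rx_id from rrq->rxid, and the
 * DID word is set to the vport's own fc_myDID.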
8673 * 8674 * Return codes 8675 * 0 - Successfully sent rrq els iocb. 8676 * 1 - Failed to send rrq els iocb. 8677 **/ 8678 static int 8679 lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 8680 uint32_t did, struct lpfc_node_rrq *rrq) 8681 { 8682 struct lpfc_hba *phba = vport->phba; 8683 struct RRQ *els_rrq; 8684 struct lpfc_iocbq *elsiocb; 8685 uint8_t *pcmd; 8686 uint16_t cmdsize; 8687 int ret; 8688 8689 if (!ndlp) 8690 return 1; 8691 8692 /* If ndlp is not NULL, we will bump the reference count on it */ 8693 cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ)); 8694 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did, 8695 ELS_CMD_RRQ); 8696 if (!elsiocb) 8697 return 1; 8698 8699 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 8700 8701 /* For RRQ request, remainder of payload is Exchange IDs */ 8702 *((uint32_t *) (pcmd)) = ELS_CMD_RRQ; 8703 pcmd += sizeof(uint32_t); 8704 els_rrq = (struct RRQ *) pcmd; 8705 8706 bf_set(rrq_oxid, els_rrq, phba->sli4_hba.xri_ids[rrq->xritag]); 8707 bf_set(rrq_rxid, els_rrq, rrq->rxid); 8708 bf_set(rrq_did, els_rrq, vport->fc_myDID); 8709 els_rrq->rrq = cpu_to_be32(els_rrq->rrq); 8710 els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg); 8711 8712 8713 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 8714 "Issue RRQ: did:x%x", 8715 did, rrq->xritag, rrq->rxid); 8716 elsiocb->context_un.rrq = rrq; 8717 elsiocb->cmd_cmpl = lpfc_cmpl_els_rrq; 8718 8719 elsiocb->ndlp = lpfc_nlp_get(ndlp); 8720 if (!elsiocb->ndlp) 8721 goto io_err; 8722 8723 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 8724 if (ret == IOCB_ERROR) { 8725 lpfc_nlp_put(ndlp); 8726 goto io_err; 8727 } 8728 return 0; 8729 8730 io_err: 8731 lpfc_els_free_iocb(phba, elsiocb); 8732 return 1; 8733 } 8734 8735 /** 8736 * lpfc_send_rrq - Sends ELS RRQ if needed. 8737 * @phba: pointer to lpfc hba data structure. 8738 * @rrq: pointer to the active rrq. 8739 * 8740 * This routine will call the lpfc_issue_els_rrq if the rrq is 8741 * still active for the xri. If this function returns a failure then 8742 * the caller needs to clean up the RRQ by calling lpfc_clr_active_rrq. 8743 * 8744 * Returns 0 Success. 8745 * 1 Failure. 8746 **/ 8747 int 8748 lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq) 8749 { 8750 struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport, 8751 rrq->nlp_DID); 8752 if (!ndlp) 8753 return 1; 8754 8755 if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag)) 8756 return lpfc_issue_els_rrq(rrq->vport, ndlp, 8757 rrq->nlp_DID, rrq); 8758 else 8759 return 1; 8760 } 8761 8762 /** 8763 * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command 8764 * @vport: pointer to a host virtual N_Port data structure. 8765 * @cmdsize: size of the ELS command. 8766 * @oldiocb: pointer to the original lpfc command iocb data structure. 8767 * @ndlp: pointer to a node-list data structure. 8768 * 8769 * This routine issuees an Accept (ACC) Read Port List (RPL) ELS command. 8770 * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL. 8771 * 8772 * Note that the ndlp reference count will be incremented by 1 for holding the 8773 * ndlp and the reference to ndlp will be stored into the ndlp field of 8774 * the IOCB for the completion callback function to the RPL Accept Response 8775 * ELS command. 
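 * The ACC payload reports a single-entry port list describing only this
 * vport: port number block 0 carrying the vport's own DID and WWPN.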
8776 * 8777 * Return code 8778 * 0 - Successfully issued ACC RPL ELS command 8779 * 1 - Failed to issue ACC RPL ELS command 8780 **/ 8781 static int 8782 lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize, 8783 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 8784 { 8785 int rc = 0; 8786 struct lpfc_hba *phba = vport->phba; 8787 IOCB_t *icmd; 8788 union lpfc_wqe128 *wqe; 8789 RPL_RSP rpl_rsp; 8790 struct lpfc_iocbq *elsiocb; 8791 uint8_t *pcmd; 8792 u32 ulp_context; 8793 8794 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 8795 ndlp->nlp_DID, ELS_CMD_ACC); 8796 8797 if (!elsiocb) 8798 return 1; 8799 8800 ulp_context = get_job_ulpcontext(phba, elsiocb); 8801 if (phba->sli_rev == LPFC_SLI_REV4) { 8802 wqe = &elsiocb->wqe; 8803 /* Xri / rx_id */ 8804 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 8805 get_job_ulpcontext(phba, oldiocb)); 8806 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 8807 get_job_rcvoxid(phba, oldiocb)); 8808 } else { 8809 icmd = &elsiocb->iocb; 8810 icmd->ulpContext = get_job_ulpcontext(phba, oldiocb); 8811 icmd->unsli3.rcvsli3.ox_id = get_job_rcvoxid(phba, oldiocb); 8812 } 8813 8814 pcmd = elsiocb->cmd_dmabuf->virt; 8815 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 8816 pcmd += sizeof(uint16_t); 8817 *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize); 8818 pcmd += sizeof(uint16_t); 8819 8820 /* Setup the RPL ACC payload */ 8821 rpl_rsp.listLen = be32_to_cpu(1); 8822 rpl_rsp.index = 0; 8823 rpl_rsp.port_num_blk.portNum = 0; 8824 rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID); 8825 memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname, 8826 sizeof(struct lpfc_name)); 8827 memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t)); 8828 /* Xmit ELS RPL ACC response tag <ulpIoTag> */ 8829 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8830 "0120 Xmit ELS RPL ACC response tag x%x " 8831 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " 8832 "rpi x%x\n", 8833 elsiocb->iotag, ulp_context, 8834 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 8835 ndlp->nlp_rpi); 8836 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 8837 phba->fc_stat.elsXmitACC++; 8838 elsiocb->ndlp = lpfc_nlp_get(ndlp); 8839 if (!elsiocb->ndlp) { 8840 lpfc_els_free_iocb(phba, elsiocb); 8841 return 1; 8842 } 8843 8844 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 8845 if (rc == IOCB_ERROR) { 8846 lpfc_els_free_iocb(phba, elsiocb); 8847 lpfc_nlp_put(ndlp); 8848 return 1; 8849 } 8850 8851 return 0; 8852 } 8853 8854 /** 8855 * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb 8856 * @vport: pointer to a host virtual N_Port data structure. 8857 * @cmdiocb: pointer to lpfc command iocb data structure. 8858 * @ndlp: pointer to a node-list data structure. 8859 * 8860 * This routine processes Read Port List (RPL) IOCB received as an ELS 8861 * unsolicited event. It first checks the remote port state. If the remote 8862 * port is not in NLP_STE_UNMAPPED_NODE and NLP_STE_MAPPED_NODE states, it 8863 * invokes the lpfc_els_rsp_reject() routine to send reject response. 8864 * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine 8865 * to accept the RPL. 
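 * The response size honors the request: for index 0 with a maxsize of
 * zero or large enough to hold a full RPL_RSP, a complete RPL_RSP is
 * returned; otherwise the response is limited to maxsize words.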
8866 * 8867 * Return code 8868 * 0 - Successfully processed rpl iocb (currently always return 0) 8869 **/ 8870 static int 8871 lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8872 struct lpfc_nodelist *ndlp) 8873 { 8874 struct lpfc_dmabuf *pcmd; 8875 uint32_t *lp; 8876 uint32_t maxsize; 8877 uint16_t cmdsize; 8878 RPL *rpl; 8879 struct ls_rjt stat; 8880 8881 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 8882 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { 8883 /* issue rejection response */ 8884 stat.un.b.lsRjtRsvd0 = 0; 8885 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8886 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8887 stat.un.b.vendorUnique = 0; 8888 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 8889 NULL); 8890 /* rejected the unsolicited RPL request and done with it */ 8891 return 0; 8892 } 8893 8894 pcmd = cmdiocb->cmd_dmabuf; 8895 lp = (uint32_t *) pcmd->virt; 8896 rpl = (RPL *) (lp + 1); 8897 maxsize = be32_to_cpu(rpl->maxsize); 8898 8899 /* We support only one port */ 8900 if ((rpl->index == 0) && 8901 ((maxsize == 0) || 8902 ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) { 8903 cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP); 8904 } else { 8905 cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t); 8906 } 8907 lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp); 8908 8909 return 0; 8910 } 8911 8912 /** 8913 * lpfc_els_rcv_farp - Process an unsolicited farp request els command 8914 * @vport: pointer to a virtual N_Port data structure. 8915 * @cmdiocb: pointer to lpfc command iocb data structure. 8916 * @ndlp: pointer to a node-list data structure. 8917 * 8918 * This routine processes Fibre Channel Address Resolution Protocol 8919 * (FARP) Request IOCB received as an ELS unsolicited event. Currently, 8920 * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such, 8921 * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the 8922 * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the 8923 * remote PortName is compared against the FC PortName stored in the @vport 8924 * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is 8925 * compared against the FC NodeName stored in the @vport data structure. 8926 * If any of these matches and the FARP_REQUEST_FARPR flag is set in the 8927 * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is 8928 * invoked to send out FARP Response to the remote node. Before sending the 8929 * FARP Response, however, the FARP_REQUEST_PLOGI flag is check in the FARP 8930 * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi() 8931 * routine is invoked to log into the remote port first. 
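 * A FARP request whose Match Flags ask for anything other than WWPN or
 * WWNN matching is ignored without generating a response.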
8932 * 8933 * Return code 8934 * 0 - Either the FARP Match Mode not supported or successfully processed 8935 **/ 8936 static int 8937 lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8938 struct lpfc_nodelist *ndlp) 8939 { 8940 struct lpfc_dmabuf *pcmd; 8941 uint32_t *lp; 8942 FARP *fp; 8943 uint32_t cnt, did; 8944 8945 did = get_job_els_rsp64_did(vport->phba, cmdiocb); 8946 pcmd = cmdiocb->cmd_dmabuf; 8947 lp = (uint32_t *) pcmd->virt; 8948 8949 lp++; 8950 fp = (FARP *) lp; 8951 /* FARP-REQ received from DID <did> */ 8952 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8953 "0601 FARP-REQ received from DID x%x\n", did); 8954 /* We will only support match on WWPN or WWNN */ 8955 if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) { 8956 return 0; 8957 } 8958 8959 cnt = 0; 8960 /* If this FARP command is searching for my portname */ 8961 if (fp->Mflags & FARP_MATCH_PORT) { 8962 if (memcmp(&fp->RportName, &vport->fc_portname, 8963 sizeof(struct lpfc_name)) == 0) 8964 cnt = 1; 8965 } 8966 8967 /* If this FARP command is searching for my nodename */ 8968 if (fp->Mflags & FARP_MATCH_NODE) { 8969 if (memcmp(&fp->RnodeName, &vport->fc_nodename, 8970 sizeof(struct lpfc_name)) == 0) 8971 cnt = 1; 8972 } 8973 8974 if (cnt) { 8975 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || 8976 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) { 8977 /* Log back into the node before sending the FARP. */ 8978 if (fp->Rflags & FARP_REQUEST_PLOGI) { 8979 ndlp->nlp_prev_state = ndlp->nlp_state; 8980 lpfc_nlp_set_state(vport, ndlp, 8981 NLP_STE_PLOGI_ISSUE); 8982 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 8983 } 8984 8985 /* Send a FARP response to that node */ 8986 if (fp->Rflags & FARP_REQUEST_FARPR) 8987 lpfc_issue_els_farpr(vport, did, 0); 8988 } 8989 } 8990 return 0; 8991 } 8992 8993 /** 8994 * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb 8995 * @vport: pointer to a host virtual N_Port data structure. 8996 * @cmdiocb: pointer to lpfc command iocb data structure. 8997 * @ndlp: pointer to a node-list data structure. 8998 * 8999 * This routine processes Fibre Channel Address Resolution Protocol 9000 * Response (FARPR) IOCB received as an ELS unsolicited event. It simply 9001 * invokes the lpfc_els_rsp_acc() routine to the remote node to accept 9002 * the FARP response request. 9003 * 9004 * Return code 9005 * 0 - Successfully processed FARPR IOCB (currently always return 0) 9006 **/ 9007 static int 9008 lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 9009 struct lpfc_nodelist *ndlp) 9010 { 9011 struct lpfc_dmabuf *pcmd; 9012 uint32_t *lp; 9013 uint32_t did; 9014 9015 did = get_job_els_rsp64_did(vport->phba, cmdiocb); 9016 pcmd = cmdiocb->cmd_dmabuf; 9017 lp = (uint32_t *)pcmd->virt; 9018 9019 lp++; 9020 /* FARP-RSP received from DID <did> */ 9021 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 9022 "0600 FARP-RSP received from DID x%x\n", did); 9023 /* ACCEPT the Farp resp request */ 9024 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 9025 9026 return 0; 9027 } 9028 9029 /** 9030 * lpfc_els_rcv_fan - Process an unsolicited fan iocb command 9031 * @vport: pointer to a host virtual N_Port data structure. 9032 * @cmdiocb: pointer to lpfc command iocb data structure. 9033 * @fan_ndlp: pointer to a node-list data structure. 9034 * 9035 * This routine processes a Fabric Address Notification (FAN) IOCB 9036 * command received as an ELS unsolicited event. 
The FAN ELS command will
9037 * only be processed on a physical port (i.e., the @vport represents the
9038 * physical port). The fabric NodeName and PortName from the FAN IOCB are
9039 * compared against those in the phba data structure. If any of those is
9040 * different, the lpfc_issue_init_vfi() routine is invoked to reinitiate
9041 * Fabric Login (FLOGI) to the fabric and start the discovery over. Otherwise,
9042 * if both of those are identical, the lpfc_issue_fabric_reglogin() routine
9043 * is invoked to register login to the fabric.
9044 *
9045 * Return code
9046 * 0 - Successfully processed fan iocb (currently always return 0).
9047 **/
9048 static int
9049 lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
9050 struct lpfc_nodelist *fan_ndlp)
9051 {
9052 struct lpfc_hba *phba = vport->phba;
9053 uint32_t *lp;
9054 FAN *fp;
9055
9056 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n");
9057 lp = (uint32_t *)cmdiocb->cmd_dmabuf->virt;
9058 fp = (FAN *) ++lp;
9059 /* FAN received; Fan does not have a reply sequence */
9060 if ((vport == phba->pport) &&
9061 (vport->port_state == LPFC_LOCAL_CFG_LINK)) {
9062 if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName,
9063 sizeof(struct lpfc_name))) ||
9064 (memcmp(&phba->fc_fabparam.portName, &fp->FportName,
9065 sizeof(struct lpfc_name)))) {
9066 /* This port has switched fabrics. FLOGI is required */
9067 lpfc_issue_init_vfi(vport);
9068 } else {
9069 /* FAN verified - skip FLOGI */
9070 vport->fc_myDID = vport->fc_prevDID;
9071 if (phba->sli_rev < LPFC_SLI_REV4)
9072 lpfc_issue_fabric_reglogin(vport);
9073 else {
9074 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
9075 "3138 Need register VFI: (x%x/%x)\n",
9076 vport->fc_prevDID, vport->fc_myDID);
9077 lpfc_issue_reg_vfi(vport);
9078 }
9079 }
9080 }
9081 return 0;
9082 }
9083
9084 /**
9085 * lpfc_els_rcv_edc - Process an unsolicited EDC iocb
9086 * @vport: pointer to a host virtual N_Port data structure.
9087 * @cmdiocb: pointer to lpfc command iocb data structure.
9088 * @ndlp: pointer to a node-list data structure.
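 *
 * This routine processes an Exchange Diagnostic Capabilities (EDC) request
 * received as an ELS unsolicited event. It walks the request's diagnostic
 * descriptors looking for a congestion signaling capability descriptor,
 * negotiates the signaling settings via lpfc_least_capable_settings(), and
 * always responds with an ACC via lpfc_issue_els_edc_rsp().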
9089 * 9090 * Return code 9091 * 0 - Successfully processed echo iocb (currently always return 0) 9092 **/ 9093 static int 9094 lpfc_els_rcv_edc(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 9095 struct lpfc_nodelist *ndlp) 9096 { 9097 struct lpfc_hba *phba = vport->phba; 9098 struct fc_els_edc *edc_req; 9099 struct fc_tlv_desc *tlv; 9100 uint8_t *payload; 9101 uint32_t *ptr, dtag; 9102 const char *dtag_nm; 9103 int desc_cnt = 0, bytes_remain; 9104 bool rcv_cap_desc = false; 9105 9106 payload = cmdiocb->cmd_dmabuf->virt; 9107 9108 edc_req = (struct fc_els_edc *)payload; 9109 bytes_remain = be32_to_cpu(edc_req->desc_len); 9110 9111 ptr = (uint32_t *)payload; 9112 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 9113 "3319 Rcv EDC payload len %d: x%x x%x x%x\n", 9114 bytes_remain, be32_to_cpu(*ptr), 9115 be32_to_cpu(*(ptr + 1)), be32_to_cpu(*(ptr + 2))); 9116 9117 /* No signal support unless there is a congestion descriptor */ 9118 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 9119 phba->cgn_sig_freq = 0; 9120 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; 9121 9122 if (bytes_remain <= 0) 9123 goto out; 9124 9125 tlv = edc_req->desc; 9126 9127 /* 9128 * cycle through EDC diagnostic descriptors to find the 9129 * congestion signaling capability descriptor 9130 */ 9131 while (bytes_remain && !rcv_cap_desc) { 9132 if (bytes_remain < FC_TLV_DESC_HDR_SZ) { 9133 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 9134 "6464 Truncated TLV hdr on " 9135 "Diagnostic descriptor[%d]\n", 9136 desc_cnt); 9137 goto out; 9138 } 9139 9140 dtag = be32_to_cpu(tlv->desc_tag); 9141 switch (dtag) { 9142 case ELS_DTAG_LNK_FAULT_CAP: 9143 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 9144 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 9145 sizeof(struct fc_diag_lnkflt_desc)) { 9146 lpfc_printf_log( 9147 phba, KERN_WARNING, LOG_CGN_MGMT, 9148 "6465 Truncated Link Fault Diagnostic " 9149 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 9150 desc_cnt, bytes_remain, 9151 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 9152 sizeof(struct fc_diag_cg_sig_desc)); 9153 goto out; 9154 } 9155 /* No action for Link Fault descriptor for now */ 9156 break; 9157 case ELS_DTAG_CG_SIGNAL_CAP: 9158 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 9159 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 9160 sizeof(struct fc_diag_cg_sig_desc)) { 9161 lpfc_printf_log( 9162 phba, KERN_WARNING, LOG_CGN_MGMT, 9163 "6466 Truncated cgn signal Diagnostic " 9164 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 9165 desc_cnt, bytes_remain, 9166 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 9167 sizeof(struct fc_diag_cg_sig_desc)); 9168 goto out; 9169 } 9170 9171 phba->cgn_reg_fpin = phba->cgn_init_reg_fpin; 9172 phba->cgn_reg_signal = phba->cgn_init_reg_signal; 9173 9174 /* We start negotiation with lpfc_fabric_cgn_frequency. 9175 * When we process the EDC, we will settle on the 9176 * higher frequency. 
9177 */
9178 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
9179
9180 lpfc_least_capable_settings(
9181 phba, (struct fc_diag_cg_sig_desc *)tlv);
9182 rcv_cap_desc = true;
9183 break;
9184 default:
9185 dtag_nm = lpfc_get_tlv_dtag_nm(dtag);
9186 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
9187 "6467 unknown Diagnostic "
9188 "Descriptor[%d]: tag x%x (%s)\n",
9189 desc_cnt, dtag, dtag_nm);
9190 }
9191 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
9192 tlv = fc_tlv_next_desc(tlv);
9193 desc_cnt++;
9194 }
9195 out:
9196 /* Need to send back an ACC */
9197 lpfc_issue_els_edc_rsp(vport, cmdiocb, ndlp);
9198
9199 lpfc_config_cgn_signal(phba);
9200 return 0;
9201 }
9202
9203 /**
9204 * lpfc_els_timeout - Handler function for the els timer
9205 * @t: timer context used to obtain the vport.
9206 *
9207 * This routine is invoked by the ELS timer after timeout. It posts the ELS
9208 * timer timeout event by setting the WORKER_ELS_TMO bit in the work port
9209 * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake
9210 * up the worker thread. The worker thread then invokes the routine
9211 * lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO.
9212 **/
9213 void
9214 lpfc_els_timeout(struct timer_list *t)
9215 {
9216 struct lpfc_vport *vport = from_timer(vport, t, els_tmofunc);
9217 struct lpfc_hba *phba = vport->phba;
9218 uint32_t tmo_posted;
9219 unsigned long iflag;
9220
9221 spin_lock_irqsave(&vport->work_port_lock, iflag);
9222 tmo_posted = vport->work_port_events & WORKER_ELS_TMO;
9223 if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
9224 vport->work_port_events |= WORKER_ELS_TMO;
9225 spin_unlock_irqrestore(&vport->work_port_lock, iflag);
9226
9227 if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
9228 lpfc_worker_wake_up(phba);
9229 return;
9230 }
9231
9232
9233 /**
9234 * lpfc_els_timeout_handler - Process an els timeout event
9235 * @vport: pointer to a virtual N_Port data structure.
9236 *
9237 * This routine is the actual handler function that processes an ELS timeout
9238 * event. It walks the ELS ring to find and abort all the IOCBs associated
9239 * with the @vport (except the ABORT/CLOSE/FARP/FARPR/FDISC IOCBs) by
9240 * invoking the lpfc_sli_issue_abort_iotag() routine.
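 * IOCBs whose driver timeout has not yet expired only have their
 * drvrTimeout decremented; the timer is re-armed for another 2 * R_A_TOV
 * interval if the transmit completion queue is still not empty.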
9241 **/ 9242 void 9243 lpfc_els_timeout_handler(struct lpfc_vport *vport) 9244 { 9245 struct lpfc_hba *phba = vport->phba; 9246 struct lpfc_sli_ring *pring; 9247 struct lpfc_iocbq *tmp_iocb, *piocb; 9248 IOCB_t *cmd = NULL; 9249 struct lpfc_dmabuf *pcmd; 9250 uint32_t els_command = 0; 9251 uint32_t timeout; 9252 uint32_t remote_ID = 0xffffffff; 9253 LIST_HEAD(abort_list); 9254 u32 ulp_command = 0, ulp_context = 0, did = 0, iotag = 0; 9255 9256 9257 timeout = (uint32_t)(phba->fc_ratov << 1); 9258 9259 pring = lpfc_phba_elsring(phba); 9260 if (unlikely(!pring)) 9261 return; 9262 9263 if (phba->pport->load_flag & FC_UNLOADING) 9264 return; 9265 9266 spin_lock_irq(&phba->hbalock); 9267 if (phba->sli_rev == LPFC_SLI_REV4) 9268 spin_lock(&pring->ring_lock); 9269 9270 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 9271 ulp_command = get_job_cmnd(phba, piocb); 9272 ulp_context = get_job_ulpcontext(phba, piocb); 9273 did = get_job_els_rsp64_did(phba, piocb); 9274 9275 if (phba->sli_rev == LPFC_SLI_REV4) { 9276 iotag = get_wqe_reqtag(piocb); 9277 } else { 9278 cmd = &piocb->iocb; 9279 iotag = cmd->ulpIoTag; 9280 } 9281 9282 if ((piocb->cmd_flag & LPFC_IO_LIBDFC) != 0 || 9283 ulp_command == CMD_ABORT_XRI_CX || 9284 ulp_command == CMD_ABORT_XRI_CN || 9285 ulp_command == CMD_CLOSE_XRI_CN) 9286 continue; 9287 9288 if (piocb->vport != vport) 9289 continue; 9290 9291 pcmd = piocb->cmd_dmabuf; 9292 if (pcmd) 9293 els_command = *(uint32_t *) (pcmd->virt); 9294 9295 if (els_command == ELS_CMD_FARP || 9296 els_command == ELS_CMD_FARPR || 9297 els_command == ELS_CMD_FDISC) 9298 continue; 9299 9300 if (piocb->drvrTimeout > 0) { 9301 if (piocb->drvrTimeout >= timeout) 9302 piocb->drvrTimeout -= timeout; 9303 else 9304 piocb->drvrTimeout = 0; 9305 continue; 9306 } 9307 9308 remote_ID = 0xffffffff; 9309 if (ulp_command != CMD_GEN_REQUEST64_CR) { 9310 remote_ID = did; 9311 } else { 9312 struct lpfc_nodelist *ndlp; 9313 ndlp = __lpfc_findnode_rpi(vport, ulp_context); 9314 if (ndlp) 9315 remote_ID = ndlp->nlp_DID; 9316 } 9317 list_add_tail(&piocb->dlist, &abort_list); 9318 } 9319 if (phba->sli_rev == LPFC_SLI_REV4) 9320 spin_unlock(&pring->ring_lock); 9321 spin_unlock_irq(&phba->hbalock); 9322 9323 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { 9324 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9325 "0127 ELS timeout Data: x%x x%x x%x " 9326 "x%x\n", els_command, 9327 remote_ID, ulp_command, iotag); 9328 9329 spin_lock_irq(&phba->hbalock); 9330 list_del_init(&piocb->dlist); 9331 lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL); 9332 spin_unlock_irq(&phba->hbalock); 9333 } 9334 9335 /* Make sure HBA is alive */ 9336 lpfc_issue_hb_tmo(phba); 9337 9338 if (!list_empty(&pring->txcmplq)) 9339 if (!(phba->pport->load_flag & FC_UNLOADING)) 9340 mod_timer(&vport->els_tmofunc, 9341 jiffies + msecs_to_jiffies(1000 * timeout)); 9342 } 9343 9344 /** 9345 * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport 9346 * @vport: pointer to a host virtual N_Port data structure. 9347 * 9348 * This routine is used to clean up all the outstanding ELS commands on a 9349 * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport() 9350 * routine. After that, it walks the ELS transmit queue to remove all the 9351 * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. 
For 9352 * the IOCBs with a non-NULL completion callback function, the callback 9353 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and 9354 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion 9355 * callback function, the IOCB will simply be released. Finally, it walks 9356 * the ELS transmit completion queue to issue an abort IOCB to any transmit 9357 * completion queue IOCB that is associated with the @vport and is not 9358 * an IOCB from libdfc (i.e., the management plane IOCBs that are not 9359 * part of the discovery state machine) out to HBA by invoking the 9360 * lpfc_sli_issue_abort_iotag() routine. Note that this function issues the 9361 * abort IOCB to any transmit completion queueed IOCB, it does not guarantee 9362 * the IOCBs are aborted when this function returns. 9363 **/ 9364 void 9365 lpfc_els_flush_cmd(struct lpfc_vport *vport) 9366 { 9367 LIST_HEAD(abort_list); 9368 struct lpfc_hba *phba = vport->phba; 9369 struct lpfc_sli_ring *pring; 9370 struct lpfc_iocbq *tmp_iocb, *piocb; 9371 u32 ulp_command; 9372 unsigned long iflags = 0; 9373 9374 lpfc_fabric_abort_vport(vport); 9375 9376 /* 9377 * For SLI3, only the hbalock is required. But SLI4 needs to coordinate 9378 * with the ring insert operation. Because lpfc_sli_issue_abort_iotag 9379 * ultimately grabs the ring_lock, the driver must splice the list into 9380 * a working list and release the locks before calling the abort. 9381 */ 9382 spin_lock_irqsave(&phba->hbalock, iflags); 9383 pring = lpfc_phba_elsring(phba); 9384 9385 /* Bail out if we've no ELS wq, like in PCI error recovery case. */ 9386 if (unlikely(!pring)) { 9387 spin_unlock_irqrestore(&phba->hbalock, iflags); 9388 return; 9389 } 9390 9391 if (phba->sli_rev == LPFC_SLI_REV4) 9392 spin_lock(&pring->ring_lock); 9393 9394 /* First we need to issue aborts to outstanding cmds on txcmpl */ 9395 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 9396 if (piocb->cmd_flag & LPFC_IO_LIBDFC) 9397 continue; 9398 9399 if (piocb->vport != vport) 9400 continue; 9401 9402 if (piocb->cmd_flag & LPFC_DRIVER_ABORTED) 9403 continue; 9404 9405 /* On the ELS ring we can have ELS_REQUESTs or 9406 * GEN_REQUESTs waiting for a response. 9407 */ 9408 ulp_command = get_job_cmnd(phba, piocb); 9409 if (ulp_command == CMD_ELS_REQUEST64_CR) { 9410 list_add_tail(&piocb->dlist, &abort_list); 9411 9412 /* If the link is down when flushing ELS commands 9413 * the firmware will not complete them till after 9414 * the link comes back up. This may confuse 9415 * discovery for the new link up, so we need to 9416 * change the compl routine to just clean up the iocb 9417 * and avoid any retry logic. 9418 */ 9419 if (phba->link_state == LPFC_LINK_DOWN) 9420 piocb->cmd_cmpl = lpfc_cmpl_els_link_down; 9421 } 9422 if (ulp_command == CMD_GEN_REQUEST64_CR) 9423 list_add_tail(&piocb->dlist, &abort_list); 9424 } 9425 9426 if (phba->sli_rev == LPFC_SLI_REV4) 9427 spin_unlock(&pring->ring_lock); 9428 spin_unlock_irqrestore(&phba->hbalock, iflags); 9429 9430 /* Abort each txcmpl iocb on aborted list and remove the dlist links. 
*/ 9431 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { 9432 spin_lock_irqsave(&phba->hbalock, iflags); 9433 list_del_init(&piocb->dlist); 9434 lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL); 9435 spin_unlock_irqrestore(&phba->hbalock, iflags); 9436 } 9437 /* Make sure HBA is alive */ 9438 lpfc_issue_hb_tmo(phba); 9439 9440 if (!list_empty(&abort_list)) 9441 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9442 "3387 abort list for txq not empty\n"); 9443 INIT_LIST_HEAD(&abort_list); 9444 9445 spin_lock_irqsave(&phba->hbalock, iflags); 9446 if (phba->sli_rev == LPFC_SLI_REV4) 9447 spin_lock(&pring->ring_lock); 9448 9449 /* No need to abort the txq list, 9450 * just queue them up for lpfc_sli_cancel_iocbs 9451 */ 9452 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) { 9453 ulp_command = get_job_cmnd(phba, piocb); 9454 9455 if (piocb->cmd_flag & LPFC_IO_LIBDFC) 9456 continue; 9457 9458 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */ 9459 if (ulp_command == CMD_QUE_RING_BUF_CN || 9460 ulp_command == CMD_QUE_RING_BUF64_CN || 9461 ulp_command == CMD_CLOSE_XRI_CN || 9462 ulp_command == CMD_ABORT_XRI_CN || 9463 ulp_command == CMD_ABORT_XRI_CX) 9464 continue; 9465 9466 if (piocb->vport != vport) 9467 continue; 9468 9469 list_del_init(&piocb->list); 9470 list_add_tail(&piocb->list, &abort_list); 9471 } 9472 9473 /* The same holds true for any FLOGI/FDISC on the fabric_iocb_list */ 9474 if (vport == phba->pport) { 9475 list_for_each_entry_safe(piocb, tmp_iocb, 9476 &phba->fabric_iocb_list, list) { 9477 list_del_init(&piocb->list); 9478 list_add_tail(&piocb->list, &abort_list); 9479 } 9480 } 9481 9482 if (phba->sli_rev == LPFC_SLI_REV4) 9483 spin_unlock(&pring->ring_lock); 9484 spin_unlock_irqrestore(&phba->hbalock, iflags); 9485 9486 /* Cancel all the IOCBs from the completions list */ 9487 lpfc_sli_cancel_iocbs(phba, &abort_list, 9488 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); 9489 9490 return; 9491 } 9492 9493 /** 9494 * lpfc_els_flush_all_cmd - Clean up all the outstanding els commands to a HBA 9495 * @phba: pointer to lpfc hba data structure. 9496 * 9497 * This routine is used to clean up all the outstanding ELS commands on a 9498 * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba() 9499 * routine. After that, it walks the ELS transmit queue to remove all the 9500 * IOCBs to the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For 9501 * the IOCBs with the completion callback function associated, the callback 9502 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and 9503 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without the completion 9504 * callback function associated, the IOCB will simply be released. Finally, 9505 * it walks the ELS transmit completion queue to issue an abort IOCB to any 9506 * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the 9507 * management plane IOCBs that are not part of the discovery state machine) 9508 * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine. 9509 **/ 9510 void 9511 lpfc_els_flush_all_cmd(struct lpfc_hba *phba) 9512 { 9513 struct lpfc_vport *vport; 9514 9515 spin_lock_irq(&phba->port_list_lock); 9516 list_for_each_entry(vport, &phba->port_list, listentry) 9517 lpfc_els_flush_cmd(vport); 9518 spin_unlock_irq(&phba->port_list_lock); 9519 9520 return; 9521 } 9522 9523 /** 9524 * lpfc_send_els_failure_event - Posts an ELS command failure event 9525 * @phba: Pointer to hba context object. 
9526 * @cmdiocbp: Pointer to command iocb which reported error. 9527 * @rspiocbp: Pointer to response iocb which reported error. 9528 * 9529 * This function sends an event when there is an ELS command 9530 * failure. 9531 **/ 9532 void 9533 lpfc_send_els_failure_event(struct lpfc_hba *phba, 9534 struct lpfc_iocbq *cmdiocbp, 9535 struct lpfc_iocbq *rspiocbp) 9536 { 9537 struct lpfc_vport *vport = cmdiocbp->vport; 9538 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 9539 struct lpfc_lsrjt_event lsrjt_event; 9540 struct lpfc_fabric_event_header fabric_event; 9541 struct ls_rjt stat; 9542 struct lpfc_nodelist *ndlp; 9543 uint32_t *pcmd; 9544 u32 ulp_status, ulp_word4; 9545 9546 ndlp = cmdiocbp->ndlp; 9547 if (!ndlp) 9548 return; 9549 9550 ulp_status = get_job_ulpstatus(phba, rspiocbp); 9551 ulp_word4 = get_job_word4(phba, rspiocbp); 9552 9553 if (ulp_status == IOSTAT_LS_RJT) { 9554 lsrjt_event.header.event_type = FC_REG_ELS_EVENT; 9555 lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV; 9556 memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname, 9557 sizeof(struct lpfc_name)); 9558 memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename, 9559 sizeof(struct lpfc_name)); 9560 pcmd = (uint32_t *)cmdiocbp->cmd_dmabuf->virt; 9561 lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0; 9562 stat.un.ls_rjt_error_be = cpu_to_be32(ulp_word4); 9563 lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode; 9564 lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp; 9565 fc_host_post_vendor_event(shost, 9566 fc_get_event_number(), 9567 sizeof(lsrjt_event), 9568 (char *)&lsrjt_event, 9569 LPFC_NL_VENDOR_ID); 9570 return; 9571 } 9572 if (ulp_status == IOSTAT_NPORT_BSY || 9573 ulp_status == IOSTAT_FABRIC_BSY) { 9574 fabric_event.event_type = FC_REG_FABRIC_EVENT; 9575 if (ulp_status == IOSTAT_NPORT_BSY) 9576 fabric_event.subcategory = LPFC_EVENT_PORT_BUSY; 9577 else 9578 fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY; 9579 memcpy(fabric_event.wwpn, &ndlp->nlp_portname, 9580 sizeof(struct lpfc_name)); 9581 memcpy(fabric_event.wwnn, &ndlp->nlp_nodename, 9582 sizeof(struct lpfc_name)); 9583 fc_host_post_vendor_event(shost, 9584 fc_get_event_number(), 9585 sizeof(fabric_event), 9586 (char *)&fabric_event, 9587 LPFC_NL_VENDOR_ID); 9588 return; 9589 } 9590 9591 } 9592 9593 /** 9594 * lpfc_send_els_event - Posts unsolicited els event 9595 * @vport: Pointer to vport object. 9596 * @ndlp: Pointer FC node object. 9597 * @payload: ELS command code type. 9598 * 9599 * This function posts an event when there is an incoming 9600 * unsolicited ELS command. 
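 * Events are posted for PLOGI, PRLO, ADISC and LOGO; a LOGO event
 * additionally carries the remote WWPN taken from the LOGO payload.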
9601 **/ 9602 static void 9603 lpfc_send_els_event(struct lpfc_vport *vport, 9604 struct lpfc_nodelist *ndlp, 9605 uint32_t *payload) 9606 { 9607 struct lpfc_els_event_header *els_data = NULL; 9608 struct lpfc_logo_event *logo_data = NULL; 9609 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 9610 9611 if (*payload == ELS_CMD_LOGO) { 9612 logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL); 9613 if (!logo_data) { 9614 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9615 "0148 Failed to allocate memory " 9616 "for LOGO event\n"); 9617 return; 9618 } 9619 els_data = &logo_data->header; 9620 } else { 9621 els_data = kmalloc(sizeof(struct lpfc_els_event_header), 9622 GFP_KERNEL); 9623 if (!els_data) { 9624 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9625 "0149 Failed to allocate memory " 9626 "for ELS event\n"); 9627 return; 9628 } 9629 } 9630 els_data->event_type = FC_REG_ELS_EVENT; 9631 switch (*payload) { 9632 case ELS_CMD_PLOGI: 9633 els_data->subcategory = LPFC_EVENT_PLOGI_RCV; 9634 break; 9635 case ELS_CMD_PRLO: 9636 els_data->subcategory = LPFC_EVENT_PRLO_RCV; 9637 break; 9638 case ELS_CMD_ADISC: 9639 els_data->subcategory = LPFC_EVENT_ADISC_RCV; 9640 break; 9641 case ELS_CMD_LOGO: 9642 els_data->subcategory = LPFC_EVENT_LOGO_RCV; 9643 /* Copy the WWPN in the LOGO payload */ 9644 memcpy(logo_data->logo_wwpn, &payload[2], 9645 sizeof(struct lpfc_name)); 9646 break; 9647 default: 9648 kfree(els_data); 9649 return; 9650 } 9651 memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name)); 9652 memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name)); 9653 if (*payload == ELS_CMD_LOGO) { 9654 fc_host_post_vendor_event(shost, 9655 fc_get_event_number(), 9656 sizeof(struct lpfc_logo_event), 9657 (char *)logo_data, 9658 LPFC_NL_VENDOR_ID); 9659 kfree(logo_data); 9660 } else { 9661 fc_host_post_vendor_event(shost, 9662 fc_get_event_number(), 9663 sizeof(struct lpfc_els_event_header), 9664 (char *)els_data, 9665 LPFC_NL_VENDOR_ID); 9666 kfree(els_data); 9667 } 9668 9669 return; 9670 } 9671 9672 9673 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_li_event_nm, fc_fpin_li_event_types, 9674 FC_FPIN_LI_EVT_TYPES_INIT); 9675 9676 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_deli_event_nm, fc_fpin_deli_event_types, 9677 FC_FPIN_DELI_EVT_TYPES_INIT); 9678 9679 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_event_nm, fc_fpin_congn_event_types, 9680 FC_FPIN_CONGN_EVT_TYPES_INIT); 9681 9682 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_severity_nm, 9683 fc_fpin_congn_severity_types, 9684 FC_FPIN_CONGN_SEVERITY_INIT); 9685 9686 9687 /** 9688 * lpfc_display_fpin_wwpn - Display WWPNs accessible by the attached port 9689 * @phba: Pointer to phba object. 9690 * @wwnlist: Pointer to list of WWPNs in FPIN payload 9691 * @cnt: count of WWPNs in FPIN payload 9692 * 9693 * This routine is called by LI and PC descriptors. 
9694 * Limit the number of WWPNs displayed to 6 log messages, 6 per log message 9695 */ 9696 static void 9697 lpfc_display_fpin_wwpn(struct lpfc_hba *phba, __be64 *wwnlist, u32 cnt) 9698 { 9699 char buf[LPFC_FPIN_WWPN_LINE_SZ]; 9700 __be64 wwn; 9701 u64 wwpn; 9702 int i, len; 9703 int line = 0; 9704 int wcnt = 0; 9705 bool endit = false; 9706 9707 len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ, "Accessible WWPNs:"); 9708 for (i = 0; i < cnt; i++) { 9709 /* Are we on the last WWPN */ 9710 if (i == (cnt - 1)) 9711 endit = true; 9712 9713 /* Extract the next WWPN from the payload */ 9714 wwn = *wwnlist++; 9715 wwpn = be64_to_cpu(wwn); 9716 len += scnprintf(buf + len, LPFC_FPIN_WWPN_LINE_SZ - len, 9717 " %016llx", wwpn); 9718 9719 /* Log a message if we are on the last WWPN 9720 * or if we hit the max allowed per message. 9721 */ 9722 wcnt++; 9723 if (wcnt == LPFC_FPIN_WWPN_LINE_CNT || endit) { 9724 buf[len] = 0; 9725 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9726 "4686 %s\n", buf); 9727 9728 /* Check if we reached the last WWPN */ 9729 if (endit) 9730 return; 9731 9732 /* Limit the number of log message displayed per FPIN */ 9733 line++; 9734 if (line == LPFC_FPIN_WWPN_NUM_LINE) { 9735 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9736 "4687 %d WWPNs Truncated\n", 9737 cnt - i - 1); 9738 return; 9739 } 9740 9741 /* Start over with next log message */ 9742 wcnt = 0; 9743 len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ, 9744 "Additional WWPNs:"); 9745 } 9746 } 9747 } 9748 9749 /** 9750 * lpfc_els_rcv_fpin_li - Process an FPIN Link Integrity Event. 9751 * @phba: Pointer to phba object. 9752 * @tlv: Pointer to the Link Integrity Notification Descriptor. 9753 * 9754 * This function processes a Link Integrity FPIN event by logging a message. 9755 **/ 9756 static void 9757 lpfc_els_rcv_fpin_li(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 9758 { 9759 struct fc_fn_li_desc *li = (struct fc_fn_li_desc *)tlv; 9760 const char *li_evt_str; 9761 u32 li_evt, cnt; 9762 9763 li_evt = be16_to_cpu(li->event_type); 9764 li_evt_str = lpfc_get_fpin_li_event_nm(li_evt); 9765 cnt = be32_to_cpu(li->pname_count); 9766 9767 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9768 "4680 FPIN Link Integrity %s (x%x) " 9769 "Detecting PN x%016llx Attached PN x%016llx " 9770 "Duration %d mSecs Count %d Port Cnt %d\n", 9771 li_evt_str, li_evt, 9772 be64_to_cpu(li->detecting_wwpn), 9773 be64_to_cpu(li->attached_wwpn), 9774 be32_to_cpu(li->event_threshold), 9775 be32_to_cpu(li->event_count), cnt); 9776 9777 lpfc_display_fpin_wwpn(phba, (__be64 *)&li->pname_list, cnt); 9778 } 9779 9780 /** 9781 * lpfc_els_rcv_fpin_del - Process an FPIN Delivery Event. 9782 * @phba: Pointer to hba object. 9783 * @tlv: Pointer to the Delivery Notification Descriptor TLV 9784 * 9785 * This function processes a Delivery FPIN event by logging a message. 
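 * The six words that follow the delivery descriptor are logged as
 * DiscHdr0 through DiscHdr5.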
9786 **/ 9787 static void 9788 lpfc_els_rcv_fpin_del(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 9789 { 9790 struct fc_fn_deli_desc *del = (struct fc_fn_deli_desc *)tlv; 9791 const char *del_rsn_str; 9792 u32 del_rsn; 9793 __be32 *frame; 9794 9795 del_rsn = be16_to_cpu(del->deli_reason_code); 9796 del_rsn_str = lpfc_get_fpin_deli_event_nm(del_rsn); 9797 9798 /* Skip over desc_tag/desc_len header to payload */ 9799 frame = (__be32 *)(del + 1); 9800 9801 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9802 "4681 FPIN Delivery %s (x%x) " 9803 "Detecting PN x%016llx Attached PN x%016llx " 9804 "DiscHdr0 x%08x " 9805 "DiscHdr1 x%08x DiscHdr2 x%08x DiscHdr3 x%08x " 9806 "DiscHdr4 x%08x DiscHdr5 x%08x\n", 9807 del_rsn_str, del_rsn, 9808 be64_to_cpu(del->detecting_wwpn), 9809 be64_to_cpu(del->attached_wwpn), 9810 be32_to_cpu(frame[0]), 9811 be32_to_cpu(frame[1]), 9812 be32_to_cpu(frame[2]), 9813 be32_to_cpu(frame[3]), 9814 be32_to_cpu(frame[4]), 9815 be32_to_cpu(frame[5])); 9816 } 9817 9818 /** 9819 * lpfc_els_rcv_fpin_peer_cgn - Process a FPIN Peer Congestion Event. 9820 * @phba: Pointer to hba object. 9821 * @tlv: Pointer to the Peer Congestion Notification Descriptor TLV 9822 * 9823 * This function processes a Peer Congestion FPIN event by logging a message. 9824 **/ 9825 static void 9826 lpfc_els_rcv_fpin_peer_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 9827 { 9828 struct fc_fn_peer_congn_desc *pc = (struct fc_fn_peer_congn_desc *)tlv; 9829 const char *pc_evt_str; 9830 u32 pc_evt, cnt; 9831 9832 pc_evt = be16_to_cpu(pc->event_type); 9833 pc_evt_str = lpfc_get_fpin_congn_event_nm(pc_evt); 9834 cnt = be32_to_cpu(pc->pname_count); 9835 9836 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_ELS, 9837 "4684 FPIN Peer Congestion %s (x%x) " 9838 "Duration %d mSecs " 9839 "Detecting PN x%016llx Attached PN x%016llx " 9840 "Impacted Port Cnt %d\n", 9841 pc_evt_str, pc_evt, 9842 be32_to_cpu(pc->event_period), 9843 be64_to_cpu(pc->detecting_wwpn), 9844 be64_to_cpu(pc->attached_wwpn), 9845 cnt); 9846 9847 lpfc_display_fpin_wwpn(phba, (__be64 *)&pc->pname_list, cnt); 9848 } 9849 9850 /** 9851 * lpfc_els_rcv_fpin_cgn - Process an FPIN Congestion notification 9852 * @phba: Pointer to hba object. 9853 * @tlv: Pointer to the Congestion Notification Descriptor TLV 9854 * 9855 * This function processes an FPIN Congestion Notifiction. The notification 9856 * could be an Alarm or Warning. This routine feeds that data into driver's 9857 * running congestion algorithm. It also processes the FPIN by 9858 * logging a message. It returns 1 to indicate deliver this message 9859 * to the upper layer or 0 to indicate don't deliver it. 9860 **/ 9861 static int 9862 lpfc_els_rcv_fpin_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 9863 { 9864 struct lpfc_cgn_info *cp; 9865 struct fc_fn_congn_desc *cgn = (struct fc_fn_congn_desc *)tlv; 9866 const char *cgn_evt_str; 9867 u32 cgn_evt; 9868 const char *cgn_sev_str; 9869 u32 cgn_sev; 9870 uint16_t value; 9871 u32 crc; 9872 bool nm_log = false; 9873 int rc = 1; 9874 9875 cgn_evt = be16_to_cpu(cgn->event_type); 9876 cgn_evt_str = lpfc_get_fpin_congn_event_nm(cgn_evt); 9877 cgn_sev = cgn->severity; 9878 cgn_sev_str = lpfc_get_fpin_congn_severity_nm(cgn_sev); 9879 9880 /* The driver only takes action on a Credit Stall or Oversubscription 9881 * event type to engage the IO algorithm. The driver prints an 9882 * unmaskable message only for Lost Credit and Credit Stall. 
9883 * TODO: Still need to have definition of host action on clear, 9884 * lost credit and device specific event types. 9885 */ 9886 switch (cgn_evt) { 9887 case FPIN_CONGN_LOST_CREDIT: 9888 nm_log = true; 9889 break; 9890 case FPIN_CONGN_CREDIT_STALL: 9891 nm_log = true; 9892 fallthrough; 9893 case FPIN_CONGN_OVERSUBSCRIPTION: 9894 if (cgn_evt == FPIN_CONGN_OVERSUBSCRIPTION) 9895 nm_log = false; 9896 switch (cgn_sev) { 9897 case FPIN_CONGN_SEVERITY_ERROR: 9898 /* Take action here for an Alarm event */ 9899 if (phba->cmf_active_mode != LPFC_CFG_OFF) { 9900 if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) { 9901 /* Track of alarm cnt for SYNC_WQE */ 9902 atomic_inc(&phba->cgn_sync_alarm_cnt); 9903 } 9904 /* Track alarm cnt for cgn_info regardless 9905 * of whether CMF is configured for Signals 9906 * or FPINs. 9907 */ 9908 atomic_inc(&phba->cgn_fabric_alarm_cnt); 9909 goto cleanup; 9910 } 9911 break; 9912 case FPIN_CONGN_SEVERITY_WARNING: 9913 /* Take action here for a Warning event */ 9914 if (phba->cmf_active_mode != LPFC_CFG_OFF) { 9915 if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) { 9916 /* Track of warning cnt for SYNC_WQE */ 9917 atomic_inc(&phba->cgn_sync_warn_cnt); 9918 } 9919 /* Track warning cnt and freq for cgn_info 9920 * regardless of whether CMF is configured for 9921 * Signals or FPINs. 9922 */ 9923 atomic_inc(&phba->cgn_fabric_warn_cnt); 9924 cleanup: 9925 /* Save frequency in ms */ 9926 phba->cgn_fpin_frequency = 9927 be32_to_cpu(cgn->event_period); 9928 value = phba->cgn_fpin_frequency; 9929 if (phba->cgn_i) { 9930 cp = (struct lpfc_cgn_info *) 9931 phba->cgn_i->virt; 9932 cp->cgn_alarm_freq = 9933 cpu_to_le16(value); 9934 cp->cgn_warn_freq = 9935 cpu_to_le16(value); 9936 crc = lpfc_cgn_calc_crc32 9937 (cp, 9938 LPFC_CGN_INFO_SZ, 9939 LPFC_CGN_CRC32_SEED); 9940 cp->cgn_info_crc = cpu_to_le32(crc); 9941 } 9942 9943 /* Don't deliver to upper layer since 9944 * driver took action on this tlv. 9945 */ 9946 rc = 0; 9947 } 9948 break; 9949 } 9950 break; 9951 } 9952 9953 /* Change the log level to unmaskable for the following event types. */ 9954 lpfc_printf_log(phba, (nm_log ? KERN_WARNING : KERN_INFO), 9955 LOG_CGN_MGMT | LOG_ELS, 9956 "4683 FPIN CONGESTION %s type %s (x%x) Event " 9957 "Duration %d mSecs\n", 9958 cgn_sev_str, cgn_evt_str, cgn_evt, 9959 be32_to_cpu(cgn->event_period)); 9960 return rc; 9961 } 9962 9963 void 9964 lpfc_els_rcv_fpin(struct lpfc_vport *vport, void *p, u32 fpin_length) 9965 { 9966 struct lpfc_hba *phba = vport->phba; 9967 struct fc_els_fpin *fpin = (struct fc_els_fpin *)p; 9968 struct fc_tlv_desc *tlv, *first_tlv, *current_tlv; 9969 const char *dtag_nm; 9970 int desc_cnt = 0, bytes_remain, cnt; 9971 u32 dtag, deliver = 0; 9972 int len; 9973 9974 /* FPINs handled only if we are in the right discovery state */ 9975 if (vport->port_state < LPFC_DISC_AUTH) 9976 return; 9977 9978 /* make sure there is the full fpin header */ 9979 if (fpin_length < sizeof(struct fc_els_fpin)) 9980 return; 9981 9982 /* Sanity check descriptor length. The desc_len value does not 9983 * include space for the ELS command and the desc_len fields. 
9984 */ 9985 len = be32_to_cpu(fpin->desc_len); 9986 if (fpin_length < len + sizeof(struct fc_els_fpin)) { 9987 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 9988 "4671 Bad ELS FPIN length %d: %d\n", 9989 len, fpin_length); 9990 return; 9991 } 9992 9993 tlv = (struct fc_tlv_desc *)&fpin->fpin_desc[0]; 9994 first_tlv = tlv; 9995 bytes_remain = fpin_length - offsetof(struct fc_els_fpin, fpin_desc); 9996 bytes_remain = min_t(u32, bytes_remain, be32_to_cpu(fpin->desc_len)); 9997 9998 /* process each descriptor separately */ 9999 while (bytes_remain >= FC_TLV_DESC_HDR_SZ && 10000 bytes_remain >= FC_TLV_DESC_SZ_FROM_LENGTH(tlv)) { 10001 dtag = be32_to_cpu(tlv->desc_tag); 10002 switch (dtag) { 10003 case ELS_DTAG_LNK_INTEGRITY: 10004 lpfc_els_rcv_fpin_li(phba, tlv); 10005 deliver = 1; 10006 break; 10007 case ELS_DTAG_DELIVERY: 10008 lpfc_els_rcv_fpin_del(phba, tlv); 10009 deliver = 1; 10010 break; 10011 case ELS_DTAG_PEER_CONGEST: 10012 lpfc_els_rcv_fpin_peer_cgn(phba, tlv); 10013 deliver = 1; 10014 break; 10015 case ELS_DTAG_CONGESTION: 10016 deliver = lpfc_els_rcv_fpin_cgn(phba, tlv); 10017 break; 10018 default: 10019 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 10020 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 10021 "4678 unknown FPIN descriptor[%d]: " 10022 "tag x%x (%s)\n", 10023 desc_cnt, dtag, dtag_nm); 10024 10025 /* If descriptor is bad, drop the rest of the data */ 10026 return; 10027 } 10028 lpfc_cgn_update_stat(phba, dtag); 10029 cnt = be32_to_cpu(tlv->desc_len); 10030 10031 /* Sanity check descriptor length. The desc_len value does not 10032 * include space for the desc_tag and the desc_len fields. 10033 */ 10034 len -= (cnt + sizeof(struct fc_tlv_desc)); 10035 if (len < 0) { 10036 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 10037 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 10038 "4672 Bad FPIN descriptor TLV length " 10039 "%d: %d %d %s\n", 10040 cnt, len, fpin_length, dtag_nm); 10041 return; 10042 } 10043 10044 current_tlv = tlv; 10045 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); 10046 tlv = fc_tlv_next_desc(tlv); 10047 10048 /* Format payload such that the FPIN delivered to the 10049 * upper layer is a single descriptor FPIN. 10050 */ 10051 if (desc_cnt) 10052 memcpy(first_tlv, current_tlv, 10053 (cnt + sizeof(struct fc_els_fpin))); 10054 10055 /* Adjust the length so that it only reflects a 10056 * single descriptor FPIN. 10057 */ 10058 fpin_length = cnt + sizeof(struct fc_els_fpin); 10059 fpin->desc_len = cpu_to_be32(fpin_length); 10060 fpin_length += sizeof(struct fc_els_fpin); /* the entire FPIN */ 10061 10062 /* Send every descriptor individually to the upper layer */ 10063 if (deliver) 10064 fc_host_fpin_rcv(lpfc_shost_from_vport(vport), 10065 fpin_length, (char *)fpin); 10066 desc_cnt++; 10067 } 10068 } 10069 10070 /** 10071 * lpfc_els_unsol_buffer - Process an unsolicited event data buffer 10072 * @phba: pointer to lpfc hba data structure. 10073 * @pring: pointer to a SLI ring. 10074 * @vport: pointer to a host virtual N_Port data structure. 10075 * @elsiocb: pointer to lpfc els command iocb data structure. 10076 * 10077 * This routine is used for processing the IOCB associated with a unsolicited 10078 * event. It first determines whether there is an existing ndlp that matches 10079 * the DID from the unsolicited IOCB. If not, it will create a new one with 10080 * the DID from the unsolicited IOCB. The ELS command from the unsolicited 10081 * IOCB is then used to invoke the proper routine and to set up proper state 10082 * of the discovery state machine. 
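 * ELS commands received before FLOGI completes are rejected with
 * LSRJT_LOGICAL_BSY, except for FLOGI itself and, in point-to-point mode,
 * a PLOGI that assigns the local DID.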
10083 **/ 10084 static void 10085 lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10086 struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb) 10087 { 10088 struct lpfc_nodelist *ndlp; 10089 struct ls_rjt stat; 10090 u32 *payload, payload_len; 10091 u32 cmd = 0, did = 0, newnode, status = 0; 10092 uint8_t rjt_exp, rjt_err = 0, init_link = 0; 10093 struct lpfc_wcqe_complete *wcqe_cmpl = NULL; 10094 LPFC_MBOXQ_t *mbox; 10095 10096 if (!vport || !elsiocb->cmd_dmabuf) 10097 goto dropit; 10098 10099 newnode = 0; 10100 wcqe_cmpl = &elsiocb->wcqe_cmpl; 10101 payload = elsiocb->cmd_dmabuf->virt; 10102 if (phba->sli_rev == LPFC_SLI_REV4) 10103 payload_len = wcqe_cmpl->total_data_placed; 10104 else 10105 payload_len = elsiocb->iocb.unsli3.rcvsli3.acc_len; 10106 status = get_job_ulpstatus(phba, elsiocb); 10107 cmd = *payload; 10108 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0) 10109 lpfc_sli3_post_buffer(phba, pring, 1); 10110 10111 did = get_job_els_rsp64_did(phba, elsiocb); 10112 if (status) { 10113 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10114 "RCV Unsol ELS: status:x%x/x%x did:x%x", 10115 status, get_job_word4(phba, elsiocb), did); 10116 goto dropit; 10117 } 10118 10119 /* Check to see if link went down during discovery */ 10120 if (lpfc_els_chk_latt(vport)) 10121 goto dropit; 10122 10123 /* Ignore traffic received during vport shutdown. */ 10124 if (vport->load_flag & FC_UNLOADING) 10125 goto dropit; 10126 10127 /* If NPort discovery is delayed drop incoming ELS */ 10128 if ((vport->fc_flag & FC_DISC_DELAYED) && 10129 (cmd != ELS_CMD_PLOGI)) 10130 goto dropit; 10131 10132 ndlp = lpfc_findnode_did(vport, did); 10133 if (!ndlp) { 10134 /* Cannot find existing Fabric ndlp, so allocate a new one */ 10135 ndlp = lpfc_nlp_init(vport, did); 10136 if (!ndlp) 10137 goto dropit; 10138 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 10139 newnode = 1; 10140 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 10141 ndlp->nlp_type |= NLP_FABRIC; 10142 } else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 10143 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 10144 newnode = 1; 10145 } 10146 10147 phba->fc_stat.elsRcvFrame++; 10148 10149 /* 10150 * Do not process any unsolicited ELS commands 10151 * if the ndlp is in DEV_LOSS 10152 */ 10153 spin_lock_irq(&ndlp->lock); 10154 if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) { 10155 spin_unlock_irq(&ndlp->lock); 10156 if (newnode) 10157 lpfc_nlp_put(ndlp); 10158 goto dropit; 10159 } 10160 spin_unlock_irq(&ndlp->lock); 10161 10162 elsiocb->ndlp = lpfc_nlp_get(ndlp); 10163 if (!elsiocb->ndlp) 10164 goto dropit; 10165 elsiocb->vport = vport; 10166 10167 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) { 10168 cmd &= ELS_CMD_MASK; 10169 } 10170 /* ELS command <elsCmd> received from NPORT <did> */ 10171 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 10172 "0112 ELS command x%x received from NPORT x%x " 10173 "refcnt %d Data: x%x x%x x%x x%x\n", 10174 cmd, did, kref_read(&ndlp->kref), vport->port_state, 10175 vport->fc_flag, vport->fc_myDID, vport->fc_prevDID); 10176 10177 /* reject till our FLOGI completes or PLOGI assigned DID via PT2PT */ 10178 if ((vport->port_state < LPFC_FABRIC_CFG_LINK) && 10179 (cmd != ELS_CMD_FLOGI) && 10180 !((cmd == ELS_CMD_PLOGI) && (vport->fc_flag & FC_PT2PT))) { 10181 rjt_err = LSRJT_LOGICAL_BSY; 10182 rjt_exp = LSEXP_NOTHING_MORE; 10183 goto lsrjt; 10184 } 10185 10186 switch (cmd) { 10187 case ELS_CMD_PLOGI: 10188 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10189 "RCV PLOGI: did:x%x/ste:x%x flg:x%x", 
10190 did, vport->port_state, ndlp->nlp_flag); 10191 10192 phba->fc_stat.elsRcvPLOGI++; 10193 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); 10194 if (phba->sli_rev == LPFC_SLI_REV4 && 10195 (phba->pport->fc_flag & FC_PT2PT)) { 10196 vport->fc_prevDID = vport->fc_myDID; 10197 /* Our DID needs to be updated before registering 10198 * the vfi. This is done in lpfc_rcv_plogi but 10199 * that is called after the reg_vfi. 10200 */ 10201 vport->fc_myDID = 10202 bf_get(els_rsp64_sid, 10203 &elsiocb->wqe.xmit_els_rsp); 10204 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 10205 "3312 Remote port assigned DID x%x " 10206 "%x\n", vport->fc_myDID, 10207 vport->fc_prevDID); 10208 } 10209 10210 lpfc_send_els_event(vport, ndlp, payload); 10211 10212 /* If Nport discovery is delayed, reject PLOGIs */ 10213 if (vport->fc_flag & FC_DISC_DELAYED) { 10214 rjt_err = LSRJT_UNABLE_TPC; 10215 rjt_exp = LSEXP_NOTHING_MORE; 10216 break; 10217 } 10218 10219 if (vport->port_state < LPFC_DISC_AUTH) { 10220 if (!(phba->pport->fc_flag & FC_PT2PT) || 10221 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { 10222 rjt_err = LSRJT_UNABLE_TPC; 10223 rjt_exp = LSEXP_NOTHING_MORE; 10224 break; 10225 } 10226 } 10227 10228 spin_lock_irq(&ndlp->lock); 10229 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE; 10230 spin_unlock_irq(&ndlp->lock); 10231 10232 lpfc_disc_state_machine(vport, ndlp, elsiocb, 10233 NLP_EVT_RCV_PLOGI); 10234 10235 break; 10236 case ELS_CMD_FLOGI: 10237 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10238 "RCV FLOGI: did:x%x/ste:x%x flg:x%x", 10239 did, vport->port_state, ndlp->nlp_flag); 10240 10241 phba->fc_stat.elsRcvFLOGI++; 10242 10243 /* If the driver believes fabric discovery is done and is ready, 10244 * bounce the link. There is some discrepancy. 10245 */ 10246 if (vport->port_state >= LPFC_LOCAL_CFG_LINK && 10247 vport->fc_flag & FC_PT2PT && 10248 vport->rcv_flogi_cnt >= 1) { 10249 rjt_err = LSRJT_LOGICAL_BSY; 10250 rjt_exp = LSEXP_NOTHING_MORE; 10251 init_link++; 10252 goto lsrjt; 10253 } 10254 10255 lpfc_els_rcv_flogi(vport, elsiocb, ndlp); 10256 /* retain node if our response is deferred */ 10257 if (phba->defer_flogi_acc_flag) 10258 break; 10259 if (newnode) 10260 lpfc_disc_state_machine(vport, ndlp, NULL, 10261 NLP_EVT_DEVICE_RM); 10262 break; 10263 case ELS_CMD_LOGO: 10264 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10265 "RCV LOGO: did:x%x/ste:x%x flg:x%x", 10266 did, vport->port_state, ndlp->nlp_flag); 10267 10268 phba->fc_stat.elsRcvLOGO++; 10269 lpfc_send_els_event(vport, ndlp, payload); 10270 if (vport->port_state < LPFC_DISC_AUTH) { 10271 rjt_err = LSRJT_UNABLE_TPC; 10272 rjt_exp = LSEXP_NOTHING_MORE; 10273 break; 10274 } 10275 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO); 10276 if (newnode) 10277 lpfc_disc_state_machine(vport, ndlp, NULL, 10278 NLP_EVT_DEVICE_RM); 10279 break; 10280 case ELS_CMD_PRLO: 10281 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10282 "RCV PRLO: did:x%x/ste:x%x flg:x%x", 10283 did, vport->port_state, ndlp->nlp_flag); 10284 10285 phba->fc_stat.elsRcvPRLO++; 10286 lpfc_send_els_event(vport, ndlp, payload); 10287 if (vport->port_state < LPFC_DISC_AUTH) { 10288 rjt_err = LSRJT_UNABLE_TPC; 10289 rjt_exp = LSEXP_NOTHING_MORE; 10290 break; 10291 } 10292 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO); 10293 break; 10294 case ELS_CMD_LCB: 10295 phba->fc_stat.elsRcvLCB++; 10296 lpfc_els_rcv_lcb(vport, elsiocb, ndlp); 10297 break; 10298 case ELS_CMD_RDP: 10299 phba->fc_stat.elsRcvRDP++; 10300 lpfc_els_rcv_rdp(vport, elsiocb,
ndlp); 10301 break; 10302 case ELS_CMD_RSCN: 10303 phba->fc_stat.elsRcvRSCN++; 10304 lpfc_els_rcv_rscn(vport, elsiocb, ndlp); 10305 if (newnode) 10306 lpfc_disc_state_machine(vport, ndlp, NULL, 10307 NLP_EVT_DEVICE_RM); 10308 break; 10309 case ELS_CMD_ADISC: 10310 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10311 "RCV ADISC: did:x%x/ste:x%x flg:x%x", 10312 did, vport->port_state, ndlp->nlp_flag); 10313 10314 lpfc_send_els_event(vport, ndlp, payload); 10315 phba->fc_stat.elsRcvADISC++; 10316 if (vport->port_state < LPFC_DISC_AUTH) { 10317 rjt_err = LSRJT_UNABLE_TPC; 10318 rjt_exp = LSEXP_NOTHING_MORE; 10319 break; 10320 } 10321 lpfc_disc_state_machine(vport, ndlp, elsiocb, 10322 NLP_EVT_RCV_ADISC); 10323 break; 10324 case ELS_CMD_PDISC: 10325 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10326 "RCV PDISC: did:x%x/ste:x%x flg:x%x", 10327 did, vport->port_state, ndlp->nlp_flag); 10328 10329 phba->fc_stat.elsRcvPDISC++; 10330 if (vport->port_state < LPFC_DISC_AUTH) { 10331 rjt_err = LSRJT_UNABLE_TPC; 10332 rjt_exp = LSEXP_NOTHING_MORE; 10333 break; 10334 } 10335 lpfc_disc_state_machine(vport, ndlp, elsiocb, 10336 NLP_EVT_RCV_PDISC); 10337 break; 10338 case ELS_CMD_FARPR: 10339 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10340 "RCV FARPR: did:x%x/ste:x%x flg:x%x", 10341 did, vport->port_state, ndlp->nlp_flag); 10342 10343 phba->fc_stat.elsRcvFARPR++; 10344 lpfc_els_rcv_farpr(vport, elsiocb, ndlp); 10345 break; 10346 case ELS_CMD_FARP: 10347 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10348 "RCV FARP: did:x%x/ste:x%x flg:x%x", 10349 did, vport->port_state, ndlp->nlp_flag); 10350 10351 phba->fc_stat.elsRcvFARP++; 10352 lpfc_els_rcv_farp(vport, elsiocb, ndlp); 10353 break; 10354 case ELS_CMD_FAN: 10355 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10356 "RCV FAN: did:x%x/ste:x%x flg:x%x", 10357 did, vport->port_state, ndlp->nlp_flag); 10358 10359 phba->fc_stat.elsRcvFAN++; 10360 lpfc_els_rcv_fan(vport, elsiocb, ndlp); 10361 break; 10362 case ELS_CMD_PRLI: 10363 case ELS_CMD_NVMEPRLI: 10364 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10365 "RCV PRLI: did:x%x/ste:x%x flg:x%x", 10366 did, vport->port_state, ndlp->nlp_flag); 10367 10368 phba->fc_stat.elsRcvPRLI++; 10369 if ((vport->port_state < LPFC_DISC_AUTH) && 10370 (vport->fc_flag & FC_FABRIC)) { 10371 rjt_err = LSRJT_UNABLE_TPC; 10372 rjt_exp = LSEXP_NOTHING_MORE; 10373 break; 10374 } 10375 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI); 10376 break; 10377 case ELS_CMD_LIRR: 10378 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10379 "RCV LIRR: did:x%x/ste:x%x flg:x%x", 10380 did, vport->port_state, ndlp->nlp_flag); 10381 10382 phba->fc_stat.elsRcvLIRR++; 10383 lpfc_els_rcv_lirr(vport, elsiocb, ndlp); 10384 if (newnode) 10385 lpfc_disc_state_machine(vport, ndlp, NULL, 10386 NLP_EVT_DEVICE_RM); 10387 break; 10388 case ELS_CMD_RLS: 10389 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10390 "RCV RLS: did:x%x/ste:x%x flg:x%x", 10391 did, vport->port_state, ndlp->nlp_flag); 10392 10393 phba->fc_stat.elsRcvRLS++; 10394 lpfc_els_rcv_rls(vport, elsiocb, ndlp); 10395 if (newnode) 10396 lpfc_disc_state_machine(vport, ndlp, NULL, 10397 NLP_EVT_DEVICE_RM); 10398 break; 10399 case ELS_CMD_RPL: 10400 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10401 "RCV RPL: did:x%x/ste:x%x flg:x%x", 10402 did, vport->port_state, ndlp->nlp_flag); 10403 10404 phba->fc_stat.elsRcvRPL++; 10405 lpfc_els_rcv_rpl(vport, elsiocb, ndlp); 10406 if (newnode) 10407 
lpfc_disc_state_machine(vport, ndlp, NULL, 10408 NLP_EVT_DEVICE_RM); 10409 break; 10410 case ELS_CMD_RNID: 10411 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10412 "RCV RNID: did:x%x/ste:x%x flg:x%x", 10413 did, vport->port_state, ndlp->nlp_flag); 10414 10415 phba->fc_stat.elsRcvRNID++; 10416 lpfc_els_rcv_rnid(vport, elsiocb, ndlp); 10417 if (newnode) 10418 lpfc_disc_state_machine(vport, ndlp, NULL, 10419 NLP_EVT_DEVICE_RM); 10420 break; 10421 case ELS_CMD_RTV: 10422 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10423 "RCV RTV: did:x%x/ste:x%x flg:x%x", 10424 did, vport->port_state, ndlp->nlp_flag); 10425 phba->fc_stat.elsRcvRTV++; 10426 lpfc_els_rcv_rtv(vport, elsiocb, ndlp); 10427 if (newnode) 10428 lpfc_disc_state_machine(vport, ndlp, NULL, 10429 NLP_EVT_DEVICE_RM); 10430 break; 10431 case ELS_CMD_RRQ: 10432 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10433 "RCV RRQ: did:x%x/ste:x%x flg:x%x", 10434 did, vport->port_state, ndlp->nlp_flag); 10435 10436 phba->fc_stat.elsRcvRRQ++; 10437 lpfc_els_rcv_rrq(vport, elsiocb, ndlp); 10438 if (newnode) 10439 lpfc_disc_state_machine(vport, ndlp, NULL, 10440 NLP_EVT_DEVICE_RM); 10441 break; 10442 case ELS_CMD_ECHO: 10443 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10444 "RCV ECHO: did:x%x/ste:x%x flg:x%x", 10445 did, vport->port_state, ndlp->nlp_flag); 10446 10447 phba->fc_stat.elsRcvECHO++; 10448 lpfc_els_rcv_echo(vport, elsiocb, ndlp); 10449 if (newnode) 10450 lpfc_disc_state_machine(vport, ndlp, NULL, 10451 NLP_EVT_DEVICE_RM); 10452 break; 10453 case ELS_CMD_REC: 10454 /* receive this due to exchange closed */ 10455 rjt_err = LSRJT_UNABLE_TPC; 10456 rjt_exp = LSEXP_INVALID_OX_RX; 10457 break; 10458 case ELS_CMD_FPIN: 10459 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10460 "RCV FPIN: did:x%x/ste:x%x flg:x%x", 10461 did, vport->port_state, ndlp->nlp_flag); 10462 10463 lpfc_els_rcv_fpin(vport, (struct fc_els_fpin *)payload, 10464 payload_len); 10465 10466 /* There are no replies, so no rjt codes */ 10467 break; 10468 case ELS_CMD_EDC: 10469 lpfc_els_rcv_edc(vport, elsiocb, ndlp); 10470 break; 10471 case ELS_CMD_RDF: 10472 phba->fc_stat.elsRcvRDF++; 10473 /* Accept RDF only from fabric controller */ 10474 if (did != Fabric_Cntl_DID) { 10475 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 10476 "1115 Received RDF from invalid DID " 10477 "x%x\n", did); 10478 rjt_err = LSRJT_PROTOCOL_ERR; 10479 rjt_exp = LSEXP_NOTHING_MORE; 10480 goto lsrjt; 10481 } 10482 10483 lpfc_els_rcv_rdf(vport, elsiocb, ndlp); 10484 break; 10485 default: 10486 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10487 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x", 10488 cmd, did, vport->port_state); 10489 10490 /* Unsupported ELS command, reject */ 10491 rjt_err = LSRJT_CMD_UNSUPPORTED; 10492 rjt_exp = LSEXP_NOTHING_MORE; 10493 10494 /* Unknown ELS command <elsCmd> received from NPORT <did> */ 10495 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10496 "0115 Unknown ELS command x%x " 10497 "received from NPORT x%x\n", cmd, did); 10498 if (newnode) 10499 lpfc_disc_state_machine(vport, ndlp, NULL, 10500 NLP_EVT_DEVICE_RM); 10501 break; 10502 } 10503 10504 lsrjt: 10505 /* check if need to LS_RJT received ELS cmd */ 10506 if (rjt_err) { 10507 memset(&stat, 0, sizeof(stat)); 10508 stat.un.b.lsRjtRsnCode = rjt_err; 10509 stat.un.b.lsRjtRsnCodeExp = rjt_exp; 10510 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp, 10511 NULL); 10512 /* Remove the reference from above for new nodes. 
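* The node was allocated at the top of this routine solely to service this frame, so let the discovery state machine release it via DEVICE_RM.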
*/ 10513 if (newnode) 10514 lpfc_disc_state_machine(vport, ndlp, NULL, 10515 NLP_EVT_DEVICE_RM); 10516 } 10517 10518 /* Release the reference on this elsiocb, not the ndlp. */ 10519 lpfc_nlp_put(elsiocb->ndlp); 10520 elsiocb->ndlp = NULL; 10521 10522 /* Special case. Driver received an unsolicited command that 10523 * is unsupportable given the driver's current state. Reset the 10524 * link and start over. 10525 */ 10526 if (init_link) { 10527 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10528 if (!mbox) 10529 return; 10530 lpfc_linkdown(phba); 10531 lpfc_init_link(phba, mbox, 10532 phba->cfg_topology, 10533 phba->cfg_link_speed); 10534 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; 10535 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 10536 mbox->vport = vport; 10537 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == 10538 MBX_NOT_FINISHED) 10539 mempool_free(mbox, phba->mbox_mem_pool); 10540 } 10541 10542 return; 10543 10544 dropit: 10545 if (vport && !(vport->load_flag & FC_UNLOADING)) 10546 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10547 "0111 Dropping received ELS cmd " 10548 "Data: x%x x%x x%x x%x\n", 10549 cmd, status, get_job_word4(phba, elsiocb), did); 10550 10551 phba->fc_stat.elsRcvDrop++; 10552 } 10553 10554 /** 10555 * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring 10556 * @phba: pointer to lpfc hba data structure. 10557 * @pring: pointer to a SLI ring. 10558 * @elsiocb: pointer to lpfc els iocb data structure. 10559 * 10560 * This routine is used to process an unsolicited event received from a SLI 10561 * (Service Level Interface) ring. The actual processing of the data buffer 10562 * associated with the unsolicited event is done by invoking the routine 10563 * lpfc_els_unsol_buffer() after properly setting up the iocb buffer from the 10564 * SLI ring on which the unsolicited event was received.
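* For SLI-3 rings without HBQs the payload buffers are looked up from the posted-buffer list by physical address; with HBQs (and on SLI-4) the buffers already attached to the iocb are used directly. Any buffer the handler leaves attached is freed here after it returns.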
10565 **/ 10566 void 10567 lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10568 struct lpfc_iocbq *elsiocb) 10569 { 10570 struct lpfc_vport *vport = elsiocb->vport; 10571 u32 ulp_command, status, parameter, bde_count = 0; 10572 IOCB_t *icmd; 10573 struct lpfc_wcqe_complete *wcqe_cmpl = NULL; 10574 struct lpfc_dmabuf *bdeBuf1 = elsiocb->cmd_dmabuf; 10575 struct lpfc_dmabuf *bdeBuf2 = elsiocb->bpl_dmabuf; 10576 dma_addr_t paddr; 10577 10578 elsiocb->cmd_dmabuf = NULL; 10579 elsiocb->rsp_dmabuf = NULL; 10580 elsiocb->bpl_dmabuf = NULL; 10581 10582 wcqe_cmpl = &elsiocb->wcqe_cmpl; 10583 ulp_command = get_job_cmnd(phba, elsiocb); 10584 status = get_job_ulpstatus(phba, elsiocb); 10585 parameter = get_job_word4(phba, elsiocb); 10586 if (phba->sli_rev == LPFC_SLI_REV4) 10587 bde_count = wcqe_cmpl->word3; 10588 else 10589 bde_count = elsiocb->iocb.ulpBdeCount; 10590 10591 if (status == IOSTAT_NEED_BUFFER) { 10592 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); 10593 } else if (status == IOSTAT_LOCAL_REJECT && 10594 (parameter & IOERR_PARAM_MASK) == 10595 IOERR_RCV_BUFFER_WAITING) { 10596 phba->fc_stat.NoRcvBuf++; 10597 /* Not enough posted buffers; Try posting more buffers */ 10598 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) 10599 lpfc_sli3_post_buffer(phba, pring, 0); 10600 return; 10601 } 10602 10603 if (phba->sli_rev == LPFC_SLI_REV3) { 10604 icmd = &elsiocb->iocb; 10605 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 10606 (ulp_command == CMD_IOCB_RCV_ELS64_CX || 10607 ulp_command == CMD_IOCB_RCV_SEQ64_CX)) { 10608 if (icmd->unsli3.rcvsli3.vpi == 0xffff) 10609 vport = phba->pport; 10610 else 10611 vport = lpfc_find_vport_by_vpid(phba, 10612 icmd->unsli3.rcvsli3.vpi); 10613 } 10614 } 10615 10616 /* If there are no BDEs associated 10617 * with this IOCB, there is nothing to do. 10618 */ 10619 if (bde_count == 0) 10620 return; 10621 10622 /* Account for SLI2 or SLI3 and later unsolicited buffering */ 10623 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 10624 elsiocb->cmd_dmabuf = bdeBuf1; 10625 if (bde_count == 2) 10626 elsiocb->bpl_dmabuf = bdeBuf2; 10627 } else { 10628 icmd = &elsiocb->iocb; 10629 paddr = getPaddr(icmd->un.cont64[0].addrHigh, 10630 icmd->un.cont64[0].addrLow); 10631 elsiocb->cmd_dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, 10632 paddr); 10633 if (bde_count == 2) { 10634 paddr = getPaddr(icmd->un.cont64[1].addrHigh, 10635 icmd->un.cont64[1].addrLow); 10636 elsiocb->bpl_dmabuf = lpfc_sli_ringpostbuf_get(phba, 10637 pring, 10638 paddr); 10639 } 10640 } 10641 10642 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb); 10643 /* 10644 * The different unsolicited event handlers would tell us 10645 * if they are done with "mp" by setting cmd_dmabuf to NULL. 10646 */ 10647 if (elsiocb->cmd_dmabuf) { 10648 lpfc_in_buf_free(phba, elsiocb->cmd_dmabuf); 10649 elsiocb->cmd_dmabuf = NULL; 10650 } 10651 10652 if (elsiocb->bpl_dmabuf) { 10653 lpfc_in_buf_free(phba, elsiocb->bpl_dmabuf); 10654 elsiocb->bpl_dmabuf = NULL; 10655 } 10656 10657 } 10658 10659 static void 10660 lpfc_start_fdmi(struct lpfc_vport *vport) 10661 { 10662 struct lpfc_nodelist *ndlp; 10663 10664 /* If this is the first time, allocate an ndlp and initialize 10665 * it. Otherwise, make sure the node is enabled and then do the 10666 * login. 
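* A newly created FDMI node is tagged NLP_FABRIC before the PLOGI to FDMI_DID is started.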
10667 */ 10668 ndlp = lpfc_findnode_did(vport, FDMI_DID); 10669 if (!ndlp) { 10670 ndlp = lpfc_nlp_init(vport, FDMI_DID); 10671 if (ndlp) { 10672 ndlp->nlp_type |= NLP_FABRIC; 10673 } else { 10674 return; 10675 } 10676 } 10677 10678 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 10679 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 10680 } 10681 10682 /** 10683 * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr 10684 * @phba: pointer to lpfc hba data structure. 10685 * @vport: pointer to a virtual N_Port data structure. 10686 * 10687 * This routine issues a Port Login (PLOGI) to the Name Server with 10688 * State Change Request (SCR) for a @vport. This routine will create an 10689 * ndlp for the Name Server associated with the @vport if such a node does 10690 * not already exist. The PLOGI to Name Server is issued by invoking the 10691 * lpfc_issue_els_plogi() routine. If Fabric-Device Management Interface 10692 * (FDMI) is configured for the @vport, an FDMI node will be created and 10693 * the PLOGI to FDMI is issued by invoking the lpfc_issue_els_plogi() routine. 10694 **/ 10695 void 10696 lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) 10697 { 10698 struct lpfc_nodelist *ndlp; 10699 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 10700 10701 /* 10702 * If the lpfc_delay_discovery parameter is set and the clean address 10703 * bit is cleared and the fc fabric parameters have changed, delay FC NPort 10704 * discovery. 10705 */ 10706 spin_lock_irq(shost->host_lock); 10707 if (vport->fc_flag & FC_DISC_DELAYED) { 10708 spin_unlock_irq(shost->host_lock); 10709 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10710 "3334 Delay fc port discovery for %d secs\n", 10711 phba->fc_ratov); 10712 mod_timer(&vport->delayed_disc_tmo, 10713 jiffies + msecs_to_jiffies(1000 * phba->fc_ratov)); 10714 return; 10715 } 10716 spin_unlock_irq(shost->host_lock); 10717 10718 ndlp = lpfc_findnode_did(vport, NameServer_DID); 10719 if (!ndlp) { 10720 ndlp = lpfc_nlp_init(vport, NameServer_DID); 10721 if (!ndlp) { 10722 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 10723 lpfc_disc_start(vport); 10724 return; 10725 } 10726 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10727 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10728 "0251 NameServer login: no memory\n"); 10729 return; 10730 } 10731 } 10732 10733 ndlp->nlp_type |= NLP_FABRIC; 10734 10735 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 10736 10737 if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) { 10738 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10739 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10740 "0252 Cannot issue NameServer login\n"); 10741 return; 10742 } 10743 10744 if ((phba->cfg_enable_SmartSAN || 10745 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) && 10746 (vport->load_flag & FC_ALLOW_FDMI)) 10747 lpfc_start_fdmi(vport); 10748 } 10749 10750 /** 10751 * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport 10752 * @phba: pointer to lpfc hba data structure. 10753 * @pmb: pointer to the driver internal queue element for mailbox command. 10754 * 10755 * This routine is the completion callback function for the register new vport 10756 * mailbox command.
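* The REG_VPI mailbox was set up by lpfc_register_new_vport() and holds an ndlp reference that this callback drops on its common exit path.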
If the new vport mailbox command completes successfully, 10757 * the fabric registration login shall be performed on physical port (the 10758 * new vport created is actually a physical port, with VPI 0) or the port 10759 * login to Name Server for State Change Request (SCR) will be performed 10760 * on virtual port (real virtual port, with VPI greater than 0). 10761 **/ 10762 static void 10763 lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 10764 { 10765 struct lpfc_vport *vport = pmb->vport; 10766 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 10767 struct lpfc_nodelist *ndlp = pmb->ctx_ndlp; 10768 MAILBOX_t *mb = &pmb->u.mb; 10769 int rc; 10770 10771 spin_lock_irq(shost->host_lock); 10772 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 10773 spin_unlock_irq(shost->host_lock); 10774 10775 if (mb->mbxStatus) { 10776 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10777 "0915 Register VPI failed : Status: x%x" 10778 " upd bit: x%x \n", mb->mbxStatus, 10779 mb->un.varRegVpi.upd); 10780 if (phba->sli_rev == LPFC_SLI_REV4 && 10781 mb->un.varRegVpi.upd) 10782 goto mbox_err_exit ; 10783 10784 switch (mb->mbxStatus) { 10785 case 0x11: /* unsupported feature */ 10786 case 0x9603: /* max_vpi exceeded */ 10787 case 0x9602: /* Link event since CLEAR_LA */ 10788 /* giving up on vport registration */ 10789 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10790 spin_lock_irq(shost->host_lock); 10791 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 10792 spin_unlock_irq(shost->host_lock); 10793 lpfc_can_disctmo(vport); 10794 break; 10795 /* If reg_vpi fail with invalid VPI status, re-init VPI */ 10796 case 0x20: 10797 spin_lock_irq(shost->host_lock); 10798 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 10799 spin_unlock_irq(shost->host_lock); 10800 lpfc_init_vpi(phba, pmb, vport->vpi); 10801 pmb->vport = vport; 10802 pmb->mbox_cmpl = lpfc_init_vpi_cmpl; 10803 rc = lpfc_sli_issue_mbox(phba, pmb, 10804 MBX_NOWAIT); 10805 if (rc == MBX_NOT_FINISHED) { 10806 lpfc_printf_vlog(vport, KERN_ERR, 10807 LOG_TRACE_EVENT, 10808 "2732 Failed to issue INIT_VPI" 10809 " mailbox command\n"); 10810 } else { 10811 lpfc_nlp_put(ndlp); 10812 return; 10813 } 10814 fallthrough; 10815 default: 10816 /* Try to recover from this error */ 10817 if (phba->sli_rev == LPFC_SLI_REV4) 10818 lpfc_sli4_unreg_all_rpis(vport); 10819 lpfc_mbx_unreg_vpi(vport); 10820 spin_lock_irq(shost->host_lock); 10821 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 10822 spin_unlock_irq(shost->host_lock); 10823 if (mb->mbxStatus == MBX_NOT_FINISHED) 10824 break; 10825 if ((vport->port_type == LPFC_PHYSICAL_PORT) && 10826 !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) { 10827 if (phba->sli_rev == LPFC_SLI_REV4) 10828 lpfc_issue_init_vfi(vport); 10829 else 10830 lpfc_initial_flogi(vport); 10831 } else { 10832 lpfc_initial_fdisc(vport); 10833 } 10834 break; 10835 } 10836 } else { 10837 spin_lock_irq(shost->host_lock); 10838 vport->vpi_state |= LPFC_VPI_REGISTERED; 10839 spin_unlock_irq(shost->host_lock); 10840 if (vport == phba->pport) { 10841 if (phba->sli_rev < LPFC_SLI_REV4) 10842 lpfc_issue_fabric_reglogin(vport); 10843 else { 10844 /* 10845 * If the physical port is instantiated using 10846 * FDISC, do not start vport discovery. 
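* In that case only the Name Server PLOGI below is issued.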
10847 */ 10848 if (vport->port_state != LPFC_FDISC) 10849 lpfc_start_fdiscs(phba); 10850 lpfc_do_scr_ns_plogi(phba, vport); 10851 } 10852 } else { 10853 lpfc_do_scr_ns_plogi(phba, vport); 10854 } 10855 } 10856 mbox_err_exit: 10857 /* Now, we decrement the ndlp reference count held for this 10858 * callback function 10859 */ 10860 lpfc_nlp_put(ndlp); 10861 10862 mempool_free(pmb, phba->mbox_mem_pool); 10863 return; 10864 } 10865 10866 /** 10867 * lpfc_register_new_vport - Register a new vport with a HBA 10868 * @phba: pointer to lpfc hba data structure. 10869 * @vport: pointer to a host virtual N_Port data structure. 10870 * @ndlp: pointer to a node-list data structure. 10871 * 10872 * This routine registers the @vport as a new virtual port with a HBA. 10873 * It is done through a registering vpi mailbox command. 10874 **/ 10875 void 10876 lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport, 10877 struct lpfc_nodelist *ndlp) 10878 { 10879 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 10880 LPFC_MBOXQ_t *mbox; 10881 10882 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10883 if (mbox) { 10884 lpfc_reg_vpi(vport, mbox); 10885 mbox->vport = vport; 10886 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 10887 if (!mbox->ctx_ndlp) { 10888 mempool_free(mbox, phba->mbox_mem_pool); 10889 goto mbox_err_exit; 10890 } 10891 10892 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport; 10893 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 10894 == MBX_NOT_FINISHED) { 10895 /* mailbox command not success, decrement ndlp 10896 * reference count for this command 10897 */ 10898 lpfc_nlp_put(ndlp); 10899 mempool_free(mbox, phba->mbox_mem_pool); 10900 10901 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10902 "0253 Register VPI: Can't send mbox\n"); 10903 goto mbox_err_exit; 10904 } 10905 } else { 10906 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10907 "0254 Register VPI: no memory\n"); 10908 goto mbox_err_exit; 10909 } 10910 return; 10911 10912 mbox_err_exit: 10913 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10914 spin_lock_irq(shost->host_lock); 10915 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 10916 spin_unlock_irq(shost->host_lock); 10917 return; 10918 } 10919 10920 /** 10921 * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer 10922 * @phba: pointer to lpfc hba data structure. 10923 * 10924 * This routine cancels the retry delay timers to all the vports. 10925 **/ 10926 void 10927 lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba) 10928 { 10929 struct lpfc_vport **vports; 10930 struct lpfc_nodelist *ndlp; 10931 uint32_t link_state; 10932 int i; 10933 10934 /* Treat this failure as linkdown for all vports */ 10935 link_state = phba->link_state; 10936 lpfc_linkdown(phba); 10937 phba->link_state = link_state; 10938 10939 vports = lpfc_create_vport_work_array(phba); 10940 10941 if (vports) { 10942 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 10943 ndlp = lpfc_findnode_did(vports[i], Fabric_DID); 10944 if (ndlp) 10945 lpfc_cancel_retry_delay_tmo(vports[i], ndlp); 10946 lpfc_els_flush_cmd(vports[i]); 10947 } 10948 lpfc_destroy_vport_work_array(phba, vports); 10949 } 10950 } 10951 10952 /** 10953 * lpfc_retry_pport_discovery - Start timer to retry FLOGI. 10954 * @phba: pointer to lpfc hba data structure. 10955 * 10956 * This routine abort all pending discovery commands and 10957 * start a timer to retry FLOGI for the physical port 10958 * discovery. 
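* The retry is scheduled by arming the fabric node's delay timer for one second and recording ELS_CMD_FLOGI as the command to re-issue.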
10959 **/ 10960 void 10961 lpfc_retry_pport_discovery(struct lpfc_hba *phba) 10962 { 10963 struct lpfc_nodelist *ndlp; 10964 10965 /* Cancel all the vports' retry delay timers */ 10966 lpfc_cancel_all_vport_retry_delay_timer(phba); 10967 10968 /* If the fabric requires FLOGI, then re-instantiate the physical login */ 10969 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); 10970 if (!ndlp) 10971 return; 10972 10973 mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); 10974 spin_lock_irq(&ndlp->lock); 10975 ndlp->nlp_flag |= NLP_DELAY_TMO; 10976 spin_unlock_irq(&ndlp->lock); 10977 ndlp->nlp_last_elscmd = ELS_CMD_FLOGI; 10978 phba->pport->port_state = LPFC_FLOGI; 10979 return; 10980 } 10981 10982 /** 10983 * lpfc_fabric_login_reqd - Check if FLOGI required. 10984 * @phba: pointer to lpfc hba data structure. 10985 * @cmdiocb: pointer to FDISC command iocb. 10986 * @rspiocb: pointer to FDISC response iocb. 10987 * 10988 * This routine checks if a FLOGI is required for FDISC 10989 * to succeed. 10990 **/ 10991 static int 10992 lpfc_fabric_login_reqd(struct lpfc_hba *phba, 10993 struct lpfc_iocbq *cmdiocb, 10994 struct lpfc_iocbq *rspiocb) 10995 { 10996 u32 ulp_status = get_job_ulpstatus(phba, rspiocb); 10997 u32 ulp_word4 = get_job_word4(phba, rspiocb); 10998 10999 if (ulp_status != IOSTAT_FABRIC_RJT || 11000 ulp_word4 != RJT_LOGIN_REQUIRED) 11001 return 0; 11002 else 11003 return 1; 11004 } 11005 11006 /** 11007 * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command 11008 * @phba: pointer to lpfc hba data structure. 11009 * @cmdiocb: pointer to lpfc command iocb data structure. 11010 * @rspiocb: pointer to lpfc response iocb data structure. 11011 * 11012 * This routine is the completion callback function to a Fabric Discover 11013 * (FDISC) ELS command. Since all the FDISC ELS commands are issued 11014 * single threaded, each FDISC completion callback function will reset 11015 * the discovery timer for all vports such that the timers will not 11016 * time out unnecessarily. The function checks the FDISC IOCB status. If an error is 11017 * detected, the vport will be set to FC_VPORT_FAILED state. Otherwise, the 11018 * vport will be set to FC_VPORT_ACTIVE state. It then checks whether the DID 11019 * assigned to the vport has been changed with the completion of the FDISC 11020 * command. If so, both RPI (Remote Port Index) and VPI (Virtual Port Index) 11021 * are unregistered from the HBA, and then the lpfc_register_new_vport() 11022 * routine is invoked to register the new vport with the HBA. Otherwise, the 11023 * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name 11024 * Server for State Change Request (SCR). 11025 **/ 11026 static void 11027 lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11028 struct lpfc_iocbq *rspiocb) 11029 { 11030 struct lpfc_vport *vport = cmdiocb->vport; 11031 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 11032 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 11033 struct lpfc_nodelist *np; 11034 struct lpfc_nodelist *next_np; 11035 struct lpfc_iocbq *piocb; 11036 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf, *prsp; 11037 struct serv_parm *sp; 11038 uint8_t fabric_param_changed; 11039 u32 ulp_status, ulp_word4; 11040 11041 ulp_status = get_job_ulpstatus(phba, rspiocb); 11042 ulp_word4 = get_job_word4(phba, rspiocb); 11043 11044 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 11045 "0123 FDISC completes.
x%x/x%x prevDID: x%x\n", 11046 ulp_status, ulp_word4, 11047 vport->fc_prevDID); 11048 /* Since all FDISCs are being single threaded, we 11049 * must reset the discovery timer for ALL vports 11050 * waiting to send FDISC when one completes. 11051 */ 11052 list_for_each_entry(piocb, &phba->fabric_iocb_list, list) { 11053 lpfc_set_disctmo(piocb->vport); 11054 } 11055 11056 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 11057 "FDISC cmpl: status:x%x/x%x prevdid:x%x", 11058 ulp_status, ulp_word4, vport->fc_prevDID); 11059 11060 if (ulp_status) { 11061 11062 if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) { 11063 lpfc_retry_pport_discovery(phba); 11064 goto out; 11065 } 11066 11067 /* Check for retry */ 11068 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) 11069 goto out; 11070 /* FDISC failed */ 11071 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 11072 "0126 FDISC failed. (x%x/x%x)\n", 11073 ulp_status, ulp_word4); 11074 goto fdisc_failed; 11075 } 11076 11077 lpfc_check_nlp_post_devloss(vport, ndlp); 11078 11079 spin_lock_irq(shost->host_lock); 11080 vport->fc_flag &= ~FC_VPORT_CVL_RCVD; 11081 vport->fc_flag &= ~FC_VPORT_LOGO_RCVD; 11082 vport->fc_flag |= FC_FABRIC; 11083 if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP) 11084 vport->fc_flag |= FC_PUBLIC_LOOP; 11085 spin_unlock_irq(shost->host_lock); 11086 11087 vport->fc_myDID = ulp_word4 & Mask_DID; 11088 lpfc_vport_set_state(vport, FC_VPORT_ACTIVE); 11089 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 11090 if (!prsp) 11091 goto out; 11092 sp = prsp->virt + sizeof(uint32_t); 11093 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp); 11094 memcpy(&vport->fabric_portname, &sp->portName, 11095 sizeof(struct lpfc_name)); 11096 memcpy(&vport->fabric_nodename, &sp->nodeName, 11097 sizeof(struct lpfc_name)); 11098 if (fabric_param_changed && 11099 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 11100 /* If our NportID changed, we need to ensure all 11101 * remaining NPORTs get unreg_login'ed so we can 11102 * issue unreg_vpi. 11103 */ 11104 list_for_each_entry_safe(np, next_np, 11105 &vport->fc_nodes, nlp_listp) { 11106 if ((np->nlp_state != NLP_STE_NPR_NODE) || 11107 !(np->nlp_flag & NLP_NPR_ADISC)) 11108 continue; 11109 spin_lock_irq(&ndlp->lock); 11110 np->nlp_flag &= ~NLP_NPR_ADISC; 11111 spin_unlock_irq(&ndlp->lock); 11112 lpfc_unreg_rpi(vport, np); 11113 } 11114 lpfc_cleanup_pending_mbox(vport); 11115 11116 if (phba->sli_rev == LPFC_SLI_REV4) 11117 lpfc_sli4_unreg_all_rpis(vport); 11118 11119 lpfc_mbx_unreg_vpi(vport); 11120 spin_lock_irq(shost->host_lock); 11121 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 11122 if (phba->sli_rev == LPFC_SLI_REV4) 11123 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 11124 else 11125 vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG; 11126 spin_unlock_irq(shost->host_lock); 11127 } else if ((phba->sli_rev == LPFC_SLI_REV4) && 11128 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 11129 /* 11130 * Driver needs to re-reg VPI in order for f/w 11131 * to update the MAC address. 11132 */ 11133 lpfc_register_new_vport(phba, vport, ndlp); 11134 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 11135 goto out; 11136 } 11137 11138 if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI) 11139 lpfc_issue_init_vpi(vport); 11140 else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) 11141 lpfc_register_new_vport(phba, vport, ndlp); 11142 else 11143 lpfc_do_scr_ns_plogi(phba, vport); 11144 11145 /* The FDISC completed successfully. Move the fabric ndlp to 11146 * UNMAPPED state and register with the transport. 
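* The command iocb and the ndlp reference taken at issue time are then released at the common exit below.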
11147 */ 11148 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 11149 goto out; 11150 11151 fdisc_failed: 11152 if (vport->fc_vport && 11153 (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS)) 11154 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 11155 /* Cancel discovery timer */ 11156 lpfc_can_disctmo(vport); 11157 out: 11158 lpfc_els_free_iocb(phba, cmdiocb); 11159 lpfc_nlp_put(ndlp); 11160 } 11161 11162 /** 11163 * lpfc_issue_els_fdisc - Issue a fdisc iocb command 11164 * @vport: pointer to a virtual N_Port data structure. 11165 * @ndlp: pointer to a node-list data structure. 11166 * @retry: number of retries to the command IOCB. 11167 * 11168 * This routine prepares and issues a Fabric Discover (FDISC) IOCB to 11169 * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb() 11170 * routine to issue the IOCB, which makes sure only one outstanding fabric 11171 * IOCB will be sent off HBA at any given time. 11172 * 11173 * Note that the ndlp reference count will be incremented by 1 for holding the 11174 * ndlp and the reference to ndlp will be stored into the ndlp field of 11175 * the IOCB for the completion callback function to the FDISC ELS command. 11176 * 11177 * Return code 11178 * 0 - Successfully issued fdisc iocb command 11179 * 1 - Failed to issue fdisc iocb command 11180 **/ 11181 static int 11182 lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 11183 uint8_t retry) 11184 { 11185 struct lpfc_hba *phba = vport->phba; 11186 IOCB_t *icmd; 11187 union lpfc_wqe128 *wqe = NULL; 11188 struct lpfc_iocbq *elsiocb; 11189 struct serv_parm *sp; 11190 uint8_t *pcmd; 11191 uint16_t cmdsize; 11192 int did = ndlp->nlp_DID; 11193 int rc; 11194 11195 vport->port_state = LPFC_FDISC; 11196 vport->fc_myDID = 0; 11197 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 11198 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 11199 ELS_CMD_FDISC); 11200 if (!elsiocb) { 11201 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 11202 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 11203 "0255 Issue FDISC: no IOCB\n"); 11204 return 1; 11205 } 11206 11207 if (phba->sli_rev == LPFC_SLI_REV4) { 11208 wqe = &elsiocb->wqe; 11209 bf_set(els_req64_sid, &wqe->els_req, 0); 11210 bf_set(els_req64_sp, &wqe->els_req, 1); 11211 } else { 11212 icmd = &elsiocb->iocb; 11213 icmd->un.elsreq64.myID = 0; 11214 icmd->un.elsreq64.fl = 1; 11215 icmd->ulpCt_h = 1; 11216 icmd->ulpCt_l = 0; 11217 } 11218 11219 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 11220 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC; 11221 pcmd += sizeof(uint32_t); /* CSP Word 1 */ 11222 memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm)); 11223 sp = (struct serv_parm *) pcmd; 11224 /* Setup CSPs accordingly for Fabric */ 11225 sp->cmn.e_d_tov = 0; 11226 sp->cmn.w2.r_a_tov = 0; 11227 sp->cmn.virtual_fabric_support = 0; 11228 sp->cls1.classValid = 0; 11229 sp->cls2.seqDelivery = 1; 11230 sp->cls3.seqDelivery = 1; 11231 11232 pcmd += sizeof(uint32_t); /* CSP Word 2 */ 11233 pcmd += sizeof(uint32_t); /* CSP Word 3 */ 11234 pcmd += sizeof(uint32_t); /* CSP Word 4 */ 11235 pcmd += sizeof(uint32_t); /* Port Name */ 11236 memcpy(pcmd, &vport->fc_portname, 8); 11237 pcmd += sizeof(uint32_t); /* Node Name */ 11238 pcmd += sizeof(uint32_t); /* Node Name */ 11239 memcpy(pcmd, &vport->fc_nodename, 8); 11240 sp->cmn.valid_vendor_ver_level = 0; 11241 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); 11242 lpfc_set_disctmo(vport); 11243 11244 phba->fc_stat.elsXmitFDISC++; 
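/* Completion is handled by lpfc_cmpl_els_fdisc; the request goes through lpfc_issue_fabric_iocb() so only one fabric ELS is outstanding at a time. */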
11245 elsiocb->cmd_cmpl = lpfc_cmpl_els_fdisc; 11246 11247 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 11248 "Issue FDISC: did:x%x", 11249 did, 0, 0); 11250 11251 elsiocb->ndlp = lpfc_nlp_get(ndlp); 11252 if (!elsiocb->ndlp) 11253 goto err_out; 11254 11255 rc = lpfc_issue_fabric_iocb(phba, elsiocb); 11256 if (rc == IOCB_ERROR) { 11257 lpfc_nlp_put(ndlp); 11258 goto err_out; 11259 } 11260 11261 lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING); 11262 return 0; 11263 11264 err_out: 11265 lpfc_els_free_iocb(phba, elsiocb); 11266 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 11267 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 11268 "0256 Issue FDISC: Cannot send IOCB\n"); 11269 return 1; 11270 } 11271 11272 /** 11273 * lpfc_cmpl_els_npiv_logo - Completion function with vport logo 11274 * @phba: pointer to lpfc hba data structure. 11275 * @cmdiocb: pointer to lpfc command iocb data structure. 11276 * @rspiocb: pointer to lpfc response iocb data structure. 11277 * 11278 * This routine is the completion callback function to the issuing of a LOGO 11279 * ELS command off a vport. It frees the command IOCB and then decrement the 11280 * reference count held on ndlp for this completion function, indicating that 11281 * the reference to the ndlp is no long needed. Note that the 11282 * lpfc_els_free_iocb() routine decrements the ndlp reference held for this 11283 * callback function and an additional explicit ndlp reference decrementation 11284 * will trigger the actual release of the ndlp. 11285 **/ 11286 static void 11287 lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11288 struct lpfc_iocbq *rspiocb) 11289 { 11290 struct lpfc_vport *vport = cmdiocb->vport; 11291 IOCB_t *irsp; 11292 struct lpfc_nodelist *ndlp; 11293 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 11294 u32 ulp_status, ulp_word4, did, tmo; 11295 11296 ndlp = cmdiocb->ndlp; 11297 11298 ulp_status = get_job_ulpstatus(phba, rspiocb); 11299 ulp_word4 = get_job_word4(phba, rspiocb); 11300 11301 if (phba->sli_rev == LPFC_SLI_REV4) { 11302 did = get_job_els_rsp64_did(phba, cmdiocb); 11303 tmo = get_wqe_tmo(cmdiocb); 11304 } else { 11305 irsp = &rspiocb->iocb; 11306 did = get_job_els_rsp64_did(phba, rspiocb); 11307 tmo = irsp->ulpTimeout; 11308 } 11309 11310 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 11311 "LOGO npiv cmpl: status:x%x/x%x did:x%x", 11312 ulp_status, ulp_word4, did); 11313 11314 /* NPIV LOGO completes to NPort <nlp_DID> */ 11315 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 11316 "2928 NPIV LOGO completes to NPort x%x " 11317 "Data: x%x x%x x%x x%x x%x x%x x%x\n", 11318 ndlp->nlp_DID, ulp_status, ulp_word4, 11319 tmo, vport->num_disc_nodes, 11320 kref_read(&ndlp->kref), ndlp->nlp_flag, 11321 ndlp->fc4_xpt_flags); 11322 11323 if (ulp_status == IOSTAT_SUCCESS) { 11324 spin_lock_irq(shost->host_lock); 11325 vport->fc_flag &= ~FC_NDISC_ACTIVE; 11326 vport->fc_flag &= ~FC_FABRIC; 11327 spin_unlock_irq(shost->host_lock); 11328 lpfc_can_disctmo(vport); 11329 } 11330 11331 if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) { 11332 /* Wake up lpfc_vport_delete if waiting...*/ 11333 if (ndlp->logo_waitq) 11334 wake_up(ndlp->logo_waitq); 11335 spin_lock_irq(&ndlp->lock); 11336 ndlp->nlp_flag &= ~(NLP_ISSUE_LOGO | NLP_LOGO_SND); 11337 ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO; 11338 spin_unlock_irq(&ndlp->lock); 11339 } 11340 11341 /* Safe to release resources now. 
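* Free the LOGO iocb and drop the ndlp reference taken when the LOGO was issued.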
*/ 11342 lpfc_els_free_iocb(phba, cmdiocb); 11343 lpfc_nlp_put(ndlp); 11344 } 11345 11346 /** 11347 * lpfc_issue_els_npiv_logo - Issue a logo off a vport 11348 * @vport: pointer to a virtual N_Port data structure. 11349 * @ndlp: pointer to a node-list data structure. 11350 * 11351 * This routine issues a LOGO ELS command to an @ndlp off a @vport. 11352 * 11353 * Note that the ndlp reference count will be incremented by 1 for holding the 11354 * ndlp and the reference to ndlp will be stored into the ndlp field of 11355 * the IOCB for the completion callback function to the LOGO ELS command. 11356 * 11357 * Return codes 11358 * 0 - Successfully issued logo off the @vport 11359 * 1 - Failed to issue logo off the @vport 11360 **/ 11361 int 11362 lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 11363 { 11364 int rc = 0; 11365 struct lpfc_hba *phba = vport->phba; 11366 struct lpfc_iocbq *elsiocb; 11367 uint8_t *pcmd; 11368 uint16_t cmdsize; 11369 11370 cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name); 11371 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID, 11372 ELS_CMD_LOGO); 11373 if (!elsiocb) 11374 return 1; 11375 11376 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 11377 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 11378 pcmd += sizeof(uint32_t); 11379 11380 /* Fill in LOGO payload */ 11381 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 11382 pcmd += sizeof(uint32_t); 11383 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 11384 11385 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 11386 "Issue LOGO npiv did:x%x flg:x%x", 11387 ndlp->nlp_DID, ndlp->nlp_flag, 0); 11388 11389 elsiocb->cmd_cmpl = lpfc_cmpl_els_npiv_logo; 11390 spin_lock_irq(&ndlp->lock); 11391 ndlp->nlp_flag |= NLP_LOGO_SND; 11392 spin_unlock_irq(&ndlp->lock); 11393 elsiocb->ndlp = lpfc_nlp_get(ndlp); 11394 if (!elsiocb->ndlp) { 11395 lpfc_els_free_iocb(phba, elsiocb); 11396 goto err; 11397 } 11398 11399 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 11400 if (rc == IOCB_ERROR) { 11401 lpfc_els_free_iocb(phba, elsiocb); 11402 lpfc_nlp_put(ndlp); 11403 goto err; 11404 } 11405 return 0; 11406 11407 err: 11408 spin_lock_irq(&ndlp->lock); 11409 ndlp->nlp_flag &= ~NLP_LOGO_SND; 11410 spin_unlock_irq(&ndlp->lock); 11411 return 1; 11412 } 11413 11414 /** 11415 * lpfc_fabric_block_timeout - Handler function to the fabric block timer 11416 * @t: timer context used to obtain the lpfc hba. 11417 * 11418 * This routine is invoked by the fabric iocb block timer after 11419 * timeout. It posts the fabric iocb block timeout event by setting the 11420 * WORKER_FABRIC_BLOCK_TMO bit to work port event bitmap and then invokes 11421 * lpfc_worker_wake_up() routine to wake up the worker thread. It is for 11422 * the worker thread to invoke the lpfc_unblock_fabric_iocbs() on the 11423 * posted event WORKER_FABRIC_BLOCK_TMO. 
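* No iocb is processed in timer context; the timer only posts the event and wakes the worker thread.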
11424 **/ 11425 void 11426 lpfc_fabric_block_timeout(struct timer_list *t) 11427 { 11428 struct lpfc_hba *phba = from_timer(phba, t, fabric_block_timer); 11429 unsigned long iflags; 11430 uint32_t tmo_posted; 11431 11432 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 11433 tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO; 11434 if (!tmo_posted) 11435 phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO; 11436 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); 11437 11438 if (!tmo_posted) 11439 lpfc_worker_wake_up(phba); 11440 return; 11441 } 11442 11443 /** 11444 * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list 11445 * @phba: pointer to lpfc hba data structure. 11446 * 11447 * This routine issues one fabric iocb from the driver internal list to 11448 * the HBA. It first checks whether it's ready to issue one fabric iocb to 11449 * the HBA (whether there is no outstanding fabric iocb). If so, it shall 11450 * remove one pending fabric iocb from the driver internal list and invokes 11451 * lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA. 11452 **/ 11453 static void 11454 lpfc_resume_fabric_iocbs(struct lpfc_hba *phba) 11455 { 11456 struct lpfc_iocbq *iocb; 11457 unsigned long iflags; 11458 int ret; 11459 11460 repeat: 11461 iocb = NULL; 11462 spin_lock_irqsave(&phba->hbalock, iflags); 11463 /* Post any pending iocb to the SLI layer */ 11464 if (atomic_read(&phba->fabric_iocb_count) == 0) { 11465 list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb), 11466 list); 11467 if (iocb) 11468 /* Increment fabric iocb count to hold the position */ 11469 atomic_inc(&phba->fabric_iocb_count); 11470 } 11471 spin_unlock_irqrestore(&phba->hbalock, iflags); 11472 if (iocb) { 11473 iocb->fabric_cmd_cmpl = iocb->cmd_cmpl; 11474 iocb->cmd_cmpl = lpfc_cmpl_fabric_iocb; 11475 iocb->cmd_flag |= LPFC_IO_FABRIC; 11476 11477 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD, 11478 "Fabric sched1: ste:x%x", 11479 iocb->vport->port_state, 0, 0); 11480 11481 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); 11482 11483 if (ret == IOCB_ERROR) { 11484 iocb->cmd_cmpl = iocb->fabric_cmd_cmpl; 11485 iocb->fabric_cmd_cmpl = NULL; 11486 iocb->cmd_flag &= ~LPFC_IO_FABRIC; 11487 set_job_ulpstatus(iocb, IOSTAT_LOCAL_REJECT); 11488 iocb->wcqe_cmpl.parameter = IOERR_SLI_ABORTED; 11489 iocb->cmd_cmpl(phba, iocb, iocb); 11490 11491 atomic_dec(&phba->fabric_iocb_count); 11492 goto repeat; 11493 } 11494 } 11495 } 11496 11497 /** 11498 * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command 11499 * @phba: pointer to lpfc hba data structure. 11500 * 11501 * This routine unblocks the issuing fabric iocb command. The function 11502 * will clear the fabric iocb block bit and then invoke the routine 11503 * lpfc_resume_fabric_iocbs() to issue one of the pending fabric iocb 11504 * from the driver internal fabric iocb list. 11505 **/ 11506 void 11507 lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba) 11508 { 11509 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 11510 11511 lpfc_resume_fabric_iocbs(phba); 11512 return; 11513 } 11514 11515 /** 11516 * lpfc_block_fabric_iocbs - Block issuing fabric iocb command 11517 * @phba: pointer to lpfc hba data structure. 11518 * 11519 * This routine blocks the issuing fabric iocb for a specified amount of 11520 * time (currently 100 ms). This is done by set the fabric iocb block bit 11521 * and set up a timeout timer for 100ms. 
When the block bit is set, no more 11522 * fabric iocbs will be issued out of the HBA. 11523 **/ 11524 static void 11525 lpfc_block_fabric_iocbs(struct lpfc_hba *phba) 11526 { 11527 int blocked; 11528 11529 blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 11530 /* Start a timer to unblock fabric iocbs after 100ms */ 11531 if (!blocked) 11532 mod_timer(&phba->fabric_block_timer, 11533 jiffies + msecs_to_jiffies(100)); 11534 11535 return; 11536 } 11537 11538 /** 11539 * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb 11540 * @phba: pointer to lpfc hba data structure. 11541 * @cmdiocb: pointer to lpfc command iocb data structure. 11542 * @rspiocb: pointer to lpfc response iocb data structure. 11543 * 11544 * This routine is the callback function that is put into the fabric iocb's 11545 * callback function pointer (iocb->cmd_cmpl). The original iocb's callback 11546 * function pointer has been stored in iocb->fabric_cmd_cmpl. This callback 11547 * function first restores and invokes the original iocb's callback function 11548 * and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next 11549 * fabric bound iocb from the driver internal fabric iocb list onto the wire. 11550 **/ 11551 static void 11552 lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11553 struct lpfc_iocbq *rspiocb) 11554 { 11555 struct ls_rjt stat; 11556 u32 ulp_status = get_job_ulpstatus(phba, rspiocb); 11557 u32 ulp_word4 = get_job_word4(phba, rspiocb); 11558 11559 WARN_ON((cmdiocb->cmd_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC); 11560 11561 switch (ulp_status) { 11562 case IOSTAT_NPORT_RJT: 11563 case IOSTAT_FABRIC_RJT: 11564 if (ulp_word4 & RJT_UNAVAIL_TEMP) 11565 lpfc_block_fabric_iocbs(phba); 11566 break; 11567 11568 case IOSTAT_NPORT_BSY: 11569 case IOSTAT_FABRIC_BSY: 11570 lpfc_block_fabric_iocbs(phba); 11571 break; 11572 11573 case IOSTAT_LS_RJT: 11574 stat.un.ls_rjt_error_be = 11575 cpu_to_be32(ulp_word4); 11576 if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) || 11577 (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY)) 11578 lpfc_block_fabric_iocbs(phba); 11579 break; 11580 } 11581 11582 BUG_ON(atomic_read(&phba->fabric_iocb_count) == 0); 11583 11584 cmdiocb->cmd_cmpl = cmdiocb->fabric_cmd_cmpl; 11585 cmdiocb->fabric_cmd_cmpl = NULL; 11586 cmdiocb->cmd_flag &= ~LPFC_IO_FABRIC; 11587 cmdiocb->cmd_cmpl(phba, cmdiocb, rspiocb); 11588 11589 atomic_dec(&phba->fabric_iocb_count); 11590 if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) { 11591 /* Post any pending iocbs to HBA */ 11592 lpfc_resume_fabric_iocbs(phba); 11593 } 11594 } 11595 11596 /** 11597 * lpfc_issue_fabric_iocb - Issue a fabric iocb command 11598 * @phba: pointer to lpfc hba data structure. 11599 * @iocb: pointer to lpfc command iocb data structure. 11600 * 11601 * This routine is used as the top-level API for issuing a fabric iocb command 11602 * such as FLOGI and FDISC. To accommodate certain switch fabrics, this driver 11603 * function makes sure that only one fabric bound iocb will be outstanding at 11604 * any given time. As such, this function will first check to see whether there 11605 * is already an outstanding fabric iocb on the wire. If so, it will put the 11606 * newly issued iocb onto the driver internal fabric iocb list, waiting to be 11607 * issued later. Otherwise, it will issue the iocb on the wire and update the 11608 * fabric iocb count to indicate that there is one fabric iocb on the wire.
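* Iocbs parked on the internal list are re-driven later by lpfc_resume_fabric_iocbs().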
11609 * 11610 * Note, this implementation has the potential of sending out fabric IOCBs out of 11611 * order. The problem is that the construction of the "ready" boolean does 11612 * not include the condition that the internal fabric IOCB list is empty. As 11613 * such, it is possible that a fabric IOCB issued by this routine might "jump" 11614 * ahead of the fabric IOCBs in the internal list. 11615 * 11616 * Return code 11617 * IOCB_SUCCESS - either fabric iocb put on the list or issued successfully 11618 * IOCB_ERROR - failed to issue fabric iocb 11619 **/ 11620 static int 11621 lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb) 11622 { 11623 unsigned long iflags; 11624 int ready; 11625 int ret; 11626 11627 BUG_ON(atomic_read(&phba->fabric_iocb_count) > 1); 11628 11629 spin_lock_irqsave(&phba->hbalock, iflags); 11630 ready = atomic_read(&phba->fabric_iocb_count) == 0 && 11631 !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 11632 11633 if (ready) 11634 /* Increment fabric iocb count to hold the position */ 11635 atomic_inc(&phba->fabric_iocb_count); 11636 spin_unlock_irqrestore(&phba->hbalock, iflags); 11637 if (ready) { 11638 iocb->fabric_cmd_cmpl = iocb->cmd_cmpl; 11639 iocb->cmd_cmpl = lpfc_cmpl_fabric_iocb; 11640 iocb->cmd_flag |= LPFC_IO_FABRIC; 11641 11642 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD, 11643 "Fabric sched2: ste:x%x", 11644 iocb->vport->port_state, 0, 0); 11645 11646 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); 11647 11648 if (ret == IOCB_ERROR) { 11649 iocb->cmd_cmpl = iocb->fabric_cmd_cmpl; 11650 iocb->fabric_cmd_cmpl = NULL; 11651 iocb->cmd_flag &= ~LPFC_IO_FABRIC; 11652 atomic_dec(&phba->fabric_iocb_count); 11653 } 11654 } else { 11655 spin_lock_irqsave(&phba->hbalock, iflags); 11656 list_add_tail(&iocb->list, &phba->fabric_iocb_list); 11657 spin_unlock_irqrestore(&phba->hbalock, iflags); 11658 ret = IOCB_SUCCESS; 11659 } 11660 return ret; 11661 } 11662 11663 /** 11664 * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list 11665 * @vport: pointer to a virtual N_Port data structure. 11666 * 11667 * This routine aborts all the IOCBs associated with a @vport from the 11668 * driver internal fabric IOCB list. The list contains fabric IOCBs to be 11669 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB 11670 * list, removes each IOCB associated with the @vport off the list, sets the 11671 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function 11672 * associated with the IOCB. 11673 **/ 11674 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport) 11675 { 11676 LIST_HEAD(completions); 11677 struct lpfc_hba *phba = vport->phba; 11678 struct lpfc_iocbq *tmp_iocb, *piocb; 11679 11680 spin_lock_irq(&phba->hbalock); 11681 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, 11682 list) { 11683 11684 if (piocb->vport != vport) 11685 continue; 11686 11687 list_move_tail(&piocb->list, &completions); 11688 } 11689 spin_unlock_irq(&phba->hbalock); 11690 11691 /* Cancel all the IOCBs from the completions list */ 11692 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 11693 IOERR_SLI_ABORTED); 11694 } 11695 11696 /** 11697 * lpfc_fabric_abort_nport - Abort an ndlp's iocbs from driver fabric iocb list 11698 * @ndlp: pointer to a node-list data structure. 11699 * 11700 * This routine aborts all the IOCBs associated with an @ndlp from the 11701 * driver internal fabric IOCB list.
The list contains fabric IOCBs to be 11702 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB 11703 * list, removes each IOCB associated with the @ndlp off the list, set the 11704 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function 11705 * associated with the IOCB. 11706 **/ 11707 void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp) 11708 { 11709 LIST_HEAD(completions); 11710 struct lpfc_hba *phba = ndlp->phba; 11711 struct lpfc_iocbq *tmp_iocb, *piocb; 11712 struct lpfc_sli_ring *pring; 11713 11714 pring = lpfc_phba_elsring(phba); 11715 11716 if (unlikely(!pring)) 11717 return; 11718 11719 spin_lock_irq(&phba->hbalock); 11720 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, 11721 list) { 11722 if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) { 11723 11724 list_move_tail(&piocb->list, &completions); 11725 } 11726 } 11727 spin_unlock_irq(&phba->hbalock); 11728 11729 /* Cancel all the IOCBs from the completions list */ 11730 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 11731 IOERR_SLI_ABORTED); 11732 } 11733 11734 /** 11735 * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list 11736 * @phba: pointer to lpfc hba data structure. 11737 * 11738 * This routine aborts all the IOCBs currently on the driver internal 11739 * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS 11740 * IOCB ring. This function takes the entire IOCB list off the fabric IOCB 11741 * list, removes IOCBs off the list, set the status field to 11742 * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with 11743 * the IOCB. 11744 **/ 11745 void lpfc_fabric_abort_hba(struct lpfc_hba *phba) 11746 { 11747 LIST_HEAD(completions); 11748 11749 spin_lock_irq(&phba->hbalock); 11750 list_splice_init(&phba->fabric_iocb_list, &completions); 11751 spin_unlock_irq(&phba->hbalock); 11752 11753 /* Cancel all the IOCBs from the completions list */ 11754 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 11755 IOERR_SLI_ABORTED); 11756 } 11757 11758 /** 11759 * lpfc_sli4_vport_delete_els_xri_aborted -Remove all ndlp references for vport 11760 * @vport: pointer to lpfc vport data structure. 11761 * 11762 * This routine is invoked by the vport cleanup for deletions and the cleanup 11763 * for an ndlp on removal. 11764 **/ 11765 void 11766 lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport) 11767 { 11768 struct lpfc_hba *phba = vport->phba; 11769 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 11770 struct lpfc_nodelist *ndlp = NULL; 11771 unsigned long iflag = 0; 11772 11773 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag); 11774 list_for_each_entry_safe(sglq_entry, sglq_next, 11775 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { 11776 if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport) { 11777 lpfc_nlp_put(sglq_entry->ndlp); 11778 ndlp = sglq_entry->ndlp; 11779 sglq_entry->ndlp = NULL; 11780 11781 /* If the xri on the abts_els_sgl list is for the Fport 11782 * node and the vport is unloading, the xri aborted wcqe 11783 * likely isn't coming back. Just release the sgl. 
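* Moving it back to lpfc_els_sgl_list and marking it SGL_FREED keeps the entry from being stranded on the aborted list.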
11784 */ 11785 if ((vport->load_flag & FC_UNLOADING) && 11786 ndlp->nlp_DID == Fabric_DID) { 11787 list_del(&sglq_entry->list); 11788 sglq_entry->state = SGL_FREED; 11789 list_add_tail(&sglq_entry->list, 11790 &phba->sli4_hba.lpfc_els_sgl_list); 11791 } 11792 } 11793 } 11794 spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag); 11795 return; 11796 } 11797 11798 /** 11799 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort 11800 * @phba: pointer to lpfc hba data structure. 11801 * @axri: pointer to the els xri abort wcqe structure. 11802 * 11803 * This routine is invoked by the worker thread to process a SLI4 slow-path 11804 * ELS aborted xri. 11805 **/ 11806 void 11807 lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba, 11808 struct sli4_wcqe_xri_aborted *axri) 11809 { 11810 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); 11811 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); 11812 uint16_t lxri = 0; 11813 11814 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 11815 unsigned long iflag = 0; 11816 struct lpfc_nodelist *ndlp; 11817 struct lpfc_sli_ring *pring; 11818 11819 pring = lpfc_phba_elsring(phba); 11820 11821 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag); 11822 list_for_each_entry_safe(sglq_entry, sglq_next, 11823 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { 11824 if (sglq_entry->sli4_xritag == xri) { 11825 list_del(&sglq_entry->list); 11826 ndlp = sglq_entry->ndlp; 11827 sglq_entry->ndlp = NULL; 11828 list_add_tail(&sglq_entry->list, 11829 &phba->sli4_hba.lpfc_els_sgl_list); 11830 sglq_entry->state = SGL_FREED; 11831 spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, 11832 iflag); 11833 11834 if (ndlp) { 11835 lpfc_set_rrq_active(phba, ndlp, 11836 sglq_entry->sli4_lxritag, 11837 rxid, 1); 11838 lpfc_nlp_put(ndlp); 11839 } 11840 11841 /* Check if TXQ queue needs to be serviced */ 11842 if (pring && !list_empty(&pring->txq)) 11843 lpfc_worker_wake_up(phba); 11844 return; 11845 } 11846 } 11847 spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag); 11848 lxri = lpfc_sli4_xri_inrange(phba, xri); 11849 if (lxri == NO_XRI) 11850 return; 11851 11852 spin_lock_irqsave(&phba->hbalock, iflag); 11853 sglq_entry = __lpfc_get_active_sglq(phba, lxri); 11854 if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) { 11855 spin_unlock_irqrestore(&phba->hbalock, iflag); 11856 return; 11857 } 11858 sglq_entry->state = SGL_XRI_ABORTED; 11859 spin_unlock_irqrestore(&phba->hbalock, iflag); 11860 return; 11861 } 11862 11863 /* lpfc_sli_abts_recover_port - Recover a port that failed a BLS_ABORT req. 11864 * @vport: pointer to virtual port object. 11865 * @ndlp: nodelist pointer for the impacted node. 11866 * 11867 * The driver calls this routine in response to an SLI4 XRI ABORT CQE 11868 * or an SLI3 ASYNC_STATUS_CN event from the port. For either event, 11869 * the driver is required to send a LOGO to the remote node before it 11870 * attempts to recover its login to the remote node. 11871 */ 11872 void 11873 lpfc_sli_abts_recover_port(struct lpfc_vport *vport, 11874 struct lpfc_nodelist *ndlp) 11875 { 11876 struct Scsi_Host *shost; 11877 struct lpfc_hba *phba; 11878 unsigned long flags = 0; 11879 11880 shost = lpfc_shost_from_vport(vport); 11881 phba = vport->phba; 11882 if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) { 11883 lpfc_printf_log(phba, KERN_INFO, 11884 LOG_SLI, "3093 No rport recovery needed. 
" 11885 "rport in state 0x%x\n", ndlp->nlp_state); 11886 return; 11887 } 11888 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11889 "3094 Start rport recovery on shost id 0x%x " 11890 "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x " 11891 "flags 0x%x\n", 11892 shost->host_no, ndlp->nlp_DID, 11893 vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state, 11894 ndlp->nlp_flag); 11895 /* 11896 * The rport is not responding. Remove the FCP-2 flag to prevent 11897 * an ADISC in the follow-up recovery code. 11898 */ 11899 spin_lock_irqsave(&ndlp->lock, flags); 11900 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 11901 ndlp->nlp_flag |= NLP_ISSUE_LOGO; 11902 spin_unlock_irqrestore(&ndlp->lock, flags); 11903 lpfc_unreg_rpi(vport, ndlp); 11904 } 11905 11906 static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport) 11907 { 11908 bitmap_zero(vport->vmid_priority_range, LPFC_VMID_MAX_PRIORITY_RANGE); 11909 } 11910 11911 static void 11912 lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max) 11913 { 11914 u32 i; 11915 11916 if ((min > max) || (max > LPFC_VMID_MAX_PRIORITY_RANGE)) 11917 return; 11918 11919 for (i = min; i <= max; i++) 11920 set_bit(i, vport->vmid_priority_range); 11921 } 11922 11923 static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid) 11924 { 11925 set_bit(ctcl_vmid, vport->vmid_priority_range); 11926 } 11927 11928 u32 lpfc_vmid_get_cs_ctl(struct lpfc_vport *vport) 11929 { 11930 u32 i; 11931 11932 i = find_first_bit(vport->vmid_priority_range, 11933 LPFC_VMID_MAX_PRIORITY_RANGE); 11934 11935 if (i == LPFC_VMID_MAX_PRIORITY_RANGE) 11936 return 0; 11937 11938 clear_bit(i, vport->vmid_priority_range); 11939 return i; 11940 } 11941 11942 #define MAX_PRIORITY_DESC 255 11943 11944 static void 11945 lpfc_cmpl_els_qfpa(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11946 struct lpfc_iocbq *rspiocb) 11947 { 11948 struct lpfc_vport *vport = cmdiocb->vport; 11949 struct priority_range_desc *desc; 11950 struct lpfc_dmabuf *prsp = NULL; 11951 struct lpfc_vmid_priority_range *vmid_range = NULL; 11952 u32 *data; 11953 struct lpfc_dmabuf *dmabuf = cmdiocb->cmd_dmabuf; 11954 u32 ulp_status = get_job_ulpstatus(phba, rspiocb); 11955 u32 ulp_word4 = get_job_word4(phba, rspiocb); 11956 u8 *pcmd, max_desc; 11957 u32 len, i; 11958 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 11959 11960 prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list); 11961 if (!prsp) 11962 goto out; 11963 11964 pcmd = prsp->virt; 11965 data = (u32 *)pcmd; 11966 if (data[0] == ELS_CMD_LS_RJT) { 11967 lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI, 11968 "3277 QFPA LS_RJT x%x x%x\n", 11969 data[0], data[1]); 11970 goto out; 11971 } 11972 if (ulp_status) { 11973 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI, 11974 "6529 QFPA failed with status x%x x%x\n", 11975 ulp_status, ulp_word4); 11976 goto out; 11977 } 11978 11979 if (!vport->qfpa_res) { 11980 max_desc = FCELSSIZE / sizeof(*vport->qfpa_res); 11981 vport->qfpa_res = kcalloc(max_desc, sizeof(*vport->qfpa_res), 11982 GFP_KERNEL); 11983 if (!vport->qfpa_res) 11984 goto out; 11985 } 11986 11987 len = *((u32 *)(pcmd + 4)); 11988 len = be32_to_cpu(len); 11989 memcpy(vport->qfpa_res, pcmd, len + 8); 11990 len = len / LPFC_PRIORITY_RANGE_DESC_SIZE; 11991 11992 desc = (struct priority_range_desc *)(pcmd + 8); 11993 vmid_range = vport->vmid_priority.vmid_range; 11994 if (!vmid_range) { 11995 vmid_range = kcalloc(MAX_PRIORITY_DESC, sizeof(*vmid_range), 11996 GFP_KERNEL); 11997 if (!vmid_range) { 11998 kfree(vport->qfpa_res); 11999 goto out; 12000 } 12001 
#define MAX_PRIORITY_DESC	255

static void
lpfc_cmpl_els_qfpa(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		   struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct priority_range_desc *desc;
	struct lpfc_dmabuf *prsp = NULL;
	struct lpfc_vmid_priority_range *vmid_range = NULL;
	u32 *data;
	struct lpfc_dmabuf *dmabuf = cmdiocb->cmd_dmabuf;
	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
	u32 ulp_word4 = get_job_word4(phba, rspiocb);
	u8 *pcmd, max_desc;
	u32 len, i;
	struct lpfc_nodelist *ndlp = cmdiocb->ndlp;

	prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list);
	if (!prsp)
		goto out;

	pcmd = prsp->virt;
	data = (u32 *)pcmd;
	if (data[0] == ELS_CMD_LS_RJT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
				 "3277 QFPA LS_RJT x%x x%x\n",
				 data[0], data[1]);
		goto out;
	}
	if (ulp_status) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
				 "6529 QFPA failed with status x%x x%x\n",
				 ulp_status, ulp_word4);
		goto out;
	}

	if (!vport->qfpa_res) {
		max_desc = FCELSSIZE / sizeof(*vport->qfpa_res);
		vport->qfpa_res = kcalloc(max_desc, sizeof(*vport->qfpa_res),
					  GFP_KERNEL);
		if (!vport->qfpa_res)
			goto out;
	}

	len = *((u32 *)(pcmd + 4));
	len = be32_to_cpu(len);
	memcpy(vport->qfpa_res, pcmd, len + 8);
	len = len / LPFC_PRIORITY_RANGE_DESC_SIZE;

	desc = (struct priority_range_desc *)(pcmd + 8);
	vmid_range = vport->vmid_priority.vmid_range;
	if (!vmid_range) {
		vmid_range = kcalloc(MAX_PRIORITY_DESC, sizeof(*vmid_range),
				     GFP_KERNEL);
		if (!vmid_range) {
			kfree(vport->qfpa_res);
			goto out;
		}
		vport->vmid_priority.vmid_range = vmid_range;
	}
	vport->vmid_priority.num_descriptors = len;

	for (i = 0; i < len; i++, vmid_range++, desc++) {
		lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS,
				 "6539 vmid values low=%d, high=%d, qos=%d, "
				 "local ve id=%d\n", desc->lo_range,
				 desc->hi_range, desc->qos_priority,
				 desc->local_ve_id);

		vmid_range->low = desc->lo_range << 1;
		if (desc->local_ve_id == QFPA_ODD_ONLY)
			vmid_range->low++;
		if (desc->qos_priority)
			vport->vmid_flag |= LPFC_VMID_QOS_ENABLED;
		vmid_range->qos = desc->qos_priority;

		vmid_range->high = desc->hi_range << 1;
		if ((desc->local_ve_id == QFPA_ODD_ONLY) ||
		    (desc->local_ve_id == QFPA_EVEN_ODD))
			vmid_range->high++;
	}
	lpfc_init_cs_ctl_bitmap(vport);
	for (i = 0; i < vport->vmid_priority.num_descriptors; i++) {
		lpfc_vmid_set_cs_ctl_range(vport,
				vport->vmid_priority.vmid_range[i].low,
				vport->vmid_priority.vmid_range[i].high);
	}

	vport->vmid_flag |= LPFC_VMID_QFPA_CMPL;
out:
	lpfc_els_free_iocb(phba, cmdiocb);
	lpfc_nlp_put(ndlp);
}

int lpfc_issue_els_qfpa(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *elsiocb;
	u8 *pcmd;
	int ret;

	ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
	if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
		return -ENXIO;

	elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_QFPA_SIZE, 2, ndlp,
				     ndlp->nlp_DID, ELS_CMD_QFPA);
	if (!elsiocb)
		return -ENOMEM;

	pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;

	*((u32 *)(pcmd)) = ELS_CMD_QFPA;
	pcmd += 4;

	elsiocb->cmd_cmpl = lpfc_cmpl_els_qfpa;

	elsiocb->ndlp = lpfc_nlp_get(ndlp);
	if (!elsiocb->ndlp) {
		lpfc_els_free_iocb(vport->phba, elsiocb);
		return -ENXIO;
	}

	ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 2);
	if (ret != IOCB_SUCCESS) {
		lpfc_els_free_iocb(phba, elsiocb);
		lpfc_nlp_put(ndlp);
		return -EIO;
	}
	vport->vmid_flag &= ~LPFC_VMID_QOS_ENABLED;
	return 0;
}
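/*
 * Illustrative note (not part of the driver flow): lpfc_cmpl_els_qfpa()
 * widens each priority_range_desc into a CS_CTL range by shifting the
 * descriptor bounds left by one and then using local_ve_id to select the
 * odd/even values. A hedged worked example, assuming a descriptor with
 * lo_range = 1 and hi_range = 3:
 *
 *	local_ve_id == QFPA_ODD_ONLY: low = (1 << 1) + 1 = 3, high = (3 << 1) + 1 = 7
 *	local_ve_id == QFPA_EVEN_ODD: low =  1 << 1      = 2, high = (3 << 1) + 1 = 7
 *	otherwise (even values only): low =  1 << 1      = 2, high =  3 << 1      = 6
 *
 * The resulting [low, high] ranges feed lpfc_vmid_set_cs_ctl_range(), which
 * populates the CS_CTL bitmap later consumed by lpfc_vmid_get_cs_ctl().
 */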
int
lpfc_vmid_uvem(struct lpfc_vport *vport,
	       struct lpfc_vmid *vmid, bool instantiated)
{
	struct lpfc_vem_id_desc *vem_id_desc;
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *elsiocb;
	struct instantiated_ve_desc *inst_desc;
	struct lpfc_vmid_context *vmid_context;
	u8 *pcmd;
	u32 *len;
	int ret = 0;

	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
		return -ENXIO;

	vmid_context = kmalloc(sizeof(*vmid_context), GFP_KERNEL);
	if (!vmid_context)
		return -ENOMEM;
	elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_UVEM_SIZE, 2,
				     ndlp, Fabric_DID, ELS_CMD_UVEM);
	if (!elsiocb)
		goto out;

	lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS,
			 "3427 Host vmid %s %d\n",
			 vmid->host_vmid, instantiated);
	vmid_context->vmp = vmid;
	vmid_context->nlp = ndlp;
	vmid_context->instantiated = instantiated;
	elsiocb->vmid_tag.vmid_context = vmid_context;
	pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;

	if (uuid_is_null((uuid_t *)vport->lpfc_vmid_host_uuid))
		memcpy(vport->lpfc_vmid_host_uuid, vmid->host_vmid,
		       LPFC_COMPRESS_VMID_SIZE);

	*((u32 *)(pcmd)) = ELS_CMD_UVEM;
	len = (u32 *)(pcmd + 4);
	*len = cpu_to_be32(LPFC_UVEM_SIZE - 8);

	vem_id_desc = (struct lpfc_vem_id_desc *)(pcmd + 8);
	vem_id_desc->tag = be32_to_cpu(VEM_ID_DESC_TAG);
	vem_id_desc->length = be32_to_cpu(LPFC_UVEM_VEM_ID_DESC_SIZE);
	memcpy(vem_id_desc->vem_id, vport->lpfc_vmid_host_uuid,
	       LPFC_COMPRESS_VMID_SIZE);

	inst_desc = (struct instantiated_ve_desc *)(pcmd + 32);
	inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG);
	inst_desc->length = be32_to_cpu(LPFC_UVEM_VE_MAP_DESC_SIZE);
	memcpy(inst_desc->global_vem_id, vmid->host_vmid,
	       LPFC_COMPRESS_VMID_SIZE);

	bf_set(lpfc_instantiated_nport_id, inst_desc, vport->fc_myDID);
	bf_set(lpfc_instantiated_local_id, inst_desc,
	       vmid->un.cs_ctl_vmid);
	if (instantiated) {
		inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG);
	} else {
		inst_desc->tag = be32_to_cpu(DEINSTANTIATED_VE_DESC_TAG);
		lpfc_vmid_put_cs_ctl(vport, vmid->un.cs_ctl_vmid);
	}
	inst_desc->word6 = cpu_to_be32(inst_desc->word6);

	elsiocb->cmd_cmpl = lpfc_cmpl_els_uvem;

	elsiocb->ndlp = lpfc_nlp_get(ndlp);
	if (!elsiocb->ndlp) {
		lpfc_els_free_iocb(vport->phba, elsiocb);
		goto out;
	}

	ret = lpfc_sli_issue_iocb(vport->phba, LPFC_ELS_RING, elsiocb, 0);
	if (ret != IOCB_SUCCESS) {
		lpfc_els_free_iocb(vport->phba, elsiocb);
		lpfc_nlp_put(ndlp);
		goto out;
	}

	return 0;
out:
	kfree(vmid_context);
	return -EIO;
}

static void
lpfc_cmpl_els_uvem(struct lpfc_hba *phba, struct lpfc_iocbq *icmdiocb,
		   struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = icmdiocb->vport;
	struct lpfc_dmabuf *prsp = NULL;
	struct lpfc_vmid_context *vmid_context =
	    icmdiocb->vmid_tag.vmid_context;
	struct lpfc_nodelist *ndlp = icmdiocb->ndlp;
	u8 *pcmd;
	u32 *data;
	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
	u32 ulp_word4 = get_job_word4(phba, rspiocb);
	struct lpfc_dmabuf *dmabuf = icmdiocb->cmd_dmabuf;
	struct lpfc_vmid *vmid;

	vmid = vmid_context->vmp;
	if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
		ndlp = NULL;

	prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list);
	if (!prsp)
		goto out;
	pcmd = prsp->virt;
	data = (u32 *)pcmd;
	if (data[0] == ELS_CMD_LS_RJT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
				 "4532 UVEM LS_RJT %x %x\n", data[0], data[1]);
		goto out;
	}
	if (ulp_status) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
				 "4533 UVEM error status %x: %x\n",
				 ulp_status, ulp_word4);
		goto out;
	}
	spin_lock(&phba->hbalock);
	/* Set IN USE flag */
	vport->vmid_flag |= LPFC_VMID_IN_USE;
	phba->pport->vmid_flag |= LPFC_VMID_IN_USE;
	spin_unlock(&phba->hbalock);

	if (vmid_context->instantiated) {
		write_lock(&vport->vmid_lock);
		vmid->flag |= LPFC_VMID_REGISTERED;
		vmid->flag &= ~LPFC_VMID_REQ_REGISTER;
		write_unlock(&vport->vmid_lock);
	}

out:
	kfree(vmid_context);
	lpfc_els_free_iocb(phba, icmdiocb);
	lpfc_nlp_put(ndlp);
}
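/*
 * Illustrative note (not part of the driver flow): lpfc_vmid_uvem() and
 * lpfc_cmpl_els_uvem() hand a heap-allocated lpfc_vmid_context across the
 * asynchronous ELS completion. The ownership rule implemented above is that
 * the context is freed on every lpfc_vmid_uvem() error path and otherwise
 * exactly once by the completion handler. A hedged sketch of a caller,
 * assuming a vmid entry that already holds a CS_CTL value in
 * vmid->un.cs_ctl_vmid:
 *
 *	if (lpfc_vmid_uvem(vport, vmid, true))
 *		return;		// registration not started; vmid->flag unchanged
 *	// On success, LPFC_VMID_REGISTERED is set later in
 *	// lpfc_cmpl_els_uvem() once the fabric accepts the UVEM.
 *
 * The completion handler also drops the ndlp reference taken with
 * lpfc_nlp_get() when the IOCB was prepared, mirroring the QFPA path above.
 */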