1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 * EMULEX and SLI are trademarks of Emulex. * 8 * www.broadcom.com * 9 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 10 * * 11 * This program is free software; you can redistribute it and/or * 12 * modify it under the terms of version 2 of the GNU General * 13 * Public License as published by the Free Software Foundation. * 14 * This program is distributed in the hope that it will be useful. * 15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 19 * TO BE LEGALLY INVALID. See the GNU General Public License for * 20 * more details, a copy of which can be found in the file COPYING * 21 * included with this package. * 22 *******************************************************************/ 23 /* See Fibre Channel protocol T11 FC-LS for details */ 24 #include <linux/blkdev.h> 25 #include <linux/pci.h> 26 #include <linux/slab.h> 27 #include <linux/interrupt.h> 28 #include <linux/delay.h> 29 30 #include <scsi/scsi.h> 31 #include <scsi/scsi_device.h> 32 #include <scsi/scsi_host.h> 33 #include <scsi/scsi_transport_fc.h> 34 #include <uapi/scsi/fc/fc_fs.h> 35 #include <uapi/scsi/fc/fc_els.h> 36 37 #include "lpfc_hw4.h" 38 #include "lpfc_hw.h" 39 #include "lpfc_sli.h" 40 #include "lpfc_sli4.h" 41 #include "lpfc_nl.h" 42 #include "lpfc_disc.h" 43 #include "lpfc_scsi.h" 44 #include "lpfc.h" 45 #include "lpfc_logmsg.h" 46 #include "lpfc_crtn.h" 47 #include "lpfc_vport.h" 48 #include "lpfc_debugfs.h" 49 50 static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *, 51 struct lpfc_iocbq *); 52 static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *, 53 struct lpfc_iocbq *); 54 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport); 55 static int lpfc_issue_els_fdisc(struct lpfc_vport *vport, 56 struct lpfc_nodelist *ndlp, uint8_t retry); 57 static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba, 58 struct lpfc_iocbq *iocb); 59 static void lpfc_cmpl_els_edc(struct lpfc_hba *phba, 60 struct lpfc_iocbq *cmdiocb, 61 struct lpfc_iocbq *rspiocb); 62 static void lpfc_cmpl_els_uvem(struct lpfc_hba *, struct lpfc_iocbq *, 63 struct lpfc_iocbq *); 64 65 static int lpfc_max_els_tries = 3; 66 67 static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport); 68 static void lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max); 69 static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid); 70 71 /** 72 * lpfc_els_chk_latt - Check host link attention event for a vport 73 * @vport: pointer to a host virtual N_Port data structure. 74 * 75 * This routine checks whether there is an outstanding host link 76 * attention event during the discovery process with the @vport. It is done 77 * by reading the HBA's Host Attention (HA) register. 
If there is any host 78 * link attention events during this @vport's discovery process, the @vport 79 * shall be marked as FC_ABORT_DISCOVERY, a host link attention clear shall 80 * be issued if the link state is not already in host link cleared state, 81 * and a return code shall indicate whether the host link attention event 82 * had happened. 83 * 84 * Note that, if either the host link is in state LPFC_LINK_DOWN or @vport 85 * state in LPFC_VPORT_READY, the request for checking host link attention 86 * event will be ignored and a return code shall indicate no host link 87 * attention event had happened. 88 * 89 * Return codes 90 * 0 - no host link attention event happened 91 * 1 - host link attention event happened 92 **/ 93 int 94 lpfc_els_chk_latt(struct lpfc_vport *vport) 95 { 96 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 97 struct lpfc_hba *phba = vport->phba; 98 uint32_t ha_copy; 99 100 if (vport->port_state >= LPFC_VPORT_READY || 101 phba->link_state == LPFC_LINK_DOWN || 102 phba->sli_rev > LPFC_SLI_REV3) 103 return 0; 104 105 /* Read the HBA Host Attention Register */ 106 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 107 return 1; 108 109 if (!(ha_copy & HA_LATT)) 110 return 0; 111 112 /* Pending Link Event during Discovery */ 113 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 114 "0237 Pending Link Event during " 115 "Discovery: State x%x\n", 116 phba->pport->port_state); 117 118 /* CLEAR_LA should re-enable link attention events and 119 * we should then immediately take a LATT event. The 120 * LATT processing should call lpfc_linkdown() which 121 * will cleanup any left over in-progress discovery 122 * events. 123 */ 124 spin_lock_irq(shost->host_lock); 125 vport->fc_flag |= FC_ABORT_DISCOVERY; 126 spin_unlock_irq(shost->host_lock); 127 128 if (phba->link_state != LPFC_CLEAR_LA) 129 lpfc_issue_clear_la(phba, vport); 130 131 return 1; 132 } 133 134 /** 135 * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure 136 * @vport: pointer to a host virtual N_Port data structure. 137 * @expect_rsp: flag indicating whether response is expected. 138 * @cmd_size: size of the ELS command. 139 * @retry: number of retries to the command when it fails. 140 * @ndlp: pointer to a node-list data structure. 141 * @did: destination identifier. 142 * @elscmd: the ELS command code. 143 * 144 * This routine is used for allocating a lpfc-IOCB data structure from 145 * the driver lpfc-IOCB free-list and prepare the IOCB with the parameters 146 * passed into the routine for discovery state machine to issue an Extended 147 * Link Service (ELS) commands. It is a generic lpfc-IOCB allocation 148 * and preparation routine that is used by all the discovery state machine 149 * routines and the ELS command-specific fields will be later set up by 150 * the individual discovery machine routines after calling this routine 151 * allocating and preparing a generic IOCB data structure. It fills in the 152 * Buffer Descriptor Entries (BDEs), allocates buffers for both command 153 * payload and response payload (if expected). The reference count on the 154 * ndlp is incremented by 1 and the reference to the ndlp is put into 155 * ndlp of the IOCB data structure for this IOCB to hold the ndlp 156 * reference for the command's callback function to access later. 
157 * 158 * Return code 159 * Pointer to the newly allocated/prepared els iocb data structure 160 * NULL - when els iocb data structure allocation/preparation failed 161 **/ 162 struct lpfc_iocbq * 163 lpfc_prep_els_iocb(struct lpfc_vport *vport, u8 expect_rsp, 164 u16 cmd_size, u8 retry, 165 struct lpfc_nodelist *ndlp, u32 did, 166 u32 elscmd) 167 { 168 struct lpfc_hba *phba = vport->phba; 169 struct lpfc_iocbq *elsiocb; 170 struct lpfc_dmabuf *pcmd, *prsp, *pbuflist, *bmp; 171 struct ulp_bde64_le *bpl; 172 u32 timeout = 0; 173 174 if (!lpfc_is_link_up(phba)) 175 return NULL; 176 177 /* Allocate buffer for command iocb */ 178 elsiocb = lpfc_sli_get_iocbq(phba); 179 if (!elsiocb) 180 return NULL; 181 182 /* 183 * If this command is for fabric controller and HBA running 184 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames. 185 */ 186 if ((did == Fabric_DID) && 187 (phba->hba_flag & HBA_FIP_SUPPORT) && 188 ((elscmd == ELS_CMD_FLOGI) || 189 (elscmd == ELS_CMD_FDISC) || 190 (elscmd == ELS_CMD_LOGO))) 191 switch (elscmd) { 192 case ELS_CMD_FLOGI: 193 elsiocb->cmd_flag |= 194 ((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT) 195 & LPFC_FIP_ELS_ID_MASK); 196 break; 197 case ELS_CMD_FDISC: 198 elsiocb->cmd_flag |= 199 ((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT) 200 & LPFC_FIP_ELS_ID_MASK); 201 break; 202 case ELS_CMD_LOGO: 203 elsiocb->cmd_flag |= 204 ((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT) 205 & LPFC_FIP_ELS_ID_MASK); 206 break; 207 } 208 else 209 elsiocb->cmd_flag &= ~LPFC_FIP_ELS_ID_MASK; 210 211 /* fill in BDEs for command */ 212 /* Allocate buffer for command payload */ 213 pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL); 214 if (pcmd) 215 pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys); 216 if (!pcmd || !pcmd->virt) 217 goto els_iocb_free_pcmb_exit; 218 219 INIT_LIST_HEAD(&pcmd->list); 220 221 /* Allocate buffer for response payload */ 222 if (expect_rsp) { 223 prsp = kmalloc(sizeof(*prsp), GFP_KERNEL); 224 if (prsp) 225 prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 226 &prsp->phys); 227 if (!prsp || !prsp->virt) 228 goto els_iocb_free_prsp_exit; 229 INIT_LIST_HEAD(&prsp->list); 230 } else { 231 prsp = NULL; 232 } 233 234 /* Allocate buffer for Buffer ptr list */ 235 pbuflist = kmalloc(sizeof(*pbuflist), GFP_KERNEL); 236 if (pbuflist) 237 pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 238 &pbuflist->phys); 239 if (!pbuflist || !pbuflist->virt) 240 goto els_iocb_free_pbuf_exit; 241 242 INIT_LIST_HEAD(&pbuflist->list); 243 244 if (expect_rsp) { 245 switch (elscmd) { 246 case ELS_CMD_FLOGI: 247 timeout = FF_DEF_RATOV * 2; 248 break; 249 case ELS_CMD_LOGO: 250 timeout = phba->fc_ratov; 251 break; 252 default: 253 timeout = phba->fc_ratov * 2; 254 } 255 256 /* Fill SGE for the num bde count */ 257 elsiocb->num_bdes = 2; 258 } 259 260 if (phba->sli_rev == LPFC_SLI_REV4) 261 bmp = pcmd; 262 else 263 bmp = pbuflist; 264 265 lpfc_sli_prep_els_req_rsp(phba, elsiocb, vport, bmp, cmd_size, did, 266 elscmd, timeout, expect_rsp); 267 268 bpl = (struct ulp_bde64_le *)pbuflist->virt; 269 bpl->addr_low = cpu_to_le32(putPaddrLow(pcmd->phys)); 270 bpl->addr_high = cpu_to_le32(putPaddrHigh(pcmd->phys)); 271 bpl->type_size = cpu_to_le32(cmd_size); 272 bpl->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64); 273 274 if (expect_rsp) { 275 bpl++; 276 bpl->addr_low = cpu_to_le32(putPaddrLow(prsp->phys)); 277 bpl->addr_high = cpu_to_le32(putPaddrHigh(prsp->phys)); 278 bpl->type_size = cpu_to_le32(FCELSSIZE); 279 bpl->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64); 280 } 281 282 elsiocb->cmd_dmabuf = pcmd; 
283 elsiocb->bpl_dmabuf = pbuflist; 284 elsiocb->retry = retry; 285 elsiocb->vport = vport; 286 elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT; 287 288 if (prsp) 289 list_add(&prsp->list, &pcmd->list); 290 if (expect_rsp) { 291 /* Xmit ELS command <elsCmd> to remote NPORT <did> */ 292 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 293 "0116 Xmit ELS command x%x to remote " 294 "NPORT x%x I/O tag: x%x, port state:x%x " 295 "rpi x%x fc_flag:x%x\n", 296 elscmd, did, elsiocb->iotag, 297 vport->port_state, ndlp->nlp_rpi, 298 vport->fc_flag); 299 } else { 300 /* Xmit ELS response <elsCmd> to remote NPORT <did> */ 301 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 302 "0117 Xmit ELS response x%x to remote " 303 "NPORT x%x I/O tag: x%x, size: x%x " 304 "port_state x%x rpi x%x fc_flag x%x\n", 305 elscmd, ndlp->nlp_DID, elsiocb->iotag, 306 cmd_size, vport->port_state, 307 ndlp->nlp_rpi, vport->fc_flag); 308 } 309 310 return elsiocb; 311 312 els_iocb_free_pbuf_exit: 313 if (expect_rsp) 314 lpfc_mbuf_free(phba, prsp->virt, prsp->phys); 315 kfree(pbuflist); 316 317 els_iocb_free_prsp_exit: 318 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys); 319 kfree(prsp); 320 321 els_iocb_free_pcmb_exit: 322 kfree(pcmd); 323 lpfc_sli_release_iocbq(phba, elsiocb); 324 return NULL; 325 } 326 327 /** 328 * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport 329 * @vport: pointer to a host virtual N_Port data structure. 330 * 331 * This routine issues a fabric registration login for a @vport. An 332 * active ndlp node with Fabric_DID must already exist for this @vport. 333 * The routine invokes two mailbox commands to carry out fabric registration 334 * login through the HBA firmware: the first mailbox command requests the 335 * HBA to perform link configuration for the @vport; and the second mailbox 336 * command requests the HBA to perform the actual fabric registration login 337 * with the @vport. 338 * 339 * Return code 340 * 0 - successfully issued fabric registration login for @vport 341 * -ENXIO -- failed to issue fabric registration login for @vport 342 **/ 343 int 344 lpfc_issue_fabric_reglogin(struct lpfc_vport *vport) 345 { 346 struct lpfc_hba *phba = vport->phba; 347 LPFC_MBOXQ_t *mbox; 348 struct lpfc_nodelist *ndlp; 349 struct serv_parm *sp; 350 int rc; 351 int err = 0; 352 353 sp = &phba->fc_fabparam; 354 ndlp = lpfc_findnode_did(vport, Fabric_DID); 355 if (!ndlp) { 356 err = 1; 357 goto fail; 358 } 359 360 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 361 if (!mbox) { 362 err = 2; 363 goto fail; 364 } 365 366 vport->port_state = LPFC_FABRIC_CFG_LINK; 367 lpfc_config_link(phba, mbox); 368 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 369 mbox->vport = vport; 370 371 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 372 if (rc == MBX_NOT_FINISHED) { 373 err = 3; 374 goto fail_free_mbox; 375 } 376 377 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 378 if (!mbox) { 379 err = 4; 380 goto fail; 381 } 382 rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, 383 ndlp->nlp_rpi); 384 if (rc) { 385 err = 5; 386 goto fail_free_mbox; 387 } 388 389 mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login; 390 mbox->vport = vport; 391 /* increment the reference count on ndlp to hold reference 392 * for the callback routine. 
393 */ 394 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 395 if (!mbox->ctx_ndlp) { 396 err = 6; 397 goto fail_free_mbox; 398 } 399 400 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 401 if (rc == MBX_NOT_FINISHED) { 402 err = 7; 403 goto fail_issue_reg_login; 404 } 405 406 return 0; 407 408 fail_issue_reg_login: 409 /* decrement the reference count on ndlp just incremented 410 * for the failed mbox command. 411 */ 412 lpfc_nlp_put(ndlp); 413 fail_free_mbox: 414 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 415 fail: 416 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 417 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 418 "0249 Cannot issue Register Fabric login: Err %d\n", 419 err); 420 return -ENXIO; 421 } 422 423 /** 424 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login 425 * @vport: pointer to a host virtual N_Port data structure. 426 * 427 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for 428 * the @vport. This mailbox command is necessary for SLI4 port only. 429 * 430 * Return code 431 * 0 - successfully issued REG_VFI for @vport 432 * A failure code otherwise. 433 **/ 434 int 435 lpfc_issue_reg_vfi(struct lpfc_vport *vport) 436 { 437 struct lpfc_hba *phba = vport->phba; 438 LPFC_MBOXQ_t *mboxq = NULL; 439 struct lpfc_nodelist *ndlp; 440 struct lpfc_dmabuf *dmabuf = NULL; 441 int rc = 0; 442 443 /* move forward in case of SLI4 FC port loopback test and pt2pt mode */ 444 if ((phba->sli_rev == LPFC_SLI_REV4) && 445 !(phba->link_flag & LS_LOOPBACK_MODE) && 446 !(vport->fc_flag & FC_PT2PT)) { 447 ndlp = lpfc_findnode_did(vport, Fabric_DID); 448 if (!ndlp) { 449 rc = -ENODEV; 450 goto fail; 451 } 452 } 453 454 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 455 if (!mboxq) { 456 rc = -ENOMEM; 457 goto fail; 458 } 459 460 /* Supply CSP's only if we are fabric connect or pt-to-pt connect */ 461 if ((vport->fc_flag & FC_FABRIC) || (vport->fc_flag & FC_PT2PT)) { 462 rc = lpfc_mbox_rsrc_prep(phba, mboxq); 463 if (rc) { 464 rc = -ENOMEM; 465 goto fail_mbox; 466 } 467 dmabuf = mboxq->ctx_buf; 468 memcpy(dmabuf->virt, &phba->fc_fabparam, 469 sizeof(struct serv_parm)); 470 } 471 472 vport->port_state = LPFC_FABRIC_CFG_LINK; 473 if (dmabuf) { 474 lpfc_reg_vfi(mboxq, vport, dmabuf->phys); 475 /* lpfc_reg_vfi memsets the mailbox. Restore the ctx_buf. */ 476 mboxq->ctx_buf = dmabuf; 477 } else { 478 lpfc_reg_vfi(mboxq, vport, 0); 479 } 480 481 mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi; 482 mboxq->vport = vport; 483 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 484 if (rc == MBX_NOT_FINISHED) { 485 rc = -ENXIO; 486 goto fail_mbox; 487 } 488 return 0; 489 490 fail_mbox: 491 lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED); 492 fail: 493 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 494 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 495 "0289 Issue Register VFI failed: Err %d\n", rc); 496 return rc; 497 } 498 499 /** 500 * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login 501 * @vport: pointer to a host virtual N_Port data structure. 502 * 503 * This routine issues a UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for 504 * the @vport. This mailbox command is necessary for SLI4 port only. 505 * 506 * Return code 507 * 0 - successfully issued REG_VFI for @vport 508 * A failure code otherwise. 
509 **/ 510 int 511 lpfc_issue_unreg_vfi(struct lpfc_vport *vport) 512 { 513 struct lpfc_hba *phba = vport->phba; 514 struct Scsi_Host *shost; 515 LPFC_MBOXQ_t *mboxq; 516 int rc; 517 518 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 519 if (!mboxq) { 520 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 521 "2556 UNREG_VFI mbox allocation failed" 522 "HBA state x%x\n", phba->pport->port_state); 523 return -ENOMEM; 524 } 525 526 lpfc_unreg_vfi(mboxq, vport); 527 mboxq->vport = vport; 528 mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl; 529 530 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 531 if (rc == MBX_NOT_FINISHED) { 532 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 533 "2557 UNREG_VFI issue mbox failed rc x%x " 534 "HBA state x%x\n", 535 rc, phba->pport->port_state); 536 mempool_free(mboxq, phba->mbox_mem_pool); 537 return -EIO; 538 } 539 540 shost = lpfc_shost_from_vport(vport); 541 spin_lock_irq(shost->host_lock); 542 vport->fc_flag &= ~FC_VFI_REGISTERED; 543 spin_unlock_irq(shost->host_lock); 544 return 0; 545 } 546 547 /** 548 * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean. 549 * @vport: pointer to a host virtual N_Port data structure. 550 * @sp: pointer to service parameter data structure. 551 * 552 * This routine is called from FLOGI/FDISC completion handler functions. 553 * lpfc_check_clean_addr_bit return 1 when FCID/Fabric portname/ Fabric 554 * node nodename is changed in the completion service parameter else return 555 * 0. This function also set flag in the vport data structure to delay 556 * NP_Port discovery after the FLOGI/FDISC completion if Clean address bit 557 * in FLOGI/FDISC response is cleared and FCID/Fabric portname/ Fabric 558 * node nodename is changed in the completion service parameter. 559 * 560 * Return code 561 * 0 - FCID and Fabric Nodename and Fabric portname is not changed. 562 * 1 - FCID or Fabric Nodename or Fabric portname is changed. 563 * 564 **/ 565 static uint8_t 566 lpfc_check_clean_addr_bit(struct lpfc_vport *vport, 567 struct serv_parm *sp) 568 { 569 struct lpfc_hba *phba = vport->phba; 570 uint8_t fabric_param_changed = 0; 571 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 572 573 if ((vport->fc_prevDID != vport->fc_myDID) || 574 memcmp(&vport->fabric_portname, &sp->portName, 575 sizeof(struct lpfc_name)) || 576 memcmp(&vport->fabric_nodename, &sp->nodeName, 577 sizeof(struct lpfc_name)) || 578 (vport->vport_flag & FAWWPN_PARAM_CHG)) { 579 fabric_param_changed = 1; 580 vport->vport_flag &= ~FAWWPN_PARAM_CHG; 581 } 582 /* 583 * Word 1 Bit 31 in common service parameter is overloaded. 584 * Word 1 Bit 31 in FLOGI request is multiple NPort request 585 * Word 1 Bit 31 in FLOGI response is clean address bit 586 * 587 * If fabric parameter is changed and clean address bit is 588 * cleared delay nport discovery if 589 * - vport->fc_prevDID != 0 (not initial discovery) OR 590 * - lpfc_delay_discovery module parameter is set. 591 */ 592 if (fabric_param_changed && !sp->cmn.clean_address_bit && 593 (vport->fc_prevDID || phba->cfg_delay_discovery)) { 594 spin_lock_irq(shost->host_lock); 595 vport->fc_flag |= FC_DISC_DELAYED; 596 spin_unlock_irq(shost->host_lock); 597 } 598 599 return fabric_param_changed; 600 } 601 602 603 /** 604 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port 605 * @vport: pointer to a host virtual N_Port data structure. 606 * @ndlp: pointer to a node-list data structure. 607 * @sp: pointer to service parameter data structure. 
608 * @ulp_word4: command response value 609 * 610 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback 611 * function to handle the completion of a Fabric Login (FLOGI) into a fabric 612 * port in a fabric topology. It properly sets up the parameters to the @ndlp 613 * from the IOCB response. It also check the newly assigned N_Port ID to the 614 * @vport against the previously assigned N_Port ID. If it is different from 615 * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine 616 * is invoked on all the remaining nodes with the @vport to unregister the 617 * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin() 618 * is invoked to register login to the fabric. 619 * 620 * Return code 621 * 0 - Success (currently, always return 0) 622 **/ 623 static int 624 lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 625 struct serv_parm *sp, uint32_t ulp_word4) 626 { 627 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 628 struct lpfc_hba *phba = vport->phba; 629 struct lpfc_nodelist *np; 630 struct lpfc_nodelist *next_np; 631 uint8_t fabric_param_changed; 632 633 spin_lock_irq(shost->host_lock); 634 vport->fc_flag |= FC_FABRIC; 635 spin_unlock_irq(shost->host_lock); 636 637 phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov); 638 if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */ 639 phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000; 640 641 phba->fc_edtovResol = sp->cmn.edtovResolution; 642 phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000; 643 644 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 645 spin_lock_irq(shost->host_lock); 646 vport->fc_flag |= FC_PUBLIC_LOOP; 647 spin_unlock_irq(shost->host_lock); 648 } 649 650 vport->fc_myDID = ulp_word4 & Mask_DID; 651 memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name)); 652 memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name)); 653 ndlp->nlp_class_sup = 0; 654 if (sp->cls1.classValid) 655 ndlp->nlp_class_sup |= FC_COS_CLASS1; 656 if (sp->cls2.classValid) 657 ndlp->nlp_class_sup |= FC_COS_CLASS2; 658 if (sp->cls3.classValid) 659 ndlp->nlp_class_sup |= FC_COS_CLASS3; 660 if (sp->cls4.classValid) 661 ndlp->nlp_class_sup |= FC_COS_CLASS4; 662 ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | 663 sp->cmn.bbRcvSizeLsb; 664 665 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp); 666 if (fabric_param_changed) { 667 /* Reset FDMI attribute masks based on config parameter */ 668 if (phba->cfg_enable_SmartSAN || 669 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) { 670 /* Setup appropriate attribute masks */ 671 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR; 672 if (phba->cfg_enable_SmartSAN) 673 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR; 674 else 675 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; 676 } else { 677 vport->fdmi_hba_mask = 0; 678 vport->fdmi_port_mask = 0; 679 } 680 681 } 682 memcpy(&vport->fabric_portname, &sp->portName, 683 sizeof(struct lpfc_name)); 684 memcpy(&vport->fabric_nodename, &sp->nodeName, 685 sizeof(struct lpfc_name)); 686 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); 687 688 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { 689 if (sp->cmn.response_multiple_NPort) { 690 lpfc_printf_vlog(vport, KERN_WARNING, 691 LOG_ELS | LOG_VPORT, 692 "1816 FLOGI NPIV supported, " 693 "response data 0x%x\n", 694 sp->cmn.response_multiple_NPort); 695 spin_lock_irq(&phba->hbalock); 696 phba->link_flag |= LS_NPIV_FAB_SUPPORTED; 697 spin_unlock_irq(&phba->hbalock); 698 
} else { 699 /* Because we asked f/w for NPIV it still expects us 700 to call reg_vnpid at least for the physical host */ 701 lpfc_printf_vlog(vport, KERN_WARNING, 702 LOG_ELS | LOG_VPORT, 703 "1817 Fabric does not support NPIV " 704 "- configuring single port mode.\n"); 705 spin_lock_irq(&phba->hbalock); 706 phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED; 707 spin_unlock_irq(&phba->hbalock); 708 } 709 } 710 711 /* 712 * For FC we need to do some special processing because of the SLI 713 * Port's default settings of the Common Service Parameters. 714 */ 715 if ((phba->sli_rev == LPFC_SLI_REV4) && 716 (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)) { 717 /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */ 718 if (fabric_param_changed) 719 lpfc_unregister_fcf_prep(phba); 720 721 /* This should just update the VFI CSPs*/ 722 if (vport->fc_flag & FC_VFI_REGISTERED) 723 lpfc_issue_reg_vfi(vport); 724 } 725 726 if (fabric_param_changed && 727 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 728 729 /* If our NportID changed, we need to ensure all 730 * remaining NPORTs get unreg_login'ed. 731 */ 732 list_for_each_entry_safe(np, next_np, 733 &vport->fc_nodes, nlp_listp) { 734 if ((np->nlp_state != NLP_STE_NPR_NODE) || 735 !(np->nlp_flag & NLP_NPR_ADISC)) 736 continue; 737 spin_lock_irq(&np->lock); 738 np->nlp_flag &= ~NLP_NPR_ADISC; 739 spin_unlock_irq(&np->lock); 740 lpfc_unreg_rpi(vport, np); 741 } 742 lpfc_cleanup_pending_mbox(vport); 743 744 if (phba->sli_rev == LPFC_SLI_REV4) { 745 lpfc_sli4_unreg_all_rpis(vport); 746 lpfc_mbx_unreg_vpi(vport); 747 spin_lock_irq(shost->host_lock); 748 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 749 spin_unlock_irq(shost->host_lock); 750 } 751 752 /* 753 * For SLI3 and SLI4, the VPI needs to be reregistered in 754 * response to this fabric parameter change event. 755 */ 756 spin_lock_irq(shost->host_lock); 757 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 758 spin_unlock_irq(shost->host_lock); 759 } else if ((phba->sli_rev == LPFC_SLI_REV4) && 760 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 761 /* 762 * Driver needs to re-reg VPI in order for f/w 763 * to update the MAC address. 764 */ 765 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 766 lpfc_register_new_vport(phba, vport, ndlp); 767 return 0; 768 } 769 770 if (phba->sli_rev < LPFC_SLI_REV4) { 771 lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE); 772 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED && 773 vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) 774 lpfc_register_new_vport(phba, vport, ndlp); 775 else 776 lpfc_issue_fabric_reglogin(vport); 777 } else { 778 ndlp->nlp_type |= NLP_FABRIC; 779 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 780 if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) && 781 (vport->vpi_state & LPFC_VPI_REGISTERED)) { 782 lpfc_start_fdiscs(phba); 783 lpfc_do_scr_ns_plogi(phba, vport); 784 } else if (vport->fc_flag & FC_VFI_REGISTERED) 785 lpfc_issue_init_vpi(vport); 786 else { 787 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 788 "3135 Need register VFI: (x%x/%x)\n", 789 vport->fc_prevDID, vport->fc_myDID); 790 lpfc_issue_reg_vfi(vport); 791 } 792 } 793 return 0; 794 } 795 796 /** 797 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port 798 * @vport: pointer to a host virtual N_Port data structure. 799 * @ndlp: pointer to a node-list data structure. 800 * @sp: pointer to service parameter data structure. 
801 * 802 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback 803 * function to handle the completion of a Fabric Login (FLOGI) into an N_Port 804 * in a point-to-point topology. First, the @vport's N_Port Name is compared 805 * with the received N_Port Name: if the @vport's N_Port Name is greater than 806 * the received N_Port Name lexicographically, this node shall assign local 807 * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and 808 * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise, 809 * this node shall just wait for the remote node to issue PLOGI and assign 810 * N_Port IDs. 811 * 812 * Return code 813 * 0 - Success 814 * -ENXIO - Fail 815 **/ 816 static int 817 lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 818 struct serv_parm *sp) 819 { 820 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 821 struct lpfc_hba *phba = vport->phba; 822 LPFC_MBOXQ_t *mbox; 823 int rc; 824 825 spin_lock_irq(shost->host_lock); 826 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 827 vport->fc_flag |= FC_PT2PT; 828 spin_unlock_irq(shost->host_lock); 829 830 /* If we are pt2pt with another NPort, force NPIV off! */ 831 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED; 832 833 /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */ 834 if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) { 835 lpfc_unregister_fcf_prep(phba); 836 837 spin_lock_irq(shost->host_lock); 838 vport->fc_flag &= ~FC_VFI_REGISTERED; 839 spin_unlock_irq(shost->host_lock); 840 phba->fc_topology_changed = 0; 841 } 842 843 rc = memcmp(&vport->fc_portname, &sp->portName, 844 sizeof(vport->fc_portname)); 845 846 if (rc >= 0) { 847 /* This side will initiate the PLOGI */ 848 spin_lock_irq(shost->host_lock); 849 vport->fc_flag |= FC_PT2PT_PLOGI; 850 spin_unlock_irq(shost->host_lock); 851 852 /* 853 * N_Port ID cannot be 0, set our Id to LocalID 854 * the other side will be RemoteID. 855 */ 856 857 /* not equal */ 858 if (rc) 859 vport->fc_myDID = PT2PT_LocalID; 860 861 /* If not registered with a transport, decrement ndlp reference 862 * count indicating that ndlp can be safely released when other 863 * references are removed. 864 */ 865 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) 866 lpfc_nlp_put(ndlp); 867 868 ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID); 869 if (!ndlp) { 870 /* 871 * Cannot find existing Fabric ndlp, so allocate a 872 * new one 873 */ 874 ndlp = lpfc_nlp_init(vport, PT2PT_RemoteID); 875 if (!ndlp) 876 goto fail; 877 } 878 879 memcpy(&ndlp->nlp_portname, &sp->portName, 880 sizeof(struct lpfc_name)); 881 memcpy(&ndlp->nlp_nodename, &sp->nodeName, 882 sizeof(struct lpfc_name)); 883 /* Set state will put ndlp onto node list if not already done */ 884 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 885 spin_lock_irq(&ndlp->lock); 886 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 887 spin_unlock_irq(&ndlp->lock); 888 889 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 890 if (!mbox) 891 goto fail; 892 893 lpfc_config_link(phba, mbox); 894 895 mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link; 896 mbox->vport = vport; 897 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 898 if (rc == MBX_NOT_FINISHED) { 899 mempool_free(mbox, phba->mbox_mem_pool); 900 goto fail; 901 } 902 } else { 903 /* This side will wait for the PLOGI. If not registered with 904 * a transport, decrement node reference count indicating that 905 * ndlp can be released when other references are removed. 
906 */ 907 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) 908 lpfc_nlp_put(ndlp); 909 910 /* Start discovery - this should just do CLEAR_LA */ 911 lpfc_disc_start(vport); 912 } 913 914 return 0; 915 fail: 916 return -ENXIO; 917 } 918 919 /** 920 * lpfc_cmpl_els_flogi - Completion callback function for flogi 921 * @phba: pointer to lpfc hba data structure. 922 * @cmdiocb: pointer to lpfc command iocb data structure. 923 * @rspiocb: pointer to lpfc response iocb data structure. 924 * 925 * This routine is the top-level completion callback function for issuing 926 * a Fabric Login (FLOGI) command. If the response IOCB reported error, 927 * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If 928 * retry has been made (either immediately or delayed with lpfc_els_retry() 929 * returning 1), the command IOCB will be released and function returned. 930 * If the retry attempt has been given up (possibly reach the maximum 931 * number of retries), one additional decrement of ndlp reference shall be 932 * invoked before going out after releasing the command IOCB. This will 933 * actually release the remote node (Note, lpfc_els_free_iocb() will also 934 * invoke one decrement of ndlp reference count). If no error reported in 935 * the IOCB status, the command Port ID field is used to determine whether 936 * this is a point-to-point topology or a fabric topology: if the Port ID 937 * field is assigned, it is a fabric topology; otherwise, it is a 938 * point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or 939 * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the 940 * specific topology completion conditions. 941 **/ 942 static void 943 lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 944 struct lpfc_iocbq *rspiocb) 945 { 946 struct lpfc_vport *vport = cmdiocb->vport; 947 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 948 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 949 IOCB_t *irsp; 950 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf, *prsp; 951 struct serv_parm *sp; 952 uint16_t fcf_index; 953 int rc; 954 u32 ulp_status, ulp_word4, tmo; 955 bool flogi_in_retry = false; 956 957 /* Check to see if link went down during discovery */ 958 if (lpfc_els_chk_latt(vport)) { 959 /* One additional decrement on node reference count to 960 * trigger the release of the node 961 */ 962 if (!(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) 963 lpfc_nlp_put(ndlp); 964 goto out; 965 } 966 967 ulp_status = get_job_ulpstatus(phba, rspiocb); 968 ulp_word4 = get_job_word4(phba, rspiocb); 969 970 if (phba->sli_rev == LPFC_SLI_REV4) { 971 tmo = get_wqe_tmo(cmdiocb); 972 } else { 973 irsp = &rspiocb->iocb; 974 tmo = irsp->ulpTimeout; 975 } 976 977 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 978 "FLOGI cmpl: status:x%x/x%x state:x%x", 979 ulp_status, ulp_word4, 980 vport->port_state); 981 982 if (ulp_status) { 983 /* 984 * In case of FIP mode, perform roundrobin FCF failover 985 * due to new FCF discovery 986 */ 987 if ((phba->hba_flag & HBA_FIP_SUPPORT) && 988 (phba->fcf.fcf_flag & FCF_DISCOVERY)) { 989 if (phba->link_state < LPFC_LINK_UP) 990 goto stop_rr_fcf_flogi; 991 if ((phba->fcoe_cvl_eventtag_attn == 992 phba->fcoe_cvl_eventtag) && 993 (ulp_status == IOSTAT_LOCAL_REJECT) && 994 ((ulp_word4 & IOERR_PARAM_MASK) == 995 IOERR_SLI_ABORTED)) 996 goto stop_rr_fcf_flogi; 997 else 998 phba->fcoe_cvl_eventtag_attn = 999 phba->fcoe_cvl_eventtag; 1000 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, 1001 "2611 FLOGI failed on FCF 
(x%x), " 1002 "status:x%x/x%x, tmo:x%x, perform " 1003 "roundrobin FCF failover\n", 1004 phba->fcf.current_rec.fcf_indx, 1005 ulp_status, ulp_word4, tmo); 1006 lpfc_sli4_set_fcf_flogi_fail(phba, 1007 phba->fcf.current_rec.fcf_indx); 1008 fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba); 1009 rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index); 1010 if (rc) 1011 goto out; 1012 } 1013 1014 stop_rr_fcf_flogi: 1015 /* FLOGI failure */ 1016 if (!(ulp_status == IOSTAT_LOCAL_REJECT && 1017 ((ulp_word4 & IOERR_PARAM_MASK) == 1018 IOERR_LOOP_OPEN_FAILURE))) 1019 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1020 "2858 FLOGI failure Status:x%x/x%x TMO" 1021 ":x%x Data x%x x%x\n", 1022 ulp_status, ulp_word4, tmo, 1023 phba->hba_flag, phba->fcf.fcf_flag); 1024 1025 /* Check for retry */ 1026 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 1027 /* Address a timing race with dev_loss. If dev_loss 1028 * is active on this FPort node, put the initial ref 1029 * count back to stop premature node release actions. 1030 */ 1031 lpfc_check_nlp_post_devloss(vport, ndlp); 1032 flogi_in_retry = true; 1033 goto out; 1034 } 1035 1036 /* The FLOGI will not be retried. If the FPort node is not 1037 * registered with the SCSI transport, remove the initial 1038 * reference to trigger node release. 1039 */ 1040 if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS) && 1041 !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) 1042 lpfc_nlp_put(ndlp); 1043 1044 lpfc_printf_vlog(vport, KERN_WARNING, LOG_TRACE_EVENT, 1045 "0150 FLOGI failure Status:x%x/x%x " 1046 "xri x%x TMO:x%x refcnt %d\n", 1047 ulp_status, ulp_word4, cmdiocb->sli4_xritag, 1048 tmo, kref_read(&ndlp->kref)); 1049 1050 /* If this is not a loop open failure, bail out */ 1051 if (!(ulp_status == IOSTAT_LOCAL_REJECT && 1052 ((ulp_word4 & IOERR_PARAM_MASK) == 1053 IOERR_LOOP_OPEN_FAILURE))) { 1054 /* FLOGI failure */ 1055 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1056 "0100 FLOGI failure Status:x%x/x%x " 1057 "TMO:x%x\n", 1058 ulp_status, ulp_word4, tmo); 1059 goto flogifail; 1060 } 1061 1062 /* FLOGI failed, so there is no fabric */ 1063 spin_lock_irq(shost->host_lock); 1064 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP | 1065 FC_PT2PT_NO_NVME); 1066 spin_unlock_irq(shost->host_lock); 1067 1068 /* If private loop, then allow max outstanding els to be 1069 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no 1070 * alpa map would take too long otherwise. 1071 */ 1072 if (phba->alpa_map[0] == 0) 1073 vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS; 1074 if ((phba->sli_rev == LPFC_SLI_REV4) && 1075 (!(vport->fc_flag & FC_VFI_REGISTERED) || 1076 (vport->fc_prevDID != vport->fc_myDID) || 1077 phba->fc_topology_changed)) { 1078 if (vport->fc_flag & FC_VFI_REGISTERED) { 1079 if (phba->fc_topology_changed) { 1080 lpfc_unregister_fcf_prep(phba); 1081 spin_lock_irq(shost->host_lock); 1082 vport->fc_flag &= ~FC_VFI_REGISTERED; 1083 spin_unlock_irq(shost->host_lock); 1084 phba->fc_topology_changed = 0; 1085 } else { 1086 lpfc_sli4_unreg_all_rpis(vport); 1087 } 1088 } 1089 1090 /* Do not register VFI if the driver aborted FLOGI */ 1091 if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4)) 1092 lpfc_issue_reg_vfi(vport); 1093 1094 lpfc_nlp_put(ndlp); 1095 goto out; 1096 } 1097 goto flogifail; 1098 } 1099 spin_lock_irq(shost->host_lock); 1100 vport->fc_flag &= ~FC_VPORT_CVL_RCVD; 1101 vport->fc_flag &= ~FC_VPORT_LOGO_RCVD; 1102 spin_unlock_irq(shost->host_lock); 1103 1104 /* 1105 * The FLOGI succeeded. Sync the data for the CPU before 1106 * accessing it. 
1107 */ 1108 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 1109 if (!prsp) 1110 goto out; 1111 sp = prsp->virt + sizeof(uint32_t); 1112 1113 /* FLOGI completes successfully */ 1114 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1115 "0101 FLOGI completes successfully, I/O tag:x%x " 1116 "xri x%x Data: x%x x%x x%x x%x x%x x%x x%x %d\n", 1117 cmdiocb->iotag, cmdiocb->sli4_xritag, 1118 ulp_word4, sp->cmn.e_d_tov, 1119 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution, 1120 vport->port_state, vport->fc_flag, 1121 sp->cmn.priority_tagging, kref_read(&ndlp->kref)); 1122 1123 if (sp->cmn.priority_tagging) 1124 vport->phba->pport->vmid_flag |= (LPFC_VMID_ISSUE_QFPA | 1125 LPFC_VMID_TYPE_PRIO); 1126 /* reinitialize the VMID datastructure before returning */ 1127 if (lpfc_is_vmid_enabled(phba)) 1128 lpfc_reinit_vmid(vport); 1129 1130 /* 1131 * Address a timing race with dev_loss. If dev_loss is active on 1132 * this FPort node, put the initial ref count back to stop premature 1133 * node release actions. 1134 */ 1135 lpfc_check_nlp_post_devloss(vport, ndlp); 1136 if (vport->port_state == LPFC_FLOGI) { 1137 /* 1138 * If Common Service Parameters indicate Nport 1139 * we are point to point, if Fport we are Fabric. 1140 */ 1141 if (sp->cmn.fPort) 1142 rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, 1143 ulp_word4); 1144 else if (!(phba->hba_flag & HBA_FCOE_MODE)) 1145 rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp); 1146 else { 1147 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1148 "2831 FLOGI response with cleared Fabric " 1149 "bit fcf_index 0x%x " 1150 "Switch Name %02x%02x%02x%02x%02x%02x%02x%02x " 1151 "Fabric Name " 1152 "%02x%02x%02x%02x%02x%02x%02x%02x\n", 1153 phba->fcf.current_rec.fcf_indx, 1154 phba->fcf.current_rec.switch_name[0], 1155 phba->fcf.current_rec.switch_name[1], 1156 phba->fcf.current_rec.switch_name[2], 1157 phba->fcf.current_rec.switch_name[3], 1158 phba->fcf.current_rec.switch_name[4], 1159 phba->fcf.current_rec.switch_name[5], 1160 phba->fcf.current_rec.switch_name[6], 1161 phba->fcf.current_rec.switch_name[7], 1162 phba->fcf.current_rec.fabric_name[0], 1163 phba->fcf.current_rec.fabric_name[1], 1164 phba->fcf.current_rec.fabric_name[2], 1165 phba->fcf.current_rec.fabric_name[3], 1166 phba->fcf.current_rec.fabric_name[4], 1167 phba->fcf.current_rec.fabric_name[5], 1168 phba->fcf.current_rec.fabric_name[6], 1169 phba->fcf.current_rec.fabric_name[7]); 1170 1171 lpfc_nlp_put(ndlp); 1172 spin_lock_irq(&phba->hbalock); 1173 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 1174 phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO); 1175 spin_unlock_irq(&phba->hbalock); 1176 phba->fcf.fcf_redisc_attempted = 0; /* reset */ 1177 goto out; 1178 } 1179 if (!rc) { 1180 /* Mark the FCF discovery process done */ 1181 if (phba->hba_flag & HBA_FIP_SUPPORT) 1182 lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP | 1183 LOG_ELS, 1184 "2769 FLOGI to FCF (x%x) " 1185 "completed successfully\n", 1186 phba->fcf.current_rec.fcf_indx); 1187 spin_lock_irq(&phba->hbalock); 1188 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 1189 phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO); 1190 spin_unlock_irq(&phba->hbalock); 1191 phba->fcf.fcf_redisc_attempted = 0; /* reset */ 1192 goto out; 1193 } 1194 } else if (vport->port_state > LPFC_FLOGI && 1195 vport->fc_flag & FC_PT2PT) { 1196 /* 1197 * In a p2p topology, it is possible that discovery has 1198 * already progressed, and this completion can be ignored. 1199 * Recheck the indicated topology. 
1200 */ 1201 if (!sp->cmn.fPort) 1202 goto out; 1203 } 1204 1205 flogifail: 1206 spin_lock_irq(&phba->hbalock); 1207 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 1208 spin_unlock_irq(&phba->hbalock); 1209 1210 if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4)) { 1211 /* FLOGI failed, so just use loop map to make discovery list */ 1212 lpfc_disc_list_loopmap(vport); 1213 1214 /* Start discovery */ 1215 lpfc_disc_start(vport); 1216 } else if (((ulp_status != IOSTAT_LOCAL_REJECT) || 1217 (((ulp_word4 & IOERR_PARAM_MASK) != 1218 IOERR_SLI_ABORTED) && 1219 ((ulp_word4 & IOERR_PARAM_MASK) != 1220 IOERR_SLI_DOWN))) && 1221 (phba->link_state != LPFC_CLEAR_LA)) { 1222 /* If FLOGI failed enable link interrupt. */ 1223 lpfc_issue_clear_la(phba, vport); 1224 } 1225 out: 1226 if (!flogi_in_retry) 1227 phba->hba_flag &= ~HBA_FLOGI_OUTSTANDING; 1228 1229 lpfc_els_free_iocb(phba, cmdiocb); 1230 lpfc_nlp_put(ndlp); 1231 } 1232 1233 /** 1234 * lpfc_cmpl_els_link_down - Completion callback function for ELS command 1235 * aborted during a link down 1236 * @phba: pointer to lpfc hba data structure. 1237 * @cmdiocb: pointer to lpfc command iocb data structure. 1238 * @rspiocb: pointer to lpfc response iocb data structure. 1239 * 1240 */ 1241 static void 1242 lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1243 struct lpfc_iocbq *rspiocb) 1244 { 1245 uint32_t *pcmd; 1246 uint32_t cmd; 1247 u32 ulp_status, ulp_word4; 1248 1249 pcmd = (uint32_t *)cmdiocb->cmd_dmabuf->virt; 1250 cmd = *pcmd; 1251 1252 ulp_status = get_job_ulpstatus(phba, rspiocb); 1253 ulp_word4 = get_job_word4(phba, rspiocb); 1254 1255 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 1256 "6445 ELS completes after LINK_DOWN: " 1257 " Status %x/%x cmd x%x flg x%x\n", 1258 ulp_status, ulp_word4, cmd, 1259 cmdiocb->cmd_flag); 1260 1261 if (cmdiocb->cmd_flag & LPFC_IO_FABRIC) { 1262 cmdiocb->cmd_flag &= ~LPFC_IO_FABRIC; 1263 atomic_dec(&phba->fabric_iocb_count); 1264 } 1265 lpfc_els_free_iocb(phba, cmdiocb); 1266 } 1267 1268 /** 1269 * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport 1270 * @vport: pointer to a host virtual N_Port data structure. 1271 * @ndlp: pointer to a node-list data structure. 1272 * @retry: number of retries to the command IOCB. 1273 * 1274 * This routine issues a Fabric Login (FLOGI) Request ELS command 1275 * for a @vport. The initiator service parameters are put into the payload 1276 * of the FLOGI Request IOCB and the top-level callback function pointer 1277 * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback 1278 * function field. The lpfc_issue_fabric_iocb routine is invoked to send 1279 * out FLOGI ELS command with one outstanding fabric IOCB at a time. 1280 * 1281 * Note that the ndlp reference count will be incremented by 1 for holding the 1282 * ndlp and the reference to ndlp will be stored into the ndlp field of 1283 * the IOCB for the completion callback function to the FLOGI ELS command. 
1284 * 1285 * Return code 1286 * 0 - successfully issued flogi iocb for @vport 1287 * 1 - failed to issue flogi iocb for @vport 1288 **/ 1289 static int 1290 lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1291 uint8_t retry) 1292 { 1293 struct lpfc_hba *phba = vport->phba; 1294 struct serv_parm *sp; 1295 union lpfc_wqe128 *wqe = NULL; 1296 IOCB_t *icmd = NULL; 1297 struct lpfc_iocbq *elsiocb; 1298 struct lpfc_iocbq defer_flogi_acc; 1299 u8 *pcmd, ct; 1300 uint16_t cmdsize; 1301 uint32_t tmo, did; 1302 int rc; 1303 1304 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 1305 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 1306 ndlp->nlp_DID, ELS_CMD_FLOGI); 1307 1308 if (!elsiocb) 1309 return 1; 1310 1311 wqe = &elsiocb->wqe; 1312 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 1313 icmd = &elsiocb->iocb; 1314 1315 /* For FLOGI request, remainder of payload is service parameters */ 1316 *((uint32_t *) (pcmd)) = ELS_CMD_FLOGI; 1317 pcmd += sizeof(uint32_t); 1318 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); 1319 sp = (struct serv_parm *) pcmd; 1320 1321 /* Setup CSPs accordingly for Fabric */ 1322 sp->cmn.e_d_tov = 0; 1323 sp->cmn.w2.r_a_tov = 0; 1324 sp->cmn.virtual_fabric_support = 0; 1325 sp->cls1.classValid = 0; 1326 if (sp->cmn.fcphLow < FC_PH3) 1327 sp->cmn.fcphLow = FC_PH3; 1328 if (sp->cmn.fcphHigh < FC_PH3) 1329 sp->cmn.fcphHigh = FC_PH3; 1330 1331 /* Determine if switch supports priority tagging */ 1332 if (phba->cfg_vmid_priority_tagging) { 1333 sp->cmn.priority_tagging = 1; 1334 /* lpfc_vmid_host_uuid is combination of wwpn and wwnn */ 1335 if (uuid_is_null((uuid_t *)vport->lpfc_vmid_host_uuid)) { 1336 memcpy(vport->lpfc_vmid_host_uuid, phba->wwpn, 1337 sizeof(phba->wwpn)); 1338 memcpy(&vport->lpfc_vmid_host_uuid[8], phba->wwnn, 1339 sizeof(phba->wwnn)); 1340 } 1341 } 1342 1343 if (phba->sli_rev == LPFC_SLI_REV4) { 1344 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 1345 LPFC_SLI_INTF_IF_TYPE_0) { 1346 /* FLOGI needs to be 3 for WQE FCFI */ 1347 ct = SLI4_CT_FCFI; 1348 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct); 1349 1350 /* Set the fcfi to the fcfi we registered with */ 1351 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 1352 phba->fcf.fcfi); 1353 } 1354 1355 /* Can't do SLI4 class2 without support sequence coalescing */ 1356 sp->cls2.classValid = 0; 1357 sp->cls2.seqDelivery = 0; 1358 } else { 1359 /* Historical, setting sequential-delivery bit for SLI3 */ 1360 sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0; 1361 sp->cls3.seqDelivery = (sp->cls3.classValid) ? 1 : 0; 1362 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { 1363 sp->cmn.request_multiple_Nport = 1; 1364 /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */ 1365 icmd->ulpCt_h = 1; 1366 icmd->ulpCt_l = 0; 1367 } else { 1368 sp->cmn.request_multiple_Nport = 0; 1369 } 1370 1371 if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) { 1372 icmd->un.elsreq64.myID = 0; 1373 icmd->un.elsreq64.fl = 1; 1374 } 1375 } 1376 1377 tmo = phba->fc_ratov; 1378 phba->fc_ratov = LPFC_DISC_FLOGI_TMO; 1379 lpfc_set_disctmo(vport); 1380 phba->fc_ratov = tmo; 1381 1382 phba->fc_stat.elsXmitFLOGI++; 1383 elsiocb->cmd_cmpl = lpfc_cmpl_els_flogi; 1384 1385 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1386 "Issue FLOGI: opt:x%x", 1387 phba->sli3_options, 0, 0); 1388 1389 elsiocb->ndlp = lpfc_nlp_get(ndlp); 1390 if (!elsiocb->ndlp) { 1391 lpfc_els_free_iocb(phba, elsiocb); 1392 return 1; 1393 } 1394 1395 /* Avoid race with FLOGI completion and hba_flags. 
*/ 1396 phba->hba_flag |= (HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING); 1397 1398 rc = lpfc_issue_fabric_iocb(phba, elsiocb); 1399 if (rc == IOCB_ERROR) { 1400 phba->hba_flag &= ~(HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING); 1401 lpfc_els_free_iocb(phba, elsiocb); 1402 lpfc_nlp_put(ndlp); 1403 return 1; 1404 } 1405 1406 /* Clear external loopback plug detected flag */ 1407 phba->link_flag &= ~LS_EXTERNAL_LOOPBACK; 1408 1409 /* Check for a deferred FLOGI ACC condition */ 1410 if (phba->defer_flogi_acc_flag) { 1411 /* lookup ndlp for received FLOGI */ 1412 ndlp = lpfc_findnode_did(vport, 0); 1413 if (!ndlp) 1414 return 0; 1415 1416 did = vport->fc_myDID; 1417 vport->fc_myDID = Fabric_DID; 1418 1419 memset(&defer_flogi_acc, 0, sizeof(struct lpfc_iocbq)); 1420 1421 if (phba->sli_rev == LPFC_SLI_REV4) { 1422 bf_set(wqe_ctxt_tag, 1423 &defer_flogi_acc.wqe.xmit_els_rsp.wqe_com, 1424 phba->defer_flogi_acc_rx_id); 1425 bf_set(wqe_rcvoxid, 1426 &defer_flogi_acc.wqe.xmit_els_rsp.wqe_com, 1427 phba->defer_flogi_acc_ox_id); 1428 } else { 1429 icmd = &defer_flogi_acc.iocb; 1430 icmd->ulpContext = phba->defer_flogi_acc_rx_id; 1431 icmd->unsli3.rcvsli3.ox_id = 1432 phba->defer_flogi_acc_ox_id; 1433 } 1434 1435 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1436 "3354 Xmit deferred FLOGI ACC: rx_id: x%x," 1437 " ox_id: x%x, hba_flag x%x\n", 1438 phba->defer_flogi_acc_rx_id, 1439 phba->defer_flogi_acc_ox_id, phba->hba_flag); 1440 1441 /* Send deferred FLOGI ACC */ 1442 lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, &defer_flogi_acc, 1443 ndlp, NULL); 1444 1445 phba->defer_flogi_acc_flag = false; 1446 vport->fc_myDID = did; 1447 1448 /* Decrement ndlp reference count to indicate the node can be 1449 * released when other references are removed. 1450 */ 1451 lpfc_nlp_put(ndlp); 1452 } 1453 1454 return 0; 1455 } 1456 1457 /** 1458 * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs 1459 * @phba: pointer to lpfc hba data structure. 1460 * 1461 * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs 1462 * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq 1463 * list and issues an abort IOCB commond on each outstanding IOCB that 1464 * contains a active Fabric_DID ndlp. Note that this function is to issue 1465 * the abort IOCB command on all the outstanding IOCBs, thus when this 1466 * function returns, it does not guarantee all the IOCBs are actually aborted. 1467 * 1468 * Return code 1469 * 0 - Successfully issued abort iocb on all outstanding flogis (Always 0) 1470 **/ 1471 int 1472 lpfc_els_abort_flogi(struct lpfc_hba *phba) 1473 { 1474 struct lpfc_sli_ring *pring; 1475 struct lpfc_iocbq *iocb, *next_iocb; 1476 struct lpfc_nodelist *ndlp; 1477 u32 ulp_command; 1478 1479 /* Abort outstanding I/O on NPort <nlp_DID> */ 1480 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 1481 "0201 Abort outstanding I/O on NPort x%x\n", 1482 Fabric_DID); 1483 1484 pring = lpfc_phba_elsring(phba); 1485 if (unlikely(!pring)) 1486 return -EIO; 1487 1488 /* 1489 * Check the txcmplq for an iocb that matches the nport the driver is 1490 * searching for. 
1491 */ 1492 spin_lock_irq(&phba->hbalock); 1493 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { 1494 ulp_command = get_job_cmnd(phba, iocb); 1495 if (ulp_command == CMD_ELS_REQUEST64_CR) { 1496 ndlp = iocb->ndlp; 1497 if (ndlp && ndlp->nlp_DID == Fabric_DID) { 1498 if ((phba->pport->fc_flag & FC_PT2PT) && 1499 !(phba->pport->fc_flag & FC_PT2PT_PLOGI)) 1500 iocb->fabric_cmd_cmpl = 1501 lpfc_ignore_els_cmpl; 1502 lpfc_sli_issue_abort_iotag(phba, pring, iocb, 1503 NULL); 1504 } 1505 } 1506 } 1507 /* Make sure HBA is alive */ 1508 lpfc_issue_hb_tmo(phba); 1509 1510 spin_unlock_irq(&phba->hbalock); 1511 1512 return 0; 1513 } 1514 1515 /** 1516 * lpfc_initial_flogi - Issue an initial fabric login for a vport 1517 * @vport: pointer to a host virtual N_Port data structure. 1518 * 1519 * This routine issues an initial Fabric Login (FLOGI) for the @vport 1520 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from 1521 * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and 1522 * put it into the @vport's ndlp list. If an inactive ndlp found on the list, 1523 * it will just be enabled and made active. The lpfc_issue_els_flogi() routine 1524 * is then invoked with the @vport and the ndlp to perform the FLOGI for the 1525 * @vport. 1526 * 1527 * Return code 1528 * 0 - failed to issue initial flogi for @vport 1529 * 1 - successfully issued initial flogi for @vport 1530 **/ 1531 int 1532 lpfc_initial_flogi(struct lpfc_vport *vport) 1533 { 1534 struct lpfc_nodelist *ndlp; 1535 1536 vport->port_state = LPFC_FLOGI; 1537 lpfc_set_disctmo(vport); 1538 1539 /* First look for the Fabric ndlp */ 1540 ndlp = lpfc_findnode_did(vport, Fabric_DID); 1541 if (!ndlp) { 1542 /* Cannot find existing Fabric ndlp, so allocate a new one */ 1543 ndlp = lpfc_nlp_init(vport, Fabric_DID); 1544 if (!ndlp) 1545 return 0; 1546 /* Set the node type */ 1547 ndlp->nlp_type |= NLP_FABRIC; 1548 1549 /* Put ndlp onto node list */ 1550 lpfc_enqueue_node(vport, ndlp); 1551 } 1552 1553 /* Reset the Fabric flag, topology change may have happened */ 1554 vport->fc_flag &= ~FC_FABRIC; 1555 if (lpfc_issue_els_flogi(vport, ndlp, 0)) { 1556 /* A node reference should be retained while registered with a 1557 * transport or dev-loss-evt work is pending. 1558 * Otherwise, decrement node reference to trigger release. 1559 */ 1560 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && 1561 !(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) 1562 lpfc_nlp_put(ndlp); 1563 return 0; 1564 } 1565 return 1; 1566 } 1567 1568 /** 1569 * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport 1570 * @vport: pointer to a host virtual N_Port data structure. 1571 * 1572 * This routine issues an initial Fabric Discover (FDISC) for the @vport 1573 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from 1574 * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and 1575 * put it into the @vport's ndlp list. If an inactive ndlp found on the list, 1576 * it will just be enabled and made active. The lpfc_issue_els_fdisc() routine 1577 * is then invoked with the @vport and the ndlp to perform the FDISC for the 1578 * @vport. 
1579 * 1580 * Return code 1581 * 0 - failed to issue initial fdisc for @vport 1582 * 1 - successfully issued initial fdisc for @vport 1583 **/ 1584 int 1585 lpfc_initial_fdisc(struct lpfc_vport *vport) 1586 { 1587 struct lpfc_nodelist *ndlp; 1588 1589 /* First look for the Fabric ndlp */ 1590 ndlp = lpfc_findnode_did(vport, Fabric_DID); 1591 if (!ndlp) { 1592 /* Cannot find existing Fabric ndlp, so allocate a new one */ 1593 ndlp = lpfc_nlp_init(vport, Fabric_DID); 1594 if (!ndlp) 1595 return 0; 1596 1597 /* NPIV is only supported in Fabrics. */ 1598 ndlp->nlp_type |= NLP_FABRIC; 1599 1600 /* Put ndlp onto node list */ 1601 lpfc_enqueue_node(vport, ndlp); 1602 } 1603 1604 if (lpfc_issue_els_fdisc(vport, ndlp, 0)) { 1605 /* A node reference should be retained while registered with a 1606 * transport or dev-loss-evt work is pending. 1607 * Otherwise, decrement node reference to trigger release. 1608 */ 1609 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && 1610 !(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) 1611 lpfc_nlp_put(ndlp); 1612 return 0; 1613 } 1614 return 1; 1615 } 1616 1617 /** 1618 * lpfc_more_plogi - Check and issue remaining plogis for a vport 1619 * @vport: pointer to a host virtual N_Port data structure. 1620 * 1621 * This routine checks whether there are more remaining Port Logins 1622 * (PLOGI) to be issued for the @vport. If so, it will invoke the routine 1623 * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes 1624 * to issue ELS PLOGIs up to the configured discover threads with the 1625 * @vport (@vport->cfg_discovery_threads). The function also decrement 1626 * the @vport's num_disc_node by 1 if it is not already 0. 1627 **/ 1628 void 1629 lpfc_more_plogi(struct lpfc_vport *vport) 1630 { 1631 if (vport->num_disc_nodes) 1632 vport->num_disc_nodes--; 1633 1634 /* Continue discovery with <num_disc_nodes> PLOGIs to go */ 1635 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 1636 "0232 Continue discovery with %d PLOGIs to go " 1637 "Data: x%x x%x x%x\n", 1638 vport->num_disc_nodes, vport->fc_plogi_cnt, 1639 vport->fc_flag, vport->port_state); 1640 /* Check to see if there are more PLOGIs to be sent */ 1641 if (vport->fc_flag & FC_NLP_MORE) 1642 /* go thru NPR nodes and issue any remaining ELS PLOGIs */ 1643 lpfc_els_disc_plogi(vport); 1644 1645 return; 1646 } 1647 1648 /** 1649 * lpfc_plogi_confirm_nport - Confirm plogi wwpn matches stored ndlp 1650 * @phba: pointer to lpfc hba data structure. 1651 * @prsp: pointer to response IOCB payload. 1652 * @ndlp: pointer to a node-list data structure. 1653 * 1654 * This routine checks and indicates whether the WWPN of an N_Port, retrieved 1655 * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_POrt. 1656 * The following cases are considered N_Port confirmed: 1657 * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on vport list and matches 1658 * the WWPN of the N_Port logged into; 3) The @ndlp is not on vport list but 1659 * it does not have WWPN assigned either. If the WWPN is confirmed, the 1660 * pointer to the @ndlp will be returned. If the WWPN is not confirmed: 1661 * 1) if there is a node on vport list other than the @ndlp with the same 1662 * WWPN of the N_Port PLOGI logged into, the lpfc_unreg_rpi() will be invoked 1663 * on that node to release the RPI associated with the node; 2) if there is 1664 * no node found on vport list with the same WWPN of the N_Port PLOGI logged 1665 * into, a new node shall be allocated (or activated). 
In either case, the 1666 * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall 1667 * be released and the new_ndlp shall be put on to the vport node list and 1668 * its pointer returned as the confirmed node. 1669 * 1670 * Note that before the @ndlp got "released", the keepDID from not-matching 1671 * or inactive "new_ndlp" on the vport node list is assigned to the nlp_DID 1672 * of the @ndlp. This is because the release of @ndlp is actually to put it 1673 * into an inactive state on the vport node list and the vport node list 1674 * management algorithm does not allow two node with a same DID. 1675 * 1676 * Return code 1677 * pointer to the PLOGI N_Port @ndlp 1678 **/ 1679 static struct lpfc_nodelist * 1680 lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, 1681 struct lpfc_nodelist *ndlp) 1682 { 1683 struct lpfc_vport *vport = ndlp->vport; 1684 struct lpfc_nodelist *new_ndlp; 1685 struct serv_parm *sp; 1686 uint8_t name[sizeof(struct lpfc_name)]; 1687 uint32_t keepDID = 0, keep_nlp_flag = 0; 1688 uint32_t keep_new_nlp_flag = 0; 1689 uint16_t keep_nlp_state; 1690 u32 keep_nlp_fc4_type = 0; 1691 struct lpfc_nvme_rport *keep_nrport = NULL; 1692 unsigned long *active_rrqs_xri_bitmap = NULL; 1693 1694 /* Fabric nodes can have the same WWPN so we don't bother searching 1695 * by WWPN. Just return the ndlp that was given to us. 1696 */ 1697 if (ndlp->nlp_type & NLP_FABRIC) 1698 return ndlp; 1699 1700 sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t)); 1701 memset(name, 0, sizeof(struct lpfc_name)); 1702 1703 /* Now we find out if the NPort we are logging into, matches the WWPN 1704 * we have for that ndlp. If not, we have some work to do. 1705 */ 1706 new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName); 1707 1708 /* return immediately if the WWPN matches ndlp */ 1709 if (!new_ndlp || (new_ndlp == ndlp)) 1710 return ndlp; 1711 1712 /* 1713 * Unregister from backend if not done yet. Could have been skipped 1714 * due to ADISC 1715 */ 1716 lpfc_nlp_unreg_node(vport, new_ndlp); 1717 1718 if (phba->sli_rev == LPFC_SLI_REV4) { 1719 active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool, 1720 GFP_KERNEL); 1721 if (active_rrqs_xri_bitmap) 1722 memset(active_rrqs_xri_bitmap, 0, 1723 phba->cfg_rrq_xri_bitmap_sz); 1724 } 1725 1726 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE, 1727 "3178 PLOGI confirm: ndlp x%x x%x x%x: " 1728 "new_ndlp x%x x%x x%x\n", 1729 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_fc4_type, 1730 (new_ndlp ? new_ndlp->nlp_DID : 0), 1731 (new_ndlp ? new_ndlp->nlp_flag : 0), 1732 (new_ndlp ? new_ndlp->nlp_fc4_type : 0)); 1733 1734 keepDID = new_ndlp->nlp_DID; 1735 1736 if (phba->sli_rev == LPFC_SLI_REV4 && active_rrqs_xri_bitmap) 1737 memcpy(active_rrqs_xri_bitmap, new_ndlp->active_rrqs_xri_bitmap, 1738 phba->cfg_rrq_xri_bitmap_sz); 1739 1740 /* At this point in this routine, we know new_ndlp will be 1741 * returned. however, any previous GID_FTs that were done 1742 * would have updated nlp_fc4_type in ndlp, so we must ensure 1743 * new_ndlp has the right value. 
1744 */ 1745 if (vport->fc_flag & FC_FABRIC) { 1746 keep_nlp_fc4_type = new_ndlp->nlp_fc4_type; 1747 new_ndlp->nlp_fc4_type = ndlp->nlp_fc4_type; 1748 } 1749 1750 lpfc_unreg_rpi(vport, new_ndlp); 1751 new_ndlp->nlp_DID = ndlp->nlp_DID; 1752 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state; 1753 if (phba->sli_rev == LPFC_SLI_REV4) 1754 memcpy(new_ndlp->active_rrqs_xri_bitmap, 1755 ndlp->active_rrqs_xri_bitmap, 1756 phba->cfg_rrq_xri_bitmap_sz); 1757 1758 /* Lock both ndlps */ 1759 spin_lock_irq(&ndlp->lock); 1760 spin_lock_irq(&new_ndlp->lock); 1761 keep_new_nlp_flag = new_ndlp->nlp_flag; 1762 keep_nlp_flag = ndlp->nlp_flag; 1763 new_ndlp->nlp_flag = ndlp->nlp_flag; 1764 1765 /* if new_ndlp had NLP_UNREG_INP set, keep it */ 1766 if (keep_new_nlp_flag & NLP_UNREG_INP) 1767 new_ndlp->nlp_flag |= NLP_UNREG_INP; 1768 else 1769 new_ndlp->nlp_flag &= ~NLP_UNREG_INP; 1770 1771 /* if new_ndlp had NLP_RPI_REGISTERED set, keep it */ 1772 if (keep_new_nlp_flag & NLP_RPI_REGISTERED) 1773 new_ndlp->nlp_flag |= NLP_RPI_REGISTERED; 1774 else 1775 new_ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; 1776 1777 /* 1778 * Retain the DROPPED flag. This will take care of the init 1779 * refcount when affecting the state change 1780 */ 1781 if (keep_new_nlp_flag & NLP_DROPPED) 1782 new_ndlp->nlp_flag |= NLP_DROPPED; 1783 else 1784 new_ndlp->nlp_flag &= ~NLP_DROPPED; 1785 1786 ndlp->nlp_flag = keep_new_nlp_flag; 1787 1788 /* if ndlp had NLP_UNREG_INP set, keep it */ 1789 if (keep_nlp_flag & NLP_UNREG_INP) 1790 ndlp->nlp_flag |= NLP_UNREG_INP; 1791 else 1792 ndlp->nlp_flag &= ~NLP_UNREG_INP; 1793 1794 /* if ndlp had NLP_RPI_REGISTERED set, keep it */ 1795 if (keep_nlp_flag & NLP_RPI_REGISTERED) 1796 ndlp->nlp_flag |= NLP_RPI_REGISTERED; 1797 else 1798 ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; 1799 1800 /* 1801 * Retain the DROPPED flag. This will take care of the init 1802 * refcount when affecting the state change 1803 */ 1804 if (keep_nlp_flag & NLP_DROPPED) 1805 ndlp->nlp_flag |= NLP_DROPPED; 1806 else 1807 ndlp->nlp_flag &= ~NLP_DROPPED; 1808 1809 spin_unlock_irq(&new_ndlp->lock); 1810 spin_unlock_irq(&ndlp->lock); 1811 1812 /* Set nlp_states accordingly */ 1813 keep_nlp_state = new_ndlp->nlp_state; 1814 lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state); 1815 1816 /* interchange the nvme remoteport structs */ 1817 keep_nrport = new_ndlp->nrport; 1818 new_ndlp->nrport = ndlp->nrport; 1819 1820 /* Move this back to NPR state */ 1821 if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) { 1822 /* The ndlp doesn't have a portname yet, but does have an 1823 * NPort ID. The new_ndlp portname matches the Rport's 1824 * portname. Reinstantiate the new_ndlp and reset the ndlp. 1825 */ 1826 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1827 "3179 PLOGI confirm NEW: %x %x\n", 1828 new_ndlp->nlp_DID, keepDID); 1829 1830 /* Two ndlps cannot have the same did on the nodelist. 1831 * The KeepDID and keep_nlp_fc4_type need to be swapped 1832 * because ndlp is inflight with no WWPN. 
1833 */ 1834 ndlp->nlp_DID = keepDID; 1835 ndlp->nlp_fc4_type = keep_nlp_fc4_type; 1836 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state); 1837 if (phba->sli_rev == LPFC_SLI_REV4 && 1838 active_rrqs_xri_bitmap) 1839 memcpy(ndlp->active_rrqs_xri_bitmap, 1840 active_rrqs_xri_bitmap, 1841 phba->cfg_rrq_xri_bitmap_sz); 1842 1843 } else { 1844 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1845 "3180 PLOGI confirm SWAP: %x %x\n", 1846 new_ndlp->nlp_DID, keepDID); 1847 1848 lpfc_unreg_rpi(vport, ndlp); 1849 1850 /* The ndlp and new_ndlp both have WWPNs but are swapping 1851 * NPort Ids and attributes. 1852 */ 1853 ndlp->nlp_DID = keepDID; 1854 ndlp->nlp_fc4_type = keep_nlp_fc4_type; 1855 1856 if (phba->sli_rev == LPFC_SLI_REV4 && 1857 active_rrqs_xri_bitmap) 1858 memcpy(ndlp->active_rrqs_xri_bitmap, 1859 active_rrqs_xri_bitmap, 1860 phba->cfg_rrq_xri_bitmap_sz); 1861 1862 /* Since we are switching over to the new_ndlp, 1863 * reset the old ndlp state 1864 */ 1865 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || 1866 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) 1867 keep_nlp_state = NLP_STE_NPR_NODE; 1868 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state); 1869 ndlp->nrport = keep_nrport; 1870 } 1871 1872 /* 1873 * If ndlp is not associated with any rport we can drop it here else 1874 * let dev_loss_tmo_callbk trigger DEVICE_RM event 1875 */ 1876 if (!ndlp->rport && (ndlp->nlp_state == NLP_STE_NPR_NODE)) 1877 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 1878 1879 if (phba->sli_rev == LPFC_SLI_REV4 && 1880 active_rrqs_xri_bitmap) 1881 mempool_free(active_rrqs_xri_bitmap, 1882 phba->active_rrq_pool); 1883 1884 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE, 1885 "3173 PLOGI confirm exit: new_ndlp x%x x%x x%x\n", 1886 new_ndlp->nlp_DID, new_ndlp->nlp_flag, 1887 new_ndlp->nlp_fc4_type); 1888 1889 return new_ndlp; 1890 } 1891 1892 /** 1893 * lpfc_end_rscn - Check and handle more rscn for a vport 1894 * @vport: pointer to a host virtual N_Port data structure. 1895 * 1896 * This routine checks whether more Registration State Change 1897 * Notifications (RSCNs) came in while the discovery state machine was in 1898 * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be 1899 * invoked to handle the additional RSCNs for the @vport. Otherwise, the 1900 * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of 1901 * handling the RSCNs. 1902 **/ 1903 void 1904 lpfc_end_rscn(struct lpfc_vport *vport) 1905 { 1906 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1907 1908 if (vport->fc_flag & FC_RSCN_MODE) { 1909 /* 1910 * Check to see if more RSCNs came in while we were 1911 * processing this one. 1912 */ 1913 if (vport->fc_rscn_id_cnt || 1914 (vport->fc_flag & FC_RSCN_DISCOVERY) != 0) 1915 lpfc_els_handle_rscn(vport); 1916 else { 1917 spin_lock_irq(shost->host_lock); 1918 vport->fc_flag &= ~FC_RSCN_MODE; 1919 spin_unlock_irq(shost->host_lock); 1920 } 1921 } 1922 } 1923 1924 /** 1925 * lpfc_cmpl_els_rrq - Completion handled for els RRQs. 1926 * @phba: pointer to lpfc hba data structure. 1927 * @cmdiocb: pointer to lpfc command iocb data structure. 1928 * @rspiocb: pointer to lpfc response iocb data structure. 1929 * 1930 * This routine will call the clear rrq function to free the rrq and 1931 * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not 1932 * exist then the clear_rrq is still called because the rrq needs to 1933 * be freed. 
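 *
 * Regardless of the completion status, the handler ends with the same
 * teardown used by the other ELS completions in this file: clear the
 * active RRQ, free the command IOCB, and drop the ndlp reference that
 * was taken when the RRQ was issued, i.e.
 *
 *      lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
 *      lpfc_els_free_iocb(phba, cmdiocb);
 *      lpfc_nlp_put(ndlp);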
1934 **/
1935 
1936 static void
1937 lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1938 struct lpfc_iocbq *rspiocb)
1939 {
1940 struct lpfc_vport *vport = cmdiocb->vport;
1941 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
1942 struct lpfc_node_rrq *rrq;
1943 u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
1944 u32 ulp_word4 = get_job_word4(phba, rspiocb);
1945 
1946 /* we pass cmdiocb to state machine which needs rspiocb as well */
1947 rrq = cmdiocb->context_un.rrq;
1948 cmdiocb->rsp_iocb = rspiocb;
1949 
1950 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1951 "RRQ cmpl: status:x%x/x%x did:x%x",
1952 ulp_status, ulp_word4,
1953 get_job_els_rsp64_did(phba, cmdiocb));
1954 
1955 
1956 /* rrq completes to NPort <nlp_DID> */
1957 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1958 "2880 RRQ completes to DID x%x "
1959 "Data: x%x x%x x%x x%x x%x\n",
1960 ndlp->nlp_DID, ulp_status, ulp_word4,
1961 get_wqe_tmo(cmdiocb), rrq->xritag, rrq->rxid);
1962 
1963 if (ulp_status) {
1964 /* Check for retry */
1965 /* RRQ failed. Don't print the vport-to-vport rjts */
1966 if (ulp_status != IOSTAT_LS_RJT ||
1967 (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) &&
1968 ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) ||
1969 (phba)->pport->cfg_log_verbose & LOG_ELS)
1970 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1971 "2881 RRQ failure DID:%06X Status:"
1972 "x%x/x%x\n",
1973 ndlp->nlp_DID, ulp_status,
1974 ulp_word4);
1975 }
1976 
1977 lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
1978 lpfc_els_free_iocb(phba, cmdiocb);
1979 lpfc_nlp_put(ndlp);
1980 return;
1981 }
1982 /**
1983 * lpfc_cmpl_els_plogi - Completion callback function for plogi
1984 * @phba: pointer to lpfc hba data structure.
1985 * @cmdiocb: pointer to lpfc command iocb data structure.
1986 * @rspiocb: pointer to lpfc response iocb data structure.
1987 *
1988 * This routine is the completion callback function for issuing the Port
1989 * Login (PLOGI) command. For PLOGI completion, there must be an active
1990 * ndlp on the vport node list that matches the remote node ID from the
1991 * PLOGI response IOCB. If no such ndlp exists, the PLOGI is simply
1992 * ignored and the command IOCB released. The PLOGI response IOCB status is
1993 * checked for error conditions. If an error status is reported, a PLOGI
1994 * retry shall be attempted by invoking the lpfc_els_retry() routine.
1995 * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on
1996 * the ndlp and the NLP_EVT_CMPL_PLOGI event posted to the Discovery State
1997 * Machine (DSM) for this PLOGI completion. Finally, it checks whether
1998 * there are additional N_Port nodes on the vport that need to perform
1999 * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue additional
2000 * PLOGIs.
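 *
 * In outline (a simplified sketch of the flow implemented below, not a
 * literal transcription):
 *
 *      ndlp = lpfc_findnode_did(vport, did);
 *      if (ulp_status) {
 *              retry via lpfc_els_retry() or post NLP_EVT_CMPL_PLOGI,
 *              and post NLP_EVT_DEVICE_RM if the node is unregistered;
 *      } else {
 *              ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
 *              post NLP_EVT_CMPL_PLOGI to the DSM;
 *      }
 *      if (disc && vport->num_disc_nodes)
 *              lpfc_more_plogi(vport);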
2001 **/ 2002 static void 2003 lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2004 struct lpfc_iocbq *rspiocb) 2005 { 2006 struct lpfc_vport *vport = cmdiocb->vport; 2007 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2008 IOCB_t *irsp; 2009 struct lpfc_nodelist *ndlp, *free_ndlp; 2010 struct lpfc_dmabuf *prsp; 2011 int disc; 2012 struct serv_parm *sp = NULL; 2013 u32 ulp_status, ulp_word4, did, iotag; 2014 bool release_node = false; 2015 2016 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2017 cmdiocb->rsp_iocb = rspiocb; 2018 2019 ulp_status = get_job_ulpstatus(phba, rspiocb); 2020 ulp_word4 = get_job_word4(phba, rspiocb); 2021 did = get_job_els_rsp64_did(phba, cmdiocb); 2022 2023 if (phba->sli_rev == LPFC_SLI_REV4) { 2024 iotag = get_wqe_reqtag(cmdiocb); 2025 } else { 2026 irsp = &rspiocb->iocb; 2027 iotag = irsp->ulpIoTag; 2028 } 2029 2030 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2031 "PLOGI cmpl: status:x%x/x%x did:x%x", 2032 ulp_status, ulp_word4, did); 2033 2034 ndlp = lpfc_findnode_did(vport, did); 2035 if (!ndlp) { 2036 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2037 "0136 PLOGI completes to NPort x%x " 2038 "with no ndlp. Data: x%x x%x x%x\n", 2039 did, ulp_status, ulp_word4, iotag); 2040 goto out_freeiocb; 2041 } 2042 2043 /* Since ndlp can be freed in the disc state machine, note if this node 2044 * is being used during discovery. 2045 */ 2046 spin_lock_irq(&ndlp->lock); 2047 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 2048 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2049 spin_unlock_irq(&ndlp->lock); 2050 2051 /* PLOGI completes to NPort <nlp_DID> */ 2052 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2053 "0102 PLOGI completes to NPort x%06x " 2054 "Data: x%x x%x x%x x%x x%x\n", 2055 ndlp->nlp_DID, ndlp->nlp_fc4_type, 2056 ulp_status, ulp_word4, 2057 disc, vport->num_disc_nodes); 2058 2059 /* Check to see if link went down during discovery */ 2060 if (lpfc_els_chk_latt(vport)) { 2061 spin_lock_irq(&ndlp->lock); 2062 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2063 spin_unlock_irq(&ndlp->lock); 2064 goto out; 2065 } 2066 2067 if (ulp_status) { 2068 /* Check for retry */ 2069 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2070 /* ELS command is being retried */ 2071 if (disc) { 2072 spin_lock_irq(&ndlp->lock); 2073 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2074 spin_unlock_irq(&ndlp->lock); 2075 } 2076 goto out; 2077 } 2078 /* PLOGI failed Don't print the vport to vport rjts */ 2079 if (ulp_status != IOSTAT_LS_RJT || 2080 (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) && 2081 ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) || 2082 (phba)->pport->cfg_log_verbose & LOG_ELS) 2083 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2084 "2753 PLOGI failure DID:%06X " 2085 "Status:x%x/x%x\n", 2086 ndlp->nlp_DID, ulp_status, 2087 ulp_word4); 2088 2089 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2090 if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4)) 2091 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2092 NLP_EVT_CMPL_PLOGI); 2093 2094 /* If a PLOGI collision occurred, the node needs to continue 2095 * with the reglogin process. 2096 */ 2097 spin_lock_irq(&ndlp->lock); 2098 if ((ndlp->nlp_flag & (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI)) && 2099 ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) { 2100 spin_unlock_irq(&ndlp->lock); 2101 goto out; 2102 } 2103 2104 /* No PLOGI collision and the node is not registered with the 2105 * scsi or nvme transport. It is no longer an active node. Just 2106 * start the device remove process. 
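 * The NLP_IN_DEV_LOSS test below keeps this path from racing with the
 * dev-loss timeout work: if dev-loss handling is already pending, that
 * path is left to drop the final node reference and DEVICE_RM is not
 * posted from here.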
2107 */ 2108 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { 2109 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2110 if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) 2111 release_node = true; 2112 } 2113 spin_unlock_irq(&ndlp->lock); 2114 2115 if (release_node) 2116 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2117 NLP_EVT_DEVICE_RM); 2118 } else { 2119 /* Good status, call state machine */ 2120 prsp = list_entry(cmdiocb->cmd_dmabuf->list.next, 2121 struct lpfc_dmabuf, list); 2122 ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp); 2123 2124 sp = (struct serv_parm *)((u8 *)prsp->virt + 2125 sizeof(u32)); 2126 2127 ndlp->vmid_support = 0; 2128 if ((phba->cfg_vmid_app_header && sp->cmn.app_hdr_support) || 2129 (phba->cfg_vmid_priority_tagging && 2130 sp->cmn.priority_tagging)) { 2131 lpfc_printf_log(phba, KERN_DEBUG, LOG_ELS, 2132 "4018 app_hdr_support %d tagging %d DID x%x\n", 2133 sp->cmn.app_hdr_support, 2134 sp->cmn.priority_tagging, 2135 ndlp->nlp_DID); 2136 /* if the dest port supports VMID, mark it in ndlp */ 2137 ndlp->vmid_support = 1; 2138 } 2139 2140 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2141 NLP_EVT_CMPL_PLOGI); 2142 } 2143 2144 if (disc && vport->num_disc_nodes) { 2145 /* Check to see if there are more PLOGIs to be sent */ 2146 lpfc_more_plogi(vport); 2147 2148 if (vport->num_disc_nodes == 0) { 2149 spin_lock_irq(shost->host_lock); 2150 vport->fc_flag &= ~FC_NDISC_ACTIVE; 2151 spin_unlock_irq(shost->host_lock); 2152 2153 lpfc_can_disctmo(vport); 2154 lpfc_end_rscn(vport); 2155 } 2156 } 2157 2158 out: 2159 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, 2160 "PLOGI Cmpl PUT: did:x%x refcnt %d", 2161 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2162 2163 out_freeiocb: 2164 /* Release the reference on the original I/O request. */ 2165 free_ndlp = cmdiocb->ndlp; 2166 2167 lpfc_els_free_iocb(phba, cmdiocb); 2168 lpfc_nlp_put(free_ndlp); 2169 return; 2170 } 2171 2172 /** 2173 * lpfc_issue_els_plogi - Issue an plogi iocb command for a vport 2174 * @vport: pointer to a host virtual N_Port data structure. 2175 * @did: destination port identifier. 2176 * @retry: number of retries to the command IOCB. 2177 * 2178 * This routine issues a Port Login (PLOGI) command to a remote N_Port 2179 * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port, 2180 * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list. 2181 * This routine constructs the proper fields of the PLOGI IOCB and invokes 2182 * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command. 2183 * 2184 * Note that the ndlp reference count will be incremented by 1 for holding 2185 * the ndlp and the reference to ndlp will be stored into the ndlp field 2186 * of the IOCB for the completion callback function to the PLOGI ELS command. 2187 * 2188 * Return code 2189 * 0 - Successfully issued a plogi for @vport 2190 * 1 - failed to issue a plogi for @vport 2191 **/ 2192 int 2193 lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) 2194 { 2195 struct lpfc_hba *phba = vport->phba; 2196 struct serv_parm *sp; 2197 struct lpfc_nodelist *ndlp; 2198 struct lpfc_iocbq *elsiocb; 2199 uint8_t *pcmd; 2200 uint16_t cmdsize; 2201 int ret; 2202 2203 ndlp = lpfc_findnode_did(vport, did); 2204 if (!ndlp) 2205 return 1; 2206 2207 /* Defer the processing of the issue PLOGI until after the 2208 * outstanding UNREG_RPI mbox command completes, unless we 2209 * are going offline. 
This logic does not apply for Fabric DIDs 2210 */ 2211 if ((ndlp->nlp_flag & (NLP_IGNR_REG_CMPL | NLP_UNREG_INP)) && 2212 ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) && 2213 !(vport->fc_flag & FC_OFFLINE_MODE)) { 2214 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2215 "4110 Issue PLOGI x%x deferred " 2216 "on NPort x%x rpi x%x flg x%x Data:" 2217 " x%px\n", 2218 ndlp->nlp_defer_did, ndlp->nlp_DID, 2219 ndlp->nlp_rpi, ndlp->nlp_flag, ndlp); 2220 2221 /* We can only defer 1st PLOGI */ 2222 if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING) 2223 ndlp->nlp_defer_did = did; 2224 return 0; 2225 } 2226 2227 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 2228 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 2229 ELS_CMD_PLOGI); 2230 if (!elsiocb) 2231 return 1; 2232 2233 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 2234 2235 /* For PLOGI request, remainder of payload is service parameters */ 2236 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI; 2237 pcmd += sizeof(uint32_t); 2238 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); 2239 sp = (struct serv_parm *) pcmd; 2240 2241 /* 2242 * If we are a N-port connected to a Fabric, fix-up paramm's so logins 2243 * to device on remote loops work. 2244 */ 2245 if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP)) 2246 sp->cmn.altBbCredit = 1; 2247 2248 if (sp->cmn.fcphLow < FC_PH_4_3) 2249 sp->cmn.fcphLow = FC_PH_4_3; 2250 2251 if (sp->cmn.fcphHigh < FC_PH3) 2252 sp->cmn.fcphHigh = FC_PH3; 2253 2254 sp->cmn.valid_vendor_ver_level = 0; 2255 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); 2256 sp->cmn.bbRcvSizeMsb &= 0xF; 2257 2258 /* Check if the destination port supports VMID */ 2259 ndlp->vmid_support = 0; 2260 if (vport->vmid_priority_tagging) 2261 sp->cmn.priority_tagging = 1; 2262 else if (phba->cfg_vmid_app_header && 2263 bf_get(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags)) 2264 sp->cmn.app_hdr_support = 1; 2265 2266 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2267 "Issue PLOGI: did:x%x", 2268 did, 0, 0); 2269 2270 /* If our firmware supports this feature, convey that 2271 * information to the target using the vendor specific field. 2272 */ 2273 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) { 2274 sp->cmn.valid_vendor_ver_level = 1; 2275 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID); 2276 sp->un.vv.flags = cpu_to_be32(LPFC_VV_SUPPRESS_RSP); 2277 } 2278 2279 phba->fc_stat.elsXmitPLOGI++; 2280 elsiocb->cmd_cmpl = lpfc_cmpl_els_plogi; 2281 2282 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2283 "Issue PLOGI: did:x%x refcnt %d", 2284 did, kref_read(&ndlp->kref), 0); 2285 elsiocb->ndlp = lpfc_nlp_get(ndlp); 2286 if (!elsiocb->ndlp) { 2287 lpfc_els_free_iocb(phba, elsiocb); 2288 return 1; 2289 } 2290 2291 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2292 if (ret) { 2293 lpfc_els_free_iocb(phba, elsiocb); 2294 lpfc_nlp_put(ndlp); 2295 return 1; 2296 } 2297 2298 return 0; 2299 } 2300 2301 /** 2302 * lpfc_cmpl_els_prli - Completion callback function for prli 2303 * @phba: pointer to lpfc hba data structure. 2304 * @cmdiocb: pointer to lpfc command iocb data structure. 2305 * @rspiocb: pointer to lpfc response iocb data structure. 2306 * 2307 * This routine is the completion callback function for a Process Login 2308 * (PRLI) ELS command. The PRLI response IOCB status is checked for error 2309 * status. If there is error status reported, PRLI retry shall be attempted 2310 * by invoking the lpfc_els_retry() routine. 
Otherwise, the state 2311 * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this 2312 * ndlp to mark the PRLI completion. 2313 **/ 2314 static void 2315 lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2316 struct lpfc_iocbq *rspiocb) 2317 { 2318 struct lpfc_vport *vport = cmdiocb->vport; 2319 struct lpfc_nodelist *ndlp; 2320 char *mode; 2321 u32 loglevel; 2322 u32 ulp_status; 2323 u32 ulp_word4; 2324 bool release_node = false; 2325 2326 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2327 cmdiocb->rsp_iocb = rspiocb; 2328 2329 ndlp = cmdiocb->ndlp; 2330 2331 ulp_status = get_job_ulpstatus(phba, rspiocb); 2332 ulp_word4 = get_job_word4(phba, rspiocb); 2333 2334 spin_lock_irq(&ndlp->lock); 2335 ndlp->nlp_flag &= ~NLP_PRLI_SND; 2336 2337 /* Driver supports multiple FC4 types. Counters matter. */ 2338 vport->fc_prli_sent--; 2339 ndlp->fc4_prli_sent--; 2340 spin_unlock_irq(&ndlp->lock); 2341 2342 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2343 "PRLI cmpl: status:x%x/x%x did:x%x", 2344 ulp_status, ulp_word4, 2345 ndlp->nlp_DID); 2346 2347 /* PRLI completes to NPort <nlp_DID> */ 2348 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2349 "0103 PRLI completes to NPort x%06x " 2350 "Data: x%x x%x x%x x%x\n", 2351 ndlp->nlp_DID, ulp_status, ulp_word4, 2352 vport->num_disc_nodes, ndlp->fc4_prli_sent); 2353 2354 /* Check to see if link went down during discovery */ 2355 if (lpfc_els_chk_latt(vport)) 2356 goto out; 2357 2358 if (ulp_status) { 2359 /* Check for retry */ 2360 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2361 /* ELS command is being retried */ 2362 goto out; 2363 } 2364 2365 /* If we don't send GFT_ID to Fabric, a PRLI error 2366 * could be expected. 2367 */ 2368 if ((vport->fc_flag & FC_FABRIC) || 2369 (vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH)) { 2370 mode = KERN_ERR; 2371 loglevel = LOG_TRACE_EVENT; 2372 } else { 2373 mode = KERN_INFO; 2374 loglevel = LOG_ELS; 2375 } 2376 2377 /* PRLI failed */ 2378 lpfc_printf_vlog(vport, mode, loglevel, 2379 "2754 PRLI failure DID:%06X Status:x%x/x%x, " 2380 "data: x%x x%x\n", 2381 ndlp->nlp_DID, ulp_status, 2382 ulp_word4, ndlp->nlp_state, 2383 ndlp->fc4_prli_sent); 2384 2385 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2386 if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4)) 2387 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2388 NLP_EVT_CMPL_PRLI); 2389 2390 /* The following condition catches an inflight transition 2391 * mismatch typically caused by an RSCN. Skip any 2392 * processing to allow recovery. 2393 */ 2394 if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE && 2395 ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE) { 2396 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, 2397 "2784 PRLI cmpl: state mismatch " 2398 "DID x%06x nstate x%x nflag x%x\n", 2399 ndlp->nlp_DID, ndlp->nlp_state, 2400 ndlp->nlp_flag); 2401 goto out; 2402 } 2403 2404 /* 2405 * For P2P topology, retain the node so that PLOGI can be 2406 * attempted on it again. 2407 */ 2408 if (vport->fc_flag & FC_PT2PT) 2409 goto out; 2410 2411 /* As long as this node is not registered with the SCSI 2412 * or NVMe transport and no other PRLIs are outstanding, 2413 * it is no longer an active node. Otherwise devloss 2414 * handles the final cleanup. 
2415 */ 2416 spin_lock_irq(&ndlp->lock); 2417 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && 2418 !ndlp->fc4_prli_sent) { 2419 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2420 if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) 2421 release_node = true; 2422 } 2423 spin_unlock_irq(&ndlp->lock); 2424 2425 if (release_node) 2426 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2427 NLP_EVT_DEVICE_RM); 2428 } else { 2429 /* Good status, call state machine. However, if another 2430 * PRLI is outstanding, don't call the state machine 2431 * because final disposition to Mapped or Unmapped is 2432 * completed there. 2433 */ 2434 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2435 NLP_EVT_CMPL_PRLI); 2436 } 2437 2438 out: 2439 lpfc_els_free_iocb(phba, cmdiocb); 2440 lpfc_nlp_put(ndlp); 2441 return; 2442 } 2443 2444 /** 2445 * lpfc_issue_els_prli - Issue a prli iocb command for a vport 2446 * @vport: pointer to a host virtual N_Port data structure. 2447 * @ndlp: pointer to a node-list data structure. 2448 * @retry: number of retries to the command IOCB. 2449 * 2450 * This routine issues a Process Login (PRLI) ELS command for the 2451 * @vport. The PRLI service parameters are set up in the payload of the 2452 * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine 2453 * is put to the IOCB completion callback func field before invoking the 2454 * routine lpfc_sli_issue_iocb() to send out PRLI command. 2455 * 2456 * Note that the ndlp reference count will be incremented by 1 for holding the 2457 * ndlp and the reference to ndlp will be stored into the ndlp field of 2458 * the IOCB for the completion callback function to the PRLI ELS command. 2459 * 2460 * Return code 2461 * 0 - successfully issued prli iocb command for @vport 2462 * 1 - failed to issue prli iocb command for @vport 2463 **/ 2464 int 2465 lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2466 uint8_t retry) 2467 { 2468 int rc = 0; 2469 struct lpfc_hba *phba = vport->phba; 2470 PRLI *npr; 2471 struct lpfc_nvme_prli *npr_nvme; 2472 struct lpfc_iocbq *elsiocb; 2473 uint8_t *pcmd; 2474 uint16_t cmdsize; 2475 u32 local_nlp_type, elscmd; 2476 2477 /* 2478 * If we are in RSCN mode, the FC4 types supported from a 2479 * previous GFT_ID command may not be accurate. So, if we 2480 * are a NVME Initiator, always look for the possibility of 2481 * the remote NPort beng a NVME Target. 2482 */ 2483 if (phba->sli_rev == LPFC_SLI_REV4 && 2484 vport->fc_flag & FC_RSCN_MODE && 2485 vport->nvmei_support) 2486 ndlp->nlp_fc4_type |= NLP_FC4_NVME; 2487 local_nlp_type = ndlp->nlp_fc4_type; 2488 2489 /* This routine will issue 1 or 2 PRLIs, so zero all the ndlp 2490 * fields here before any of them can complete. 2491 */ 2492 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); 2493 ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR); 2494 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 2495 ndlp->nlp_flag &= ~(NLP_FIRSTBURST | NLP_NPR_2B_DISC); 2496 ndlp->nvme_fb_size = 0; 2497 2498 send_next_prli: 2499 if (local_nlp_type & NLP_FC4_FCP) { 2500 /* Payload is 4 + 16 = 20 x14 bytes. */ 2501 cmdsize = (sizeof(uint32_t) + sizeof(PRLI)); 2502 elscmd = ELS_CMD_PRLI; 2503 } else if (local_nlp_type & NLP_FC4_NVME) { 2504 /* Payload is 4 + 20 = 24 x18 bytes. 
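 * (x18 is the size in hex). When a node supports both FCP and NVMe,
 * control returns to send_next_prli after the first PRLI is queued, so a
 * single call to this routine can emit one PRLI of each type.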
*/ 2505 cmdsize = (sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli)); 2506 elscmd = ELS_CMD_NVMEPRLI; 2507 } else { 2508 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2509 "3083 Unknown FC_TYPE x%x ndlp x%06x\n", 2510 ndlp->nlp_fc4_type, ndlp->nlp_DID); 2511 return 1; 2512 } 2513 2514 /* SLI3 ports don't support NVME. If this rport is a strict NVME 2515 * FC4 type, implicitly LOGO. 2516 */ 2517 if (phba->sli_rev == LPFC_SLI_REV3 && 2518 ndlp->nlp_fc4_type == NLP_FC4_NVME) { 2519 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2520 "3088 Rport fc4 type 0x%x not supported by SLI3 adapter\n", 2521 ndlp->nlp_type); 2522 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 2523 return 1; 2524 } 2525 2526 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2527 ndlp->nlp_DID, elscmd); 2528 if (!elsiocb) 2529 return 1; 2530 2531 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 2532 2533 /* For PRLI request, remainder of payload is service parameters */ 2534 memset(pcmd, 0, cmdsize); 2535 2536 if (local_nlp_type & NLP_FC4_FCP) { 2537 /* Remainder of payload is FCP PRLI parameter page. 2538 * Note: this data structure is defined as 2539 * BE/LE in the structure definition so no 2540 * byte swap call is made. 2541 */ 2542 *((uint32_t *)(pcmd)) = ELS_CMD_PRLI; 2543 pcmd += sizeof(uint32_t); 2544 npr = (PRLI *)pcmd; 2545 2546 /* 2547 * If our firmware version is 3.20 or later, 2548 * set the following bits for FC-TAPE support. 2549 */ 2550 if (phba->vpd.rev.feaLevelHigh >= 0x02) { 2551 npr->ConfmComplAllowed = 1; 2552 npr->Retry = 1; 2553 npr->TaskRetryIdReq = 1; 2554 } 2555 npr->estabImagePair = 1; 2556 npr->readXferRdyDis = 1; 2557 if (vport->cfg_first_burst_size) 2558 npr->writeXferRdyDis = 1; 2559 2560 /* For FCP support */ 2561 npr->prliType = PRLI_FCP_TYPE; 2562 npr->initiatorFunc = 1; 2563 elsiocb->cmd_flag |= LPFC_PRLI_FCP_REQ; 2564 2565 /* Remove FCP type - processed. */ 2566 local_nlp_type &= ~NLP_FC4_FCP; 2567 } else if (local_nlp_type & NLP_FC4_NVME) { 2568 /* Remainder of payload is NVME PRLI parameter page. 2569 * This data structure is the newer definition that 2570 * uses bf macros so a byte swap is required. 2571 */ 2572 *((uint32_t *)(pcmd)) = ELS_CMD_NVMEPRLI; 2573 pcmd += sizeof(uint32_t); 2574 npr_nvme = (struct lpfc_nvme_prli *)pcmd; 2575 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); 2576 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ 2577 if (phba->nsler) { 2578 bf_set(prli_nsler, npr_nvme, 1); 2579 bf_set(prli_conf, npr_nvme, 1); 2580 } 2581 2582 /* Only initiators request first burst. */ 2583 if ((phba->cfg_nvme_enable_fb) && 2584 !phba->nvmet_support) 2585 bf_set(prli_fba, npr_nvme, 1); 2586 2587 if (phba->nvmet_support) { 2588 bf_set(prli_tgt, npr_nvme, 1); 2589 bf_set(prli_disc, npr_nvme, 1); 2590 } else { 2591 bf_set(prli_init, npr_nvme, 1); 2592 bf_set(prli_conf, npr_nvme, 1); 2593 } 2594 2595 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); 2596 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); 2597 elsiocb->cmd_flag |= LPFC_PRLI_NVME_REQ; 2598 2599 /* Remove NVME type - processed. 
*/ 2600 local_nlp_type &= ~NLP_FC4_NVME; 2601 } 2602 2603 phba->fc_stat.elsXmitPRLI++; 2604 elsiocb->cmd_cmpl = lpfc_cmpl_els_prli; 2605 2606 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2607 "Issue PRLI: did:x%x refcnt %d", 2608 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2609 elsiocb->ndlp = lpfc_nlp_get(ndlp); 2610 if (!elsiocb->ndlp) { 2611 lpfc_els_free_iocb(phba, elsiocb); 2612 return 1; 2613 } 2614 2615 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2616 if (rc == IOCB_ERROR) { 2617 lpfc_els_free_iocb(phba, elsiocb); 2618 lpfc_nlp_put(ndlp); 2619 return 1; 2620 } 2621 2622 /* The vport counters are used for lpfc_scan_finished, but 2623 * the ndlp is used to track outstanding PRLIs for different 2624 * FC4 types. 2625 */ 2626 spin_lock_irq(&ndlp->lock); 2627 ndlp->nlp_flag |= NLP_PRLI_SND; 2628 vport->fc_prli_sent++; 2629 ndlp->fc4_prli_sent++; 2630 spin_unlock_irq(&ndlp->lock); 2631 2632 /* The driver supports 2 FC4 types. Make sure 2633 * a PRLI is issued for all types before exiting. 2634 */ 2635 if (phba->sli_rev == LPFC_SLI_REV4 && 2636 local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME)) 2637 goto send_next_prli; 2638 else 2639 return 0; 2640 } 2641 2642 /** 2643 * lpfc_rscn_disc - Perform rscn discovery for a vport 2644 * @vport: pointer to a host virtual N_Port data structure. 2645 * 2646 * This routine performs Registration State Change Notification (RSCN) 2647 * discovery for a @vport. If the @vport's node port recovery count is not 2648 * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all 2649 * the nodes that need recovery. If none of the PLOGI were needed through 2650 * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be 2651 * invoked to check and handle possible more RSCN came in during the period 2652 * of processing the current ones. 2653 **/ 2654 static void 2655 lpfc_rscn_disc(struct lpfc_vport *vport) 2656 { 2657 lpfc_can_disctmo(vport); 2658 2659 /* RSCN discovery */ 2660 /* go thru NPR nodes and issue ELS PLOGIs */ 2661 if (vport->fc_npr_cnt) 2662 if (lpfc_els_disc_plogi(vport)) 2663 return; 2664 2665 lpfc_end_rscn(vport); 2666 } 2667 2668 /** 2669 * lpfc_adisc_done - Complete the adisc phase of discovery 2670 * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs. 2671 * 2672 * This function is called when the final ADISC is completed during discovery. 2673 * This function handles clearing link attention or issuing reg_vpi depending 2674 * on whether npiv is enabled. This function also kicks off the PLOGI phase of 2675 * discovery. 2676 * This function is called with no locks held. 2677 **/ 2678 static void 2679 lpfc_adisc_done(struct lpfc_vport *vport) 2680 { 2681 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2682 struct lpfc_hba *phba = vport->phba; 2683 2684 /* 2685 * For NPIV, cmpl_reg_vpi will set port_state to READY, 2686 * and continue discovery. 2687 */ 2688 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 2689 !(vport->fc_flag & FC_RSCN_MODE) && 2690 (phba->sli_rev < LPFC_SLI_REV4)) { 2691 2692 /* 2693 * If link is down, clear_la and reg_vpi will be done after 2694 * flogi following a link up event 2695 */ 2696 if (!lpfc_is_link_up(phba)) 2697 return; 2698 2699 /* The ADISCs are complete. Doesn't matter if they 2700 * succeeded or failed because the ADISC completion 2701 * routine guarantees to call the state machine and 2702 * the RPI is either unregistered (failed ADISC response) 2703 * or the RPI is still valid and the node is marked 2704 * mapped for a target. 
The exchanges should be in the 2705 * correct state. This code is specific to SLI3. 2706 */ 2707 lpfc_issue_clear_la(phba, vport); 2708 lpfc_issue_reg_vpi(phba, vport); 2709 return; 2710 } 2711 /* 2712 * For SLI2, we need to set port_state to READY 2713 * and continue discovery. 2714 */ 2715 if (vport->port_state < LPFC_VPORT_READY) { 2716 /* If we get here, there is nothing to ADISC */ 2717 lpfc_issue_clear_la(phba, vport); 2718 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) { 2719 vport->num_disc_nodes = 0; 2720 /* go thru NPR list, issue ELS PLOGIs */ 2721 if (vport->fc_npr_cnt) 2722 lpfc_els_disc_plogi(vport); 2723 if (!vport->num_disc_nodes) { 2724 spin_lock_irq(shost->host_lock); 2725 vport->fc_flag &= ~FC_NDISC_ACTIVE; 2726 spin_unlock_irq(shost->host_lock); 2727 lpfc_can_disctmo(vport); 2728 lpfc_end_rscn(vport); 2729 } 2730 } 2731 vport->port_state = LPFC_VPORT_READY; 2732 } else 2733 lpfc_rscn_disc(vport); 2734 } 2735 2736 /** 2737 * lpfc_more_adisc - Issue more adisc as needed 2738 * @vport: pointer to a host virtual N_Port data structure. 2739 * 2740 * This routine determines whether there are more ndlps on a @vport 2741 * node list need to have Address Discover (ADISC) issued. If so, it will 2742 * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's 2743 * remaining nodes which need to have ADISC sent. 2744 **/ 2745 void 2746 lpfc_more_adisc(struct lpfc_vport *vport) 2747 { 2748 if (vport->num_disc_nodes) 2749 vport->num_disc_nodes--; 2750 /* Continue discovery with <num_disc_nodes> ADISCs to go */ 2751 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2752 "0210 Continue discovery with %d ADISCs to go " 2753 "Data: x%x x%x x%x\n", 2754 vport->num_disc_nodes, vport->fc_adisc_cnt, 2755 vport->fc_flag, vport->port_state); 2756 /* Check to see if there are more ADISCs to be sent */ 2757 if (vport->fc_flag & FC_NLP_MORE) { 2758 lpfc_set_disctmo(vport); 2759 /* go thru NPR nodes and issue any remaining ELS ADISCs */ 2760 lpfc_els_disc_adisc(vport); 2761 } 2762 if (!vport->num_disc_nodes) 2763 lpfc_adisc_done(vport); 2764 return; 2765 } 2766 2767 /** 2768 * lpfc_cmpl_els_adisc - Completion callback function for adisc 2769 * @phba: pointer to lpfc hba data structure. 2770 * @cmdiocb: pointer to lpfc command iocb data structure. 2771 * @rspiocb: pointer to lpfc response iocb data structure. 2772 * 2773 * This routine is the completion function for issuing the Address Discover 2774 * (ADISC) command. It first checks to see whether link went down during 2775 * the discovery process. If so, the node will be marked as node port 2776 * recovery for issuing discover IOCB by the link attention handler and 2777 * exit. Otherwise, the response status is checked. If error was reported 2778 * in the response status, the ADISC command shall be retried by invoking 2779 * the lpfc_els_retry() routine. Otherwise, if no error was reported in 2780 * the response status, the state machine is invoked to set transition 2781 * with respect to NLP_EVT_CMPL_ADISC event. 
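 *
 * The handling mirrors lpfc_cmpl_els_plogi(): retry through
 * lpfc_els_retry(), post NLP_EVT_CMPL_ADISC to the DSM, post
 * NLP_EVT_DEVICE_RM when the node is no longer registered with a
 * transport, and call lpfc_more_adisc() while discovery nodes remain
 * outstanding.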
2782 **/ 2783 static void 2784 lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2785 struct lpfc_iocbq *rspiocb) 2786 { 2787 struct lpfc_vport *vport = cmdiocb->vport; 2788 IOCB_t *irsp; 2789 struct lpfc_nodelist *ndlp; 2790 int disc; 2791 u32 ulp_status, ulp_word4, tmo; 2792 bool release_node = false; 2793 2794 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2795 cmdiocb->rsp_iocb = rspiocb; 2796 2797 ndlp = cmdiocb->ndlp; 2798 2799 ulp_status = get_job_ulpstatus(phba, rspiocb); 2800 ulp_word4 = get_job_word4(phba, rspiocb); 2801 2802 if (phba->sli_rev == LPFC_SLI_REV4) { 2803 tmo = get_wqe_tmo(cmdiocb); 2804 } else { 2805 irsp = &rspiocb->iocb; 2806 tmo = irsp->ulpTimeout; 2807 } 2808 2809 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2810 "ADISC cmpl: status:x%x/x%x did:x%x", 2811 ulp_status, ulp_word4, 2812 ndlp->nlp_DID); 2813 2814 /* Since ndlp can be freed in the disc state machine, note if this node 2815 * is being used during discovery. 2816 */ 2817 spin_lock_irq(&ndlp->lock); 2818 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 2819 ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC); 2820 spin_unlock_irq(&ndlp->lock); 2821 /* ADISC completes to NPort <nlp_DID> */ 2822 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2823 "0104 ADISC completes to NPort x%x " 2824 "Data: x%x x%x x%x x%x x%x\n", 2825 ndlp->nlp_DID, ulp_status, ulp_word4, 2826 tmo, disc, vport->num_disc_nodes); 2827 /* Check to see if link went down during discovery */ 2828 if (lpfc_els_chk_latt(vport)) { 2829 spin_lock_irq(&ndlp->lock); 2830 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2831 spin_unlock_irq(&ndlp->lock); 2832 goto out; 2833 } 2834 2835 if (ulp_status) { 2836 /* Check for retry */ 2837 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2838 /* ELS command is being retried */ 2839 if (disc) { 2840 spin_lock_irq(&ndlp->lock); 2841 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2842 spin_unlock_irq(&ndlp->lock); 2843 lpfc_set_disctmo(vport); 2844 } 2845 goto out; 2846 } 2847 /* ADISC failed */ 2848 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2849 "2755 ADISC failure DID:%06X Status:x%x/x%x\n", 2850 ndlp->nlp_DID, ulp_status, 2851 ulp_word4); 2852 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2853 NLP_EVT_CMPL_ADISC); 2854 2855 /* As long as this node is not registered with the SCSI or NVMe 2856 * transport, it is no longer an active node. Otherwise 2857 * devloss handles the final cleanup. 2858 */ 2859 spin_lock_irq(&ndlp->lock); 2860 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { 2861 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2862 if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) 2863 release_node = true; 2864 } 2865 spin_unlock_irq(&ndlp->lock); 2866 2867 if (release_node) 2868 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2869 NLP_EVT_DEVICE_RM); 2870 } else 2871 /* Good status, call state machine */ 2872 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2873 NLP_EVT_CMPL_ADISC); 2874 2875 /* Check to see if there are more ADISCs to be sent */ 2876 if (disc && vport->num_disc_nodes) 2877 lpfc_more_adisc(vport); 2878 out: 2879 lpfc_els_free_iocb(phba, cmdiocb); 2880 lpfc_nlp_put(ndlp); 2881 return; 2882 } 2883 2884 /** 2885 * lpfc_issue_els_adisc - Issue an address discover iocb to an node on a vport 2886 * @vport: pointer to a virtual N_Port data structure. 2887 * @ndlp: pointer to a node-list data structure. 2888 * @retry: number of retries to the command IOCB. 2889 * 2890 * This routine issues an Address Discover (ADISC) for an @ndlp on a 2891 * @vport. 
It prepares the payload of the ADISC ELS command, updates the 2892 * and states of the ndlp, and invokes the lpfc_sli_issue_iocb() routine 2893 * to issue the ADISC ELS command. 2894 * 2895 * Note that the ndlp reference count will be incremented by 1 for holding the 2896 * ndlp and the reference to ndlp will be stored into the ndlp field of 2897 * the IOCB for the completion callback function to the ADISC ELS command. 2898 * 2899 * Return code 2900 * 0 - successfully issued adisc 2901 * 1 - failed to issue adisc 2902 **/ 2903 int 2904 lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2905 uint8_t retry) 2906 { 2907 int rc = 0; 2908 struct lpfc_hba *phba = vport->phba; 2909 ADISC *ap; 2910 struct lpfc_iocbq *elsiocb; 2911 uint8_t *pcmd; 2912 uint16_t cmdsize; 2913 2914 cmdsize = (sizeof(uint32_t) + sizeof(ADISC)); 2915 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2916 ndlp->nlp_DID, ELS_CMD_ADISC); 2917 if (!elsiocb) 2918 return 1; 2919 2920 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 2921 2922 /* For ADISC request, remainder of payload is service parameters */ 2923 *((uint32_t *) (pcmd)) = ELS_CMD_ADISC; 2924 pcmd += sizeof(uint32_t); 2925 2926 /* Fill in ADISC payload */ 2927 ap = (ADISC *) pcmd; 2928 ap->hardAL_PA = phba->fc_pref_ALPA; 2929 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 2930 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 2931 ap->DID = be32_to_cpu(vport->fc_myDID); 2932 2933 phba->fc_stat.elsXmitADISC++; 2934 elsiocb->cmd_cmpl = lpfc_cmpl_els_adisc; 2935 spin_lock_irq(&ndlp->lock); 2936 ndlp->nlp_flag |= NLP_ADISC_SND; 2937 spin_unlock_irq(&ndlp->lock); 2938 elsiocb->ndlp = lpfc_nlp_get(ndlp); 2939 if (!elsiocb->ndlp) { 2940 lpfc_els_free_iocb(phba, elsiocb); 2941 goto err; 2942 } 2943 2944 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2945 "Issue ADISC: did:x%x refcnt %d", 2946 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2947 2948 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2949 if (rc == IOCB_ERROR) { 2950 lpfc_els_free_iocb(phba, elsiocb); 2951 lpfc_nlp_put(ndlp); 2952 goto err; 2953 } 2954 2955 return 0; 2956 2957 err: 2958 spin_lock_irq(&ndlp->lock); 2959 ndlp->nlp_flag &= ~NLP_ADISC_SND; 2960 spin_unlock_irq(&ndlp->lock); 2961 return 1; 2962 } 2963 2964 /** 2965 * lpfc_cmpl_els_logo - Completion callback function for logo 2966 * @phba: pointer to lpfc hba data structure. 2967 * @cmdiocb: pointer to lpfc command iocb data structure. 2968 * @rspiocb: pointer to lpfc response iocb data structure. 2969 * 2970 * This routine is the completion function for issuing the ELS Logout (LOGO) 2971 * command. If no error status was reported from the LOGO response, the 2972 * state machine of the associated ndlp shall be invoked for transition with 2973 * respect to NLP_EVT_CMPL_LOGO event. 
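 *
 * Unlike most ELS commands, a failed LOGO is never retried here; an ACC,
 * an RJT or no answer at all are treated the same way. For FCP or NVMe
 * target nodes the routine instead restarts discovery so that a later
 * PLOGI can recover the login, unless the link was lost or
 * NLP_TARGET_REMOVE is set.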
2974 **/ 2975 static void 2976 lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2977 struct lpfc_iocbq *rspiocb) 2978 { 2979 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 2980 struct lpfc_vport *vport = ndlp->vport; 2981 IOCB_t *irsp; 2982 unsigned long flags; 2983 uint32_t skip_recovery = 0; 2984 int wake_up_waiter = 0; 2985 u32 ulp_status; 2986 u32 ulp_word4; 2987 u32 tmo; 2988 2989 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2990 cmdiocb->rsp_iocb = rspiocb; 2991 2992 ulp_status = get_job_ulpstatus(phba, rspiocb); 2993 ulp_word4 = get_job_word4(phba, rspiocb); 2994 2995 if (phba->sli_rev == LPFC_SLI_REV4) { 2996 tmo = get_wqe_tmo(cmdiocb); 2997 } else { 2998 irsp = &rspiocb->iocb; 2999 tmo = irsp->ulpTimeout; 3000 } 3001 3002 spin_lock_irq(&ndlp->lock); 3003 ndlp->nlp_flag &= ~NLP_LOGO_SND; 3004 if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) { 3005 wake_up_waiter = 1; 3006 ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO; 3007 } 3008 spin_unlock_irq(&ndlp->lock); 3009 3010 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3011 "LOGO cmpl: status:x%x/x%x did:x%x", 3012 ulp_status, ulp_word4, 3013 ndlp->nlp_DID); 3014 3015 /* LOGO completes to NPort <nlp_DID> */ 3016 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3017 "0105 LOGO completes to NPort x%x " 3018 "refcnt %d nflags x%x Data: x%x x%x x%x x%x\n", 3019 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag, 3020 ulp_status, ulp_word4, 3021 tmo, vport->num_disc_nodes); 3022 3023 if (lpfc_els_chk_latt(vport)) { 3024 skip_recovery = 1; 3025 goto out; 3026 } 3027 3028 /* The LOGO will not be retried on failure. A LOGO was 3029 * issued to the remote rport and a ACC or RJT or no Answer are 3030 * all acceptable. Note the failure and move forward with 3031 * discovery. The PLOGI will retry. 3032 */ 3033 if (ulp_status) { 3034 /* LOGO failed */ 3035 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 3036 "2756 LOGO failure, No Retry DID:%06X " 3037 "Status:x%x/x%x\n", 3038 ndlp->nlp_DID, ulp_status, 3039 ulp_word4); 3040 3041 if (lpfc_error_lost_link(vport, ulp_status, ulp_word4)) 3042 skip_recovery = 1; 3043 } 3044 3045 /* Call state machine. This will unregister the rpi if needed. */ 3046 lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO); 3047 3048 if (skip_recovery) 3049 goto out; 3050 3051 /* The driver sets this flag for an NPIV instance that doesn't want to 3052 * log into the remote port. 3053 */ 3054 if (ndlp->nlp_flag & NLP_TARGET_REMOVE) { 3055 spin_lock_irq(&ndlp->lock); 3056 if (phba->sli_rev == LPFC_SLI_REV4) 3057 ndlp->nlp_flag |= NLP_RELEASE_RPI; 3058 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 3059 spin_unlock_irq(&ndlp->lock); 3060 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 3061 NLP_EVT_DEVICE_RM); 3062 goto out_rsrc_free; 3063 } 3064 3065 out: 3066 /* At this point, the LOGO processing is complete. NOTE: For a 3067 * pt2pt topology, we are assuming the NPortID will only change 3068 * on link up processing. For a LOGO / PLOGI initiated by the 3069 * Initiator, we are assuming the NPortID is not going to change. 3070 */ 3071 3072 if (wake_up_waiter && ndlp->logo_waitq) 3073 wake_up(ndlp->logo_waitq); 3074 /* 3075 * If the node is a target, the handling attempts to recover the port. 3076 * For any other port type, the rpi is unregistered as an implicit 3077 * LOGO. 
3078 */
3079 if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) &&
3080 skip_recovery == 0) {
3081 lpfc_cancel_retry_delay_tmo(vport, ndlp);
3082 spin_lock_irqsave(&ndlp->lock, flags);
3083 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
3084 spin_unlock_irqrestore(&ndlp->lock, flags);
3085 
3086 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3087 "3187 LOGO completes to NPort x%x: Start "
3088 "Recovery Data: x%x x%x x%x x%x\n",
3089 ndlp->nlp_DID, ulp_status,
3090 ulp_word4, tmo,
3091 vport->num_disc_nodes);
3092 
3093 lpfc_els_free_iocb(phba, cmdiocb);
3094 lpfc_nlp_put(ndlp);
3095 
3096 lpfc_disc_start(vport);
3097 return;
3098 }
3099 
3100 /* Cleanup path for failed REG_RPI handling. If REG_RPI fails, the
3101 * driver sends a LOGO to the rport to clean up. For fabric and
3102 * initiator ports, clean up the node as long as the node is not
3103 * registered with the transport.
3104 */
3105 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
3106 spin_lock_irq(&ndlp->lock);
3107 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
3108 spin_unlock_irq(&ndlp->lock);
3109 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
3110 NLP_EVT_DEVICE_RM);
3111 }
3112 out_rsrc_free:
3113 /* Driver is done with the I/O. */
3114 lpfc_els_free_iocb(phba, cmdiocb);
3115 lpfc_nlp_put(ndlp);
3116 }
3117 
3118 /**
3119 * lpfc_issue_els_logo - Issue a LOGO to a node on a vport
3120 * @vport: pointer to a virtual N_Port data structure.
3121 * @ndlp: pointer to a node-list data structure.
3122 * @retry: number of retries to the command IOCB.
3123 *
3124 * This routine constructs and issues an ELS Logout (LOGO) iocb command
3125 * to a remote node, referred to by an @ndlp on a @vport. It constructs the
3126 * payload of the IOCB, properly sets up the @ndlp state, and invokes the
3127 * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command.
3128 *
3129 * Note that the ndlp reference count will be incremented by 1 for holding the
3130 * ndlp and the reference to ndlp will be stored into the ndlp field of
3131 * the IOCB for the completion callback function to the LOGO ELS command.
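 *
 * An illustrative sketch only (hypothetical caller; the exact sequence is
 * owned by the discovery state machine): a caller typically unregisters
 * the RPI and then issues the LOGO, e.g.
 *
 *      lpfc_unreg_rpi(vport, ndlp);
 *      if (lpfc_issue_els_logo(vport, ndlp, 0))
 *              ; /* LOGO was not issued; node state unchanged */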
3132 * 3133 * Callers of this routine are expected to unregister the RPI first 3134 * 3135 * Return code 3136 * 0 - successfully issued logo 3137 * 1 - failed to issue logo 3138 **/ 3139 int 3140 lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 3141 uint8_t retry) 3142 { 3143 struct lpfc_hba *phba = vport->phba; 3144 struct lpfc_iocbq *elsiocb; 3145 uint8_t *pcmd; 3146 uint16_t cmdsize; 3147 int rc; 3148 3149 spin_lock_irq(&ndlp->lock); 3150 if (ndlp->nlp_flag & NLP_LOGO_SND) { 3151 spin_unlock_irq(&ndlp->lock); 3152 return 0; 3153 } 3154 spin_unlock_irq(&ndlp->lock); 3155 3156 cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name); 3157 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3158 ndlp->nlp_DID, ELS_CMD_LOGO); 3159 if (!elsiocb) 3160 return 1; 3161 3162 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 3163 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 3164 pcmd += sizeof(uint32_t); 3165 3166 /* Fill in LOGO payload */ 3167 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 3168 pcmd += sizeof(uint32_t); 3169 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 3170 3171 phba->fc_stat.elsXmitLOGO++; 3172 elsiocb->cmd_cmpl = lpfc_cmpl_els_logo; 3173 spin_lock_irq(&ndlp->lock); 3174 ndlp->nlp_flag |= NLP_LOGO_SND; 3175 ndlp->nlp_flag &= ~NLP_ISSUE_LOGO; 3176 spin_unlock_irq(&ndlp->lock); 3177 elsiocb->ndlp = lpfc_nlp_get(ndlp); 3178 if (!elsiocb->ndlp) { 3179 lpfc_els_free_iocb(phba, elsiocb); 3180 goto err; 3181 } 3182 3183 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3184 "Issue LOGO: did:x%x refcnt %d", 3185 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3186 3187 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3188 if (rc == IOCB_ERROR) { 3189 lpfc_els_free_iocb(phba, elsiocb); 3190 lpfc_nlp_put(ndlp); 3191 goto err; 3192 } 3193 3194 spin_lock_irq(&ndlp->lock); 3195 ndlp->nlp_prev_state = ndlp->nlp_state; 3196 spin_unlock_irq(&ndlp->lock); 3197 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 3198 return 0; 3199 3200 err: 3201 spin_lock_irq(&ndlp->lock); 3202 ndlp->nlp_flag &= ~NLP_LOGO_SND; 3203 spin_unlock_irq(&ndlp->lock); 3204 return 1; 3205 } 3206 3207 /** 3208 * lpfc_cmpl_els_cmd - Completion callback function for generic els command 3209 * @phba: pointer to lpfc hba data structure. 3210 * @cmdiocb: pointer to lpfc command iocb data structure. 3211 * @rspiocb: pointer to lpfc response iocb data structure. 3212 * 3213 * This routine is a generic completion callback function for ELS commands. 3214 * Specifically, it is the callback function which does not need to perform 3215 * any command specific operations. It is currently used by the ELS command 3216 * issuing routines for RSCN, lpfc_issue_els_rscn, and the ELS Fibre Channel 3217 * Address Resolution Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). 3218 * Other than certain debug loggings, this callback function simply invokes the 3219 * lpfc_els_chk_latt() routine to check whether link went down during the 3220 * discovery process. 
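 *
 * Issuing routines that need no command-specific completion handling
 * simply install this callback before sending, for example:
 *
 *      elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd;
 *      rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);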
3221 **/ 3222 static void 3223 lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3224 struct lpfc_iocbq *rspiocb) 3225 { 3226 struct lpfc_vport *vport = cmdiocb->vport; 3227 struct lpfc_nodelist *free_ndlp; 3228 IOCB_t *irsp; 3229 u32 ulp_status, ulp_word4, tmo, did, iotag; 3230 3231 ulp_status = get_job_ulpstatus(phba, rspiocb); 3232 ulp_word4 = get_job_word4(phba, rspiocb); 3233 did = get_job_els_rsp64_did(phba, cmdiocb); 3234 3235 if (phba->sli_rev == LPFC_SLI_REV4) { 3236 tmo = get_wqe_tmo(cmdiocb); 3237 iotag = get_wqe_reqtag(cmdiocb); 3238 } else { 3239 irsp = &rspiocb->iocb; 3240 tmo = irsp->ulpTimeout; 3241 iotag = irsp->ulpIoTag; 3242 } 3243 3244 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3245 "ELS cmd cmpl: status:x%x/x%x did:x%x", 3246 ulp_status, ulp_word4, did); 3247 3248 /* ELS cmd tag <ulpIoTag> completes */ 3249 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3250 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n", 3251 iotag, ulp_status, ulp_word4, tmo); 3252 3253 /* Check to see if link went down during discovery */ 3254 lpfc_els_chk_latt(vport); 3255 3256 free_ndlp = cmdiocb->ndlp; 3257 3258 lpfc_els_free_iocb(phba, cmdiocb); 3259 lpfc_nlp_put(free_ndlp); 3260 } 3261 3262 /** 3263 * lpfc_reg_fab_ctrl_node - RPI register the fabric controller node. 3264 * @vport: pointer to lpfc_vport data structure. 3265 * @fc_ndlp: pointer to the fabric controller (0xfffffd) node. 3266 * 3267 * This routine registers the rpi assigned to the fabric controller 3268 * NPort_ID (0xfffffd) with the port and moves the node to UNMAPPED 3269 * state triggering a registration with the SCSI transport. 3270 * 3271 * This routine is single out because the fabric controller node 3272 * does not receive a PLOGI. This routine is consumed by the 3273 * SCR and RDF ELS commands. Callers are expected to qualify 3274 * with SLI4 first. 3275 **/ 3276 static int 3277 lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp) 3278 { 3279 int rc = 0; 3280 struct lpfc_hba *phba = vport->phba; 3281 struct lpfc_nodelist *ns_ndlp; 3282 LPFC_MBOXQ_t *mbox; 3283 3284 if (fc_ndlp->nlp_flag & NLP_RPI_REGISTERED) 3285 return rc; 3286 3287 ns_ndlp = lpfc_findnode_did(vport, NameServer_DID); 3288 if (!ns_ndlp) 3289 return -ENODEV; 3290 3291 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 3292 "0935 %s: Reg FC RPI x%x on FC DID x%x NSSte: x%x\n", 3293 __func__, fc_ndlp->nlp_rpi, fc_ndlp->nlp_DID, 3294 ns_ndlp->nlp_state); 3295 if (ns_ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 3296 return -ENODEV; 3297 3298 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3299 if (!mbox) { 3300 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3301 "0936 %s: no memory for reg_login " 3302 "Data: x%x x%x x%x x%x\n", __func__, 3303 fc_ndlp->nlp_DID, fc_ndlp->nlp_state, 3304 fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi); 3305 return -ENOMEM; 3306 } 3307 rc = lpfc_reg_rpi(phba, vport->vpi, fc_ndlp->nlp_DID, 3308 (u8 *)&vport->fc_sparam, mbox, fc_ndlp->nlp_rpi); 3309 if (rc) { 3310 rc = -EACCES; 3311 goto out; 3312 } 3313 3314 fc_ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; 3315 mbox->mbox_cmpl = lpfc_mbx_cmpl_fc_reg_login; 3316 mbox->ctx_ndlp = lpfc_nlp_get(fc_ndlp); 3317 if (!mbox->ctx_ndlp) { 3318 rc = -ENOMEM; 3319 goto out; 3320 } 3321 3322 mbox->vport = vport; 3323 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 3324 if (rc == MBX_NOT_FINISHED) { 3325 rc = -ENODEV; 3326 lpfc_nlp_put(fc_ndlp); 3327 goto out; 3328 } 3329 /* Success path. Exit. 
*/ 3330 lpfc_nlp_set_state(vport, fc_ndlp, 3331 NLP_STE_REG_LOGIN_ISSUE); 3332 return 0; 3333 3334 out: 3335 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 3336 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3337 "0938 %s: failed to format reg_login " 3338 "Data: x%x x%x x%x x%x\n", __func__, 3339 fc_ndlp->nlp_DID, fc_ndlp->nlp_state, 3340 fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi); 3341 return rc; 3342 } 3343 3344 /** 3345 * lpfc_cmpl_els_disc_cmd - Completion callback function for Discovery ELS cmd 3346 * @phba: pointer to lpfc hba data structure. 3347 * @cmdiocb: pointer to lpfc command iocb data structure. 3348 * @rspiocb: pointer to lpfc response iocb data structure. 3349 * 3350 * This routine is a generic completion callback function for Discovery ELS cmd. 3351 * Currently used by the ELS command issuing routines for the ELS State Change 3352 * Request (SCR), lpfc_issue_els_scr() and the ELS RDF, lpfc_issue_els_rdf(). 3353 * These commands will be retried once only for ELS timeout errors. 3354 **/ 3355 static void 3356 lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3357 struct lpfc_iocbq *rspiocb) 3358 { 3359 struct lpfc_vport *vport = cmdiocb->vport; 3360 IOCB_t *irsp; 3361 struct lpfc_els_rdf_rsp *prdf; 3362 struct lpfc_dmabuf *pcmd, *prsp; 3363 u32 *pdata; 3364 u32 cmd; 3365 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 3366 u32 ulp_status, ulp_word4, tmo, did, iotag; 3367 3368 ulp_status = get_job_ulpstatus(phba, rspiocb); 3369 ulp_word4 = get_job_word4(phba, rspiocb); 3370 did = get_job_els_rsp64_did(phba, cmdiocb); 3371 3372 if (phba->sli_rev == LPFC_SLI_REV4) { 3373 tmo = get_wqe_tmo(cmdiocb); 3374 iotag = get_wqe_reqtag(cmdiocb); 3375 } else { 3376 irsp = &rspiocb->iocb; 3377 tmo = irsp->ulpTimeout; 3378 iotag = irsp->ulpIoTag; 3379 } 3380 3381 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3382 "ELS cmd cmpl: status:x%x/x%x did:x%x", 3383 ulp_status, ulp_word4, did); 3384 3385 /* ELS cmd tag <ulpIoTag> completes */ 3386 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3387 "0217 ELS cmd tag x%x completes Data: x%x x%x x%x x%x\n", 3388 iotag, ulp_status, ulp_word4, tmo, cmdiocb->retry); 3389 3390 pcmd = cmdiocb->cmd_dmabuf; 3391 if (!pcmd) 3392 goto out; 3393 3394 pdata = (u32 *)pcmd->virt; 3395 if (!pdata) 3396 goto out; 3397 cmd = *pdata; 3398 3399 /* Only 1 retry for ELS Timeout only */ 3400 if (ulp_status == IOSTAT_LOCAL_REJECT && 3401 ((ulp_word4 & IOERR_PARAM_MASK) == 3402 IOERR_SEQUENCE_TIMEOUT)) { 3403 cmdiocb->retry++; 3404 if (cmdiocb->retry <= 1) { 3405 switch (cmd) { 3406 case ELS_CMD_SCR: 3407 lpfc_issue_els_scr(vport, cmdiocb->retry); 3408 break; 3409 case ELS_CMD_EDC: 3410 lpfc_issue_els_edc(vport, cmdiocb->retry); 3411 break; 3412 case ELS_CMD_RDF: 3413 lpfc_issue_els_rdf(vport, cmdiocb->retry); 3414 break; 3415 } 3416 goto out; 3417 } 3418 phba->fc_stat.elsRetryExceeded++; 3419 } 3420 if (cmd == ELS_CMD_EDC) { 3421 /* must be called before checking uplStatus and returning */ 3422 lpfc_cmpl_els_edc(phba, cmdiocb, rspiocb); 3423 return; 3424 } 3425 if (ulp_status) { 3426 /* ELS discovery cmd completes with error */ 3427 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT, 3428 "4203 ELS cmd x%x error: x%x x%X\n", cmd, 3429 ulp_status, ulp_word4); 3430 goto out; 3431 } 3432 3433 /* The RDF response doesn't have any impact on the running driver 3434 * but the notification descriptors are dumped here for support. 
3435 */ 3436 if (cmd == ELS_CMD_RDF) { 3437 int i; 3438 3439 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 3440 if (!prsp) 3441 goto out; 3442 3443 prdf = (struct lpfc_els_rdf_rsp *)prsp->virt; 3444 if (!prdf) 3445 goto out; 3446 3447 for (i = 0; i < ELS_RDF_REG_TAG_CNT && 3448 i < be32_to_cpu(prdf->reg_d1.reg_desc.count); i++) 3449 lpfc_printf_vlog(vport, KERN_INFO, 3450 LOG_ELS | LOG_CGN_MGMT, 3451 "4677 Fabric RDF Notification Grant " 3452 "Data: 0x%08x Reg: %x %x\n", 3453 be32_to_cpu( 3454 prdf->reg_d1.desc_tags[i]), 3455 phba->cgn_reg_signal, 3456 phba->cgn_reg_fpin); 3457 } 3458 3459 out: 3460 /* Check to see if link went down during discovery */ 3461 lpfc_els_chk_latt(vport); 3462 lpfc_els_free_iocb(phba, cmdiocb); 3463 lpfc_nlp_put(ndlp); 3464 return; 3465 } 3466 3467 /** 3468 * lpfc_issue_els_scr - Issue a scr to an node on a vport 3469 * @vport: pointer to a host virtual N_Port data structure. 3470 * @retry: retry counter for the command IOCB. 3471 * 3472 * This routine issues a State Change Request (SCR) to a fabric node 3473 * on a @vport. The remote node is Fabric Controller (0xfffffd). It 3474 * first search the @vport node list to find the matching ndlp. If no such 3475 * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An 3476 * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb() 3477 * routine is invoked to send the SCR IOCB. 3478 * 3479 * Note that the ndlp reference count will be incremented by 1 for holding the 3480 * ndlp and the reference to ndlp will be stored into the ndlp field of 3481 * the IOCB for the completion callback function to the SCR ELS command. 3482 * 3483 * Return code 3484 * 0 - Successfully issued scr command 3485 * 1 - Failed to issue scr command 3486 **/ 3487 int 3488 lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry) 3489 { 3490 int rc = 0; 3491 struct lpfc_hba *phba = vport->phba; 3492 struct lpfc_iocbq *elsiocb; 3493 uint8_t *pcmd; 3494 uint16_t cmdsize; 3495 struct lpfc_nodelist *ndlp; 3496 3497 cmdsize = (sizeof(uint32_t) + sizeof(SCR)); 3498 3499 ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID); 3500 if (!ndlp) { 3501 ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID); 3502 if (!ndlp) 3503 return 1; 3504 lpfc_enqueue_node(vport, ndlp); 3505 } 3506 3507 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3508 ndlp->nlp_DID, ELS_CMD_SCR); 3509 if (!elsiocb) 3510 return 1; 3511 3512 if (phba->sli_rev == LPFC_SLI_REV4) { 3513 rc = lpfc_reg_fab_ctrl_node(vport, ndlp); 3514 if (rc) { 3515 lpfc_els_free_iocb(phba, elsiocb); 3516 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3517 "0937 %s: Failed to reg fc node, rc %d\n", 3518 __func__, rc); 3519 return 1; 3520 } 3521 } 3522 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 3523 3524 *((uint32_t *) (pcmd)) = ELS_CMD_SCR; 3525 pcmd += sizeof(uint32_t); 3526 3527 /* For SCR, remainder of payload is SCR parameter page */ 3528 memset(pcmd, 0, sizeof(SCR)); 3529 ((SCR *) pcmd)->Function = SCR_FUNC_FULL; 3530 3531 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3532 "Issue SCR: did:x%x", 3533 ndlp->nlp_DID, 0, 0); 3534 3535 phba->fc_stat.elsXmitSCR++; 3536 elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd; 3537 elsiocb->ndlp = lpfc_nlp_get(ndlp); 3538 if (!elsiocb->ndlp) { 3539 lpfc_els_free_iocb(phba, elsiocb); 3540 return 1; 3541 } 3542 3543 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3544 "Issue SCR: did:x%x refcnt %d", 3545 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3546 3547 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3548 if 
(rc == IOCB_ERROR) { 3549 lpfc_els_free_iocb(phba, elsiocb); 3550 lpfc_nlp_put(ndlp); 3551 return 1; 3552 } 3553 3554 return 0; 3555 } 3556 3557 /** 3558 * lpfc_issue_els_rscn - Issue an RSCN to the Fabric Controller (Fabric) 3559 * or the other nport (pt2pt). 3560 * @vport: pointer to a host virtual N_Port data structure. 3561 * @retry: number of retries to the command IOCB. 3562 * 3563 * This routine issues a RSCN to the Fabric Controller (DID 0xFFFFFD) 3564 * when connected to a fabric, or to the remote port when connected 3565 * in point-to-point mode. When sent to the Fabric Controller, it will 3566 * replay the RSCN to registered recipients. 3567 * 3568 * Note that the ndlp reference count will be incremented by 1 for holding the 3569 * ndlp and the reference to ndlp will be stored into the ndlp field of 3570 * the IOCB for the completion callback function to the RSCN ELS command. 3571 * 3572 * Return code 3573 * 0 - Successfully issued RSCN command 3574 * 1 - Failed to issue RSCN command 3575 **/ 3576 int 3577 lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry) 3578 { 3579 int rc = 0; 3580 struct lpfc_hba *phba = vport->phba; 3581 struct lpfc_iocbq *elsiocb; 3582 struct lpfc_nodelist *ndlp; 3583 struct { 3584 struct fc_els_rscn rscn; 3585 struct fc_els_rscn_page portid; 3586 } *event; 3587 uint32_t nportid; 3588 uint16_t cmdsize = sizeof(*event); 3589 3590 /* Not supported for private loop */ 3591 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP && 3592 !(vport->fc_flag & FC_PUBLIC_LOOP)) 3593 return 1; 3594 3595 if (vport->fc_flag & FC_PT2PT) { 3596 /* find any mapped nport - that would be the other nport */ 3597 ndlp = lpfc_findnode_mapped(vport); 3598 if (!ndlp) 3599 return 1; 3600 } else { 3601 nportid = FC_FID_FCTRL; 3602 /* find the fabric controller node */ 3603 ndlp = lpfc_findnode_did(vport, nportid); 3604 if (!ndlp) { 3605 /* if one didn't exist, make one */ 3606 ndlp = lpfc_nlp_init(vport, nportid); 3607 if (!ndlp) 3608 return 1; 3609 lpfc_enqueue_node(vport, ndlp); 3610 } 3611 } 3612 3613 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3614 ndlp->nlp_DID, ELS_CMD_RSCN_XMT); 3615 3616 if (!elsiocb) 3617 return 1; 3618 3619 event = elsiocb->cmd_dmabuf->virt; 3620 3621 event->rscn.rscn_cmd = ELS_RSCN; 3622 event->rscn.rscn_page_len = sizeof(struct fc_els_rscn_page); 3623 event->rscn.rscn_plen = cpu_to_be16(cmdsize); 3624 3625 nportid = vport->fc_myDID; 3626 /* appears that page flags must be 0 for fabric to broadcast RSCN */ 3627 event->portid.rscn_page_flags = 0; 3628 event->portid.rscn_fid[0] = (nportid & 0x00FF0000) >> 16; 3629 event->portid.rscn_fid[1] = (nportid & 0x0000FF00) >> 8; 3630 event->portid.rscn_fid[2] = nportid & 0x000000FF; 3631 3632 phba->fc_stat.elsXmitRSCN++; 3633 elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd; 3634 elsiocb->ndlp = lpfc_nlp_get(ndlp); 3635 if (!elsiocb->ndlp) { 3636 lpfc_els_free_iocb(phba, elsiocb); 3637 return 1; 3638 } 3639 3640 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3641 "Issue RSCN: did:x%x", 3642 ndlp->nlp_DID, 0, 0); 3643 3644 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3645 if (rc == IOCB_ERROR) { 3646 lpfc_els_free_iocb(phba, elsiocb); 3647 lpfc_nlp_put(ndlp); 3648 return 1; 3649 } 3650 3651 return 0; 3652 } 3653 3654 /** 3655 * lpfc_issue_els_farpr - Issue a farp to an node on a vport 3656 * @vport: pointer to a host virtual N_Port data structure. 3657 * @nportid: N_Port identifier to the remote node. 3658 * @retry: number of retries to the command IOCB. 
3659 * 3660 * This routine issues a Fibre Channel Address Resolution Response 3661 * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid) 3662 * is passed into the function. It first search the @vport node list to find 3663 * the matching ndlp. If no such ndlp is found, a new ndlp shall be created 3664 * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the 3665 * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command. 3666 * 3667 * Note that the ndlp reference count will be incremented by 1 for holding the 3668 * ndlp and the reference to ndlp will be stored into the ndlp field of 3669 * the IOCB for the completion callback function to the FARPR ELS command. 3670 * 3671 * Return code 3672 * 0 - Successfully issued farpr command 3673 * 1 - Failed to issue farpr command 3674 **/ 3675 static int 3676 lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) 3677 { 3678 int rc = 0; 3679 struct lpfc_hba *phba = vport->phba; 3680 struct lpfc_iocbq *elsiocb; 3681 FARP *fp; 3682 uint8_t *pcmd; 3683 uint32_t *lp; 3684 uint16_t cmdsize; 3685 struct lpfc_nodelist *ondlp; 3686 struct lpfc_nodelist *ndlp; 3687 3688 cmdsize = (sizeof(uint32_t) + sizeof(FARP)); 3689 3690 ndlp = lpfc_findnode_did(vport, nportid); 3691 if (!ndlp) { 3692 ndlp = lpfc_nlp_init(vport, nportid); 3693 if (!ndlp) 3694 return 1; 3695 lpfc_enqueue_node(vport, ndlp); 3696 } 3697 3698 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3699 ndlp->nlp_DID, ELS_CMD_FARPR); 3700 if (!elsiocb) 3701 return 1; 3702 3703 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 3704 3705 *((uint32_t *) (pcmd)) = ELS_CMD_FARPR; 3706 pcmd += sizeof(uint32_t); 3707 3708 /* Fill in FARPR payload */ 3709 fp = (FARP *) (pcmd); 3710 memset(fp, 0, sizeof(FARP)); 3711 lp = (uint32_t *) pcmd; 3712 *lp++ = be32_to_cpu(nportid); 3713 *lp++ = be32_to_cpu(vport->fc_myDID); 3714 fp->Rflags = 0; 3715 fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE); 3716 3717 memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name)); 3718 memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 3719 ondlp = lpfc_findnode_did(vport, nportid); 3720 if (ondlp) { 3721 memcpy(&fp->OportName, &ondlp->nlp_portname, 3722 sizeof(struct lpfc_name)); 3723 memcpy(&fp->OnodeName, &ondlp->nlp_nodename, 3724 sizeof(struct lpfc_name)); 3725 } 3726 3727 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3728 "Issue FARPR: did:x%x", 3729 ndlp->nlp_DID, 0, 0); 3730 3731 phba->fc_stat.elsXmitFARPR++; 3732 elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd; 3733 elsiocb->ndlp = lpfc_nlp_get(ndlp); 3734 if (!elsiocb->ndlp) { 3735 lpfc_els_free_iocb(phba, elsiocb); 3736 return 1; 3737 } 3738 3739 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3740 if (rc == IOCB_ERROR) { 3741 /* The additional lpfc_nlp_put will cause the following 3742 * lpfc_els_free_iocb routine to trigger the release of 3743 * the node. 3744 */ 3745 lpfc_els_free_iocb(phba, elsiocb); 3746 lpfc_nlp_put(ndlp); 3747 return 1; 3748 } 3749 /* This will cause the callback-function lpfc_cmpl_els_cmd to 3750 * trigger the release of the node. 3751 */ 3752 /* Don't release reference count as RDF is likely outstanding */ 3753 return 0; 3754 } 3755 3756 /** 3757 * lpfc_issue_els_rdf - Register for diagnostic functions from the fabric. 3758 * @vport: pointer to a host virtual N_Port data structure. 3759 * @retry: retry counter for the command IOCB. 
3760 * 3761 * This routine issues an ELS RDF to the Fabric Controller to register 3762 * for diagnostic functions. 3763 * 3764 * Note that the ndlp reference count will be incremented by 1 for holding the 3765 * ndlp and the reference to ndlp will be stored into the ndlp field of 3766 * the IOCB for the completion callback function to the RDF ELS command. 3767 * 3768 * Return code 3769 * 0 - Successfully issued rdf command 3770 * 1 - Failed to issue rdf command 3771 **/ 3772 int 3773 lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry) 3774 { 3775 struct lpfc_hba *phba = vport->phba; 3776 struct lpfc_iocbq *elsiocb; 3777 struct lpfc_els_rdf_req *prdf; 3778 struct lpfc_nodelist *ndlp; 3779 uint16_t cmdsize; 3780 int rc; 3781 3782 cmdsize = sizeof(*prdf); 3783 3784 ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID); 3785 if (!ndlp) { 3786 ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID); 3787 if (!ndlp) 3788 return -ENODEV; 3789 lpfc_enqueue_node(vport, ndlp); 3790 } 3791 3792 /* RDF ELS is not required on an NPIV VN_Port. */ 3793 if (vport->port_type == LPFC_NPIV_PORT) 3794 return -EACCES; 3795 3796 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3797 ndlp->nlp_DID, ELS_CMD_RDF); 3798 if (!elsiocb) 3799 return -ENOMEM; 3800 3801 /* Configure the payload for the supported FPIN events. */ 3802 prdf = (struct lpfc_els_rdf_req *)elsiocb->cmd_dmabuf->virt; 3803 memset(prdf, 0, cmdsize); 3804 prdf->rdf.fpin_cmd = ELS_RDF; 3805 prdf->rdf.desc_len = cpu_to_be32(sizeof(struct lpfc_els_rdf_req) - 3806 sizeof(struct fc_els_rdf)); 3807 prdf->reg_d1.reg_desc.desc_tag = cpu_to_be32(ELS_DTAG_FPIN_REGISTER); 3808 prdf->reg_d1.reg_desc.desc_len = cpu_to_be32( 3809 FC_TLV_DESC_LENGTH_FROM_SZ(prdf->reg_d1)); 3810 prdf->reg_d1.reg_desc.count = cpu_to_be32(ELS_RDF_REG_TAG_CNT); 3811 prdf->reg_d1.desc_tags[0] = cpu_to_be32(ELS_DTAG_LNK_INTEGRITY); 3812 prdf->reg_d1.desc_tags[1] = cpu_to_be32(ELS_DTAG_DELIVERY); 3813 prdf->reg_d1.desc_tags[2] = cpu_to_be32(ELS_DTAG_PEER_CONGEST); 3814 prdf->reg_d1.desc_tags[3] = cpu_to_be32(ELS_DTAG_CONGESTION); 3815 3816 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3817 "6444 Xmit RDF to remote NPORT x%x Reg: %x %x\n", 3818 ndlp->nlp_DID, phba->cgn_reg_signal, 3819 phba->cgn_reg_fpin); 3820 3821 phba->cgn_fpin_frequency = LPFC_FPIN_INIT_FREQ; 3822 elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd; 3823 elsiocb->ndlp = lpfc_nlp_get(ndlp); 3824 if (!elsiocb->ndlp) { 3825 lpfc_els_free_iocb(phba, elsiocb); 3826 return -EIO; 3827 } 3828 3829 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3830 "Issue RDF: did:x%x refcnt %d", 3831 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3832 3833 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3834 if (rc == IOCB_ERROR) { 3835 lpfc_els_free_iocb(phba, elsiocb); 3836 lpfc_nlp_put(ndlp); 3837 return -EIO; 3838 } 3839 return 0; 3840 } 3841 3842 /** 3843 * lpfc_els_rcv_rdf - Receive RDF ELS request from the fabric. 3844 * @vport: pointer to a host virtual N_Port data structure. 3845 * @cmdiocb: pointer to lpfc command iocb data structure. 3846 * @ndlp: pointer to a node-list data structure. 3847 * 3848 * A received RDF implies a possible change to fabric supported diagnostic 3849 * functions. This routine sends LS_ACC and then has the Nx_Port issue a new 3850 * RDF request to reregister for supported diagnostic functions. 
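 *
 * The handling amounts to the two calls sketched below, in this order, with
 * a failure of either step reported to the caller as -EIO (an illustrative
 * summary of the body that follows, not additional logic):
 *
 *	lpfc_els_rsp_acc(vport, ELS_CMD_RDF, cmdiocb, ndlp, NULL);
 *	lpfc_issue_els_rdf(vport, 0);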
3851 * 3852 * Return code 3853 * 0 - Success 3854 * -EIO - Failed to process received RDF 3855 **/ 3856 static int 3857 lpfc_els_rcv_rdf(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 3858 struct lpfc_nodelist *ndlp) 3859 { 3860 /* Send LS_ACC */ 3861 if (lpfc_els_rsp_acc(vport, ELS_CMD_RDF, cmdiocb, ndlp, NULL)) { 3862 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3863 "1623 Failed to RDF_ACC from x%x for x%x\n", 3864 ndlp->nlp_DID, vport->fc_myDID); 3865 return -EIO; 3866 } 3867 3868 /* Issue new RDF for reregistering */ 3869 if (lpfc_issue_els_rdf(vport, 0)) { 3870 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3871 "2623 Failed to re register RDF for x%x\n", 3872 vport->fc_myDID); 3873 return -EIO; 3874 } 3875 3876 return 0; 3877 } 3878 3879 /** 3880 * lpfc_least_capable_settings - helper function for EDC rsp processing 3881 * @phba: pointer to lpfc hba data structure. 3882 * @pcgd: pointer to congestion detection descriptor in EDC rsp. 3883 * 3884 * This helper routine determines the least capable setting for 3885 * congestion signals, signal freq, including scale, from the 3886 * congestion detection descriptor in the EDC rsp. The routine 3887 * sets @phba values in preparation for a set_featues mailbox. 3888 **/ 3889 static void 3890 lpfc_least_capable_settings(struct lpfc_hba *phba, 3891 struct fc_diag_cg_sig_desc *pcgd) 3892 { 3893 u32 rsp_sig_cap = 0, drv_sig_cap = 0; 3894 u32 rsp_sig_freq_cyc = 0, rsp_sig_freq_scale = 0; 3895 3896 /* Get rsp signal and frequency capabilities. */ 3897 rsp_sig_cap = be32_to_cpu(pcgd->xmt_signal_capability); 3898 rsp_sig_freq_cyc = be16_to_cpu(pcgd->xmt_signal_frequency.count); 3899 rsp_sig_freq_scale = be16_to_cpu(pcgd->xmt_signal_frequency.units); 3900 3901 /* If the Fport does not support signals. Set FPIN only */ 3902 if (rsp_sig_cap == EDC_CG_SIG_NOTSUPPORTED) 3903 goto out_no_support; 3904 3905 /* Apply the xmt scale to the xmt cycle to get the correct frequency. 3906 * Adapter default is 100 millisSeconds. Convert all xmt cycle values 3907 * to milliSeconds. 3908 */ 3909 switch (rsp_sig_freq_scale) { 3910 case EDC_CG_SIGFREQ_SEC: 3911 rsp_sig_freq_cyc *= MSEC_PER_SEC; 3912 break; 3913 case EDC_CG_SIGFREQ_MSEC: 3914 rsp_sig_freq_cyc = 1; 3915 break; 3916 default: 3917 goto out_no_support; 3918 } 3919 3920 /* Convenient shorthand. */ 3921 drv_sig_cap = phba->cgn_reg_signal; 3922 3923 /* Choose the least capable frequency. */ 3924 if (rsp_sig_freq_cyc > phba->cgn_sig_freq) 3925 phba->cgn_sig_freq = rsp_sig_freq_cyc; 3926 3927 /* Should be some common signals support. Settle on least capable 3928 * signal and adjust FPIN values. Initialize defaults to ease the 3929 * decision. 
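 *
 * Worked example (illustrative only): if the driver registered for
 * EDC_CG_SIG_WARN_ALARM but the F-Port responds with EDC_CG_SIG_WARN_ONLY,
 * the code below settles on EDC_CG_SIG_WARN_ONLY and clears
 * LPFC_CGN_FPIN_WARN, so warnings arrive as hardware signals while alarms
 * remain FPIN-only. Likewise, if the F-Port replied with a 1 second transmit
 * interval while the driver default is 100 milliseconds, the frequency
 * comparison above keeps the slower 1000 millisecond value.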
3930 */ 3931 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM; 3932 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 3933 if (rsp_sig_cap == EDC_CG_SIG_WARN_ONLY && 3934 (drv_sig_cap == EDC_CG_SIG_WARN_ONLY || 3935 drv_sig_cap == EDC_CG_SIG_WARN_ALARM)) { 3936 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY; 3937 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN; 3938 } 3939 if (rsp_sig_cap == EDC_CG_SIG_WARN_ALARM) { 3940 if (drv_sig_cap == EDC_CG_SIG_WARN_ALARM) { 3941 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ALARM; 3942 phba->cgn_reg_fpin = LPFC_CGN_FPIN_NONE; 3943 } 3944 if (drv_sig_cap == EDC_CG_SIG_WARN_ONLY) { 3945 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY; 3946 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN; 3947 } 3948 } 3949 3950 /* We are NOT recording signal frequency in congestion info buffer */ 3951 return; 3952 3953 out_no_support: 3954 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 3955 phba->cgn_sig_freq = 0; 3956 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; 3957 } 3958 3959 DECLARE_ENUM2STR_LOOKUP(lpfc_get_tlv_dtag_nm, fc_ls_tlv_dtag, 3960 FC_LS_TLV_DTAG_INIT); 3961 3962 /** 3963 * lpfc_cmpl_els_edc - Completion callback function for EDC 3964 * @phba: pointer to lpfc hba data structure. 3965 * @cmdiocb: pointer to lpfc command iocb data structure. 3966 * @rspiocb: pointer to lpfc response iocb data structure. 3967 * 3968 * This routine is the completion callback function for issuing the Exchange 3969 * Diagnostic Capabilities (EDC) command. The driver issues an EDC to 3970 * notify the FPort of its Congestion and Link Fault capabilities. This 3971 * routine parses the FPort's response and decides on the least common 3972 * values applicable to both FPort and NPort for Warnings and Alarms that 3973 * are communicated via hardware signals. 3974 **/ 3975 static void 3976 lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3977 struct lpfc_iocbq *rspiocb) 3978 { 3979 IOCB_t *irsp_iocb; 3980 struct fc_els_edc_resp *edc_rsp; 3981 struct fc_tlv_desc *tlv; 3982 struct fc_diag_cg_sig_desc *pcgd; 3983 struct fc_diag_lnkflt_desc *plnkflt; 3984 struct lpfc_dmabuf *pcmd, *prsp; 3985 const char *dtag_nm; 3986 u32 *pdata, dtag; 3987 int desc_cnt = 0, bytes_remain; 3988 bool rcv_cap_desc = false; 3989 struct lpfc_nodelist *ndlp; 3990 u32 ulp_status, ulp_word4, tmo, did, iotag; 3991 3992 ndlp = cmdiocb->ndlp; 3993 3994 ulp_status = get_job_ulpstatus(phba, rspiocb); 3995 ulp_word4 = get_job_word4(phba, rspiocb); 3996 did = get_job_els_rsp64_did(phba, rspiocb); 3997 3998 if (phba->sli_rev == LPFC_SLI_REV4) { 3999 tmo = get_wqe_tmo(rspiocb); 4000 iotag = get_wqe_reqtag(rspiocb); 4001 } else { 4002 irsp_iocb = &rspiocb->iocb; 4003 tmo = irsp_iocb->ulpTimeout; 4004 iotag = irsp_iocb->ulpIoTag; 4005 } 4006 4007 lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD, 4008 "EDC cmpl: status:x%x/x%x did:x%x", 4009 ulp_status, ulp_word4, did); 4010 4011 /* ELS cmd tag <ulpIoTag> completes */ 4012 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 4013 "4201 EDC cmd tag x%x completes Data: x%x x%x x%x\n", 4014 iotag, ulp_status, ulp_word4, tmo); 4015 4016 pcmd = cmdiocb->cmd_dmabuf; 4017 if (!pcmd) 4018 goto out; 4019 4020 pdata = (u32 *)pcmd->virt; 4021 if (!pdata) 4022 goto out; 4023 4024 /* Need to clear signal values, send features MB and RDF with FPIN. 
*/ 4025 if (ulp_status) 4026 goto out; 4027 4028 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 4029 if (!prsp) 4030 goto out; 4031 4032 edc_rsp = prsp->virt; 4033 if (!edc_rsp) 4034 goto out; 4035 4036 /* ELS cmd tag <ulpIoTag> completes */ 4037 lpfc_printf_log(phba, KERN_INFO, 4038 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, 4039 "4676 Fabric EDC Rsp: " 4040 "0x%02x, 0x%08x\n", 4041 edc_rsp->acc_hdr.la_cmd, 4042 be32_to_cpu(edc_rsp->desc_list_len)); 4043 4044 /* 4045 * Payload length in bytes is the response descriptor list 4046 * length minus the 12 bytes of Link Service Request 4047 * Information descriptor in the reply. 4048 */ 4049 bytes_remain = be32_to_cpu(edc_rsp->desc_list_len) - 4050 sizeof(struct fc_els_lsri_desc); 4051 if (bytes_remain <= 0) 4052 goto out; 4053 4054 tlv = edc_rsp->desc; 4055 4056 /* 4057 * cycle through EDC diagnostic descriptors to find the 4058 * congestion signaling capability descriptor 4059 */ 4060 while (bytes_remain) { 4061 if (bytes_remain < FC_TLV_DESC_HDR_SZ) { 4062 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 4063 "6461 Truncated TLV hdr on " 4064 "Diagnostic descriptor[%d]\n", 4065 desc_cnt); 4066 goto out; 4067 } 4068 4069 dtag = be32_to_cpu(tlv->desc_tag); 4070 switch (dtag) { 4071 case ELS_DTAG_LNK_FAULT_CAP: 4072 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 4073 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 4074 sizeof(struct fc_diag_lnkflt_desc)) { 4075 lpfc_printf_log(phba, KERN_WARNING, 4076 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, 4077 "6462 Truncated Link Fault Diagnostic " 4078 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 4079 desc_cnt, bytes_remain, 4080 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 4081 sizeof(struct fc_diag_lnkflt_desc)); 4082 goto out; 4083 } 4084 plnkflt = (struct fc_diag_lnkflt_desc *)tlv; 4085 lpfc_printf_log(phba, KERN_INFO, 4086 LOG_ELS | LOG_LDS_EVENT, 4087 "4617 Link Fault Desc Data: 0x%08x 0x%08x " 4088 "0x%08x 0x%08x 0x%08x\n", 4089 be32_to_cpu(plnkflt->desc_tag), 4090 be32_to_cpu(plnkflt->desc_len), 4091 be32_to_cpu( 4092 plnkflt->degrade_activate_threshold), 4093 be32_to_cpu( 4094 plnkflt->degrade_deactivate_threshold), 4095 be32_to_cpu(plnkflt->fec_degrade_interval)); 4096 break; 4097 case ELS_DTAG_CG_SIGNAL_CAP: 4098 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 4099 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 4100 sizeof(struct fc_diag_cg_sig_desc)) { 4101 lpfc_printf_log( 4102 phba, KERN_WARNING, LOG_CGN_MGMT, 4103 "6463 Truncated Cgn Signal Diagnostic " 4104 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 4105 desc_cnt, bytes_remain, 4106 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 4107 sizeof(struct fc_diag_cg_sig_desc)); 4108 goto out; 4109 } 4110 4111 pcgd = (struct fc_diag_cg_sig_desc *)tlv; 4112 lpfc_printf_log( 4113 phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 4114 "4616 CGN Desc Data: 0x%08x 0x%08x " 4115 "0x%08x 0x%04x 0x%04x 0x%08x 0x%04x 0x%04x\n", 4116 be32_to_cpu(pcgd->desc_tag), 4117 be32_to_cpu(pcgd->desc_len), 4118 be32_to_cpu(pcgd->xmt_signal_capability), 4119 be16_to_cpu(pcgd->xmt_signal_frequency.count), 4120 be16_to_cpu(pcgd->xmt_signal_frequency.units), 4121 be32_to_cpu(pcgd->rcv_signal_capability), 4122 be16_to_cpu(pcgd->rcv_signal_frequency.count), 4123 be16_to_cpu(pcgd->rcv_signal_frequency.units)); 4124 4125 /* Compare driver and Fport capabilities and choose 4126 * least common. 
4127 */ 4128 lpfc_least_capable_settings(phba, pcgd); 4129 rcv_cap_desc = true; 4130 break; 4131 default: 4132 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 4133 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 4134 "4919 unknown Diagnostic " 4135 "Descriptor[%d]: tag x%x (%s)\n", 4136 desc_cnt, dtag, dtag_nm); 4137 } 4138 4139 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); 4140 tlv = fc_tlv_next_desc(tlv); 4141 desc_cnt++; 4142 } 4143 4144 out: 4145 if (!rcv_cap_desc) { 4146 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; 4147 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 4148 phba->cgn_sig_freq = 0; 4149 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT, 4150 "4202 EDC rsp error - sending RDF " 4151 "for FPIN only.\n"); 4152 } 4153 4154 lpfc_config_cgn_signal(phba); 4155 4156 /* Check to see if link went down during discovery */ 4157 lpfc_els_chk_latt(phba->pport); 4158 lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD, 4159 "EDC Cmpl: did:x%x refcnt %d", 4160 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 4161 lpfc_els_free_iocb(phba, cmdiocb); 4162 lpfc_nlp_put(ndlp); 4163 } 4164 4165 static void 4166 lpfc_format_edc_lft_desc(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 4167 { 4168 struct fc_diag_lnkflt_desc *lft = (struct fc_diag_lnkflt_desc *)tlv; 4169 4170 lft->desc_tag = cpu_to_be32(ELS_DTAG_LNK_FAULT_CAP); 4171 lft->desc_len = cpu_to_be32( 4172 FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_diag_lnkflt_desc)); 4173 4174 lft->degrade_activate_threshold = 4175 cpu_to_be32(phba->degrade_activate_threshold); 4176 lft->degrade_deactivate_threshold = 4177 cpu_to_be32(phba->degrade_deactivate_threshold); 4178 lft->fec_degrade_interval = cpu_to_be32(phba->fec_degrade_interval); 4179 } 4180 4181 static void 4182 lpfc_format_edc_cgn_desc(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 4183 { 4184 struct fc_diag_cg_sig_desc *cgd = (struct fc_diag_cg_sig_desc *)tlv; 4185 4186 /* We are assuming cgd was zero'ed before calling this routine */ 4187 4188 /* Configure the congestion detection capability */ 4189 cgd->desc_tag = cpu_to_be32(ELS_DTAG_CG_SIGNAL_CAP); 4190 4191 /* Descriptor len doesn't include the tag or len fields. */ 4192 cgd->desc_len = cpu_to_be32( 4193 FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_diag_cg_sig_desc)); 4194 4195 /* xmt_signal_capability already set to EDC_CG_SIG_NOTSUPPORTED. 4196 * xmt_signal_frequency.count already set to 0. 4197 * xmt_signal_frequency.units already set to 0. 4198 */ 4199 4200 if (phba->cmf_active_mode == LPFC_CFG_OFF) { 4201 /* rcv_signal_capability already set to EDC_CG_SIG_NOTSUPPORTED. 4202 * rcv_signal_frequency.count already set to 0. 4203 * rcv_signal_frequency.units already set to 0. 4204 */ 4205 phba->cgn_sig_freq = 0; 4206 return; 4207 } 4208 switch (phba->cgn_reg_signal) { 4209 case EDC_CG_SIG_WARN_ONLY: 4210 cgd->rcv_signal_capability = cpu_to_be32(EDC_CG_SIG_WARN_ONLY); 4211 break; 4212 case EDC_CG_SIG_WARN_ALARM: 4213 cgd->rcv_signal_capability = cpu_to_be32(EDC_CG_SIG_WARN_ALARM); 4214 break; 4215 default: 4216 /* rcv_signal_capability left 0 thus no support */ 4217 break; 4218 } 4219 4220 /* We start negotiation with lpfc_fabric_cgn_frequency, after 4221 * the completion we settle on the higher frequency. 
4222 */ 4223 cgd->rcv_signal_frequency.count = 4224 cpu_to_be16(lpfc_fabric_cgn_frequency); 4225 cgd->rcv_signal_frequency.units = 4226 cpu_to_be16(EDC_CG_SIGFREQ_MSEC); 4227 } 4228 4229 static bool 4230 lpfc_link_is_lds_capable(struct lpfc_hba *phba) 4231 { 4232 if (!(phba->lmt & LMT_64Gb)) 4233 return false; 4234 if (phba->sli_rev != LPFC_SLI_REV4) 4235 return false; 4236 4237 if (phba->sli4_hba.conf_trunk) { 4238 if (phba->trunk_link.phy_lnk_speed == LPFC_USER_LINK_SPEED_64G) 4239 return true; 4240 } else if (phba->fc_linkspeed == LPFC_LINK_SPEED_64GHZ) { 4241 return true; 4242 } 4243 return false; 4244 } 4245 4246 /** 4247 * lpfc_issue_els_edc - Exchange Diagnostic Capabilities with the fabric. 4248 * @vport: pointer to a host virtual N_Port data structure. 4249 * @retry: retry counter for the command iocb. 4250 * 4251 * This routine issues an ELS EDC to the F-Port Controller to communicate 4252 * this N_Port's support of hardware signals in its Congestion 4253 * Capabilities Descriptor. 4254 * 4255 * Note: This routine does not check if one or more signals are 4256 * set in the cgn_reg_signal parameter. The caller makes the 4257 * decision to enforce cgn_reg_signal as nonzero or zero depending 4258 * on the conditions. During Fabric requests, the driver 4259 * requires cgn_reg_signals to be nonzero. But a dynamic request 4260 * to set the congestion mode to OFF from Monitor or Manage 4261 * would correctly issue an EDC with no signals enabled to 4262 * turn off switch functionality and then update the FW. 4263 * 4264 * Return code 4265 * 0 - Successfully issued edc command 4266 * 1 - Failed to issue edc command 4267 **/ 4268 int 4269 lpfc_issue_els_edc(struct lpfc_vport *vport, uint8_t retry) 4270 { 4271 struct lpfc_hba *phba = vport->phba; 4272 struct lpfc_iocbq *elsiocb; 4273 struct fc_els_edc *edc_req; 4274 struct fc_tlv_desc *tlv; 4275 u16 cmdsize; 4276 struct lpfc_nodelist *ndlp; 4277 u8 *pcmd = NULL; 4278 u32 cgn_desc_size, lft_desc_size; 4279 int rc; 4280 4281 if (vport->port_type == LPFC_NPIV_PORT) 4282 return -EACCES; 4283 4284 ndlp = lpfc_findnode_did(vport, Fabric_DID); 4285 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 4286 return -ENODEV; 4287 4288 cgn_desc_size = (phba->cgn_init_reg_signal) ? 4289 sizeof(struct fc_diag_cg_sig_desc) : 0; 4290 lft_desc_size = (lpfc_link_is_lds_capable(phba)) ? 4291 sizeof(struct fc_diag_lnkflt_desc) : 0; 4292 cmdsize = cgn_desc_size + lft_desc_size; 4293 4294 /* Skip EDC if no applicable descriptors */ 4295 if (!cmdsize) 4296 goto try_rdf; 4297 4298 cmdsize += sizeof(struct fc_els_edc); 4299 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 4300 ndlp->nlp_DID, ELS_CMD_EDC); 4301 if (!elsiocb) 4302 goto try_rdf; 4303 4304 /* Configure the payload for the supported Diagnostics capabilities. 
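 *
 * Illustrative layout of the request being built, matching the cmdsize
 * arithmetic above (a sketch, not additional driver logic):
 *
 *	struct fc_els_edc            header; desc_len = cgn + lft sizes
 *	struct fc_diag_cg_sig_desc   present only if cgn_init_reg_signal
 *	struct fc_diag_lnkflt_desc   present only if the link is LDS capable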
*/ 4305 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt; 4306 memset(pcmd, 0, cmdsize); 4307 edc_req = (struct fc_els_edc *)pcmd; 4308 edc_req->desc_len = cpu_to_be32(cgn_desc_size + lft_desc_size); 4309 edc_req->edc_cmd = ELS_EDC; 4310 tlv = edc_req->desc; 4311 4312 if (cgn_desc_size) { 4313 lpfc_format_edc_cgn_desc(phba, tlv); 4314 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency; 4315 tlv = fc_tlv_next_desc(tlv); 4316 } 4317 4318 if (lft_desc_size) 4319 lpfc_format_edc_lft_desc(phba, tlv); 4320 4321 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 4322 "4623 Xmit EDC to remote " 4323 "NPORT x%x reg_sig x%x reg_fpin:x%x\n", 4324 ndlp->nlp_DID, phba->cgn_reg_signal, 4325 phba->cgn_reg_fpin); 4326 4327 elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd; 4328 elsiocb->ndlp = lpfc_nlp_get(ndlp); 4329 if (!elsiocb->ndlp) { 4330 lpfc_els_free_iocb(phba, elsiocb); 4331 return -EIO; 4332 } 4333 4334 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 4335 "Issue EDC: did:x%x refcnt %d", 4336 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 4337 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 4338 if (rc == IOCB_ERROR) { 4339 /* The additional lpfc_nlp_put will cause the following 4340 * lpfc_els_free_iocb routine to trigger the rlease of 4341 * the node. 4342 */ 4343 lpfc_els_free_iocb(phba, elsiocb); 4344 lpfc_nlp_put(ndlp); 4345 goto try_rdf; 4346 } 4347 return 0; 4348 try_rdf: 4349 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM; 4350 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 4351 rc = lpfc_issue_els_rdf(vport, 0); 4352 return rc; 4353 } 4354 4355 /** 4356 * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry 4357 * @vport: pointer to a host virtual N_Port data structure. 4358 * @nlp: pointer to a node-list data structure. 4359 * 4360 * This routine cancels the timer with a delayed IOCB-command retry for 4361 * a @vport's @ndlp. It stops the timer for the delayed function retrial and 4362 * removes the ELS retry event if it presents. In addition, if the 4363 * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB 4364 * commands are sent for the @vport's nodes that require issuing discovery 4365 * ADISC. 
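 *
 * A hypothetical caller that wants to issue a new ELS command immediately,
 * rather than waiting out the delay timer, might use it as sketched below
 * (illustrative only; the guard mirrors the check done inside the routine):
 *
 *	if (ndlp->nlp_flag & NLP_DELAY_TMO)
 *		lpfc_cancel_retry_delay_tmo(vport, ndlp);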
4366 **/ 4367 void 4368 lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) 4369 { 4370 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4371 struct lpfc_work_evt *evtp; 4372 4373 if (!(nlp->nlp_flag & NLP_DELAY_TMO)) 4374 return; 4375 spin_lock_irq(&nlp->lock); 4376 nlp->nlp_flag &= ~NLP_DELAY_TMO; 4377 spin_unlock_irq(&nlp->lock); 4378 del_timer_sync(&nlp->nlp_delayfunc); 4379 nlp->nlp_last_elscmd = 0; 4380 if (!list_empty(&nlp->els_retry_evt.evt_listp)) { 4381 list_del_init(&nlp->els_retry_evt.evt_listp); 4382 /* Decrement nlp reference count held for the delayed retry */ 4383 evtp = &nlp->els_retry_evt; 4384 lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1); 4385 } 4386 if (nlp->nlp_flag & NLP_NPR_2B_DISC) { 4387 spin_lock_irq(&nlp->lock); 4388 nlp->nlp_flag &= ~NLP_NPR_2B_DISC; 4389 spin_unlock_irq(&nlp->lock); 4390 if (vport->num_disc_nodes) { 4391 if (vport->port_state < LPFC_VPORT_READY) { 4392 /* Check if there are more ADISCs to be sent */ 4393 lpfc_more_adisc(vport); 4394 } else { 4395 /* Check if there are more PLOGIs to be sent */ 4396 lpfc_more_plogi(vport); 4397 if (vport->num_disc_nodes == 0) { 4398 spin_lock_irq(shost->host_lock); 4399 vport->fc_flag &= ~FC_NDISC_ACTIVE; 4400 spin_unlock_irq(shost->host_lock); 4401 lpfc_can_disctmo(vport); 4402 lpfc_end_rscn(vport); 4403 } 4404 } 4405 } 4406 } 4407 return; 4408 } 4409 4410 /** 4411 * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer 4412 * @t: pointer to the timer function associated data (ndlp). 4413 * 4414 * This routine is invoked by the ndlp delayed-function timer to check 4415 * whether there is any pending ELS retry event(s) with the node. If not, it 4416 * simply returns. Otherwise, if there is at least one ELS delayed event, it 4417 * adds the delayed events to the HBA work list and invokes the 4418 * lpfc_worker_wake_up() routine to wake up worker thread to process the 4419 * event. Note that lpfc_nlp_get() is called before posting the event to 4420 * the work list to hold reference count of ndlp so that it guarantees the 4421 * reference to ndlp will still be available when the worker thread gets 4422 * to the event associated with the ndlp. 4423 **/ 4424 void 4425 lpfc_els_retry_delay(struct timer_list *t) 4426 { 4427 struct lpfc_nodelist *ndlp = from_timer(ndlp, t, nlp_delayfunc); 4428 struct lpfc_vport *vport = ndlp->vport; 4429 struct lpfc_hba *phba = vport->phba; 4430 unsigned long flags; 4431 struct lpfc_work_evt *evtp = &ndlp->els_retry_evt; 4432 4433 spin_lock_irqsave(&phba->hbalock, flags); 4434 if (!list_empty(&evtp->evt_listp)) { 4435 spin_unlock_irqrestore(&phba->hbalock, flags); 4436 return; 4437 } 4438 4439 /* We need to hold the node by incrementing the reference 4440 * count until the queued work is done 4441 */ 4442 evtp->evt_arg1 = lpfc_nlp_get(ndlp); 4443 if (evtp->evt_arg1) { 4444 evtp->evt = LPFC_EVT_ELS_RETRY; 4445 list_add_tail(&evtp->evt_listp, &phba->work_list); 4446 lpfc_worker_wake_up(phba); 4447 } 4448 spin_unlock_irqrestore(&phba->hbalock, flags); 4449 return; 4450 } 4451 4452 /** 4453 * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function 4454 * @ndlp: pointer to a node-list data structure. 4455 * 4456 * This routine is the worker-thread handler for processing the @ndlp delayed 4457 * event(s), posted by the lpfc_els_retry_delay() routine. 
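 * (For orientation, an illustrative summary of the flow: the per-node
 * nlp_delayfunc timer fires, lpfc_els_retry_delay() queues an
 * LPFC_EVT_ELS_RETRY work item while holding an ndlp reference, and the
 * worker thread then invokes this handler.)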
It simply retrieves 4458 * the last ELS command from the associated ndlp and invokes the proper ELS 4459 * function according to the delayed ELS command to retry the command. 4460 **/ 4461 void 4462 lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) 4463 { 4464 struct lpfc_vport *vport = ndlp->vport; 4465 uint32_t cmd, retry; 4466 4467 spin_lock_irq(&ndlp->lock); 4468 cmd = ndlp->nlp_last_elscmd; 4469 ndlp->nlp_last_elscmd = 0; 4470 4471 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { 4472 spin_unlock_irq(&ndlp->lock); 4473 return; 4474 } 4475 4476 ndlp->nlp_flag &= ~NLP_DELAY_TMO; 4477 spin_unlock_irq(&ndlp->lock); 4478 /* 4479 * If a discovery event readded nlp_delayfunc after timer 4480 * firing and before processing the timer, cancel the 4481 * nlp_delayfunc. 4482 */ 4483 del_timer_sync(&ndlp->nlp_delayfunc); 4484 retry = ndlp->nlp_retry; 4485 ndlp->nlp_retry = 0; 4486 4487 switch (cmd) { 4488 case ELS_CMD_FLOGI: 4489 lpfc_issue_els_flogi(vport, ndlp, retry); 4490 break; 4491 case ELS_CMD_PLOGI: 4492 if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) { 4493 ndlp->nlp_prev_state = ndlp->nlp_state; 4494 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 4495 } 4496 break; 4497 case ELS_CMD_ADISC: 4498 if (!lpfc_issue_els_adisc(vport, ndlp, retry)) { 4499 ndlp->nlp_prev_state = ndlp->nlp_state; 4500 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 4501 } 4502 break; 4503 case ELS_CMD_PRLI: 4504 case ELS_CMD_NVMEPRLI: 4505 if (!lpfc_issue_els_prli(vport, ndlp, retry)) { 4506 ndlp->nlp_prev_state = ndlp->nlp_state; 4507 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 4508 } 4509 break; 4510 case ELS_CMD_LOGO: 4511 if (!lpfc_issue_els_logo(vport, ndlp, retry)) { 4512 ndlp->nlp_prev_state = ndlp->nlp_state; 4513 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 4514 } 4515 break; 4516 case ELS_CMD_FDISC: 4517 if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)) 4518 lpfc_issue_els_fdisc(vport, ndlp, retry); 4519 break; 4520 } 4521 return; 4522 } 4523 4524 /** 4525 * lpfc_link_reset - Issue link reset 4526 * @vport: pointer to a virtual N_Port data structure. 4527 * 4528 * This routine performs link reset by sending INIT_LINK mailbox command. 4529 * For SLI-3 adapter, link attention interrupt is enabled before issuing 4530 * INIT_LINK mailbox command. 
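 * Within this file it is invoked from lpfc_els_retry() when a PLOGI to the
 * name server keeps hitting IOERR_SEQUENCE_TIMEOUT at its retry limit; the
 * call pattern there is, in sketch form:
 *
 *	rc = lpfc_link_reset(vport);
 *	if (rc) {
 *		retry = 1;
 *		delay = 100;
 *	}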
4531 * 4532 * Return code 4533 * 0 - Link reset initiated successfully 4534 * 1 - Failed to initiate link reset 4535 **/ 4536 int 4537 lpfc_link_reset(struct lpfc_vport *vport) 4538 { 4539 struct lpfc_hba *phba = vport->phba; 4540 LPFC_MBOXQ_t *mbox; 4541 uint32_t control; 4542 int rc; 4543 4544 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 4545 "2851 Attempt link reset\n"); 4546 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4547 if (!mbox) { 4548 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4549 "2852 Failed to allocate mbox memory"); 4550 return 1; 4551 } 4552 4553 /* Enable Link attention interrupts */ 4554 if (phba->sli_rev <= LPFC_SLI_REV3) { 4555 spin_lock_irq(&phba->hbalock); 4556 phba->sli.sli_flag |= LPFC_PROCESS_LA; 4557 control = readl(phba->HCregaddr); 4558 control |= HC_LAINT_ENA; 4559 writel(control, phba->HCregaddr); 4560 readl(phba->HCregaddr); /* flush */ 4561 spin_unlock_irq(&phba->hbalock); 4562 } 4563 4564 lpfc_init_link(phba, mbox, phba->cfg_topology, 4565 phba->cfg_link_speed); 4566 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4567 mbox->vport = vport; 4568 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 4569 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 4570 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4571 "2853 Failed to issue INIT_LINK " 4572 "mbox command, rc:x%x\n", rc); 4573 mempool_free(mbox, phba->mbox_mem_pool); 4574 return 1; 4575 } 4576 4577 return 0; 4578 } 4579 4580 /** 4581 * lpfc_els_retry - Make retry decision on an els command iocb 4582 * @phba: pointer to lpfc hba data structure. 4583 * @cmdiocb: pointer to lpfc command iocb data structure. 4584 * @rspiocb: pointer to lpfc response iocb data structure. 4585 * 4586 * This routine makes a retry decision on an ELS command IOCB, which has 4587 * failed. The following ELS IOCBs use this function for retrying the command 4588 * when previously issued command responsed with error status: FLOGI, PLOGI, 4589 * PRLI, ADISC and FDISC. Based on the ELS command type and the 4590 * returned error status, it makes the decision whether a retry shall be 4591 * issued for the command, and whether a retry shall be made immediately or 4592 * delayed. In the former case, the corresponding ELS command issuing-function 4593 * is called to retry the command. In the later case, the ELS command shall 4594 * be posted to the ndlp delayed event and delayed function timer set to the 4595 * ndlp for the delayed command issusing. 4596 * 4597 * Return code 4598 * 0 - No retry of els command is made 4599 * 1 - Immediate or delayed retry of els command is made 4600 **/ 4601 static int 4602 lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 4603 struct lpfc_iocbq *rspiocb) 4604 { 4605 struct lpfc_vport *vport = cmdiocb->vport; 4606 union lpfc_wqe128 *irsp = &rspiocb->wqe; 4607 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 4608 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf; 4609 uint32_t *elscmd; 4610 struct ls_rjt stat; 4611 int retry = 0, maxretry = lpfc_max_els_tries, delay = 0; 4612 int logerr = 0; 4613 uint32_t cmd = 0; 4614 uint32_t did; 4615 int link_reset = 0, rc; 4616 u32 ulp_status = get_job_ulpstatus(phba, rspiocb); 4617 u32 ulp_word4 = get_job_word4(phba, rspiocb); 4618 4619 4620 /* Note: cmd_dmabuf may be 0 for internal driver abort 4621 * of delays ELS command. 
4622 */ 4623 4624 if (pcmd && pcmd->virt) { 4625 elscmd = (uint32_t *) (pcmd->virt); 4626 cmd = *elscmd++; 4627 } 4628 4629 if (ndlp) 4630 did = ndlp->nlp_DID; 4631 else { 4632 /* We should only hit this case for retrying PLOGI */ 4633 did = get_job_els_rsp64_did(phba, rspiocb); 4634 ndlp = lpfc_findnode_did(vport, did); 4635 if (!ndlp && (cmd != ELS_CMD_PLOGI)) 4636 return 0; 4637 } 4638 4639 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 4640 "Retry ELS: wd7:x%x wd4:x%x did:x%x", 4641 *(((uint32_t *)irsp) + 7), ulp_word4, did); 4642 4643 switch (ulp_status) { 4644 case IOSTAT_FCP_RSP_ERROR: 4645 break; 4646 case IOSTAT_REMOTE_STOP: 4647 if (phba->sli_rev == LPFC_SLI_REV4) { 4648 /* This IO was aborted by the target, we don't 4649 * know the rxid and because we did not send the 4650 * ABTS we cannot generate and RRQ. 4651 */ 4652 lpfc_set_rrq_active(phba, ndlp, 4653 cmdiocb->sli4_lxritag, 0, 0); 4654 } 4655 break; 4656 case IOSTAT_LOCAL_REJECT: 4657 switch ((ulp_word4 & IOERR_PARAM_MASK)) { 4658 case IOERR_LOOP_OPEN_FAILURE: 4659 if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0) 4660 delay = 1000; 4661 retry = 1; 4662 break; 4663 4664 case IOERR_ILLEGAL_COMMAND: 4665 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4666 "0124 Retry illegal cmd x%x " 4667 "retry:x%x delay:x%x\n", 4668 cmd, cmdiocb->retry, delay); 4669 retry = 1; 4670 /* All command's retry policy */ 4671 maxretry = 8; 4672 if (cmdiocb->retry > 2) 4673 delay = 1000; 4674 break; 4675 4676 case IOERR_NO_RESOURCES: 4677 logerr = 1; /* HBA out of resources */ 4678 retry = 1; 4679 if (cmdiocb->retry > 100) 4680 delay = 100; 4681 maxretry = 250; 4682 break; 4683 4684 case IOERR_ILLEGAL_FRAME: 4685 delay = 100; 4686 retry = 1; 4687 break; 4688 4689 case IOERR_INVALID_RPI: 4690 if (cmd == ELS_CMD_PLOGI && 4691 did == NameServer_DID) { 4692 /* Continue forever if plogi to */ 4693 /* the nameserver fails */ 4694 maxretry = 0; 4695 delay = 100; 4696 } else if (cmd == ELS_CMD_PRLI && 4697 ndlp->nlp_state != NLP_STE_PRLI_ISSUE) { 4698 /* State-command disagreement. The PRLI was 4699 * failed with an invalid rpi meaning there 4700 * some unexpected state change. Don't retry. 4701 */ 4702 maxretry = 0; 4703 retry = 0; 4704 break; 4705 } 4706 retry = 1; 4707 break; 4708 4709 case IOERR_SEQUENCE_TIMEOUT: 4710 if (cmd == ELS_CMD_PLOGI && 4711 did == NameServer_DID && 4712 (cmdiocb->retry + 1) == maxretry) { 4713 /* Reset the Link */ 4714 link_reset = 1; 4715 break; 4716 } 4717 retry = 1; 4718 delay = 100; 4719 break; 4720 case IOERR_SLI_ABORTED: 4721 /* Retry ELS PLOGI command? 4722 * Possibly the rport just wasn't ready. 
4723 */ 4724 if (cmd == ELS_CMD_PLOGI) { 4725 /* No retry if state change */ 4726 if (ndlp && 4727 ndlp->nlp_state != NLP_STE_PLOGI_ISSUE) 4728 goto out_retry; 4729 retry = 1; 4730 maxretry = 2; 4731 } 4732 break; 4733 } 4734 break; 4735 4736 case IOSTAT_NPORT_RJT: 4737 case IOSTAT_FABRIC_RJT: 4738 if (ulp_word4 & RJT_UNAVAIL_TEMP) { 4739 retry = 1; 4740 break; 4741 } 4742 break; 4743 4744 case IOSTAT_NPORT_BSY: 4745 case IOSTAT_FABRIC_BSY: 4746 logerr = 1; /* Fabric / Remote NPort out of resources */ 4747 retry = 1; 4748 break; 4749 4750 case IOSTAT_LS_RJT: 4751 stat.un.ls_rjt_error_be = cpu_to_be32(ulp_word4); 4752 /* Added for Vendor specifc support 4753 * Just keep retrying for these Rsn / Exp codes 4754 */ 4755 if ((vport->fc_flag & FC_PT2PT) && 4756 cmd == ELS_CMD_NVMEPRLI) { 4757 switch (stat.un.b.lsRjtRsnCode) { 4758 case LSRJT_UNABLE_TPC: 4759 case LSRJT_INVALID_CMD: 4760 case LSRJT_LOGICAL_ERR: 4761 case LSRJT_CMD_UNSUPPORTED: 4762 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 4763 "0168 NVME PRLI LS_RJT " 4764 "reason %x port doesn't " 4765 "support NVME, disabling NVME\n", 4766 stat.un.b.lsRjtRsnCode); 4767 retry = 0; 4768 vport->fc_flag |= FC_PT2PT_NO_NVME; 4769 goto out_retry; 4770 } 4771 } 4772 switch (stat.un.b.lsRjtRsnCode) { 4773 case LSRJT_UNABLE_TPC: 4774 /* Special case for PRLI LS_RJTs. Recall that lpfc 4775 * uses a single routine to issue both PRLI FC4 types. 4776 * If the PRLI is rejected because that FC4 type 4777 * isn't really supported, don't retry and cause 4778 * multiple transport registrations. Otherwise, parse 4779 * the reason code/reason code explanation and take the 4780 * appropriate action. 4781 */ 4782 lpfc_printf_vlog(vport, KERN_INFO, 4783 LOG_DISCOVERY | LOG_ELS | LOG_NODE, 4784 "0153 ELS cmd x%x LS_RJT by x%x. " 4785 "RsnCode x%x RsnCodeExp x%x\n", 4786 cmd, did, stat.un.b.lsRjtRsnCode, 4787 stat.un.b.lsRjtRsnCodeExp); 4788 4789 switch (stat.un.b.lsRjtRsnCodeExp) { 4790 case LSEXP_CANT_GIVE_DATA: 4791 case LSEXP_CMD_IN_PROGRESS: 4792 if (cmd == ELS_CMD_PLOGI) { 4793 delay = 1000; 4794 maxretry = 48; 4795 } 4796 retry = 1; 4797 break; 4798 case LSEXP_REQ_UNSUPPORTED: 4799 case LSEXP_NO_RSRC_ASSIGN: 4800 /* These explanation codes get no retry. */ 4801 if (cmd == ELS_CMD_PRLI || 4802 cmd == ELS_CMD_NVMEPRLI) 4803 break; 4804 fallthrough; 4805 default: 4806 /* Limit the delay and retry action to a limited 4807 * cmd set. There are other ELS commands where 4808 * a retry is not expected. 4809 */ 4810 if (cmd == ELS_CMD_PLOGI || 4811 cmd == ELS_CMD_PRLI || 4812 cmd == ELS_CMD_NVMEPRLI) { 4813 delay = 1000; 4814 maxretry = lpfc_max_els_tries + 1; 4815 retry = 1; 4816 } 4817 break; 4818 } 4819 4820 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 4821 (cmd == ELS_CMD_FDISC) && 4822 (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){ 4823 lpfc_printf_vlog(vport, KERN_ERR, 4824 LOG_TRACE_EVENT, 4825 "0125 FDISC Failed (x%x). 
" 4826 "Fabric out of resources\n", 4827 stat.un.lsRjtError); 4828 lpfc_vport_set_state(vport, 4829 FC_VPORT_NO_FABRIC_RSCS); 4830 } 4831 break; 4832 4833 case LSRJT_LOGICAL_BSY: 4834 if ((cmd == ELS_CMD_PLOGI) || 4835 (cmd == ELS_CMD_PRLI) || 4836 (cmd == ELS_CMD_NVMEPRLI)) { 4837 delay = 1000; 4838 maxretry = 48; 4839 } else if (cmd == ELS_CMD_FDISC) { 4840 /* FDISC retry policy */ 4841 maxretry = 48; 4842 if (cmdiocb->retry >= 32) 4843 delay = 1000; 4844 } 4845 retry = 1; 4846 break; 4847 4848 case LSRJT_LOGICAL_ERR: 4849 /* There are some cases where switches return this 4850 * error when they are not ready and should be returning 4851 * Logical Busy. We should delay every time. 4852 */ 4853 if (cmd == ELS_CMD_FDISC && 4854 stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) { 4855 maxretry = 3; 4856 delay = 1000; 4857 retry = 1; 4858 } else if (cmd == ELS_CMD_FLOGI && 4859 stat.un.b.lsRjtRsnCodeExp == 4860 LSEXP_NOTHING_MORE) { 4861 vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf; 4862 retry = 1; 4863 lpfc_printf_vlog(vport, KERN_ERR, 4864 LOG_TRACE_EVENT, 4865 "0820 FLOGI Failed (x%x). " 4866 "BBCredit Not Supported\n", 4867 stat.un.lsRjtError); 4868 } 4869 break; 4870 4871 case LSRJT_PROTOCOL_ERR: 4872 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 4873 (cmd == ELS_CMD_FDISC) && 4874 ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) || 4875 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID)) 4876 ) { 4877 lpfc_printf_vlog(vport, KERN_ERR, 4878 LOG_TRACE_EVENT, 4879 "0122 FDISC Failed (x%x). " 4880 "Fabric Detected Bad WWN\n", 4881 stat.un.lsRjtError); 4882 lpfc_vport_set_state(vport, 4883 FC_VPORT_FABRIC_REJ_WWN); 4884 } 4885 break; 4886 case LSRJT_VENDOR_UNIQUE: 4887 if ((stat.un.b.vendorUnique == 0x45) && 4888 (cmd == ELS_CMD_FLOGI)) { 4889 goto out_retry; 4890 } 4891 break; 4892 case LSRJT_CMD_UNSUPPORTED: 4893 /* lpfc nvmet returns this type of LS_RJT when it 4894 * receives an FCP PRLI because lpfc nvmet only 4895 * support NVME. ELS request is terminated for FCP4 4896 * on this rport. 4897 */ 4898 if (stat.un.b.lsRjtRsnCodeExp == 4899 LSEXP_REQ_UNSUPPORTED) { 4900 if (cmd == ELS_CMD_PRLI) 4901 goto out_retry; 4902 } 4903 break; 4904 } 4905 break; 4906 4907 case IOSTAT_INTERMED_RSP: 4908 case IOSTAT_BA_RJT: 4909 break; 4910 4911 default: 4912 break; 4913 } 4914 4915 if (link_reset) { 4916 rc = lpfc_link_reset(vport); 4917 if (rc) { 4918 /* Do not give up. Retry PLOGI one more time and attempt 4919 * link reset if PLOGI fails again. 
4920 */ 4921 retry = 1; 4922 delay = 100; 4923 goto out_retry; 4924 } 4925 return 1; 4926 } 4927 4928 if (did == FDMI_DID) 4929 retry = 1; 4930 4931 if ((cmd == ELS_CMD_FLOGI) && 4932 (phba->fc_topology != LPFC_TOPOLOGY_LOOP) && 4933 !lpfc_error_lost_link(vport, ulp_status, ulp_word4)) { 4934 /* FLOGI retry policy */ 4935 retry = 1; 4936 /* retry FLOGI forever */ 4937 if (phba->link_flag != LS_LOOPBACK_MODE) 4938 maxretry = 0; 4939 else 4940 maxretry = 2; 4941 4942 if (cmdiocb->retry >= 100) 4943 delay = 5000; 4944 else if (cmdiocb->retry >= 32) 4945 delay = 1000; 4946 } else if ((cmd == ELS_CMD_FDISC) && 4947 !lpfc_error_lost_link(vport, ulp_status, ulp_word4)) { 4948 /* retry FDISCs every second up to devloss */ 4949 retry = 1; 4950 maxretry = vport->cfg_devloss_tmo; 4951 delay = 1000; 4952 } 4953 4954 cmdiocb->retry++; 4955 if (maxretry && (cmdiocb->retry >= maxretry)) { 4956 phba->fc_stat.elsRetryExceeded++; 4957 retry = 0; 4958 } 4959 4960 if ((vport->load_flag & FC_UNLOADING) != 0) 4961 retry = 0; 4962 4963 out_retry: 4964 if (retry) { 4965 if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) { 4966 /* Stop retrying PLOGI and FDISC if in FCF discovery */ 4967 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 4968 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4969 "2849 Stop retry ELS command " 4970 "x%x to remote NPORT x%x, " 4971 "Data: x%x x%x\n", cmd, did, 4972 cmdiocb->retry, delay); 4973 return 0; 4974 } 4975 } 4976 4977 /* Retry ELS command <elsCmd> to remote NPORT <did> */ 4978 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4979 "0107 Retry ELS command x%x to remote " 4980 "NPORT x%x Data: x%x x%x\n", 4981 cmd, did, cmdiocb->retry, delay); 4982 4983 if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) && 4984 ((ulp_status != IOSTAT_LOCAL_REJECT) || 4985 ((ulp_word4 & IOERR_PARAM_MASK) != 4986 IOERR_NO_RESOURCES))) { 4987 /* Don't reset timer for no resources */ 4988 4989 /* If discovery / RSCN timer is running, reset it */ 4990 if (timer_pending(&vport->fc_disctmo) || 4991 (vport->fc_flag & FC_RSCN_MODE)) 4992 lpfc_set_disctmo(vport); 4993 } 4994 4995 phba->fc_stat.elsXmitRetry++; 4996 if (ndlp && delay) { 4997 phba->fc_stat.elsDelayRetry++; 4998 ndlp->nlp_retry = cmdiocb->retry; 4999 5000 /* delay is specified in milliseconds */ 5001 mod_timer(&ndlp->nlp_delayfunc, 5002 jiffies + msecs_to_jiffies(delay)); 5003 spin_lock_irq(&ndlp->lock); 5004 ndlp->nlp_flag |= NLP_DELAY_TMO; 5005 spin_unlock_irq(&ndlp->lock); 5006 5007 ndlp->nlp_prev_state = ndlp->nlp_state; 5008 if ((cmd == ELS_CMD_PRLI) || 5009 (cmd == ELS_CMD_NVMEPRLI)) 5010 lpfc_nlp_set_state(vport, ndlp, 5011 NLP_STE_PRLI_ISSUE); 5012 else if (cmd != ELS_CMD_ADISC) 5013 lpfc_nlp_set_state(vport, ndlp, 5014 NLP_STE_NPR_NODE); 5015 ndlp->nlp_last_elscmd = cmd; 5016 5017 return 1; 5018 } 5019 switch (cmd) { 5020 case ELS_CMD_FLOGI: 5021 lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry); 5022 return 1; 5023 case ELS_CMD_FDISC: 5024 lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry); 5025 return 1; 5026 case ELS_CMD_PLOGI: 5027 if (ndlp) { 5028 ndlp->nlp_prev_state = ndlp->nlp_state; 5029 lpfc_nlp_set_state(vport, ndlp, 5030 NLP_STE_PLOGI_ISSUE); 5031 } 5032 lpfc_issue_els_plogi(vport, did, cmdiocb->retry); 5033 return 1; 5034 case ELS_CMD_ADISC: 5035 ndlp->nlp_prev_state = ndlp->nlp_state; 5036 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 5037 lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry); 5038 return 1; 5039 case ELS_CMD_PRLI: 5040 case ELS_CMD_NVMEPRLI: 5041 ndlp->nlp_prev_state = ndlp->nlp_state; 5042 
lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 5043 lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry); 5044 return 1; 5045 case ELS_CMD_LOGO: 5046 ndlp->nlp_prev_state = ndlp->nlp_state; 5047 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 5048 lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry); 5049 return 1; 5050 } 5051 } 5052 /* No retry ELS command <elsCmd> to remote NPORT <did> */ 5053 if (logerr) { 5054 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5055 "0137 No retry ELS command x%x to remote " 5056 "NPORT x%x: Out of Resources: Error:x%x/%x\n", 5057 cmd, did, ulp_status, 5058 ulp_word4); 5059 } 5060 else { 5061 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5062 "0108 No retry ELS command x%x to remote " 5063 "NPORT x%x Retried:%d Error:x%x/%x\n", 5064 cmd, did, cmdiocb->retry, ulp_status, 5065 ulp_word4); 5066 } 5067 return 0; 5068 } 5069 5070 /** 5071 * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb 5072 * @phba: pointer to lpfc hba data structure. 5073 * @buf_ptr1: pointer to the lpfc DMA buffer data structure. 5074 * 5075 * This routine releases the lpfc DMA (Direct Memory Access) buffer(s) 5076 * associated with a command IOCB back to the lpfc DMA buffer pool. It first 5077 * checks to see whether there is a lpfc DMA buffer associated with the 5078 * response of the command IOCB. If so, it will be released before releasing 5079 * the lpfc DMA buffer associated with the IOCB itself. 5080 * 5081 * Return code 5082 * 0 - Successfully released lpfc DMA buffer (currently, always return 0) 5083 **/ 5084 static int 5085 lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1) 5086 { 5087 struct lpfc_dmabuf *buf_ptr; 5088 5089 /* Free the response before processing the command. */ 5090 if (!list_empty(&buf_ptr1->list)) { 5091 list_remove_head(&buf_ptr1->list, buf_ptr, 5092 struct lpfc_dmabuf, 5093 list); 5094 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 5095 kfree(buf_ptr); 5096 } 5097 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys); 5098 kfree(buf_ptr1); 5099 return 0; 5100 } 5101 5102 /** 5103 * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl 5104 * @phba: pointer to lpfc hba data structure. 5105 * @buf_ptr: pointer to the lpfc dma buffer data structure. 5106 * 5107 * This routine releases the lpfc Direct Memory Access (DMA) buffer 5108 * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer 5109 * pool. 5110 * 5111 * Return code 5112 * 0 - Successfully released lpfc DMA buffer (currently, always return 0) 5113 **/ 5114 static int 5115 lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr) 5116 { 5117 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 5118 kfree(buf_ptr); 5119 return 0; 5120 } 5121 5122 /** 5123 * lpfc_els_free_iocb - Free a command iocb and its associated resources 5124 * @phba: pointer to lpfc hba data structure. 5125 * @elsiocb: pointer to lpfc els command iocb data structure. 5126 * 5127 * This routine frees a command IOCB and its associated resources. The 5128 * command IOCB data structure contains the reference to various associated 5129 * resources, these fields must be set to NULL if the associated reference 5130 * not present: 5131 * cmd_dmabuf - reference to cmd. 5132 * cmd_dmabuf->next - reference to rsp 5133 * rsp_dmabuf - unused 5134 * bpl_dmabuf - reference to bpl 5135 * 5136 * It first properly decrements the reference count held on ndlp for the 5137 * IOCB completion callback function. 
If LPFC_DELAY_MEM_FREE flag is not
5138 * set, it invokes the lpfc_els_free_data() routine to release the Direct
5139 * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it
5140 * adds the DMA buffer to the @phba data structure for the delayed release.
5141 * If a reference to the Buffer Pointer List (BPL) is present, the
5142 * lpfc_els_free_bpl() routine is invoked to release the DMA memory
5143 * associated with the BPL. Finally, the lpfc_sli_release_iocbq() routine is
5144 * invoked to release the IOCB data structure back to the @phba IOCBQ list.
5145 *
5146 * Return code
5147 * 0 - Success (currently, always return 0)
5148 **/
5149 int
5150 lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
5151 {
5152 struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
5153 
5154 /* The I/O iocb is complete. Clear the node and first dmabuf */
5155 elsiocb->ndlp = NULL;
5156 
5157 /* cmd_dmabuf = cmd, cmd_dmabuf->next = rsp, bpl_dmabuf = bpl */
5158 if (elsiocb->cmd_dmabuf) {
5159 if (elsiocb->cmd_flag & LPFC_DELAY_MEM_FREE) {
5160 /* Firmware could still be in progress of DMAing
5161 * payload, so don't free data buffer till after
5162 * a hbeat.
5163 */
5164 elsiocb->cmd_flag &= ~LPFC_DELAY_MEM_FREE;
5165 buf_ptr = elsiocb->cmd_dmabuf;
5166 elsiocb->cmd_dmabuf = NULL;
5167 if (buf_ptr) {
5168 buf_ptr1 = NULL;
5169 spin_lock_irq(&phba->hbalock);
5170 if (!list_empty(&buf_ptr->list)) {
5171 list_remove_head(&buf_ptr->list,
5172 buf_ptr1, struct lpfc_dmabuf,
5173 list);
5174 INIT_LIST_HEAD(&buf_ptr1->list);
5175 list_add_tail(&buf_ptr1->list,
5176 &phba->elsbuf);
5177 phba->elsbuf_cnt++;
5178 }
5179 INIT_LIST_HEAD(&buf_ptr->list);
5180 list_add_tail(&buf_ptr->list, &phba->elsbuf);
5181 phba->elsbuf_cnt++;
5182 spin_unlock_irq(&phba->hbalock);
5183 }
5184 } else {
5185 buf_ptr1 = elsiocb->cmd_dmabuf;
5186 lpfc_els_free_data(phba, buf_ptr1);
5187 elsiocb->cmd_dmabuf = NULL;
5188 }
5189 }
5190 
5191 if (elsiocb->bpl_dmabuf) {
5192 buf_ptr = elsiocb->bpl_dmabuf;
5193 lpfc_els_free_bpl(phba, buf_ptr);
5194 elsiocb->bpl_dmabuf = NULL;
5195 }
5196 lpfc_sli_release_iocbq(phba, elsiocb);
5197 return 0;
5198 }
5199 
5200 /**
5201 * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response
5202 * @phba: pointer to lpfc hba data structure.
5203 * @cmdiocb: pointer to lpfc command iocb data structure.
5204 * @rspiocb: pointer to lpfc response iocb data structure.
5205 *
5206 * This routine is the completion callback function to the Logout (LOGO)
5207 * Accept (ACC) Response ELS command. This routine is invoked to indicate
5208 * the completion of the LOGO process. It invokes lpfc_nlp_not_used() to
5209 * release the ndlp if it has the last reference remaining (reference count
5210 * is 1). If that succeeds (meaning the ndlp was released), it sets the iocb
5211 * ndlp field to NULL to inform the following lpfc_els_free_iocb() routine
5212 * that no ndlp reference count needs to be decremented. Otherwise, the ndlp
5213 * reference use-count shall be decremented by the lpfc_els_free_iocb()
5214 * routine. Finally, lpfc_els_free_iocb() is invoked to release the
5215 * IOCB data structure.
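* Note that when the LOGO ACC completes for a fabric controller the RPI
* teardown is deliberately skipped, since some fabrics send RDP requests
* after logging out.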
5216 **/
5217 static void
5218 lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
5219 struct lpfc_iocbq *rspiocb)
5220 {
5221 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
5222 struct lpfc_vport *vport = cmdiocb->vport;
5223 u32 ulp_status, ulp_word4;
5224 
5225 ulp_status = get_job_ulpstatus(phba, rspiocb);
5226 ulp_word4 = get_job_word4(phba, rspiocb);
5227 
5228 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
5229 "ACC LOGO cmpl: status:x%x/x%x did:x%x",
5230 ulp_status, ulp_word4, ndlp->nlp_DID);
5231 /* ACC to LOGO completes to NPort <nlp_DID> */
5232 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5233 "0109 ACC to LOGO completes to NPort x%x refcnt %d "
5234 "Data: x%x x%x x%x\n",
5235 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag,
5236 ndlp->nlp_state, ndlp->nlp_rpi);
5237 
5238 /* This clause allows the LOGO ACC to complete and free resources
5239 * for the Fabric Domain Controller. It deliberately skips
5240 * the unreg_rpi and RPI release because some fabrics send RDP
5241 * requests after logging out from the initiator.
5242 */
5243 if (ndlp->nlp_type & NLP_FABRIC &&
5244 ((ndlp->nlp_DID & WELL_KNOWN_DID_MASK) != WELL_KNOWN_DID_MASK))
5245 goto out;
5246 
5247 if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
5248 /* If PLOGI is being retried, PLOGI completion will cleanup the
5249 * node. The NLP_NPR_2B_DISC flag needs to be retained to make
5250 * progress on nodes discovered from last RSCN.
5251 */
5252 if ((ndlp->nlp_flag & NLP_DELAY_TMO) &&
5253 (ndlp->nlp_last_elscmd == ELS_CMD_PLOGI))
5254 goto out;
5255 
5256 /* NPort Recovery mode or node is just allocated */
5257 if (!lpfc_nlp_not_used(ndlp)) {
5258 /* A LOGO is completing and the node is in NPR state.
5259 * Just unregister the RPI because the node is still
5260 * required.
5261 */
5262 lpfc_unreg_rpi(vport, ndlp);
5263 } else {
5264 /* Indicate the node has already been released; do not
5265 * reference it from within lpfc_els_free_iocb.
5266 */
5267 cmdiocb->ndlp = NULL;
5268 }
5269 }
5270 out:
5271 /*
5272 * The driver received a LOGO from the rport and has ACK'd it.
5273 * At this point, the driver is done so release the IOCB
5274 */
5275 lpfc_els_free_iocb(phba, cmdiocb);
5276 lpfc_nlp_put(ndlp);
5277 }
5278 
5279 /**
5280 * lpfc_mbx_cmpl_dflt_rpi - Completion callbk func for unreg dflt rpi mbox cmd
5281 * @phba: pointer to lpfc hba data structure.
5282 * @pmb: pointer to the driver internal queue element for mailbox command.
5283 *
5284 * This routine is the completion callback function for unregister default
5285 * RPI (Remote Port Index) mailbox command to the @phba. It simply releases
5286 * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and
5287 * decrements the ndlp reference count held for this completion callback
5288 * function. After that, it invokes lpfc_drop_node() to remove the ndlp
5289 * from the vport's node list so that the remaining node references
5290 * can be released.
5291 **/
5292 void
5293 lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5294 {
5295 struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
5296 u32 mbx_flag = pmb->mbox_flag;
5297 u32 mbx_cmd = pmb->u.mb.mbxCommand;
5298 
5299 if (ndlp) {
5300 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
5301 "0006 rpi x%x DID:%x flg:%x %d x%px "
5302 "mbx_cmd x%x mbx_flag x%x x%px\n",
5303 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
5304 kref_read(&ndlp->kref), ndlp, mbx_cmd,
5305 mbx_flag, pmb);
5306 
5307 /* This ends the default/temporary RPI cleanup logic for this
5308 * ndlp; the node and rpi need to be released. Free the rpi
5309 * first on an UNREG_LOGIN and then release the final
5310 * references.
5311 */
5312 spin_lock_irq(&ndlp->lock);
5313 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
5314 if (mbx_cmd == MBX_UNREG_LOGIN)
5315 ndlp->nlp_flag &= ~NLP_UNREG_INP;
5316 spin_unlock_irq(&ndlp->lock);
5317 lpfc_nlp_put(ndlp);
5318 lpfc_drop_node(ndlp->vport, ndlp);
5319 }
5320 
5321 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
5322 }
5323 
5324 /**
5325 * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd
5326 * @phba: pointer to lpfc hba data structure.
5327 * @cmdiocb: pointer to lpfc command iocb data structure.
5328 * @rspiocb: pointer to lpfc response iocb data structure.
5329 *
5330 * This routine is the completion callback function for ELS Response IOCB
5331 * command. In the normal case, this callback function just clears the
5332 * appropriate nlp_flag bits in the ndlp data structure. If the mbox command
5333 * reference field in the command IOCB is not NULL, the referred mailbox
5334 * command will be sent out; lpfc_els_free_iocb() is then invoked to release
5335 * the IOCB.
5336 **/
5337 static void
5338 lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
5339 struct lpfc_iocbq *rspiocb)
5340 {
5341 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
5342 struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
5343 struct Scsi_Host *shost = vport ?
lpfc_shost_from_vport(vport) : NULL; 5344 IOCB_t *irsp; 5345 LPFC_MBOXQ_t *mbox = NULL; 5346 u32 ulp_status, ulp_word4, tmo, did, iotag; 5347 5348 if (!vport) { 5349 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5350 "3177 ELS response failed\n"); 5351 goto out; 5352 } 5353 if (cmdiocb->context_un.mbox) 5354 mbox = cmdiocb->context_un.mbox; 5355 5356 ulp_status = get_job_ulpstatus(phba, rspiocb); 5357 ulp_word4 = get_job_word4(phba, rspiocb); 5358 did = get_job_els_rsp64_did(phba, cmdiocb); 5359 5360 if (phba->sli_rev == LPFC_SLI_REV4) { 5361 tmo = get_wqe_tmo(cmdiocb); 5362 iotag = get_wqe_reqtag(cmdiocb); 5363 } else { 5364 irsp = &rspiocb->iocb; 5365 tmo = irsp->ulpTimeout; 5366 iotag = irsp->ulpIoTag; 5367 } 5368 5369 /* Check to see if link went down during discovery */ 5370 if (!ndlp || lpfc_els_chk_latt(vport)) { 5371 if (mbox) 5372 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 5373 goto out; 5374 } 5375 5376 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5377 "ELS rsp cmpl: status:x%x/x%x did:x%x", 5378 ulp_status, ulp_word4, did); 5379 /* ELS response tag <ulpIoTag> completes */ 5380 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5381 "0110 ELS response tag x%x completes " 5382 "Data: x%x x%x x%x x%x x%x x%x x%x x%x %p %p\n", 5383 iotag, ulp_status, ulp_word4, tmo, 5384 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5385 ndlp->nlp_rpi, kref_read(&ndlp->kref), mbox, ndlp); 5386 if (mbox) { 5387 if (ulp_status == 0 5388 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { 5389 if (!lpfc_unreg_rpi(vport, ndlp) && 5390 (!(vport->fc_flag & FC_PT2PT))) { 5391 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || 5392 ndlp->nlp_state == 5393 NLP_STE_REG_LOGIN_ISSUE) { 5394 lpfc_printf_vlog(vport, KERN_INFO, 5395 LOG_DISCOVERY, 5396 "0314 PLOGI recov " 5397 "DID x%x " 5398 "Data: x%x x%x x%x\n", 5399 ndlp->nlp_DID, 5400 ndlp->nlp_state, 5401 ndlp->nlp_rpi, 5402 ndlp->nlp_flag); 5403 goto out_free_mbox; 5404 } 5405 } 5406 5407 /* Increment reference count to ndlp to hold the 5408 * reference to ndlp for the callback function. 5409 */ 5410 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 5411 if (!mbox->ctx_ndlp) 5412 goto out_free_mbox; 5413 5414 mbox->vport = vport; 5415 if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) { 5416 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG; 5417 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 5418 } 5419 else { 5420 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; 5421 ndlp->nlp_prev_state = ndlp->nlp_state; 5422 lpfc_nlp_set_state(vport, ndlp, 5423 NLP_STE_REG_LOGIN_ISSUE); 5424 } 5425 5426 ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; 5427 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 5428 != MBX_NOT_FINISHED) 5429 goto out; 5430 5431 /* Decrement the ndlp reference count we 5432 * set for this failed mailbox command. 5433 */ 5434 lpfc_nlp_put(ndlp); 5435 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 5436 5437 /* ELS rsp: Cannot issue reg_login for <NPortid> */ 5438 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5439 "0138 ELS rsp: Cannot issue reg_login for x%x " 5440 "Data: x%x x%x x%x\n", 5441 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5442 ndlp->nlp_rpi); 5443 } 5444 out_free_mbox: 5445 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 5446 } 5447 out: 5448 if (ndlp && shost) { 5449 spin_lock_irq(&ndlp->lock); 5450 if (mbox) 5451 ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN; 5452 ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI; 5453 spin_unlock_irq(&ndlp->lock); 5454 } 5455 5456 /* An SLI4 NPIV instance wants to drop the node at this point under 5457 * these conditions and release the RPI. 
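* Specifically: the node is not registered with the SCSI transport and,
* unless a PLOGI or REG_LOGIN is still in progress, any RPI marked for
* release is returned to the SLI4 RPI pool before the node is dropped.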
5458 */ 5459 if (phba->sli_rev == LPFC_SLI_REV4 && 5460 vport && vport->port_type == LPFC_NPIV_PORT && 5461 !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) { 5462 if (ndlp->nlp_flag & NLP_RELEASE_RPI) { 5463 if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE && 5464 ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) { 5465 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); 5466 spin_lock_irq(&ndlp->lock); 5467 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; 5468 ndlp->nlp_flag &= ~NLP_RELEASE_RPI; 5469 spin_unlock_irq(&ndlp->lock); 5470 } 5471 } 5472 5473 lpfc_drop_node(vport, ndlp); 5474 } 5475 5476 /* Release the originating I/O reference. */ 5477 lpfc_els_free_iocb(phba, cmdiocb); 5478 lpfc_nlp_put(ndlp); 5479 return; 5480 } 5481 5482 /** 5483 * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command 5484 * @vport: pointer to a host virtual N_Port data structure. 5485 * @flag: the els command code to be accepted. 5486 * @oldiocb: pointer to the original lpfc command iocb data structure. 5487 * @ndlp: pointer to a node-list data structure. 5488 * @mbox: pointer to the driver internal queue element for mailbox command. 5489 * 5490 * This routine prepares and issues an Accept (ACC) response IOCB 5491 * command. It uses the @flag to properly set up the IOCB field for the 5492 * specific ACC response command to be issued and invokes the 5493 * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a 5494 * @mbox pointer is passed in, it will be put into the context_un.mbox 5495 * field of the IOCB for the completion callback function to issue the 5496 * mailbox command to the HBA later when callback is invoked. 5497 * 5498 * Note that the ndlp reference count will be incremented by 1 for holding the 5499 * ndlp and the reference to ndlp will be stored into the ndlp field of 5500 * the IOCB for the completion callback function to the corresponding 5501 * response ELS IOCB command. 
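* The @flag values handled here are ELS_CMD_ACC, ELS_CMD_FLOGI,
* ELS_CMD_PLOGI, ELS_CMD_PRLO and ELS_CMD_RDF; any other value causes the
* routine to return 1 without issuing a response.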
5502 * 5503 * Return code 5504 * 0 - Successfully issued acc response 5505 * 1 - Failed to issue acc response 5506 **/ 5507 int 5508 lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, 5509 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 5510 LPFC_MBOXQ_t *mbox) 5511 { 5512 struct lpfc_hba *phba = vport->phba; 5513 IOCB_t *icmd; 5514 IOCB_t *oldcmd; 5515 union lpfc_wqe128 *wqe; 5516 union lpfc_wqe128 *oldwqe = &oldiocb->wqe; 5517 struct lpfc_iocbq *elsiocb; 5518 uint8_t *pcmd; 5519 struct serv_parm *sp; 5520 uint16_t cmdsize; 5521 int rc; 5522 ELS_PKT *els_pkt_ptr; 5523 struct fc_els_rdf_resp *rdf_resp; 5524 5525 switch (flag) { 5526 case ELS_CMD_ACC: 5527 cmdsize = sizeof(uint32_t); 5528 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5529 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5530 if (!elsiocb) { 5531 spin_lock_irq(&ndlp->lock); 5532 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 5533 spin_unlock_irq(&ndlp->lock); 5534 return 1; 5535 } 5536 5537 if (phba->sli_rev == LPFC_SLI_REV4) { 5538 wqe = &elsiocb->wqe; 5539 /* XRI / rx_id */ 5540 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 5541 bf_get(wqe_ctxt_tag, 5542 &oldwqe->xmit_els_rsp.wqe_com)); 5543 5544 /* oxid */ 5545 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5546 bf_get(wqe_rcvoxid, 5547 &oldwqe->xmit_els_rsp.wqe_com)); 5548 } else { 5549 icmd = &elsiocb->iocb; 5550 oldcmd = &oldiocb->iocb; 5551 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5552 icmd->unsli3.rcvsli3.ox_id = 5553 oldcmd->unsli3.rcvsli3.ox_id; 5554 } 5555 5556 pcmd = elsiocb->cmd_dmabuf->virt; 5557 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5558 pcmd += sizeof(uint32_t); 5559 5560 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5561 "Issue ACC: did:x%x flg:x%x", 5562 ndlp->nlp_DID, ndlp->nlp_flag, 0); 5563 break; 5564 case ELS_CMD_FLOGI: 5565 case ELS_CMD_PLOGI: 5566 cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t)); 5567 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5568 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5569 if (!elsiocb) 5570 return 1; 5571 5572 if (phba->sli_rev == LPFC_SLI_REV4) { 5573 wqe = &elsiocb->wqe; 5574 /* XRI / rx_id */ 5575 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 5576 bf_get(wqe_ctxt_tag, 5577 &oldwqe->xmit_els_rsp.wqe_com)); 5578 5579 /* oxid */ 5580 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5581 bf_get(wqe_rcvoxid, 5582 &oldwqe->xmit_els_rsp.wqe_com)); 5583 } else { 5584 icmd = &elsiocb->iocb; 5585 oldcmd = &oldiocb->iocb; 5586 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5587 icmd->unsli3.rcvsli3.ox_id = 5588 oldcmd->unsli3.rcvsli3.ox_id; 5589 } 5590 5591 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt; 5592 5593 if (mbox) 5594 elsiocb->context_un.mbox = mbox; 5595 5596 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5597 pcmd += sizeof(uint32_t); 5598 sp = (struct serv_parm *)pcmd; 5599 5600 if (flag == ELS_CMD_FLOGI) { 5601 /* Copy the received service parameters back */ 5602 memcpy(sp, &phba->fc_fabparam, 5603 sizeof(struct serv_parm)); 5604 5605 /* Clear the F_Port bit */ 5606 sp->cmn.fPort = 0; 5607 5608 /* Mark all class service parameters as invalid */ 5609 sp->cls1.classValid = 0; 5610 sp->cls2.classValid = 0; 5611 sp->cls3.classValid = 0; 5612 sp->cls4.classValid = 0; 5613 5614 /* Copy our worldwide names */ 5615 memcpy(&sp->portName, &vport->fc_sparam.portName, 5616 sizeof(struct lpfc_name)); 5617 memcpy(&sp->nodeName, &vport->fc_sparam.nodeName, 5618 sizeof(struct lpfc_name)); 5619 } else { 5620 memcpy(pcmd, &vport->fc_sparam, 5621 sizeof(struct serv_parm)); 5622 5623 
sp->cmn.valid_vendor_ver_level = 0; 5624 memset(sp->un.vendorVersion, 0, 5625 sizeof(sp->un.vendorVersion)); 5626 sp->cmn.bbRcvSizeMsb &= 0xF; 5627 5628 /* If our firmware supports this feature, convey that 5629 * info to the target using the vendor specific field. 5630 */ 5631 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) { 5632 sp->cmn.valid_vendor_ver_level = 1; 5633 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID); 5634 sp->un.vv.flags = 5635 cpu_to_be32(LPFC_VV_SUPPRESS_RSP); 5636 } 5637 } 5638 5639 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5640 "Issue ACC FLOGI/PLOGI: did:x%x flg:x%x", 5641 ndlp->nlp_DID, ndlp->nlp_flag, 0); 5642 break; 5643 case ELS_CMD_PRLO: 5644 cmdsize = sizeof(uint32_t) + sizeof(PRLO); 5645 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5646 ndlp, ndlp->nlp_DID, ELS_CMD_PRLO); 5647 if (!elsiocb) 5648 return 1; 5649 5650 if (phba->sli_rev == LPFC_SLI_REV4) { 5651 wqe = &elsiocb->wqe; 5652 /* XRI / rx_id */ 5653 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 5654 bf_get(wqe_ctxt_tag, 5655 &oldwqe->xmit_els_rsp.wqe_com)); 5656 5657 /* oxid */ 5658 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5659 bf_get(wqe_rcvoxid, 5660 &oldwqe->xmit_els_rsp.wqe_com)); 5661 } else { 5662 icmd = &elsiocb->iocb; 5663 oldcmd = &oldiocb->iocb; 5664 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5665 icmd->unsli3.rcvsli3.ox_id = 5666 oldcmd->unsli3.rcvsli3.ox_id; 5667 } 5668 5669 pcmd = (u8 *) elsiocb->cmd_dmabuf->virt; 5670 5671 memcpy(pcmd, oldiocb->cmd_dmabuf->virt, 5672 sizeof(uint32_t) + sizeof(PRLO)); 5673 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC; 5674 els_pkt_ptr = (ELS_PKT *) pcmd; 5675 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED; 5676 5677 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5678 "Issue ACC PRLO: did:x%x flg:x%x", 5679 ndlp->nlp_DID, ndlp->nlp_flag, 0); 5680 break; 5681 case ELS_CMD_RDF: 5682 cmdsize = sizeof(*rdf_resp); 5683 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5684 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5685 if (!elsiocb) 5686 return 1; 5687 5688 if (phba->sli_rev == LPFC_SLI_REV4) { 5689 wqe = &elsiocb->wqe; 5690 /* XRI / rx_id */ 5691 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 5692 bf_get(wqe_ctxt_tag, 5693 &oldwqe->xmit_els_rsp.wqe_com)); 5694 5695 /* oxid */ 5696 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5697 bf_get(wqe_rcvoxid, 5698 &oldwqe->xmit_els_rsp.wqe_com)); 5699 } else { 5700 icmd = &elsiocb->iocb; 5701 oldcmd = &oldiocb->iocb; 5702 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5703 icmd->unsli3.rcvsli3.ox_id = 5704 oldcmd->unsli3.rcvsli3.ox_id; 5705 } 5706 5707 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt; 5708 rdf_resp = (struct fc_els_rdf_resp *)pcmd; 5709 memset(rdf_resp, 0, sizeof(*rdf_resp)); 5710 rdf_resp->acc_hdr.la_cmd = ELS_LS_ACC; 5711 5712 /* FC-LS-5 specifies desc_list_len shall be set to 12 */ 5713 rdf_resp->desc_list_len = cpu_to_be32(12); 5714 5715 /* FC-LS-5 specifies LS REQ Information descriptor */ 5716 rdf_resp->lsri.desc_tag = cpu_to_be32(1); 5717 rdf_resp->lsri.desc_len = cpu_to_be32(sizeof(u32)); 5718 rdf_resp->lsri.rqst_w0.cmd = ELS_RDF; 5719 break; 5720 default: 5721 return 1; 5722 } 5723 if (ndlp->nlp_flag & NLP_LOGO_ACC) { 5724 spin_lock_irq(&ndlp->lock); 5725 if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED || 5726 ndlp->nlp_flag & NLP_REG_LOGIN_SEND)) 5727 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 5728 spin_unlock_irq(&ndlp->lock); 5729 elsiocb->cmd_cmpl = lpfc_cmpl_els_logo_acc; 5730 } else { 5731 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 5732 } 
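/* Hold a node reference for the response completion path; it is dropped
 * when the ACC completes or, on an issue failure, immediately below.
 */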
5733 5734 phba->fc_stat.elsXmitACC++; 5735 elsiocb->ndlp = lpfc_nlp_get(ndlp); 5736 if (!elsiocb->ndlp) { 5737 lpfc_els_free_iocb(phba, elsiocb); 5738 return 1; 5739 } 5740 5741 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5742 if (rc == IOCB_ERROR) { 5743 lpfc_els_free_iocb(phba, elsiocb); 5744 lpfc_nlp_put(ndlp); 5745 return 1; 5746 } 5747 5748 /* Xmit ELS ACC response tag <ulpIoTag> */ 5749 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5750 "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, " 5751 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " 5752 "RPI: x%x, fc_flag x%x refcnt %d\n", 5753 rc, elsiocb->iotag, elsiocb->sli4_xritag, 5754 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5755 ndlp->nlp_rpi, vport->fc_flag, kref_read(&ndlp->kref)); 5756 return 0; 5757 } 5758 5759 /** 5760 * lpfc_els_rsp_reject - Prepare and issue a rjt response iocb command 5761 * @vport: pointer to a virtual N_Port data structure. 5762 * @rejectError: reject response to issue 5763 * @oldiocb: pointer to the original lpfc command iocb data structure. 5764 * @ndlp: pointer to a node-list data structure. 5765 * @mbox: pointer to the driver internal queue element for mailbox command. 5766 * 5767 * This routine prepares and issue an Reject (RJT) response IOCB 5768 * command. If a @mbox pointer is passed in, it will be put into the 5769 * context_un.mbox field of the IOCB for the completion callback function 5770 * to issue to the HBA later. 5771 * 5772 * Note that the ndlp reference count will be incremented by 1 for holding the 5773 * ndlp and the reference to ndlp will be stored into the ndlp field of 5774 * the IOCB for the completion callback function to the reject response 5775 * ELS IOCB command. 5776 * 5777 * Return code 5778 * 0 - Successfully issued reject response 5779 * 1 - Failed to issue reject response 5780 **/ 5781 int 5782 lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, 5783 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 5784 LPFC_MBOXQ_t *mbox) 5785 { 5786 int rc; 5787 struct lpfc_hba *phba = vport->phba; 5788 IOCB_t *icmd; 5789 IOCB_t *oldcmd; 5790 union lpfc_wqe128 *wqe; 5791 struct lpfc_iocbq *elsiocb; 5792 uint8_t *pcmd; 5793 uint16_t cmdsize; 5794 5795 cmdsize = 2 * sizeof(uint32_t); 5796 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 5797 ndlp->nlp_DID, ELS_CMD_LS_RJT); 5798 if (!elsiocb) 5799 return 1; 5800 5801 if (phba->sli_rev == LPFC_SLI_REV4) { 5802 wqe = &elsiocb->wqe; 5803 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 5804 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */ 5805 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5806 get_job_rcvoxid(phba, oldiocb)); 5807 } else { 5808 icmd = &elsiocb->iocb; 5809 oldcmd = &oldiocb->iocb; 5810 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5811 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 5812 } 5813 5814 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 5815 5816 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; 5817 pcmd += sizeof(uint32_t); 5818 *((uint32_t *) (pcmd)) = rejectError; 5819 5820 if (mbox) 5821 elsiocb->context_un.mbox = mbox; 5822 5823 /* Xmit ELS RJT <err> response tag <ulpIoTag> */ 5824 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5825 "0129 Xmit ELS RJT x%x response tag x%x " 5826 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " 5827 "rpi x%x\n", 5828 rejectError, elsiocb->iotag, 5829 get_job_ulpcontext(phba, elsiocb), ndlp->nlp_DID, 5830 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); 5831 lpfc_debugfs_disc_trc(vport, 
LPFC_DISC_TRC_ELS_RSP, 5832 "Issue LS_RJT: did:x%x flg:x%x err:x%x", 5833 ndlp->nlp_DID, ndlp->nlp_flag, rejectError); 5834 5835 phba->fc_stat.elsXmitLSRJT++; 5836 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 5837 elsiocb->ndlp = lpfc_nlp_get(ndlp); 5838 if (!elsiocb->ndlp) { 5839 lpfc_els_free_iocb(phba, elsiocb); 5840 return 1; 5841 } 5842 5843 /* The NPIV instance is rejecting this unsolicited ELS. Make sure the 5844 * node's assigned RPI gets released provided this node is not already 5845 * registered with the transport. 5846 */ 5847 if (phba->sli_rev == LPFC_SLI_REV4 && 5848 vport->port_type == LPFC_NPIV_PORT && 5849 !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) { 5850 spin_lock_irq(&ndlp->lock); 5851 ndlp->nlp_flag |= NLP_RELEASE_RPI; 5852 spin_unlock_irq(&ndlp->lock); 5853 } 5854 5855 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5856 if (rc == IOCB_ERROR) { 5857 lpfc_els_free_iocb(phba, elsiocb); 5858 lpfc_nlp_put(ndlp); 5859 return 1; 5860 } 5861 5862 return 0; 5863 } 5864 5865 /** 5866 * lpfc_issue_els_edc_rsp - Exchange Diagnostic Capabilities with the fabric. 5867 * @vport: pointer to a host virtual N_Port data structure. 5868 * @cmdiocb: pointer to the original lpfc command iocb data structure. 5869 * @ndlp: NPort to where rsp is directed 5870 * 5871 * This routine issues an EDC ACC RSP to the F-Port Controller to communicate 5872 * this N_Port's support of hardware signals in its Congestion 5873 * Capabilities Descriptor. 5874 * 5875 * Return code 5876 * 0 - Successfully issued edc rsp command 5877 * 1 - Failed to issue edc rsp command 5878 **/ 5879 static int 5880 lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5881 struct lpfc_nodelist *ndlp) 5882 { 5883 struct lpfc_hba *phba = vport->phba; 5884 struct fc_els_edc_resp *edc_rsp; 5885 struct fc_tlv_desc *tlv; 5886 struct lpfc_iocbq *elsiocb; 5887 IOCB_t *icmd, *cmd; 5888 union lpfc_wqe128 *wqe; 5889 u32 cgn_desc_size, lft_desc_size; 5890 u16 cmdsize; 5891 uint8_t *pcmd; 5892 int rc; 5893 5894 cmdsize = sizeof(struct fc_els_edc_resp); 5895 cgn_desc_size = sizeof(struct fc_diag_cg_sig_desc); 5896 lft_desc_size = (lpfc_link_is_lds_capable(phba)) ? 
5897 sizeof(struct fc_diag_lnkflt_desc) : 0; 5898 cmdsize += cgn_desc_size + lft_desc_size; 5899 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, cmdiocb->retry, 5900 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5901 if (!elsiocb) 5902 return 1; 5903 5904 if (phba->sli_rev == LPFC_SLI_REV4) { 5905 wqe = &elsiocb->wqe; 5906 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 5907 get_job_ulpcontext(phba, cmdiocb)); /* Xri / rx_id */ 5908 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5909 get_job_rcvoxid(phba, cmdiocb)); 5910 } else { 5911 icmd = &elsiocb->iocb; 5912 cmd = &cmdiocb->iocb; 5913 icmd->ulpContext = cmd->ulpContext; /* Xri / rx_id */ 5914 icmd->unsli3.rcvsli3.ox_id = cmd->unsli3.rcvsli3.ox_id; 5915 } 5916 5917 pcmd = elsiocb->cmd_dmabuf->virt; 5918 memset(pcmd, 0, cmdsize); 5919 5920 edc_rsp = (struct fc_els_edc_resp *)pcmd; 5921 edc_rsp->acc_hdr.la_cmd = ELS_LS_ACC; 5922 edc_rsp->desc_list_len = cpu_to_be32(sizeof(struct fc_els_lsri_desc) + 5923 cgn_desc_size + lft_desc_size); 5924 edc_rsp->lsri.desc_tag = cpu_to_be32(ELS_DTAG_LS_REQ_INFO); 5925 edc_rsp->lsri.desc_len = cpu_to_be32( 5926 FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_els_lsri_desc)); 5927 edc_rsp->lsri.rqst_w0.cmd = ELS_EDC; 5928 tlv = edc_rsp->desc; 5929 lpfc_format_edc_cgn_desc(phba, tlv); 5930 tlv = fc_tlv_next_desc(tlv); 5931 if (lft_desc_size) 5932 lpfc_format_edc_lft_desc(phba, tlv); 5933 5934 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5935 "Issue EDC ACC: did:x%x flg:x%x refcnt %d", 5936 ndlp->nlp_DID, ndlp->nlp_flag, 5937 kref_read(&ndlp->kref)); 5938 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 5939 5940 phba->fc_stat.elsXmitACC++; 5941 elsiocb->ndlp = lpfc_nlp_get(ndlp); 5942 if (!elsiocb->ndlp) { 5943 lpfc_els_free_iocb(phba, elsiocb); 5944 return 1; 5945 } 5946 5947 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5948 if (rc == IOCB_ERROR) { 5949 lpfc_els_free_iocb(phba, elsiocb); 5950 lpfc_nlp_put(ndlp); 5951 return 1; 5952 } 5953 5954 /* Xmit ELS ACC response tag <ulpIoTag> */ 5955 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5956 "0152 Xmit EDC ACC response Status: x%x, IoTag: x%x, " 5957 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " 5958 "RPI: x%x, fc_flag x%x\n", 5959 rc, elsiocb->iotag, elsiocb->sli4_xritag, 5960 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5961 ndlp->nlp_rpi, vport->fc_flag); 5962 5963 return 0; 5964 } 5965 5966 /** 5967 * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd 5968 * @vport: pointer to a virtual N_Port data structure. 5969 * @oldiocb: pointer to the original lpfc command iocb data structure. 5970 * @ndlp: pointer to a node-list data structure. 5971 * 5972 * This routine prepares and issues an Accept (ACC) response to Address 5973 * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB 5974 * and invokes the lpfc_sli_issue_iocb() routine to send out the command. 5975 * 5976 * Note that the ndlp reference count will be incremented by 1 for holding the 5977 * ndlp and the reference to ndlp will be stored into the ndlp field of 5978 * the IOCB for the completion callback function to the ADISC Accept response 5979 * ELS IOCB command. 
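* The ACC payload returns this vport's hard AL_PA, port name, node name
* and N_Port ID to the ADISC originator.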
5980 * 5981 * Return code 5982 * 0 - Successfully issued acc adisc response 5983 * 1 - Failed to issue adisc acc response 5984 **/ 5985 int 5986 lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 5987 struct lpfc_nodelist *ndlp) 5988 { 5989 struct lpfc_hba *phba = vport->phba; 5990 ADISC *ap; 5991 IOCB_t *icmd, *oldcmd; 5992 union lpfc_wqe128 *wqe; 5993 struct lpfc_iocbq *elsiocb; 5994 uint8_t *pcmd; 5995 uint16_t cmdsize; 5996 int rc; 5997 u32 ulp_context; 5998 5999 cmdsize = sizeof(uint32_t) + sizeof(ADISC); 6000 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 6001 ndlp->nlp_DID, ELS_CMD_ACC); 6002 if (!elsiocb) 6003 return 1; 6004 6005 if (phba->sli_rev == LPFC_SLI_REV4) { 6006 wqe = &elsiocb->wqe; 6007 /* XRI / rx_id */ 6008 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 6009 get_job_ulpcontext(phba, oldiocb)); 6010 ulp_context = get_job_ulpcontext(phba, elsiocb); 6011 /* oxid */ 6012 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 6013 get_job_rcvoxid(phba, oldiocb)); 6014 } else { 6015 icmd = &elsiocb->iocb; 6016 oldcmd = &oldiocb->iocb; 6017 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 6018 ulp_context = elsiocb->iocb.ulpContext; 6019 icmd->unsli3.rcvsli3.ox_id = 6020 oldcmd->unsli3.rcvsli3.ox_id; 6021 } 6022 6023 /* Xmit ADISC ACC response tag <ulpIoTag> */ 6024 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6025 "0130 Xmit ADISC ACC response iotag x%x xri: " 6026 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n", 6027 elsiocb->iotag, ulp_context, 6028 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 6029 ndlp->nlp_rpi); 6030 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 6031 6032 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 6033 pcmd += sizeof(uint32_t); 6034 6035 ap = (ADISC *) (pcmd); 6036 ap->hardAL_PA = phba->fc_pref_ALPA; 6037 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 6038 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 6039 ap->DID = be32_to_cpu(vport->fc_myDID); 6040 6041 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6042 "Issue ACC ADISC: did:x%x flg:x%x refcnt %d", 6043 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 6044 6045 phba->fc_stat.elsXmitACC++; 6046 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 6047 elsiocb->ndlp = lpfc_nlp_get(ndlp); 6048 if (!elsiocb->ndlp) { 6049 lpfc_els_free_iocb(phba, elsiocb); 6050 return 1; 6051 } 6052 6053 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6054 if (rc == IOCB_ERROR) { 6055 lpfc_els_free_iocb(phba, elsiocb); 6056 lpfc_nlp_put(ndlp); 6057 return 1; 6058 } 6059 6060 return 0; 6061 } 6062 6063 /** 6064 * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd 6065 * @vport: pointer to a virtual N_Port data structure. 6066 * @oldiocb: pointer to the original lpfc command iocb data structure. 6067 * @ndlp: pointer to a node-list data structure. 6068 * 6069 * This routine prepares and issues an Accept (ACC) response to Process 6070 * Login (PRLI) ELS command. It simply prepares the payload of the IOCB 6071 * and invokes the lpfc_sli_issue_iocb() routine to send out the command. 6072 * 6073 * Note that the ndlp reference count will be incremented by 1 for holding the 6074 * ndlp and the reference to ndlp will be stored into the ndlp field of 6075 * the IOCB for the completion callback function to the PRLI Accept response 6076 * ELS IOCB command. 
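* The FC4 type of the incoming PRLI (FCP or NVMe) determines whether an
* FCP or an NVMe PRLI accept payload is built; an unrecognized type is
* not accepted and the routine returns 1.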
6077 * 6078 * Return code 6079 * 0 - Successfully issued acc prli response 6080 * 1 - Failed to issue acc prli response 6081 **/ 6082 int 6083 lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 6084 struct lpfc_nodelist *ndlp) 6085 { 6086 struct lpfc_hba *phba = vport->phba; 6087 PRLI *npr; 6088 struct lpfc_nvme_prli *npr_nvme; 6089 lpfc_vpd_t *vpd; 6090 IOCB_t *icmd; 6091 IOCB_t *oldcmd; 6092 union lpfc_wqe128 *wqe; 6093 struct lpfc_iocbq *elsiocb; 6094 uint8_t *pcmd; 6095 uint16_t cmdsize; 6096 uint32_t prli_fc4_req, *req_payload; 6097 struct lpfc_dmabuf *req_buf; 6098 int rc; 6099 u32 elsrspcmd, ulp_context; 6100 6101 /* Need the incoming PRLI payload to determine if the ACC is for an 6102 * FC4 or NVME PRLI type. The PRLI type is at word 1. 6103 */ 6104 req_buf = oldiocb->cmd_dmabuf; 6105 req_payload = (((uint32_t *)req_buf->virt) + 1); 6106 6107 /* PRLI type payload is at byte 3 for FCP or NVME. */ 6108 prli_fc4_req = be32_to_cpu(*req_payload); 6109 prli_fc4_req = (prli_fc4_req >> 24) & 0xff; 6110 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6111 "6127 PRLI_ACC: Req Type x%x, Word1 x%08x\n", 6112 prli_fc4_req, *((uint32_t *)req_payload)); 6113 6114 if (prli_fc4_req == PRLI_FCP_TYPE) { 6115 cmdsize = sizeof(uint32_t) + sizeof(PRLI); 6116 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)); 6117 } else if (prli_fc4_req == PRLI_NVME_TYPE) { 6118 cmdsize = sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli); 6119 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_NVMEPRLI & ~ELS_RSP_MASK)); 6120 } else { 6121 return 1; 6122 } 6123 6124 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 6125 ndlp->nlp_DID, elsrspcmd); 6126 if (!elsiocb) 6127 return 1; 6128 6129 if (phba->sli_rev == LPFC_SLI_REV4) { 6130 wqe = &elsiocb->wqe; 6131 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 6132 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */ 6133 ulp_context = get_job_ulpcontext(phba, elsiocb); 6134 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 6135 get_job_rcvoxid(phba, oldiocb)); 6136 } else { 6137 icmd = &elsiocb->iocb; 6138 oldcmd = &oldiocb->iocb; 6139 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 6140 ulp_context = elsiocb->iocb.ulpContext; 6141 icmd->unsli3.rcvsli3.ox_id = 6142 oldcmd->unsli3.rcvsli3.ox_id; 6143 } 6144 6145 /* Xmit PRLI ACC response tag <ulpIoTag> */ 6146 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6147 "0131 Xmit PRLI ACC response tag x%x xri x%x, " 6148 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 6149 elsiocb->iotag, ulp_context, 6150 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 6151 ndlp->nlp_rpi); 6152 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 6153 memset(pcmd, 0, cmdsize); 6154 6155 *((uint32_t *)(pcmd)) = elsrspcmd; 6156 pcmd += sizeof(uint32_t); 6157 6158 /* For PRLI, remainder of payload is PRLI parameter page */ 6159 vpd = &phba->vpd; 6160 6161 if (prli_fc4_req == PRLI_FCP_TYPE) { 6162 /* 6163 * If the remote port is a target and our firmware version 6164 * is 3.20 or later, set the following bits for FC-TAPE 6165 * support. 
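* (Those bits are Confirmed Completion Allowed, Retry and Task Retry
* Identification Request in the FCP PRLI service parameter page.)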
6166 */ 6167 npr = (PRLI *) pcmd; 6168 if ((ndlp->nlp_type & NLP_FCP_TARGET) && 6169 (vpd->rev.feaLevelHigh >= 0x02)) { 6170 npr->ConfmComplAllowed = 1; 6171 npr->Retry = 1; 6172 npr->TaskRetryIdReq = 1; 6173 } 6174 npr->acceptRspCode = PRLI_REQ_EXECUTED; 6175 npr->estabImagePair = 1; 6176 npr->readXferRdyDis = 1; 6177 npr->ConfmComplAllowed = 1; 6178 npr->prliType = PRLI_FCP_TYPE; 6179 npr->initiatorFunc = 1; 6180 } else if (prli_fc4_req == PRLI_NVME_TYPE) { 6181 /* Respond with an NVME PRLI Type */ 6182 npr_nvme = (struct lpfc_nvme_prli *) pcmd; 6183 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); 6184 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ 6185 bf_set(prli_acc_rsp_code, npr_nvme, PRLI_REQ_EXECUTED); 6186 if (phba->nvmet_support) { 6187 bf_set(prli_tgt, npr_nvme, 1); 6188 bf_set(prli_disc, npr_nvme, 1); 6189 if (phba->cfg_nvme_enable_fb) { 6190 bf_set(prli_fba, npr_nvme, 1); 6191 6192 /* TBD. Target mode needs to post buffers 6193 * that support the configured first burst 6194 * byte size. 6195 */ 6196 bf_set(prli_fb_sz, npr_nvme, 6197 phba->cfg_nvmet_fb_size); 6198 } 6199 } else { 6200 bf_set(prli_init, npr_nvme, 1); 6201 } 6202 6203 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 6204 "6015 NVME issue PRLI ACC word1 x%08x " 6205 "word4 x%08x word5 x%08x flag x%x, " 6206 "fcp_info x%x nlp_type x%x\n", 6207 npr_nvme->word1, npr_nvme->word4, 6208 npr_nvme->word5, ndlp->nlp_flag, 6209 ndlp->nlp_fcp_info, ndlp->nlp_type); 6210 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); 6211 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); 6212 npr_nvme->word5 = cpu_to_be32(npr_nvme->word5); 6213 } else 6214 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6215 "6128 Unknown FC_TYPE x%x x%x ndlp x%06x\n", 6216 prli_fc4_req, ndlp->nlp_fc4_type, 6217 ndlp->nlp_DID); 6218 6219 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6220 "Issue ACC PRLI: did:x%x flg:x%x", 6221 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 6222 6223 phba->fc_stat.elsXmitACC++; 6224 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 6225 elsiocb->ndlp = lpfc_nlp_get(ndlp); 6226 if (!elsiocb->ndlp) { 6227 lpfc_els_free_iocb(phba, elsiocb); 6228 return 1; 6229 } 6230 6231 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6232 if (rc == IOCB_ERROR) { 6233 lpfc_els_free_iocb(phba, elsiocb); 6234 lpfc_nlp_put(ndlp); 6235 return 1; 6236 } 6237 6238 return 0; 6239 } 6240 6241 /** 6242 * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command 6243 * @vport: pointer to a virtual N_Port data structure. 6244 * @format: rnid command format. 6245 * @oldiocb: pointer to the original lpfc command iocb data structure. 6246 * @ndlp: pointer to a node-list data structure. 6247 * 6248 * This routine issues a Request Node Identification Data (RNID) Accept 6249 * (ACC) response. It constructs the RNID ACC response command according to 6250 * the proper @format and then calls the lpfc_sli_issue_iocb() routine to 6251 * issue the response. 6252 * 6253 * Note that the ndlp reference count will be incremented by 1 for holding the 6254 * ndlp and the reference to ndlp will be stored into the ndlp field of 6255 * the IOCB for the completion callback function. 
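* Format 0 returns only the common identification data (port and node
* names); the RNID_TOPOLOGY_DISC format additionally returns the topology
* discovery data for this port.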
6256 * 6257 * Return code 6258 * 0 - Successfully issued acc rnid response 6259 * 1 - Failed to issue acc rnid response 6260 **/ 6261 static int 6262 lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, 6263 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 6264 { 6265 struct lpfc_hba *phba = vport->phba; 6266 RNID *rn; 6267 IOCB_t *icmd, *oldcmd; 6268 union lpfc_wqe128 *wqe; 6269 struct lpfc_iocbq *elsiocb; 6270 uint8_t *pcmd; 6271 uint16_t cmdsize; 6272 int rc; 6273 u32 ulp_context; 6274 6275 cmdsize = sizeof(uint32_t) + sizeof(uint32_t) 6276 + (2 * sizeof(struct lpfc_name)); 6277 if (format) 6278 cmdsize += sizeof(RNID_TOP_DISC); 6279 6280 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 6281 ndlp->nlp_DID, ELS_CMD_ACC); 6282 if (!elsiocb) 6283 return 1; 6284 6285 if (phba->sli_rev == LPFC_SLI_REV4) { 6286 wqe = &elsiocb->wqe; 6287 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 6288 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */ 6289 ulp_context = get_job_ulpcontext(phba, elsiocb); 6290 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 6291 get_job_rcvoxid(phba, oldiocb)); 6292 } else { 6293 icmd = &elsiocb->iocb; 6294 oldcmd = &oldiocb->iocb; 6295 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 6296 ulp_context = elsiocb->iocb.ulpContext; 6297 icmd->unsli3.rcvsli3.ox_id = 6298 oldcmd->unsli3.rcvsli3.ox_id; 6299 } 6300 6301 /* Xmit RNID ACC response tag <ulpIoTag> */ 6302 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6303 "0132 Xmit RNID ACC response tag x%x xri x%x\n", 6304 elsiocb->iotag, ulp_context); 6305 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 6306 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 6307 pcmd += sizeof(uint32_t); 6308 6309 memset(pcmd, 0, sizeof(RNID)); 6310 rn = (RNID *) (pcmd); 6311 rn->Format = format; 6312 rn->CommonLen = (2 * sizeof(struct lpfc_name)); 6313 memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 6314 memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 6315 switch (format) { 6316 case 0: 6317 rn->SpecificLen = 0; 6318 break; 6319 case RNID_TOPOLOGY_DISC: 6320 rn->SpecificLen = sizeof(RNID_TOP_DISC); 6321 memcpy(&rn->un.topologyDisc.portName, 6322 &vport->fc_portname, sizeof(struct lpfc_name)); 6323 rn->un.topologyDisc.unitType = RNID_HBA; 6324 rn->un.topologyDisc.physPort = 0; 6325 rn->un.topologyDisc.attachedNodes = 0; 6326 break; 6327 default: 6328 rn->CommonLen = 0; 6329 rn->SpecificLen = 0; 6330 break; 6331 } 6332 6333 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6334 "Issue ACC RNID: did:x%x flg:x%x refcnt %d", 6335 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 6336 6337 phba->fc_stat.elsXmitACC++; 6338 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 6339 elsiocb->ndlp = lpfc_nlp_get(ndlp); 6340 if (!elsiocb->ndlp) { 6341 lpfc_els_free_iocb(phba, elsiocb); 6342 return 1; 6343 } 6344 6345 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6346 if (rc == IOCB_ERROR) { 6347 lpfc_els_free_iocb(phba, elsiocb); 6348 lpfc_nlp_put(ndlp); 6349 return 1; 6350 } 6351 6352 return 0; 6353 } 6354 6355 /** 6356 * lpfc_els_clear_rrq - Clear the rq that this rrq describes. 6357 * @vport: pointer to a virtual N_Port data structure. 6358 * @iocb: pointer to the lpfc command iocb data structure. 6359 * @ndlp: pointer to a node-list data structure. 
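*
* This routine parses the RRQ payload of the received command, selects the
* OX_ID or RX_ID depending on whether this port originated the exchange,
* and clears the matching active RRQ for the node, if one is found.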
6360 * 6361 * Return 6362 **/ 6363 static void 6364 lpfc_els_clear_rrq(struct lpfc_vport *vport, 6365 struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp) 6366 { 6367 struct lpfc_hba *phba = vport->phba; 6368 uint8_t *pcmd; 6369 struct RRQ *rrq; 6370 uint16_t rxid; 6371 uint16_t xri; 6372 struct lpfc_node_rrq *prrq; 6373 6374 6375 pcmd = (uint8_t *)iocb->cmd_dmabuf->virt; 6376 pcmd += sizeof(uint32_t); 6377 rrq = (struct RRQ *)pcmd; 6378 rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg); 6379 rxid = bf_get(rrq_rxid, rrq); 6380 6381 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6382 "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x" 6383 " x%x x%x\n", 6384 be32_to_cpu(bf_get(rrq_did, rrq)), 6385 bf_get(rrq_oxid, rrq), 6386 rxid, 6387 get_wqe_reqtag(iocb), 6388 get_job_ulpcontext(phba, iocb)); 6389 6390 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6391 "Clear RRQ: did:x%x flg:x%x exchg:x%.08x", 6392 ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg); 6393 if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq))) 6394 xri = bf_get(rrq_oxid, rrq); 6395 else 6396 xri = rxid; 6397 prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID); 6398 if (prrq) 6399 lpfc_clr_rrq_active(phba, xri, prrq); 6400 return; 6401 } 6402 6403 /** 6404 * lpfc_els_rsp_echo_acc - Issue echo acc response 6405 * @vport: pointer to a virtual N_Port data structure. 6406 * @data: pointer to echo data to return in the accept. 6407 * @oldiocb: pointer to the original lpfc command iocb data structure. 6408 * @ndlp: pointer to a node-list data structure. 6409 * 6410 * Return code 6411 * 0 - Successfully issued acc echo response 6412 * 1 - Failed to issue acc echo response 6413 **/ 6414 static int 6415 lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data, 6416 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 6417 { 6418 struct lpfc_hba *phba = vport->phba; 6419 IOCB_t *icmd, *oldcmd; 6420 union lpfc_wqe128 *wqe; 6421 struct lpfc_iocbq *elsiocb; 6422 uint8_t *pcmd; 6423 uint16_t cmdsize; 6424 int rc; 6425 u32 ulp_context; 6426 6427 if (phba->sli_rev == LPFC_SLI_REV4) 6428 cmdsize = oldiocb->wcqe_cmpl.total_data_placed; 6429 else 6430 cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len; 6431 6432 /* The accumulated length can exceed the BPL_SIZE. 
For 6433 * now, use this as the limit 6434 */ 6435 if (cmdsize > LPFC_BPL_SIZE) 6436 cmdsize = LPFC_BPL_SIZE; 6437 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 6438 ndlp->nlp_DID, ELS_CMD_ACC); 6439 if (!elsiocb) 6440 return 1; 6441 6442 if (phba->sli_rev == LPFC_SLI_REV4) { 6443 wqe = &elsiocb->wqe; 6444 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 6445 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */ 6446 ulp_context = get_job_ulpcontext(phba, elsiocb); 6447 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 6448 get_job_rcvoxid(phba, oldiocb)); 6449 } else { 6450 icmd = &elsiocb->iocb; 6451 oldcmd = &oldiocb->iocb; 6452 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 6453 ulp_context = elsiocb->iocb.ulpContext; 6454 icmd->unsli3.rcvsli3.ox_id = 6455 oldcmd->unsli3.rcvsli3.ox_id; 6456 } 6457 6458 /* Xmit ECHO ACC response tag <ulpIoTag> */ 6459 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6460 "2876 Xmit ECHO ACC response tag x%x xri x%x\n", 6461 elsiocb->iotag, ulp_context); 6462 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 6463 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 6464 pcmd += sizeof(uint32_t); 6465 memcpy(pcmd, data, cmdsize - sizeof(uint32_t)); 6466 6467 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6468 "Issue ACC ECHO: did:x%x flg:x%x refcnt %d", 6469 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 6470 6471 phba->fc_stat.elsXmitACC++; 6472 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 6473 elsiocb->ndlp = lpfc_nlp_get(ndlp); 6474 if (!elsiocb->ndlp) { 6475 lpfc_els_free_iocb(phba, elsiocb); 6476 return 1; 6477 } 6478 6479 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6480 if (rc == IOCB_ERROR) { 6481 lpfc_els_free_iocb(phba, elsiocb); 6482 lpfc_nlp_put(ndlp); 6483 return 1; 6484 } 6485 6486 return 0; 6487 } 6488 6489 /** 6490 * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport 6491 * @vport: pointer to a host virtual N_Port data structure. 6492 * 6493 * This routine issues Address Discover (ADISC) ELS commands to those 6494 * N_Ports which are in node port recovery state and ADISC has not been issued 6495 * for the @vport. Each time an ELS ADISC IOCB is issued by invoking the 6496 * lpfc_issue_els_adisc() routine, the per @vport number of discover count 6497 * (num_disc_nodes) shall be incremented. If the num_disc_nodes reaches a 6498 * pre-configured threshold (cfg_discovery_threads), the @vport fc_flag will 6499 * be marked with FC_NLP_MORE bit and the process of issuing remaining ADISC 6500 * IOCBs quit for later pick up. On the other hand, after walking through 6501 * all the ndlps with the @vport and there is none ADISC IOCB issued, the 6502 * @vport fc_flag shall be cleared with FC_NLP_MORE bit indicating there is 6503 * no more ADISC need to be sent. 6504 * 6505 * Return code 6506 * The number of N_Ports with adisc issued. 
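* Note that an NPR node carrying NLP_NPR_ADISC but not marked
* NLP_NPR_2B_DISC is unregistered from the backend and has its RPI
* unregistered instead of being sent an ADISC.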
6507 **/ 6508 int 6509 lpfc_els_disc_adisc(struct lpfc_vport *vport) 6510 { 6511 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6512 struct lpfc_nodelist *ndlp, *next_ndlp; 6513 int sentadisc = 0; 6514 6515 /* go thru NPR nodes and issue any remaining ELS ADISCs */ 6516 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 6517 6518 if (ndlp->nlp_state != NLP_STE_NPR_NODE || 6519 !(ndlp->nlp_flag & NLP_NPR_ADISC)) 6520 continue; 6521 6522 spin_lock_irq(&ndlp->lock); 6523 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 6524 spin_unlock_irq(&ndlp->lock); 6525 6526 if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) { 6527 /* This node was marked for ADISC but was not picked 6528 * for discovery. This is possible if the node was 6529 * missing in gidft response. 6530 * 6531 * At time of marking node for ADISC, we skipped unreg 6532 * from backend 6533 */ 6534 lpfc_nlp_unreg_node(vport, ndlp); 6535 lpfc_unreg_rpi(vport, ndlp); 6536 continue; 6537 } 6538 6539 ndlp->nlp_prev_state = ndlp->nlp_state; 6540 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 6541 lpfc_issue_els_adisc(vport, ndlp, 0); 6542 sentadisc++; 6543 vport->num_disc_nodes++; 6544 if (vport->num_disc_nodes >= 6545 vport->cfg_discovery_threads) { 6546 spin_lock_irq(shost->host_lock); 6547 vport->fc_flag |= FC_NLP_MORE; 6548 spin_unlock_irq(shost->host_lock); 6549 break; 6550 } 6551 6552 } 6553 if (sentadisc == 0) { 6554 spin_lock_irq(shost->host_lock); 6555 vport->fc_flag &= ~FC_NLP_MORE; 6556 spin_unlock_irq(shost->host_lock); 6557 } 6558 return sentadisc; 6559 } 6560 6561 /** 6562 * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc 6563 * @vport: pointer to a host virtual N_Port data structure. 6564 * 6565 * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports 6566 * which are in node port recovery state, with a @vport. Each time an ELS 6567 * ADISC PLOGI IOCB is issued by invoking the lpfc_issue_els_plogi() routine, 6568 * the per @vport number of discover count (num_disc_nodes) shall be 6569 * incremented. If the num_disc_nodes reaches a pre-configured threshold 6570 * (cfg_discovery_threads), the @vport fc_flag will be marked with FC_NLP_MORE 6571 * bit set and quit the process of issuing remaining ADISC PLOGIN IOCBs for 6572 * later pick up. On the other hand, after walking through all the ndlps with 6573 * the @vport and there is none ADISC PLOGI IOCB issued, the @vport fc_flag 6574 * shall be cleared with the FC_NLP_MORE bit indicating there is no more ADISC 6575 * PLOGI need to be sent. 6576 * 6577 * Return code 6578 * The number of N_Ports with plogi issued. 
6579 **/ 6580 int 6581 lpfc_els_disc_plogi(struct lpfc_vport *vport) 6582 { 6583 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6584 struct lpfc_nodelist *ndlp, *next_ndlp; 6585 int sentplogi = 0; 6586 6587 /* go thru NPR nodes and issue any remaining ELS PLOGIs */ 6588 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 6589 if (ndlp->nlp_state == NLP_STE_NPR_NODE && 6590 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && 6591 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 && 6592 (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) { 6593 ndlp->nlp_prev_state = ndlp->nlp_state; 6594 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 6595 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 6596 sentplogi++; 6597 vport->num_disc_nodes++; 6598 if (vport->num_disc_nodes >= 6599 vport->cfg_discovery_threads) { 6600 spin_lock_irq(shost->host_lock); 6601 vport->fc_flag |= FC_NLP_MORE; 6602 spin_unlock_irq(shost->host_lock); 6603 break; 6604 } 6605 } 6606 } 6607 6608 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6609 "6452 Discover PLOGI %d flag x%x\n", 6610 sentplogi, vport->fc_flag); 6611 6612 if (sentplogi) { 6613 lpfc_set_disctmo(vport); 6614 } 6615 else { 6616 spin_lock_irq(shost->host_lock); 6617 vport->fc_flag &= ~FC_NLP_MORE; 6618 spin_unlock_irq(shost->host_lock); 6619 } 6620 return sentplogi; 6621 } 6622 6623 static uint32_t 6624 lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc, 6625 uint32_t word0) 6626 { 6627 6628 desc->tag = cpu_to_be32(RDP_LINK_SERVICE_DESC_TAG); 6629 desc->payload.els_req = word0; 6630 desc->length = cpu_to_be32(sizeof(desc->payload)); 6631 6632 return sizeof(struct fc_rdp_link_service_desc); 6633 } 6634 6635 static uint32_t 6636 lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc, 6637 uint8_t *page_a0, uint8_t *page_a2) 6638 { 6639 uint16_t wavelength; 6640 uint16_t temperature; 6641 uint16_t rx_power; 6642 uint16_t tx_bias; 6643 uint16_t tx_power; 6644 uint16_t vcc; 6645 uint16_t flag = 0; 6646 struct sff_trasnceiver_codes_byte4 *trasn_code_byte4; 6647 struct sff_trasnceiver_codes_byte5 *trasn_code_byte5; 6648 6649 desc->tag = cpu_to_be32(RDP_SFP_DESC_TAG); 6650 6651 trasn_code_byte4 = (struct sff_trasnceiver_codes_byte4 *) 6652 &page_a0[SSF_TRANSCEIVER_CODE_B4]; 6653 trasn_code_byte5 = (struct sff_trasnceiver_codes_byte5 *) 6654 &page_a0[SSF_TRANSCEIVER_CODE_B5]; 6655 6656 if ((trasn_code_byte4->fc_sw_laser) || 6657 (trasn_code_byte5->fc_sw_laser_sl) || 6658 (trasn_code_byte5->fc_sw_laser_sn)) { /* check if its short WL */ 6659 flag |= (SFP_FLAG_PT_SWLASER << SFP_FLAG_PT_SHIFT); 6660 } else if (trasn_code_byte4->fc_lw_laser) { 6661 wavelength = (page_a0[SSF_WAVELENGTH_B1] << 8) | 6662 page_a0[SSF_WAVELENGTH_B0]; 6663 if (wavelength == SFP_WAVELENGTH_LC1310) 6664 flag |= SFP_FLAG_PT_LWLASER_LC1310 << SFP_FLAG_PT_SHIFT; 6665 if (wavelength == SFP_WAVELENGTH_LL1550) 6666 flag |= SFP_FLAG_PT_LWLASER_LL1550 << SFP_FLAG_PT_SHIFT; 6667 } 6668 /* check if its SFP+ */ 6669 flag |= ((page_a0[SSF_IDENTIFIER] == SFF_PG0_IDENT_SFP) ? 6670 SFP_FLAG_CT_SFP_PLUS : SFP_FLAG_CT_UNKNOWN) 6671 << SFP_FLAG_CT_SHIFT; 6672 6673 /* check if its OPTICAL */ 6674 flag |= ((page_a0[SSF_CONNECTOR] == SFF_PG0_CONNECTOR_LC) ? 
6675 SFP_FLAG_IS_OPTICAL_PORT : 0) 6676 << SFP_FLAG_IS_OPTICAL_SHIFT; 6677 6678 temperature = (page_a2[SFF_TEMPERATURE_B1] << 8 | 6679 page_a2[SFF_TEMPERATURE_B0]); 6680 vcc = (page_a2[SFF_VCC_B1] << 8 | 6681 page_a2[SFF_VCC_B0]); 6682 tx_power = (page_a2[SFF_TXPOWER_B1] << 8 | 6683 page_a2[SFF_TXPOWER_B0]); 6684 tx_bias = (page_a2[SFF_TX_BIAS_CURRENT_B1] << 8 | 6685 page_a2[SFF_TX_BIAS_CURRENT_B0]); 6686 rx_power = (page_a2[SFF_RXPOWER_B1] << 8 | 6687 page_a2[SFF_RXPOWER_B0]); 6688 desc->sfp_info.temperature = cpu_to_be16(temperature); 6689 desc->sfp_info.rx_power = cpu_to_be16(rx_power); 6690 desc->sfp_info.tx_bias = cpu_to_be16(tx_bias); 6691 desc->sfp_info.tx_power = cpu_to_be16(tx_power); 6692 desc->sfp_info.vcc = cpu_to_be16(vcc); 6693 6694 desc->sfp_info.flags = cpu_to_be16(flag); 6695 desc->length = cpu_to_be32(sizeof(desc->sfp_info)); 6696 6697 return sizeof(struct fc_rdp_sfp_desc); 6698 } 6699 6700 static uint32_t 6701 lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc, 6702 READ_LNK_VAR *stat) 6703 { 6704 uint32_t type; 6705 6706 desc->tag = cpu_to_be32(RDP_LINK_ERROR_STATUS_DESC_TAG); 6707 6708 type = VN_PT_PHY_PF_PORT << VN_PT_PHY_SHIFT; 6709 6710 desc->info.port_type = cpu_to_be32(type); 6711 6712 desc->info.link_status.link_failure_cnt = 6713 cpu_to_be32(stat->linkFailureCnt); 6714 desc->info.link_status.loss_of_synch_cnt = 6715 cpu_to_be32(stat->lossSyncCnt); 6716 desc->info.link_status.loss_of_signal_cnt = 6717 cpu_to_be32(stat->lossSignalCnt); 6718 desc->info.link_status.primitive_seq_proto_err = 6719 cpu_to_be32(stat->primSeqErrCnt); 6720 desc->info.link_status.invalid_trans_word = 6721 cpu_to_be32(stat->invalidXmitWord); 6722 desc->info.link_status.invalid_crc_cnt = cpu_to_be32(stat->crcCnt); 6723 6724 desc->length = cpu_to_be32(sizeof(desc->info)); 6725 6726 return sizeof(struct fc_rdp_link_error_status_desc); 6727 } 6728 6729 static uint32_t 6730 lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat, 6731 struct lpfc_vport *vport) 6732 { 6733 uint32_t bbCredit; 6734 6735 desc->tag = cpu_to_be32(RDP_BBC_DESC_TAG); 6736 6737 bbCredit = vport->fc_sparam.cmn.bbCreditLsb | 6738 (vport->fc_sparam.cmn.bbCreditMsb << 8); 6739 desc->bbc_info.port_bbc = cpu_to_be32(bbCredit); 6740 if (vport->phba->fc_topology != LPFC_TOPOLOGY_LOOP) { 6741 bbCredit = vport->phba->fc_fabparam.cmn.bbCreditLsb | 6742 (vport->phba->fc_fabparam.cmn.bbCreditMsb << 8); 6743 desc->bbc_info.attached_port_bbc = cpu_to_be32(bbCredit); 6744 } else { 6745 desc->bbc_info.attached_port_bbc = 0; 6746 } 6747 6748 desc->bbc_info.rtt = 0; 6749 desc->length = cpu_to_be32(sizeof(desc->bbc_info)); 6750 6751 return sizeof(struct fc_rdp_bbc_desc); 6752 } 6753 6754 static uint32_t 6755 lpfc_rdp_res_oed_temp_desc(struct lpfc_hba *phba, 6756 struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2) 6757 { 6758 uint32_t flags = 0; 6759 6760 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6761 6762 desc->oed_info.hi_alarm = page_a2[SSF_TEMP_HIGH_ALARM]; 6763 desc->oed_info.lo_alarm = page_a2[SSF_TEMP_LOW_ALARM]; 6764 desc->oed_info.hi_warning = page_a2[SSF_TEMP_HIGH_WARNING]; 6765 desc->oed_info.lo_warning = page_a2[SSF_TEMP_LOW_WARNING]; 6766 6767 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TEMPERATURE) 6768 flags |= RDP_OET_HIGH_ALARM; 6769 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TEMPERATURE) 6770 flags |= RDP_OET_LOW_ALARM; 6771 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TEMPERATURE) 6772 flags |= RDP_OET_HIGH_WARNING; 6773 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TEMPERATURE) 6774 
flags |= RDP_OET_LOW_WARNING; 6775 6776 flags |= ((0xf & RDP_OED_TEMPERATURE) << RDP_OED_TYPE_SHIFT); 6777 desc->oed_info.function_flags = cpu_to_be32(flags); 6778 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6779 return sizeof(struct fc_rdp_oed_sfp_desc); 6780 } 6781 6782 static uint32_t 6783 lpfc_rdp_res_oed_voltage_desc(struct lpfc_hba *phba, 6784 struct fc_rdp_oed_sfp_desc *desc, 6785 uint8_t *page_a2) 6786 { 6787 uint32_t flags = 0; 6788 6789 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6790 6791 desc->oed_info.hi_alarm = page_a2[SSF_VOLTAGE_HIGH_ALARM]; 6792 desc->oed_info.lo_alarm = page_a2[SSF_VOLTAGE_LOW_ALARM]; 6793 desc->oed_info.hi_warning = page_a2[SSF_VOLTAGE_HIGH_WARNING]; 6794 desc->oed_info.lo_warning = page_a2[SSF_VOLTAGE_LOW_WARNING]; 6795 6796 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_VOLTAGE) 6797 flags |= RDP_OET_HIGH_ALARM; 6798 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_VOLTAGE) 6799 flags |= RDP_OET_LOW_ALARM; 6800 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_VOLTAGE) 6801 flags |= RDP_OET_HIGH_WARNING; 6802 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_VOLTAGE) 6803 flags |= RDP_OET_LOW_WARNING; 6804 6805 flags |= ((0xf & RDP_OED_VOLTAGE) << RDP_OED_TYPE_SHIFT); 6806 desc->oed_info.function_flags = cpu_to_be32(flags); 6807 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6808 return sizeof(struct fc_rdp_oed_sfp_desc); 6809 } 6810 6811 static uint32_t 6812 lpfc_rdp_res_oed_txbias_desc(struct lpfc_hba *phba, 6813 struct fc_rdp_oed_sfp_desc *desc, 6814 uint8_t *page_a2) 6815 { 6816 uint32_t flags = 0; 6817 6818 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6819 6820 desc->oed_info.hi_alarm = page_a2[SSF_BIAS_HIGH_ALARM]; 6821 desc->oed_info.lo_alarm = page_a2[SSF_BIAS_LOW_ALARM]; 6822 desc->oed_info.hi_warning = page_a2[SSF_BIAS_HIGH_WARNING]; 6823 desc->oed_info.lo_warning = page_a2[SSF_BIAS_LOW_WARNING]; 6824 6825 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXBIAS) 6826 flags |= RDP_OET_HIGH_ALARM; 6827 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXBIAS) 6828 flags |= RDP_OET_LOW_ALARM; 6829 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXBIAS) 6830 flags |= RDP_OET_HIGH_WARNING; 6831 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXBIAS) 6832 flags |= RDP_OET_LOW_WARNING; 6833 6834 flags |= ((0xf & RDP_OED_TXBIAS) << RDP_OED_TYPE_SHIFT); 6835 desc->oed_info.function_flags = cpu_to_be32(flags); 6836 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6837 return sizeof(struct fc_rdp_oed_sfp_desc); 6838 } 6839 6840 static uint32_t 6841 lpfc_rdp_res_oed_txpower_desc(struct lpfc_hba *phba, 6842 struct fc_rdp_oed_sfp_desc *desc, 6843 uint8_t *page_a2) 6844 { 6845 uint32_t flags = 0; 6846 6847 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6848 6849 desc->oed_info.hi_alarm = page_a2[SSF_TXPOWER_HIGH_ALARM]; 6850 desc->oed_info.lo_alarm = page_a2[SSF_TXPOWER_LOW_ALARM]; 6851 desc->oed_info.hi_warning = page_a2[SSF_TXPOWER_HIGH_WARNING]; 6852 desc->oed_info.lo_warning = page_a2[SSF_TXPOWER_LOW_WARNING]; 6853 6854 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXPOWER) 6855 flags |= RDP_OET_HIGH_ALARM; 6856 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXPOWER) 6857 flags |= RDP_OET_LOW_ALARM; 6858 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXPOWER) 6859 flags |= RDP_OET_HIGH_WARNING; 6860 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXPOWER) 6861 flags |= RDP_OET_LOW_WARNING; 6862 6863 flags |= ((0xf & RDP_OED_TXPOWER) << RDP_OED_TYPE_SHIFT); 6864 desc->oed_info.function_flags = cpu_to_be32(flags); 6865 desc->length = 
cpu_to_be32(sizeof(desc->oed_info)); 6866 return sizeof(struct fc_rdp_oed_sfp_desc); 6867 } 6868 6869 6870 static uint32_t 6871 lpfc_rdp_res_oed_rxpower_desc(struct lpfc_hba *phba, 6872 struct fc_rdp_oed_sfp_desc *desc, 6873 uint8_t *page_a2) 6874 { 6875 uint32_t flags = 0; 6876 6877 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6878 6879 desc->oed_info.hi_alarm = page_a2[SSF_RXPOWER_HIGH_ALARM]; 6880 desc->oed_info.lo_alarm = page_a2[SSF_RXPOWER_LOW_ALARM]; 6881 desc->oed_info.hi_warning = page_a2[SSF_RXPOWER_HIGH_WARNING]; 6882 desc->oed_info.lo_warning = page_a2[SSF_RXPOWER_LOW_WARNING]; 6883 6884 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_RXPOWER) 6885 flags |= RDP_OET_HIGH_ALARM; 6886 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_RXPOWER) 6887 flags |= RDP_OET_LOW_ALARM; 6888 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_RXPOWER) 6889 flags |= RDP_OET_HIGH_WARNING; 6890 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_RXPOWER) 6891 flags |= RDP_OET_LOW_WARNING; 6892 6893 flags |= ((0xf & RDP_OED_RXPOWER) << RDP_OED_TYPE_SHIFT); 6894 desc->oed_info.function_flags = cpu_to_be32(flags); 6895 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6896 return sizeof(struct fc_rdp_oed_sfp_desc); 6897 } 6898 6899 static uint32_t 6900 lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc, 6901 uint8_t *page_a0, struct lpfc_vport *vport) 6902 { 6903 desc->tag = cpu_to_be32(RDP_OPD_DESC_TAG); 6904 memcpy(desc->opd_info.vendor_name, &page_a0[SSF_VENDOR_NAME], 16); 6905 memcpy(desc->opd_info.model_number, &page_a0[SSF_VENDOR_PN], 16); 6906 memcpy(desc->opd_info.serial_number, &page_a0[SSF_VENDOR_SN], 16); 6907 memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 4); 6908 memcpy(desc->opd_info.date, &page_a0[SSF_DATE_CODE], 8); 6909 desc->length = cpu_to_be32(sizeof(desc->opd_info)); 6910 return sizeof(struct fc_rdp_opd_sfp_desc); 6911 } 6912 6913 static uint32_t 6914 lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat) 6915 { 6916 if (bf_get(lpfc_read_link_stat_gec2, stat) == 0) 6917 return 0; 6918 desc->tag = cpu_to_be32(RDP_FEC_DESC_TAG); 6919 6920 desc->info.CorrectedBlocks = 6921 cpu_to_be32(stat->fecCorrBlkCount); 6922 desc->info.UncorrectableBlocks = 6923 cpu_to_be32(stat->fecUncorrBlkCount); 6924 6925 desc->length = cpu_to_be32(sizeof(desc->info)); 6926 6927 return sizeof(struct fc_fec_rdp_desc); 6928 } 6929 6930 static uint32_t 6931 lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba) 6932 { 6933 uint16_t rdp_cap = 0; 6934 uint16_t rdp_speed; 6935 6936 desc->tag = cpu_to_be32(RDP_PORT_SPEED_DESC_TAG); 6937 6938 switch (phba->fc_linkspeed) { 6939 case LPFC_LINK_SPEED_1GHZ: 6940 rdp_speed = RDP_PS_1GB; 6941 break; 6942 case LPFC_LINK_SPEED_2GHZ: 6943 rdp_speed = RDP_PS_2GB; 6944 break; 6945 case LPFC_LINK_SPEED_4GHZ: 6946 rdp_speed = RDP_PS_4GB; 6947 break; 6948 case LPFC_LINK_SPEED_8GHZ: 6949 rdp_speed = RDP_PS_8GB; 6950 break; 6951 case LPFC_LINK_SPEED_10GHZ: 6952 rdp_speed = RDP_PS_10GB; 6953 break; 6954 case LPFC_LINK_SPEED_16GHZ: 6955 rdp_speed = RDP_PS_16GB; 6956 break; 6957 case LPFC_LINK_SPEED_32GHZ: 6958 rdp_speed = RDP_PS_32GB; 6959 break; 6960 case LPFC_LINK_SPEED_64GHZ: 6961 rdp_speed = RDP_PS_64GB; 6962 break; 6963 case LPFC_LINK_SPEED_128GHZ: 6964 rdp_speed = RDP_PS_128GB; 6965 break; 6966 case LPFC_LINK_SPEED_256GHZ: 6967 rdp_speed = RDP_PS_256GB; 6968 break; 6969 default: 6970 rdp_speed = RDP_PS_UNKNOWN; 6971 break; 6972 } 6973 6974 desc->info.port_speed.speed = cpu_to_be16(rdp_speed); 6975 6976 if (phba->lmt & LMT_256Gb) 6977 
rdp_cap |= RDP_PS_256GB; 6978 if (phba->lmt & LMT_128Gb) 6979 rdp_cap |= RDP_PS_128GB; 6980 if (phba->lmt & LMT_64Gb) 6981 rdp_cap |= RDP_PS_64GB; 6982 if (phba->lmt & LMT_32Gb) 6983 rdp_cap |= RDP_PS_32GB; 6984 if (phba->lmt & LMT_16Gb) 6985 rdp_cap |= RDP_PS_16GB; 6986 if (phba->lmt & LMT_10Gb) 6987 rdp_cap |= RDP_PS_10GB; 6988 if (phba->lmt & LMT_8Gb) 6989 rdp_cap |= RDP_PS_8GB; 6990 if (phba->lmt & LMT_4Gb) 6991 rdp_cap |= RDP_PS_4GB; 6992 if (phba->lmt & LMT_2Gb) 6993 rdp_cap |= RDP_PS_2GB; 6994 if (phba->lmt & LMT_1Gb) 6995 rdp_cap |= RDP_PS_1GB; 6996 6997 if (rdp_cap == 0) 6998 rdp_cap = RDP_CAP_UNKNOWN; 6999 if (phba->cfg_link_speed != LPFC_USER_LINK_SPEED_AUTO) 7000 rdp_cap |= RDP_CAP_USER_CONFIGURED; 7001 7002 desc->info.port_speed.capabilities = cpu_to_be16(rdp_cap); 7003 desc->length = cpu_to_be32(sizeof(desc->info)); 7004 return sizeof(struct fc_rdp_port_speed_desc); 7005 } 7006 7007 static uint32_t 7008 lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc, 7009 struct lpfc_vport *vport) 7010 { 7011 7012 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG); 7013 7014 memcpy(desc->port_names.wwnn, &vport->fc_nodename, 7015 sizeof(desc->port_names.wwnn)); 7016 7017 memcpy(desc->port_names.wwpn, &vport->fc_portname, 7018 sizeof(desc->port_names.wwpn)); 7019 7020 desc->length = cpu_to_be32(sizeof(desc->port_names)); 7021 return sizeof(struct fc_rdp_port_name_desc); 7022 } 7023 7024 static uint32_t 7025 lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc, 7026 struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 7027 { 7028 7029 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG); 7030 if (vport->fc_flag & FC_FABRIC) { 7031 memcpy(desc->port_names.wwnn, &vport->fabric_nodename, 7032 sizeof(desc->port_names.wwnn)); 7033 7034 memcpy(desc->port_names.wwpn, &vport->fabric_portname, 7035 sizeof(desc->port_names.wwpn)); 7036 } else { /* Point to Point */ 7037 memcpy(desc->port_names.wwnn, &ndlp->nlp_nodename, 7038 sizeof(desc->port_names.wwnn)); 7039 7040 memcpy(desc->port_names.wwpn, &ndlp->nlp_portname, 7041 sizeof(desc->port_names.wwpn)); 7042 } 7043 7044 desc->length = cpu_to_be32(sizeof(desc->port_names)); 7045 return sizeof(struct fc_rdp_port_name_desc); 7046 } 7047 7048 static void 7049 lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context, 7050 int status) 7051 { 7052 struct lpfc_nodelist *ndlp = rdp_context->ndlp; 7053 struct lpfc_vport *vport = ndlp->vport; 7054 struct lpfc_iocbq *elsiocb; 7055 struct ulp_bde64 *bpl; 7056 IOCB_t *icmd; 7057 union lpfc_wqe128 *wqe; 7058 uint8_t *pcmd; 7059 struct ls_rjt *stat; 7060 struct fc_rdp_res_frame *rdp_res; 7061 uint32_t cmdsize, len; 7062 uint16_t *flag_ptr; 7063 int rc; 7064 u32 ulp_context; 7065 7066 if (status != SUCCESS) 7067 goto error; 7068 7069 /* This will change once we know the true size of the RDP payload */ 7070 cmdsize = sizeof(struct fc_rdp_res_frame); 7071 7072 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, 7073 lpfc_max_els_tries, rdp_context->ndlp, 7074 rdp_context->ndlp->nlp_DID, ELS_CMD_ACC); 7075 if (!elsiocb) 7076 goto free_rdp_context; 7077 7078 ulp_context = get_job_ulpcontext(phba, elsiocb); 7079 if (phba->sli_rev == LPFC_SLI_REV4) { 7080 wqe = &elsiocb->wqe; 7081 /* ox-id of the frame */ 7082 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 7083 rdp_context->ox_id); 7084 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 7085 rdp_context->rx_id); 7086 } else { 7087 icmd = &elsiocb->iocb; 7088 icmd->ulpContext = rdp_context->rx_id; 7089 icmd->unsli3.rcvsli3.ox_id = 
rdp_context->ox_id; 7090 } 7091 7092 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7093 "2171 Xmit RDP response tag x%x xri x%x, " 7094 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x", 7095 elsiocb->iotag, ulp_context, 7096 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 7097 ndlp->nlp_rpi); 7098 rdp_res = (struct fc_rdp_res_frame *)elsiocb->cmd_dmabuf->virt; 7099 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 7100 memset(pcmd, 0, sizeof(struct fc_rdp_res_frame)); 7101 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 7102 7103 /* Update Alarm and Warning */ 7104 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_ALARM_FLAGS); 7105 phba->sfp_alarm |= *flag_ptr; 7106 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_WARNING_FLAGS); 7107 phba->sfp_warning |= *flag_ptr; 7108 7109 /* For RDP payload */ 7110 len = 8; 7111 len += lpfc_rdp_res_link_service((struct fc_rdp_link_service_desc *) 7112 (len + pcmd), ELS_CMD_RDP); 7113 7114 len += lpfc_rdp_res_sfp_desc((struct fc_rdp_sfp_desc *)(len + pcmd), 7115 rdp_context->page_a0, rdp_context->page_a2); 7116 len += lpfc_rdp_res_speed((struct fc_rdp_port_speed_desc *)(len + pcmd), 7117 phba); 7118 len += lpfc_rdp_res_link_error((struct fc_rdp_link_error_status_desc *) 7119 (len + pcmd), &rdp_context->link_stat); 7120 len += lpfc_rdp_res_diag_port_names((struct fc_rdp_port_name_desc *) 7121 (len + pcmd), vport); 7122 len += lpfc_rdp_res_attach_port_names((struct fc_rdp_port_name_desc *) 7123 (len + pcmd), vport, ndlp); 7124 len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd), 7125 &rdp_context->link_stat); 7126 len += lpfc_rdp_res_bbc_desc((struct fc_rdp_bbc_desc *)(len + pcmd), 7127 &rdp_context->link_stat, vport); 7128 len += lpfc_rdp_res_oed_temp_desc(phba, 7129 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7130 rdp_context->page_a2); 7131 len += lpfc_rdp_res_oed_voltage_desc(phba, 7132 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7133 rdp_context->page_a2); 7134 len += lpfc_rdp_res_oed_txbias_desc(phba, 7135 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7136 rdp_context->page_a2); 7137 len += lpfc_rdp_res_oed_txpower_desc(phba, 7138 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7139 rdp_context->page_a2); 7140 len += lpfc_rdp_res_oed_rxpower_desc(phba, 7141 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7142 rdp_context->page_a2); 7143 len += lpfc_rdp_res_opd_desc((struct fc_rdp_opd_sfp_desc *)(len + pcmd), 7144 rdp_context->page_a0, vport); 7145 7146 rdp_res->length = cpu_to_be32(len - 8); 7147 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 7148 7149 /* Now that we know the true size of the payload, update the BPL */ 7150 bpl = (struct ulp_bde64 *)elsiocb->bpl_dmabuf->virt; 7151 bpl->tus.f.bdeSize = len; 7152 bpl->tus.f.bdeFlags = 0; 7153 bpl->tus.w = le32_to_cpu(bpl->tus.w); 7154 7155 phba->fc_stat.elsXmitACC++; 7156 elsiocb->ndlp = lpfc_nlp_get(ndlp); 7157 if (!elsiocb->ndlp) { 7158 lpfc_els_free_iocb(phba, elsiocb); 7159 goto free_rdp_context; 7160 } 7161 7162 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7163 if (rc == IOCB_ERROR) { 7164 lpfc_els_free_iocb(phba, elsiocb); 7165 lpfc_nlp_put(ndlp); 7166 } 7167 7168 goto free_rdp_context; 7169 7170 error: 7171 cmdsize = 2 * sizeof(uint32_t); 7172 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, lpfc_max_els_tries, 7173 ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT); 7174 if (!elsiocb) 7175 goto free_rdp_context; 7176 7177 if (phba->sli_rev == LPFC_SLI_REV4) { 7178 wqe = &elsiocb->wqe; 7179 /* ox-id of the frame */ 7180 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 7181 rdp_context->ox_id); 7182 
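		/* rx-id of the original RDP exchange */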
bf_set(wqe_ctxt_tag, 7183 &wqe->xmit_els_rsp.wqe_com, 7184 rdp_context->rx_id); 7185 } else { 7186 icmd = &elsiocb->iocb; 7187 icmd->ulpContext = rdp_context->rx_id; 7188 icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id; 7189 } 7190 7191 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 7192 7193 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; 7194 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t)); 7195 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 7196 7197 phba->fc_stat.elsXmitLSRJT++; 7198 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 7199 elsiocb->ndlp = lpfc_nlp_get(ndlp); 7200 if (!elsiocb->ndlp) { 7201 lpfc_els_free_iocb(phba, elsiocb); 7202 goto free_rdp_context; 7203 } 7204 7205 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7206 if (rc == IOCB_ERROR) { 7207 lpfc_els_free_iocb(phba, elsiocb); 7208 lpfc_nlp_put(ndlp); 7209 } 7210 7211 free_rdp_context: 7212 /* This reference put is for the original unsolicited RDP. If the 7213 * prep failed, there is no reference to remove. 7214 */ 7215 lpfc_nlp_put(ndlp); 7216 kfree(rdp_context); 7217 } 7218 7219 static int 7220 lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context) 7221 { 7222 LPFC_MBOXQ_t *mbox = NULL; 7223 int rc; 7224 7225 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7226 if (!mbox) { 7227 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS, 7228 "7105 failed to allocate mailbox memory"); 7229 return 1; 7230 } 7231 7232 if (lpfc_sli4_dump_page_a0(phba, mbox)) 7233 goto rdp_fail; 7234 mbox->vport = rdp_context->ndlp->vport; 7235 mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0; 7236 mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context; 7237 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 7238 if (rc == MBX_NOT_FINISHED) { 7239 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 7240 return 1; 7241 } 7242 7243 return 0; 7244 7245 rdp_fail: 7246 mempool_free(mbox, phba->mbox_mem_pool); 7247 return 1; 7248 } 7249 7250 int lpfc_get_sfp_info_wait(struct lpfc_hba *phba, 7251 struct lpfc_rdp_context *rdp_context) 7252 { 7253 LPFC_MBOXQ_t *mbox = NULL; 7254 int rc; 7255 struct lpfc_dmabuf *mp; 7256 struct lpfc_dmabuf *mpsave; 7257 void *virt; 7258 MAILBOX_t *mb; 7259 7260 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7261 if (!mbox) { 7262 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS, 7263 "7205 failed to allocate mailbox memory"); 7264 return 1; 7265 } 7266 7267 if (lpfc_sli4_dump_page_a0(phba, mbox)) 7268 goto sfp_fail; 7269 mp = mbox->ctx_buf; 7270 mpsave = mp; 7271 virt = mp->virt; 7272 if (phba->sli_rev < LPFC_SLI_REV4) { 7273 mb = &mbox->u.mb; 7274 mb->un.varDmp.cv = 1; 7275 mb->un.varDmp.co = 1; 7276 mb->un.varWords[2] = 0; 7277 mb->un.varWords[3] = DMP_SFF_PAGE_A0_SIZE / 4; 7278 mb->un.varWords[4] = 0; 7279 mb->un.varWords[5] = 0; 7280 mb->un.varWords[6] = 0; 7281 mb->un.varWords[7] = 0; 7282 mb->un.varWords[8] = 0; 7283 mb->un.varWords[9] = 0; 7284 mb->un.varWords[10] = 0; 7285 mbox->in_ext_byte_len = DMP_SFF_PAGE_A0_SIZE; 7286 mbox->out_ext_byte_len = DMP_SFF_PAGE_A0_SIZE; 7287 mbox->mbox_offset_word = 5; 7288 mbox->ctx_buf = virt; 7289 } else { 7290 bf_set(lpfc_mbx_memory_dump_type3_length, 7291 &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A0_SIZE); 7292 mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys); 7293 mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys); 7294 } 7295 mbox->vport = phba->pport; 7296 mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context; 7297 7298 rc = lpfc_sli_issue_mbox_wait(phba, mbox, 30); 7299 if (rc == MBX_NOT_FINISHED) { 
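		/* DUMP_MEMORY mailbox for page A0 did not complete; fail the request */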
7300 rc = 1; 7301 goto error; 7302 } 7303 7304 if (phba->sli_rev == LPFC_SLI_REV4) 7305 mp = (struct lpfc_dmabuf *)(mbox->ctx_buf); 7306 else 7307 mp = mpsave; 7308 7309 if (bf_get(lpfc_mqe_status, &mbox->u.mqe)) { 7310 rc = 1; 7311 goto error; 7312 } 7313 7314 lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a0, 7315 DMP_SFF_PAGE_A0_SIZE); 7316 7317 memset(mbox, 0, sizeof(*mbox)); 7318 memset(mp->virt, 0, DMP_SFF_PAGE_A2_SIZE); 7319 INIT_LIST_HEAD(&mp->list); 7320 7321 /* save address for completion */ 7322 mbox->ctx_buf = mp; 7323 mbox->vport = phba->pport; 7324 7325 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_DUMP_MEMORY); 7326 bf_set(lpfc_mbx_memory_dump_type3_type, 7327 &mbox->u.mqe.un.mem_dump_type3, DMP_LMSD); 7328 bf_set(lpfc_mbx_memory_dump_type3_link, 7329 &mbox->u.mqe.un.mem_dump_type3, phba->sli4_hba.physical_port); 7330 bf_set(lpfc_mbx_memory_dump_type3_page_no, 7331 &mbox->u.mqe.un.mem_dump_type3, DMP_PAGE_A2); 7332 if (phba->sli_rev < LPFC_SLI_REV4) { 7333 mb = &mbox->u.mb; 7334 mb->un.varDmp.cv = 1; 7335 mb->un.varDmp.co = 1; 7336 mb->un.varWords[2] = 0; 7337 mb->un.varWords[3] = DMP_SFF_PAGE_A2_SIZE / 4; 7338 mb->un.varWords[4] = 0; 7339 mb->un.varWords[5] = 0; 7340 mb->un.varWords[6] = 0; 7341 mb->un.varWords[7] = 0; 7342 mb->un.varWords[8] = 0; 7343 mb->un.varWords[9] = 0; 7344 mb->un.varWords[10] = 0; 7345 mbox->in_ext_byte_len = DMP_SFF_PAGE_A2_SIZE; 7346 mbox->out_ext_byte_len = DMP_SFF_PAGE_A2_SIZE; 7347 mbox->mbox_offset_word = 5; 7348 mbox->ctx_buf = virt; 7349 } else { 7350 bf_set(lpfc_mbx_memory_dump_type3_length, 7351 &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A2_SIZE); 7352 mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys); 7353 mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys); 7354 } 7355 7356 mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context; 7357 rc = lpfc_sli_issue_mbox_wait(phba, mbox, 30); 7358 if (bf_get(lpfc_mqe_status, &mbox->u.mqe)) { 7359 rc = 1; 7360 goto error; 7361 } 7362 rc = 0; 7363 7364 lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a2, 7365 DMP_SFF_PAGE_A2_SIZE); 7366 7367 error: 7368 mbox->ctx_buf = mpsave; 7369 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 7370 7371 return rc; 7372 7373 sfp_fail: 7374 mempool_free(mbox, phba->mbox_mem_pool); 7375 return 1; 7376 } 7377 7378 /* 7379 * lpfc_els_rcv_rdp - Process an unsolicited RDP ELS. 7380 * @vport: pointer to a host virtual N_Port data structure. 7381 * @cmdiocb: pointer to lpfc command iocb data structure. 7382 * @ndlp: pointer to a node-list data structure. 7383 * 7384 * This routine processes an unsolicited RDP(Read Diagnostic Parameters) 7385 * IOCB. First, the payload of the unsolicited RDP is checked. 7386 * Then it will (1) send MBX_DUMP_MEMORY, Embedded DMP_LMSD sub command TYPE-3 7387 * for Page A0, (2) send MBX_DUMP_MEMORY, DMP_LMSD for Page A2, 7388 * (3) send MBX_READ_LNK_STAT to get link stat, (4) Call lpfc_els_rdp_cmpl 7389 * gather all data and send RDP response. 7390 * 7391 * Return code 7392 * 0 - Sent the acc response 7393 * 1 - Sent the reject response. 
7394 */ 7395 static int 7396 lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7397 struct lpfc_nodelist *ndlp) 7398 { 7399 struct lpfc_hba *phba = vport->phba; 7400 struct lpfc_dmabuf *pcmd; 7401 uint8_t rjt_err, rjt_expl = LSEXP_NOTHING_MORE; 7402 struct fc_rdp_req_frame *rdp_req; 7403 struct lpfc_rdp_context *rdp_context; 7404 union lpfc_wqe128 *cmd = NULL; 7405 struct ls_rjt stat; 7406 7407 if (phba->sli_rev < LPFC_SLI_REV4 || 7408 bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 7409 LPFC_SLI_INTF_IF_TYPE_2) { 7410 rjt_err = LSRJT_UNABLE_TPC; 7411 rjt_expl = LSEXP_REQ_UNSUPPORTED; 7412 goto error; 7413 } 7414 7415 if (phba->sli_rev < LPFC_SLI_REV4 || (phba->hba_flag & HBA_FCOE_MODE)) { 7416 rjt_err = LSRJT_UNABLE_TPC; 7417 rjt_expl = LSEXP_REQ_UNSUPPORTED; 7418 goto error; 7419 } 7420 7421 pcmd = cmdiocb->cmd_dmabuf; 7422 rdp_req = (struct fc_rdp_req_frame *) pcmd->virt; 7423 7424 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7425 "2422 ELS RDP Request " 7426 "dec len %d tag x%x port_id %d len %d\n", 7427 be32_to_cpu(rdp_req->rdp_des_length), 7428 be32_to_cpu(rdp_req->nport_id_desc.tag), 7429 be32_to_cpu(rdp_req->nport_id_desc.nport_id), 7430 be32_to_cpu(rdp_req->nport_id_desc.length)); 7431 7432 if (sizeof(struct fc_rdp_nport_desc) != 7433 be32_to_cpu(rdp_req->rdp_des_length)) 7434 goto rjt_logerr; 7435 if (RDP_N_PORT_DESC_TAG != be32_to_cpu(rdp_req->nport_id_desc.tag)) 7436 goto rjt_logerr; 7437 if (RDP_NPORT_ID_SIZE != 7438 be32_to_cpu(rdp_req->nport_id_desc.length)) 7439 goto rjt_logerr; 7440 rdp_context = kzalloc(sizeof(struct lpfc_rdp_context), GFP_KERNEL); 7441 if (!rdp_context) { 7442 rjt_err = LSRJT_UNABLE_TPC; 7443 goto error; 7444 } 7445 7446 cmd = &cmdiocb->wqe; 7447 rdp_context->ndlp = lpfc_nlp_get(ndlp); 7448 if (!rdp_context->ndlp) { 7449 kfree(rdp_context); 7450 rjt_err = LSRJT_UNABLE_TPC; 7451 goto error; 7452 } 7453 rdp_context->ox_id = bf_get(wqe_rcvoxid, 7454 &cmd->xmit_els_rsp.wqe_com); 7455 rdp_context->rx_id = bf_get(wqe_ctxt_tag, 7456 &cmd->xmit_els_rsp.wqe_com); 7457 rdp_context->cmpl = lpfc_els_rdp_cmpl; 7458 if (lpfc_get_rdp_info(phba, rdp_context)) { 7459 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_ELS, 7460 "2423 Unable to send mailbox"); 7461 kfree(rdp_context); 7462 rjt_err = LSRJT_UNABLE_TPC; 7463 lpfc_nlp_put(ndlp); 7464 goto error; 7465 } 7466 7467 return 0; 7468 7469 rjt_logerr: 7470 rjt_err = LSRJT_LOGICAL_ERR; 7471 7472 error: 7473 memset(&stat, 0, sizeof(stat)); 7474 stat.un.b.lsRjtRsnCode = rjt_err; 7475 stat.un.b.lsRjtRsnCodeExp = rjt_expl; 7476 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 7477 return 1; 7478 } 7479 7480 7481 static void 7482 lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 7483 { 7484 MAILBOX_t *mb; 7485 IOCB_t *icmd; 7486 union lpfc_wqe128 *wqe; 7487 uint8_t *pcmd; 7488 struct lpfc_iocbq *elsiocb; 7489 struct lpfc_nodelist *ndlp; 7490 struct ls_rjt *stat; 7491 union lpfc_sli4_cfg_shdr *shdr; 7492 struct lpfc_lcb_context *lcb_context; 7493 struct fc_lcb_res_frame *lcb_res; 7494 uint32_t cmdsize, shdr_status, shdr_add_status; 7495 int rc; 7496 7497 mb = &pmb->u.mb; 7498 lcb_context = (struct lpfc_lcb_context *)pmb->ctx_ndlp; 7499 ndlp = lcb_context->ndlp; 7500 pmb->ctx_ndlp = NULL; 7501 pmb->ctx_buf = NULL; 7502 7503 shdr = (union lpfc_sli4_cfg_shdr *) 7504 &pmb->u.mqe.un.beacon_config.header.cfg_shdr; 7505 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 7506 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 7507 7508 
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX, 7509 "0194 SET_BEACON_CONFIG mailbox " 7510 "completed with status x%x add_status x%x," 7511 " mbx status x%x\n", 7512 shdr_status, shdr_add_status, mb->mbxStatus); 7513 7514 if ((mb->mbxStatus != MBX_SUCCESS) || shdr_status || 7515 (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) || 7516 (shdr_add_status == ADD_STATUS_INVALID_REQUEST)) { 7517 mempool_free(pmb, phba->mbox_mem_pool); 7518 goto error; 7519 } 7520 7521 mempool_free(pmb, phba->mbox_mem_pool); 7522 cmdsize = sizeof(struct fc_lcb_res_frame); 7523 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 7524 lpfc_max_els_tries, ndlp, 7525 ndlp->nlp_DID, ELS_CMD_ACC); 7526 7527 /* Decrement the ndlp reference count from previous mbox command */ 7528 lpfc_nlp_put(ndlp); 7529 7530 if (!elsiocb) 7531 goto free_lcb_context; 7532 7533 lcb_res = (struct fc_lcb_res_frame *)elsiocb->cmd_dmabuf->virt; 7534 7535 memset(lcb_res, 0, sizeof(struct fc_lcb_res_frame)); 7536 7537 if (phba->sli_rev == LPFC_SLI_REV4) { 7538 wqe = &elsiocb->wqe; 7539 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, lcb_context->rx_id); 7540 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 7541 lcb_context->ox_id); 7542 } else { 7543 icmd = &elsiocb->iocb; 7544 icmd->ulpContext = lcb_context->rx_id; 7545 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; 7546 } 7547 7548 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 7549 *((uint32_t *)(pcmd)) = ELS_CMD_ACC; 7550 lcb_res->lcb_sub_command = lcb_context->sub_command; 7551 lcb_res->lcb_type = lcb_context->type; 7552 lcb_res->capability = lcb_context->capability; 7553 lcb_res->lcb_frequency = lcb_context->frequency; 7554 lcb_res->lcb_duration = lcb_context->duration; 7555 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 7556 phba->fc_stat.elsXmitACC++; 7557 7558 elsiocb->ndlp = lpfc_nlp_get(ndlp); 7559 if (!elsiocb->ndlp) { 7560 lpfc_els_free_iocb(phba, elsiocb); 7561 goto out; 7562 } 7563 7564 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7565 if (rc == IOCB_ERROR) { 7566 lpfc_els_free_iocb(phba, elsiocb); 7567 lpfc_nlp_put(ndlp); 7568 } 7569 out: 7570 kfree(lcb_context); 7571 return; 7572 7573 error: 7574 cmdsize = sizeof(struct fc_lcb_res_frame); 7575 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 7576 lpfc_max_els_tries, ndlp, 7577 ndlp->nlp_DID, ELS_CMD_LS_RJT); 7578 lpfc_nlp_put(ndlp); 7579 if (!elsiocb) 7580 goto free_lcb_context; 7581 7582 if (phba->sli_rev == LPFC_SLI_REV4) { 7583 wqe = &elsiocb->wqe; 7584 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, lcb_context->rx_id); 7585 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 7586 lcb_context->ox_id); 7587 } else { 7588 icmd = &elsiocb->iocb; 7589 icmd->ulpContext = lcb_context->rx_id; 7590 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; 7591 } 7592 7593 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 7594 7595 *((uint32_t *)(pcmd)) = ELS_CMD_LS_RJT; 7596 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t)); 7597 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 7598 7599 if (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) 7600 stat->un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS; 7601 7602 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 7603 phba->fc_stat.elsXmitLSRJT++; 7604 elsiocb->ndlp = lpfc_nlp_get(ndlp); 7605 if (!elsiocb->ndlp) { 7606 lpfc_els_free_iocb(phba, elsiocb); 7607 goto free_lcb_context; 7608 } 7609 7610 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7611 if (rc == IOCB_ERROR) { 7612 lpfc_els_free_iocb(phba, elsiocb); 7613 lpfc_nlp_put(ndlp); 7614 } 7615 free_lcb_context: 7616 kfree(lcb_context); 7617 } 
7618 7619 static int 7620 lpfc_sli4_set_beacon(struct lpfc_vport *vport, 7621 struct lpfc_lcb_context *lcb_context, 7622 uint32_t beacon_state) 7623 { 7624 struct lpfc_hba *phba = vport->phba; 7625 union lpfc_sli4_cfg_shdr *cfg_shdr; 7626 LPFC_MBOXQ_t *mbox = NULL; 7627 uint32_t len; 7628 int rc; 7629 7630 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7631 if (!mbox) 7632 return 1; 7633 7634 cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr; 7635 len = sizeof(struct lpfc_mbx_set_beacon_config) - 7636 sizeof(struct lpfc_sli4_cfg_mhdr); 7637 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 7638 LPFC_MBOX_OPCODE_SET_BEACON_CONFIG, len, 7639 LPFC_SLI4_MBX_EMBED); 7640 mbox->ctx_ndlp = (void *)lcb_context; 7641 mbox->vport = phba->pport; 7642 mbox->mbox_cmpl = lpfc_els_lcb_rsp; 7643 bf_set(lpfc_mbx_set_beacon_port_num, &mbox->u.mqe.un.beacon_config, 7644 phba->sli4_hba.physical_port); 7645 bf_set(lpfc_mbx_set_beacon_state, &mbox->u.mqe.un.beacon_config, 7646 beacon_state); 7647 mbox->u.mqe.un.beacon_config.word5 = 0; /* Reserved */ 7648 7649 /* 7650 * Check bv1s bit before issuing the mailbox 7651 * if bv1s == 1, LCB V1 supported 7652 * else, LCB V0 supported 7653 */ 7654 7655 if (phba->sli4_hba.pc_sli4_params.bv1s) { 7656 /* COMMON_SET_BEACON_CONFIG_V1 */ 7657 cfg_shdr->request.word9 = BEACON_VERSION_V1; 7658 lcb_context->capability |= LCB_CAPABILITY_DURATION; 7659 bf_set(lpfc_mbx_set_beacon_port_type, 7660 &mbox->u.mqe.un.beacon_config, 0); 7661 bf_set(lpfc_mbx_set_beacon_duration_v1, 7662 &mbox->u.mqe.un.beacon_config, 7663 be16_to_cpu(lcb_context->duration)); 7664 } else { 7665 /* COMMON_SET_BEACON_CONFIG_V0 */ 7666 if (be16_to_cpu(lcb_context->duration) != 0) { 7667 mempool_free(mbox, phba->mbox_mem_pool); 7668 return 1; 7669 } 7670 cfg_shdr->request.word9 = BEACON_VERSION_V0; 7671 lcb_context->capability &= ~(LCB_CAPABILITY_DURATION); 7672 bf_set(lpfc_mbx_set_beacon_state, 7673 &mbox->u.mqe.un.beacon_config, beacon_state); 7674 bf_set(lpfc_mbx_set_beacon_port_type, 7675 &mbox->u.mqe.un.beacon_config, 1); 7676 bf_set(lpfc_mbx_set_beacon_duration, 7677 &mbox->u.mqe.un.beacon_config, 7678 be16_to_cpu(lcb_context->duration)); 7679 } 7680 7681 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 7682 if (rc == MBX_NOT_FINISHED) { 7683 mempool_free(mbox, phba->mbox_mem_pool); 7684 return 1; 7685 } 7686 7687 return 0; 7688 } 7689 7690 7691 /** 7692 * lpfc_els_rcv_lcb - Process an unsolicited LCB 7693 * @vport: pointer to a host virtual N_Port data structure. 7694 * @cmdiocb: pointer to lpfc command iocb data structure. 7695 * @ndlp: pointer to a node-list data structure. 7696 * 7697 * This routine processes an unsolicited LCB(LINK CABLE BEACON) IOCB. 7698 * First, the payload of the unsolicited LCB is checked. 7699 * Then based on Subcommand beacon will either turn on or off. 7700 * 7701 * Return code 7702 * 0 - Sent the acc response 7703 * 1 - Sent the reject response. 
7704 **/ 7705 static int 7706 lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7707 struct lpfc_nodelist *ndlp) 7708 { 7709 struct lpfc_hba *phba = vport->phba; 7710 struct lpfc_dmabuf *pcmd; 7711 uint8_t *lp; 7712 struct fc_lcb_request_frame *beacon; 7713 struct lpfc_lcb_context *lcb_context; 7714 u8 state, rjt_err = 0; 7715 struct ls_rjt stat; 7716 7717 pcmd = cmdiocb->cmd_dmabuf; 7718 lp = (uint8_t *)pcmd->virt; 7719 beacon = (struct fc_lcb_request_frame *)pcmd->virt; 7720 7721 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7722 "0192 ELS LCB Data x%x x%x x%x x%x sub x%x " 7723 "type x%x frequency %x duration x%x\n", 7724 lp[0], lp[1], lp[2], 7725 beacon->lcb_command, 7726 beacon->lcb_sub_command, 7727 beacon->lcb_type, 7728 beacon->lcb_frequency, 7729 be16_to_cpu(beacon->lcb_duration)); 7730 7731 if (beacon->lcb_sub_command != LPFC_LCB_ON && 7732 beacon->lcb_sub_command != LPFC_LCB_OFF) { 7733 rjt_err = LSRJT_CMD_UNSUPPORTED; 7734 goto rjt; 7735 } 7736 7737 if (phba->sli_rev < LPFC_SLI_REV4 || 7738 phba->hba_flag & HBA_FCOE_MODE || 7739 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 7740 LPFC_SLI_INTF_IF_TYPE_2)) { 7741 rjt_err = LSRJT_CMD_UNSUPPORTED; 7742 goto rjt; 7743 } 7744 7745 lcb_context = kmalloc(sizeof(*lcb_context), GFP_KERNEL); 7746 if (!lcb_context) { 7747 rjt_err = LSRJT_UNABLE_TPC; 7748 goto rjt; 7749 } 7750 7751 state = (beacon->lcb_sub_command == LPFC_LCB_ON) ? 1 : 0; 7752 lcb_context->sub_command = beacon->lcb_sub_command; 7753 lcb_context->capability = 0; 7754 lcb_context->type = beacon->lcb_type; 7755 lcb_context->frequency = beacon->lcb_frequency; 7756 lcb_context->duration = beacon->lcb_duration; 7757 lcb_context->ox_id = get_job_rcvoxid(phba, cmdiocb); 7758 lcb_context->rx_id = get_job_ulpcontext(phba, cmdiocb); 7759 lcb_context->ndlp = lpfc_nlp_get(ndlp); 7760 if (!lcb_context->ndlp) { 7761 rjt_err = LSRJT_UNABLE_TPC; 7762 goto rjt_free; 7763 } 7764 7765 if (lpfc_sli4_set_beacon(vport, lcb_context, state)) { 7766 lpfc_printf_vlog(ndlp->vport, KERN_ERR, LOG_TRACE_EVENT, 7767 "0193 failed to send mail box"); 7768 lpfc_nlp_put(ndlp); 7769 rjt_err = LSRJT_UNABLE_TPC; 7770 goto rjt_free; 7771 } 7772 return 0; 7773 7774 rjt_free: 7775 kfree(lcb_context); 7776 rjt: 7777 memset(&stat, 0, sizeof(stat)); 7778 stat.un.b.lsRjtRsnCode = rjt_err; 7779 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 7780 return 1; 7781 } 7782 7783 7784 /** 7785 * lpfc_els_flush_rscn - Clean up any rscn activities with a vport 7786 * @vport: pointer to a host virtual N_Port data structure. 7787 * 7788 * This routine cleans up any Registration State Change Notification 7789 * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the 7790 * @vport together with the host_lock is used to prevent multiple thread 7791 * trying to access the RSCN array on a same @vport at the same time. 
7792 **/ 7793 void 7794 lpfc_els_flush_rscn(struct lpfc_vport *vport) 7795 { 7796 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7797 struct lpfc_hba *phba = vport->phba; 7798 int i; 7799 7800 spin_lock_irq(shost->host_lock); 7801 if (vport->fc_rscn_flush) { 7802 /* Another thread is walking fc_rscn_id_list on this vport */ 7803 spin_unlock_irq(shost->host_lock); 7804 return; 7805 } 7806 /* Indicate we are walking lpfc_els_flush_rscn on this vport */ 7807 vport->fc_rscn_flush = 1; 7808 spin_unlock_irq(shost->host_lock); 7809 7810 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 7811 lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]); 7812 vport->fc_rscn_id_list[i] = NULL; 7813 } 7814 spin_lock_irq(shost->host_lock); 7815 vport->fc_rscn_id_cnt = 0; 7816 vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY); 7817 spin_unlock_irq(shost->host_lock); 7818 lpfc_can_disctmo(vport); 7819 /* Indicate we are done walking this fc_rscn_id_list */ 7820 vport->fc_rscn_flush = 0; 7821 } 7822 7823 /** 7824 * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did 7825 * @vport: pointer to a host virtual N_Port data structure. 7826 * @did: remote destination port identifier. 7827 * 7828 * This routine checks whether there is any pending Registration State 7829 * Configuration Notification (RSCN) to a @did on @vport. 7830 * 7831 * Return code 7832 * None zero - The @did matched with a pending rscn 7833 * 0 - not able to match @did with a pending rscn 7834 **/ 7835 int 7836 lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did) 7837 { 7838 D_ID ns_did; 7839 D_ID rscn_did; 7840 uint32_t *lp; 7841 uint32_t payload_len, i; 7842 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7843 7844 ns_did.un.word = did; 7845 7846 /* Never match fabric nodes for RSCNs */ 7847 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 7848 return 0; 7849 7850 /* If we are doing a FULL RSCN rediscovery, match everything */ 7851 if (vport->fc_flag & FC_RSCN_DISCOVERY) 7852 return did; 7853 7854 spin_lock_irq(shost->host_lock); 7855 if (vport->fc_rscn_flush) { 7856 /* Another thread is walking fc_rscn_id_list on this vport */ 7857 spin_unlock_irq(shost->host_lock); 7858 return 0; 7859 } 7860 /* Indicate we are walking fc_rscn_id_list on this vport */ 7861 vport->fc_rscn_flush = 1; 7862 spin_unlock_irq(shost->host_lock); 7863 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 7864 lp = vport->fc_rscn_id_list[i]->virt; 7865 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); 7866 payload_len -= sizeof(uint32_t); /* take off word 0 */ 7867 while (payload_len) { 7868 rscn_did.un.word = be32_to_cpu(*lp++); 7869 payload_len -= sizeof(uint32_t); 7870 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) { 7871 case RSCN_ADDRESS_FORMAT_PORT: 7872 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 7873 && (ns_did.un.b.area == rscn_did.un.b.area) 7874 && (ns_did.un.b.id == rscn_did.un.b.id)) 7875 goto return_did_out; 7876 break; 7877 case RSCN_ADDRESS_FORMAT_AREA: 7878 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 7879 && (ns_did.un.b.area == rscn_did.un.b.area)) 7880 goto return_did_out; 7881 break; 7882 case RSCN_ADDRESS_FORMAT_DOMAIN: 7883 if (ns_did.un.b.domain == rscn_did.un.b.domain) 7884 goto return_did_out; 7885 break; 7886 case RSCN_ADDRESS_FORMAT_FABRIC: 7887 goto return_did_out; 7888 } 7889 } 7890 } 7891 /* Indicate we are done with walking fc_rscn_id_list on this vport */ 7892 vport->fc_rscn_flush = 0; 7893 return 0; 7894 return_did_out: 7895 /* Indicate we are done with walking fc_rscn_id_list on this 
vport */ 7896 vport->fc_rscn_flush = 0; 7897 return did; 7898 } 7899 7900 /** 7901 * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn 7902 * @vport: pointer to a host virtual N_Port data structure. 7903 * 7904 * This routine sends recovery (NLP_EVT_DEVICE_RECOVERY) event to the 7905 * state machine for a @vport's nodes that are with pending RSCN (Registration 7906 * State Change Notification). 7907 * 7908 * Return code 7909 * 0 - Successful (currently alway return 0) 7910 **/ 7911 static int 7912 lpfc_rscn_recovery_check(struct lpfc_vport *vport) 7913 { 7914 struct lpfc_nodelist *ndlp = NULL, *n; 7915 7916 /* Move all affected nodes by pending RSCNs to NPR state. */ 7917 list_for_each_entry_safe(ndlp, n, &vport->fc_nodes, nlp_listp) { 7918 if ((ndlp->nlp_state == NLP_STE_UNUSED_NODE) || 7919 !lpfc_rscn_payload_check(vport, ndlp->nlp_DID)) 7920 continue; 7921 7922 /* NVME Target mode does not do RSCN Recovery. */ 7923 if (vport->phba->nvmet_support) 7924 continue; 7925 7926 /* If we are in the process of doing discovery on this 7927 * NPort, let it continue on its own. 7928 */ 7929 switch (ndlp->nlp_state) { 7930 case NLP_STE_PLOGI_ISSUE: 7931 case NLP_STE_ADISC_ISSUE: 7932 case NLP_STE_REG_LOGIN_ISSUE: 7933 case NLP_STE_PRLI_ISSUE: 7934 case NLP_STE_LOGO_ISSUE: 7935 continue; 7936 } 7937 7938 lpfc_disc_state_machine(vport, ndlp, NULL, 7939 NLP_EVT_DEVICE_RECOVERY); 7940 lpfc_cancel_retry_delay_tmo(vport, ndlp); 7941 } 7942 return 0; 7943 } 7944 7945 /** 7946 * lpfc_send_rscn_event - Send an RSCN event to management application 7947 * @vport: pointer to a host virtual N_Port data structure. 7948 * @cmdiocb: pointer to lpfc command iocb data structure. 7949 * 7950 * lpfc_send_rscn_event sends an RSCN netlink event to management 7951 * applications. 7952 */ 7953 static void 7954 lpfc_send_rscn_event(struct lpfc_vport *vport, 7955 struct lpfc_iocbq *cmdiocb) 7956 { 7957 struct lpfc_dmabuf *pcmd; 7958 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7959 uint32_t *payload_ptr; 7960 uint32_t payload_len; 7961 struct lpfc_rscn_event_header *rscn_event_data; 7962 7963 pcmd = cmdiocb->cmd_dmabuf; 7964 payload_ptr = (uint32_t *) pcmd->virt; 7965 payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK); 7966 7967 rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) + 7968 payload_len, GFP_KERNEL); 7969 if (!rscn_event_data) { 7970 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 7971 "0147 Failed to allocate memory for RSCN event\n"); 7972 return; 7973 } 7974 rscn_event_data->event_type = FC_REG_RSCN_EVENT; 7975 rscn_event_data->payload_length = payload_len; 7976 memcpy(rscn_event_data->rscn_payload, payload_ptr, 7977 payload_len); 7978 7979 fc_host_post_vendor_event(shost, 7980 fc_get_event_number(), 7981 sizeof(struct lpfc_rscn_event_header) + payload_len, 7982 (char *)rscn_event_data, 7983 LPFC_NL_VENDOR_ID); 7984 7985 kfree(rscn_event_data); 7986 } 7987 7988 /** 7989 * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb 7990 * @vport: pointer to a host virtual N_Port data structure. 7991 * @cmdiocb: pointer to lpfc command iocb data structure. 7992 * @ndlp: pointer to a node-list data structure. 7993 * 7994 * This routine processes an unsolicited RSCN (Registration State Change 7995 * Notification) IOCB. First, the payload of the unsolicited RSCN is walked 7996 * to invoke fc_host_post_event() routine to the FC transport layer. 
If the 7997 * discover state machine is about to begin discovery, it just accepts the 7998 * RSCN and the discovery process will satisfy the RSCN. If this RSCN only 7999 * contains N_Port IDs for other vports on this HBA, it just accepts the 8000 * RSCN and ignore processing it. If the state machine is in the recovery 8001 * state, the fc_rscn_id_list of this @vport is walked and the 8002 * lpfc_rscn_recovery_check() routine is invoked to send recovery event for 8003 * all nodes that match RSCN payload. Otherwise, the lpfc_els_handle_rscn() 8004 * routine is invoked to handle the RSCN event. 8005 * 8006 * Return code 8007 * 0 - Just sent the acc response 8008 * 1 - Sent the acc response and waited for name server completion 8009 **/ 8010 static int 8011 lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8012 struct lpfc_nodelist *ndlp) 8013 { 8014 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 8015 struct lpfc_hba *phba = vport->phba; 8016 struct lpfc_dmabuf *pcmd; 8017 uint32_t *lp, *datap; 8018 uint32_t payload_len, length, nportid, *cmd; 8019 int rscn_cnt; 8020 int rscn_id = 0, hba_id = 0; 8021 int i, tmo; 8022 8023 pcmd = cmdiocb->cmd_dmabuf; 8024 lp = (uint32_t *) pcmd->virt; 8025 8026 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); 8027 payload_len -= sizeof(uint32_t); /* take off word 0 */ 8028 /* RSCN received */ 8029 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 8030 "0214 RSCN received Data: x%x x%x x%x x%x\n", 8031 vport->fc_flag, payload_len, *lp, 8032 vport->fc_rscn_id_cnt); 8033 8034 /* Send an RSCN event to the management application */ 8035 lpfc_send_rscn_event(vport, cmdiocb); 8036 8037 for (i = 0; i < payload_len/sizeof(uint32_t); i++) 8038 fc_host_post_event(shost, fc_get_event_number(), 8039 FCH_EVT_RSCN, lp[i]); 8040 8041 /* Check if RSCN is coming from a direct-connected remote NPort */ 8042 if (vport->fc_flag & FC_PT2PT) { 8043 /* If so, just ACC it, no other action needed for now */ 8044 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8045 "2024 pt2pt RSCN %08x Data: x%x x%x\n", 8046 *lp, vport->fc_flag, payload_len); 8047 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8048 8049 /* Check to see if we need to NVME rescan this target 8050 * remoteport. 8051 */ 8052 if (ndlp->nlp_fc4_type & NLP_FC4_NVME && 8053 ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY)) 8054 lpfc_nvme_rescan_port(vport, ndlp); 8055 return 0; 8056 } 8057 8058 /* If we are about to begin discovery, just ACC the RSCN. 8059 * Discovery processing will satisfy it. 8060 */ 8061 if (vport->port_state <= LPFC_NS_QRY) { 8062 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8063 "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x", 8064 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 8065 8066 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8067 return 0; 8068 } 8069 8070 /* If this RSCN just contains NPortIDs for other vports on this HBA, 8071 * just ACC and ignore it. 
8072 */ 8073 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 8074 !(vport->cfg_peer_port_login)) { 8075 i = payload_len; 8076 datap = lp; 8077 while (i > 0) { 8078 nportid = *datap++; 8079 nportid = ((be32_to_cpu(nportid)) & Mask_DID); 8080 i -= sizeof(uint32_t); 8081 rscn_id++; 8082 if (lpfc_find_vport_by_did(phba, nportid)) 8083 hba_id++; 8084 } 8085 if (rscn_id == hba_id) { 8086 /* ALL NPortIDs in RSCN are on HBA */ 8087 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 8088 "0219 Ignore RSCN " 8089 "Data: x%x x%x x%x x%x\n", 8090 vport->fc_flag, payload_len, 8091 *lp, vport->fc_rscn_id_cnt); 8092 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8093 "RCV RSCN vport: did:x%x/ste:x%x flg:x%x", 8094 ndlp->nlp_DID, vport->port_state, 8095 ndlp->nlp_flag); 8096 8097 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, 8098 ndlp, NULL); 8099 /* Restart disctmo if its already running */ 8100 if (vport->fc_flag & FC_DISC_TMO) { 8101 tmo = ((phba->fc_ratov * 3) + 3); 8102 mod_timer(&vport->fc_disctmo, 8103 jiffies + 8104 msecs_to_jiffies(1000 * tmo)); 8105 } 8106 return 0; 8107 } 8108 } 8109 8110 spin_lock_irq(shost->host_lock); 8111 if (vport->fc_rscn_flush) { 8112 /* Another thread is walking fc_rscn_id_list on this vport */ 8113 vport->fc_flag |= FC_RSCN_DISCOVERY; 8114 spin_unlock_irq(shost->host_lock); 8115 /* Send back ACC */ 8116 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8117 return 0; 8118 } 8119 /* Indicate we are walking fc_rscn_id_list on this vport */ 8120 vport->fc_rscn_flush = 1; 8121 spin_unlock_irq(shost->host_lock); 8122 /* Get the array count after successfully have the token */ 8123 rscn_cnt = vport->fc_rscn_id_cnt; 8124 /* If we are already processing an RSCN, save the received 8125 * RSCN payload buffer, cmdiocb->cmd_dmabuf to process later. 8126 */ 8127 if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) { 8128 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8129 "RCV RSCN defer: did:x%x/ste:x%x flg:x%x", 8130 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 8131 8132 spin_lock_irq(shost->host_lock); 8133 vport->fc_flag |= FC_RSCN_DEFERRED; 8134 8135 /* Restart disctmo if its already running */ 8136 if (vport->fc_flag & FC_DISC_TMO) { 8137 tmo = ((phba->fc_ratov * 3) + 3); 8138 mod_timer(&vport->fc_disctmo, 8139 jiffies + msecs_to_jiffies(1000 * tmo)); 8140 } 8141 if ((rscn_cnt < FC_MAX_HOLD_RSCN) && 8142 !(vport->fc_flag & FC_RSCN_DISCOVERY)) { 8143 vport->fc_flag |= FC_RSCN_MODE; 8144 spin_unlock_irq(shost->host_lock); 8145 if (rscn_cnt) { 8146 cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt; 8147 length = be32_to_cpu(*cmd & ~ELS_CMD_MASK); 8148 } 8149 if ((rscn_cnt) && 8150 (payload_len + length <= LPFC_BPL_SIZE)) { 8151 *cmd &= ELS_CMD_MASK; 8152 *cmd |= cpu_to_be32(payload_len + length); 8153 memcpy(((uint8_t *)cmd) + length, lp, 8154 payload_len); 8155 } else { 8156 vport->fc_rscn_id_list[rscn_cnt] = pcmd; 8157 vport->fc_rscn_id_cnt++; 8158 /* If we zero, cmdiocb->cmd_dmabuf, the calling 8159 * routine will not try to free it. 
8160 */ 8161 cmdiocb->cmd_dmabuf = NULL; 8162 } 8163 /* Deferred RSCN */ 8164 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 8165 "0235 Deferred RSCN " 8166 "Data: x%x x%x x%x\n", 8167 vport->fc_rscn_id_cnt, vport->fc_flag, 8168 vport->port_state); 8169 } else { 8170 vport->fc_flag |= FC_RSCN_DISCOVERY; 8171 spin_unlock_irq(shost->host_lock); 8172 /* ReDiscovery RSCN */ 8173 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 8174 "0234 ReDiscovery RSCN " 8175 "Data: x%x x%x x%x\n", 8176 vport->fc_rscn_id_cnt, vport->fc_flag, 8177 vport->port_state); 8178 } 8179 /* Indicate we are done walking fc_rscn_id_list on this vport */ 8180 vport->fc_rscn_flush = 0; 8181 /* Send back ACC */ 8182 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8183 /* send RECOVERY event for ALL nodes that match RSCN payload */ 8184 lpfc_rscn_recovery_check(vport); 8185 return 0; 8186 } 8187 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8188 "RCV RSCN: did:x%x/ste:x%x flg:x%x", 8189 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 8190 8191 spin_lock_irq(shost->host_lock); 8192 vport->fc_flag |= FC_RSCN_MODE; 8193 spin_unlock_irq(shost->host_lock); 8194 vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd; 8195 /* Indicate we are done walking fc_rscn_id_list on this vport */ 8196 vport->fc_rscn_flush = 0; 8197 /* 8198 * If we zero, cmdiocb->cmd_dmabuf, the calling routine will 8199 * not try to free it. 8200 */ 8201 cmdiocb->cmd_dmabuf = NULL; 8202 lpfc_set_disctmo(vport); 8203 /* Send back ACC */ 8204 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8205 /* send RECOVERY event for ALL nodes that match RSCN payload */ 8206 lpfc_rscn_recovery_check(vport); 8207 return lpfc_els_handle_rscn(vport); 8208 } 8209 8210 /** 8211 * lpfc_els_handle_rscn - Handle rscn for a vport 8212 * @vport: pointer to a host virtual N_Port data structure. 8213 * 8214 * This routine handles the Registration State Configuration Notification 8215 * (RSCN) for a @vport. If login to NameServer does not exist, a new ndlp shall 8216 * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise, 8217 * if the ndlp to NameServer exists, a Common Transport (CT) command to the 8218 * NameServer shall be issued. If CT command to the NameServer fails to be 8219 * issued, the lpfc_els_flush_rscn() routine shall be invoked to clean up any 8220 * RSCN activities with the @vport. 8221 * 8222 * Return code 8223 * 0 - Cleaned up rscn on the @vport 8224 * 1 - Wait for plogi to name server before proceed 8225 **/ 8226 int 8227 lpfc_els_handle_rscn(struct lpfc_vport *vport) 8228 { 8229 struct lpfc_nodelist *ndlp; 8230 struct lpfc_hba *phba = vport->phba; 8231 8232 /* Ignore RSCN if the port is being torn down. */ 8233 if (vport->load_flag & FC_UNLOADING) { 8234 lpfc_els_flush_rscn(vport); 8235 return 0; 8236 } 8237 8238 /* Start timer for RSCN processing */ 8239 lpfc_set_disctmo(vport); 8240 8241 /* RSCN processed */ 8242 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 8243 "0215 RSCN processed Data: x%x x%x x%x x%x x%x x%x\n", 8244 vport->fc_flag, 0, vport->fc_rscn_id_cnt, 8245 vport->port_state, vport->num_disc_nodes, 8246 vport->gidft_inp); 8247 8248 /* To process RSCN, first compare RSCN data with NameServer */ 8249 vport->fc_ns_retry = 0; 8250 vport->num_disc_nodes = 0; 8251 8252 ndlp = lpfc_findnode_did(vport, NameServer_DID); 8253 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { 8254 /* Good ndlp, issue CT Request to NameServer. Need to 8255 * know how many gidfts were issued. 
If none, then just 8256 * flush the RSCN. Otherwise, the outstanding requests 8257 * need to complete. 8258 */ 8259 if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_FT) { 8260 if (lpfc_issue_gidft(vport) > 0) 8261 return 1; 8262 } else if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_PT) { 8263 if (lpfc_issue_gidpt(vport) > 0) 8264 return 1; 8265 } else { 8266 return 1; 8267 } 8268 } else { 8269 /* Nameserver login in question. Revalidate. */ 8270 if (ndlp) { 8271 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE; 8272 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 8273 } else { 8274 ndlp = lpfc_nlp_init(vport, NameServer_DID); 8275 if (!ndlp) { 8276 lpfc_els_flush_rscn(vport); 8277 return 0; 8278 } 8279 ndlp->nlp_prev_state = ndlp->nlp_state; 8280 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 8281 } 8282 ndlp->nlp_type |= NLP_FABRIC; 8283 lpfc_issue_els_plogi(vport, NameServer_DID, 0); 8284 /* Wait for NameServer login cmpl before we can 8285 * continue 8286 */ 8287 return 1; 8288 } 8289 8290 lpfc_els_flush_rscn(vport); 8291 return 0; 8292 } 8293 8294 /** 8295 * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb 8296 * @vport: pointer to a host virtual N_Port data structure. 8297 * @cmdiocb: pointer to lpfc command iocb data structure. 8298 * @ndlp: pointer to a node-list data structure. 8299 * 8300 * This routine processes Fabric Login (FLOGI) IOCB received as an ELS 8301 * unsolicited event. An unsolicited FLOGI can be received in a point-to- 8302 * point topology. As an unsolicited FLOGI should not be received in a loop 8303 * mode, any unsolicited FLOGI received in loop mode shall be ignored. The 8304 * lpfc_check_sparm() routine is invoked to check the parameters in the 8305 * unsolicited FLOGI. If parameters validation failed, the routine 8306 * lpfc_els_rsp_reject() shall be called with reject reason code set to 8307 * LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the 8308 * FLOGI shall be compared with the Port WWN of the @vport to determine who 8309 * will initiate PLOGI. The higher lexicographical value party shall has 8310 * higher priority (as the winning port) and will initiate PLOGI and 8311 * communicate Port_IDs (Addresses) for both nodes in PLOGI. The result 8312 * of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI 8313 * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI. 
8314 * 8315 * Return code 8316 * 0 - Successfully processed the unsolicited flogi 8317 * 1 - Failed to process the unsolicited flogi 8318 **/ 8319 static int 8320 lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8321 struct lpfc_nodelist *ndlp) 8322 { 8323 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 8324 struct lpfc_hba *phba = vport->phba; 8325 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf; 8326 uint32_t *lp = (uint32_t *) pcmd->virt; 8327 union lpfc_wqe128 *wqe = &cmdiocb->wqe; 8328 struct serv_parm *sp; 8329 LPFC_MBOXQ_t *mbox; 8330 uint32_t cmd, did; 8331 int rc; 8332 uint32_t fc_flag = 0; 8333 uint32_t port_state = 0; 8334 8335 /* Clear external loopback plug detected flag */ 8336 phba->link_flag &= ~LS_EXTERNAL_LOOPBACK; 8337 8338 cmd = *lp++; 8339 sp = (struct serv_parm *) lp; 8340 8341 /* FLOGI received */ 8342 8343 lpfc_set_disctmo(vport); 8344 8345 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 8346 /* We should never receive a FLOGI in loop mode, ignore it */ 8347 did = bf_get(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest); 8348 8349 /* An FLOGI ELS command <elsCmd> was received from DID <did> in 8350 Loop Mode */ 8351 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 8352 "0113 An FLOGI ELS command x%x was " 8353 "received from DID x%x in Loop Mode\n", 8354 cmd, did); 8355 return 1; 8356 } 8357 8358 (void) lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1); 8359 8360 /* 8361 * If our portname is greater than the remote portname, 8362 * then we initiate Nport login. 8363 */ 8364 8365 rc = memcmp(&vport->fc_portname, &sp->portName, 8366 sizeof(struct lpfc_name)); 8367 8368 if (!rc) { 8369 if (phba->sli_rev < LPFC_SLI_REV4) { 8370 mbox = mempool_alloc(phba->mbox_mem_pool, 8371 GFP_KERNEL); 8372 if (!mbox) 8373 return 1; 8374 lpfc_linkdown(phba); 8375 lpfc_init_link(phba, mbox, 8376 phba->cfg_topology, 8377 phba->cfg_link_speed); 8378 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; 8379 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 8380 mbox->vport = vport; 8381 rc = lpfc_sli_issue_mbox(phba, mbox, 8382 MBX_NOWAIT); 8383 lpfc_set_loopback_flag(phba); 8384 if (rc == MBX_NOT_FINISHED) 8385 mempool_free(mbox, phba->mbox_mem_pool); 8386 return 1; 8387 } 8388 8389 /* External loopback plug insertion detected */ 8390 phba->link_flag |= LS_EXTERNAL_LOOPBACK; 8391 8392 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_LIBDFC, 8393 "1119 External Loopback plug detected\n"); 8394 8395 /* abort the flogi coming back to ourselves 8396 * due to external loopback on the port. 8397 */ 8398 lpfc_els_abort_flogi(phba); 8399 return 0; 8400 8401 } else if (rc > 0) { /* greater than */ 8402 spin_lock_irq(shost->host_lock); 8403 vport->fc_flag |= FC_PT2PT_PLOGI; 8404 spin_unlock_irq(shost->host_lock); 8405 8406 /* If we have the high WWPN we can assign our own 8407 * myDID; otherwise, we have to WAIT for a PLOGI 8408 * from the remote NPort to find out what it 8409 * will be. 8410 */ 8411 vport->fc_myDID = PT2PT_LocalID; 8412 } else { 8413 vport->fc_myDID = PT2PT_RemoteID; 8414 } 8415 8416 /* 8417 * The vport state should go to LPFC_FLOGI only 8418 * AFTER we issue a FLOGI, not receive one. 8419 */ 8420 spin_lock_irq(shost->host_lock); 8421 fc_flag = vport->fc_flag; 8422 port_state = vport->port_state; 8423 vport->fc_flag |= FC_PT2PT; 8424 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 8425 8426 /* Acking an unsol FLOGI. Count 1 for link bounce 8427 * work-around. 
8428 */ 8429 vport->rcv_flogi_cnt++; 8430 spin_unlock_irq(shost->host_lock); 8431 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8432 "3311 Rcv Flogi PS x%x new PS x%x " 8433 "fc_flag x%x new fc_flag x%x\n", 8434 port_state, vport->port_state, 8435 fc_flag, vport->fc_flag); 8436 8437 /* 8438 * We temporarily set fc_myDID to make it look like we are 8439 * a Fabric. This is done just so we end up with the right 8440 * did / sid on the FLOGI ACC rsp. 8441 */ 8442 did = vport->fc_myDID; 8443 vport->fc_myDID = Fabric_DID; 8444 8445 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); 8446 8447 /* Defer ACC response until AFTER we issue a FLOGI */ 8448 if (!(phba->hba_flag & HBA_FLOGI_ISSUED)) { 8449 phba->defer_flogi_acc_rx_id = bf_get(wqe_ctxt_tag, 8450 &wqe->xmit_els_rsp.wqe_com); 8451 phba->defer_flogi_acc_ox_id = bf_get(wqe_rcvoxid, 8452 &wqe->xmit_els_rsp.wqe_com); 8453 8454 vport->fc_myDID = did; 8455 8456 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8457 "3344 Deferring FLOGI ACC: rx_id: x%x," 8458 " ox_id: x%x, hba_flag x%x\n", 8459 phba->defer_flogi_acc_rx_id, 8460 phba->defer_flogi_acc_ox_id, phba->hba_flag); 8461 8462 phba->defer_flogi_acc_flag = true; 8463 8464 return 0; 8465 } 8466 8467 /* Send back ACC */ 8468 lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, cmdiocb, ndlp, NULL); 8469 8470 /* Now lets put fc_myDID back to what its supposed to be */ 8471 vport->fc_myDID = did; 8472 8473 return 0; 8474 } 8475 8476 /** 8477 * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb 8478 * @vport: pointer to a host virtual N_Port data structure. 8479 * @cmdiocb: pointer to lpfc command iocb data structure. 8480 * @ndlp: pointer to a node-list data structure. 8481 * 8482 * This routine processes Request Node Identification Data (RNID) IOCB 8483 * received as an ELS unsolicited event. Only when the RNID specified format 8484 * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data) 8485 * present, this routine will invoke the lpfc_els_rsp_rnid_acc() routine to 8486 * Accept (ACC) the RNID ELS command. All the other RNID formats are 8487 * rejected by invoking the lpfc_els_rsp_reject() routine. 8488 * 8489 * Return code 8490 * 0 - Successfully processed rnid iocb (currently always return 0) 8491 **/ 8492 static int 8493 lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8494 struct lpfc_nodelist *ndlp) 8495 { 8496 struct lpfc_dmabuf *pcmd; 8497 uint32_t *lp; 8498 RNID *rn; 8499 struct ls_rjt stat; 8500 8501 pcmd = cmdiocb->cmd_dmabuf; 8502 lp = (uint32_t *) pcmd->virt; 8503 8504 lp++; 8505 rn = (RNID *) lp; 8506 8507 /* RNID received */ 8508 8509 switch (rn->Format) { 8510 case 0: 8511 case RNID_TOPOLOGY_DISC: 8512 /* Send back ACC */ 8513 lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp); 8514 break; 8515 default: 8516 /* Reject this request because format not supported */ 8517 stat.un.b.lsRjtRsvd0 = 0; 8518 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8519 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8520 stat.un.b.vendorUnique = 0; 8521 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 8522 NULL); 8523 } 8524 return 0; 8525 } 8526 8527 /** 8528 * lpfc_els_rcv_echo - Process an unsolicited echo iocb 8529 * @vport: pointer to a host virtual N_Port data structure. 8530 * @cmdiocb: pointer to lpfc command iocb data structure. 8531 * @ndlp: pointer to a node-list data structure. 
8532 * 8533 * Return code 8534 * 0 - Successfully processed echo iocb (currently always return 0) 8535 **/ 8536 static int 8537 lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8538 struct lpfc_nodelist *ndlp) 8539 { 8540 uint8_t *pcmd; 8541 8542 pcmd = (uint8_t *)cmdiocb->cmd_dmabuf->virt; 8543 8544 /* skip over first word of echo command to find echo data */ 8545 pcmd += sizeof(uint32_t); 8546 8547 lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp); 8548 return 0; 8549 } 8550 8551 /** 8552 * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb 8553 * @vport: pointer to a host virtual N_Port data structure. 8554 * @cmdiocb: pointer to lpfc command iocb data structure. 8555 * @ndlp: pointer to a node-list data structure. 8556 * 8557 * This routine processes a Link Incident Report Registration(LIRR) IOCB 8558 * received as an ELS unsolicited event. Currently, this function just invokes 8559 * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally. 8560 * 8561 * Return code 8562 * 0 - Successfully processed lirr iocb (currently always return 0) 8563 **/ 8564 static int 8565 lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8566 struct lpfc_nodelist *ndlp) 8567 { 8568 struct ls_rjt stat; 8569 8570 /* For now, unconditionally reject this command */ 8571 stat.un.b.lsRjtRsvd0 = 0; 8572 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8573 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8574 stat.un.b.vendorUnique = 0; 8575 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 8576 return 0; 8577 } 8578 8579 /** 8580 * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb 8581 * @vport: pointer to a host virtual N_Port data structure. 8582 * @cmdiocb: pointer to lpfc command iocb data structure. 8583 * @ndlp: pointer to a node-list data structure. 8584 * 8585 * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB 8586 * received as an ELS unsolicited event. A request to RRQ shall only 8587 * be accepted if the Originator Nx_Port N_Port_ID or the Responder 8588 * Nx_Port N_Port_ID of the target Exchange is the same as the 8589 * N_Port_ID of the Nx_Port that makes the request. If the RRQ is 8590 * not accepted, an LS_RJT with reason code "Unable to perform 8591 * command request" and reason code explanation "Invalid Originator 8592 * S_ID" shall be returned. For now, we just unconditionally accept 8593 * RRQ from the target. 8594 **/ 8595 static void 8596 lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8597 struct lpfc_nodelist *ndlp) 8598 { 8599 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8600 if (vport->phba->sli_rev == LPFC_SLI_REV4) 8601 lpfc_els_clear_rrq(vport, cmdiocb, ndlp); 8602 } 8603 8604 /** 8605 * lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd 8606 * @phba: pointer to lpfc hba data structure. 8607 * @pmb: pointer to the driver internal queue element for mailbox command. 8608 * 8609 * This routine is the completion callback function for the MBX_READ_LNK_STAT 8610 * mailbox command. This callback function is to actually send the Accept 8611 * (ACC) response to a Read Link Status (RLS) unsolicited IOCB event. It 8612 * collects the link statistics from the completion of the MBX_READ_LNK_STAT 8613 * mailbox command, constructs the RLS response with the link statistics 8614 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC 8615 * response to the RLS. 
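 *
 * The rx_id and ox_id of the original unsolicited RLS are carried into this
 * callback packed into pmb->ctx_buf by lpfc_els_rcv_rls(), ox_id in the
 * upper 16 bits and rx_id in the lower 16 bits:
 *
 *   mbox->ctx_buf = (void *)((unsigned long) (ox_id << 16 | ctx));
 *
 * and are unpacked here to address the response to the original exchange.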
8616 * 8617 * Note that the ndlp reference count will be incremented by 1 for holding the 8618 * ndlp and the reference to ndlp will be stored into the ndlp field of 8619 * the IOCB for the completion callback function to the RLS Accept Response 8620 * ELS IOCB command. 8621 * 8622 **/ 8623 static void 8624 lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 8625 { 8626 int rc = 0; 8627 MAILBOX_t *mb; 8628 IOCB_t *icmd; 8629 union lpfc_wqe128 *wqe; 8630 struct RLS_RSP *rls_rsp; 8631 uint8_t *pcmd; 8632 struct lpfc_iocbq *elsiocb; 8633 struct lpfc_nodelist *ndlp; 8634 uint16_t oxid; 8635 uint16_t rxid; 8636 uint32_t cmdsize; 8637 u32 ulp_context; 8638 8639 mb = &pmb->u.mb; 8640 8641 ndlp = pmb->ctx_ndlp; 8642 rxid = (uint16_t)((unsigned long)(pmb->ctx_buf) & 0xffff); 8643 oxid = (uint16_t)(((unsigned long)(pmb->ctx_buf) >> 16) & 0xffff); 8644 pmb->ctx_buf = NULL; 8645 pmb->ctx_ndlp = NULL; 8646 8647 if (mb->mbxStatus) { 8648 mempool_free(pmb, phba->mbox_mem_pool); 8649 return; 8650 } 8651 8652 cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t); 8653 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 8654 lpfc_max_els_tries, ndlp, 8655 ndlp->nlp_DID, ELS_CMD_ACC); 8656 8657 /* Decrement the ndlp reference count from previous mbox command */ 8658 lpfc_nlp_put(ndlp); 8659 8660 if (!elsiocb) { 8661 mempool_free(pmb, phba->mbox_mem_pool); 8662 return; 8663 } 8664 8665 ulp_context = get_job_ulpcontext(phba, elsiocb); 8666 if (phba->sli_rev == LPFC_SLI_REV4) { 8667 wqe = &elsiocb->wqe; 8668 /* Xri / rx_id */ 8669 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, rxid); 8670 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, oxid); 8671 } else { 8672 icmd = &elsiocb->iocb; 8673 icmd->ulpContext = rxid; 8674 icmd->unsli3.rcvsli3.ox_id = oxid; 8675 } 8676 8677 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 8678 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 8679 pcmd += sizeof(uint32_t); /* Skip past command */ 8680 rls_rsp = (struct RLS_RSP *)pcmd; 8681 8682 rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt); 8683 rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt); 8684 rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt); 8685 rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt); 8686 rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord); 8687 rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt); 8688 mempool_free(pmb, phba->mbox_mem_pool); 8689 /* Xmit ELS RLS ACC response tag <ulpIoTag> */ 8690 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 8691 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, " 8692 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 8693 elsiocb->iotag, ulp_context, 8694 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 8695 ndlp->nlp_rpi); 8696 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 8697 phba->fc_stat.elsXmitACC++; 8698 elsiocb->ndlp = lpfc_nlp_get(ndlp); 8699 if (!elsiocb->ndlp) { 8700 lpfc_els_free_iocb(phba, elsiocb); 8701 return; 8702 } 8703 8704 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 8705 if (rc == IOCB_ERROR) { 8706 lpfc_els_free_iocb(phba, elsiocb); 8707 lpfc_nlp_put(ndlp); 8708 } 8709 return; 8710 } 8711 8712 /** 8713 * lpfc_els_rcv_rls - Process an unsolicited rls iocb 8714 * @vport: pointer to a host virtual N_Port data structure. 8715 * @cmdiocb: pointer to lpfc command iocb data structure. 8716 * @ndlp: pointer to a node-list data structure. 8717 * 8718 * This routine processes Read Link Status (RLS) IOCB received as an 8719 * ELS unsolicited event. 
It first checks the remote port state. If the 8720 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE 8721 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject 8722 * response. Otherwise, it issues the MBX_READ_LNK_STAT mailbox command 8723 * to read the HBA link statistics. The callback function, 8724 * lpfc_els_rsp_rls_acc(), set on the MBX_READ_LNK_STAT mailbox command, 8725 * then actually sends out the RLS Accept (ACC) response. 8726 * 8727 * Return codes 8728 * 0 - Successfully processed rls iocb (currently always return 0) 8729 **/ 8730 static int 8731 lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8732 struct lpfc_nodelist *ndlp) 8733 { 8734 struct lpfc_hba *phba = vport->phba; 8735 LPFC_MBOXQ_t *mbox; 8736 struct ls_rjt stat; 8737 u32 ctx = get_job_ulpcontext(phba, cmdiocb); 8738 u32 ox_id = get_job_rcvoxid(phba, cmdiocb); 8739 8740 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 8741 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) 8742 /* reject the unsolicited RLS request and done with it */ 8743 goto reject_out; 8744 8745 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC); 8746 if (mbox) { 8747 lpfc_read_lnk_stat(phba, mbox); 8748 mbox->ctx_buf = (void *)((unsigned long) 8749 (ox_id << 16 | ctx)); 8750 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 8751 if (!mbox->ctx_ndlp) 8752 goto node_err; 8753 mbox->vport = vport; 8754 mbox->mbox_cmpl = lpfc_els_rsp_rls_acc; 8755 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 8756 != MBX_NOT_FINISHED) 8757 /* Mbox completion will send ELS Response */ 8758 return 0; 8759 /* Decrement reference count used for the failed mbox 8760 * command. 8761 */ 8762 lpfc_nlp_put(ndlp); 8763 node_err: 8764 mempool_free(mbox, phba->mbox_mem_pool); 8765 } 8766 reject_out: 8767 /* issue rejection response */ 8768 stat.un.b.lsRjtRsvd0 = 0; 8769 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8770 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8771 stat.un.b.vendorUnique = 0; 8772 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 8773 return 0; 8774 } 8775 8776 /** 8777 * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb 8778 * @vport: pointer to a host virtual N_Port data structure. 8779 * @cmdiocb: pointer to lpfc command iocb data structure. 8780 * @ndlp: pointer to a node-list data structure. 8781 * 8782 * This routine processes Read Timeout Value (RTV) IOCB received as an 8783 * ELS unsolicited event. It first checks the remote port state. If the 8784 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE 8785 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject 8786 * response. Otherwise, it sends the Accept (ACC) response to a Read Timeout 8787 * Value (RTV) unsolicited IOCB event. 8788 * 8789 * Note that the ndlp reference count will be incremented by 1 for holding the 8790 * ndlp and the reference to ndlp will be stored into the ndlp field of 8791 * the IOCB for the completion callback function to the RTV Accept Response 8792 * ELS IOCB command.
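 *
 * The RTV ACC payload reports the local R_A_TOV in milliseconds
 * (phba->fc_ratov * 1000), the E_D_TOV value (phba->fc_edtov), and a qtov
 * word whose E_D_TOV resolution bit reflects phba->fc_edtovResol.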
8793 * 8794 * Return codes 8795 * 0 - Successfully processed rtv iocb (currently always return 0) 8796 **/ 8797 static int 8798 lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8799 struct lpfc_nodelist *ndlp) 8800 { 8801 int rc = 0; 8802 IOCB_t *icmd; 8803 union lpfc_wqe128 *wqe; 8804 struct lpfc_hba *phba = vport->phba; 8805 struct ls_rjt stat; 8806 struct RTV_RSP *rtv_rsp; 8807 uint8_t *pcmd; 8808 struct lpfc_iocbq *elsiocb; 8809 uint32_t cmdsize; 8810 u32 ulp_context; 8811 8812 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 8813 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) 8814 /* reject the unsolicited RTV request and done with it */ 8815 goto reject_out; 8816 8817 cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t); 8818 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 8819 lpfc_max_els_tries, ndlp, 8820 ndlp->nlp_DID, ELS_CMD_ACC); 8821 8822 if (!elsiocb) 8823 return 1; 8824 8825 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 8826 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 8827 pcmd += sizeof(uint32_t); /* Skip past command */ 8828 8829 ulp_context = get_job_ulpcontext(phba, elsiocb); 8830 /* use the command's xri in the response */ 8831 if (phba->sli_rev == LPFC_SLI_REV4) { 8832 wqe = &elsiocb->wqe; 8833 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 8834 get_job_ulpcontext(phba, cmdiocb)); 8835 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 8836 get_job_rcvoxid(phba, cmdiocb)); 8837 } else { 8838 icmd = &elsiocb->iocb; 8839 icmd->ulpContext = get_job_ulpcontext(phba, cmdiocb); 8840 icmd->unsli3.rcvsli3.ox_id = get_job_rcvoxid(phba, cmdiocb); 8841 } 8842 8843 rtv_rsp = (struct RTV_RSP *)pcmd; 8844 8845 /* populate RTV payload */ 8846 rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */ 8847 rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov); 8848 bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0); 8849 bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */ 8850 rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov); 8851 8852 /* Xmit ELS RTV ACC response tag <ulpIoTag> */ 8853 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 8854 "2875 Xmit ELS RTV ACC response tag x%x xri x%x, " 8855 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, " 8856 "Data: x%x x%x x%x\n", 8857 elsiocb->iotag, ulp_context, 8858 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 8859 ndlp->nlp_rpi, 8860 rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov); 8861 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 8862 phba->fc_stat.elsXmitACC++; 8863 elsiocb->ndlp = lpfc_nlp_get(ndlp); 8864 if (!elsiocb->ndlp) { 8865 lpfc_els_free_iocb(phba, elsiocb); 8866 return 0; 8867 } 8868 8869 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 8870 if (rc == IOCB_ERROR) { 8871 lpfc_els_free_iocb(phba, elsiocb); 8872 lpfc_nlp_put(ndlp); 8873 } 8874 return 0; 8875 8876 reject_out: 8877 /* issue rejection response */ 8878 stat.un.b.lsRjtRsvd0 = 0; 8879 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8880 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8881 stat.un.b.vendorUnique = 0; 8882 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 8883 return 0; 8884 } 8885 8886 /* lpfc_issue_els_rrq - Issue an ELS RRQ (Reinstate Recovery Qualifier) command 8887 * @vport: pointer to a host virtual N_Port data structure. 8888 * @ndlp: pointer to a node-list data structure. 8889 * @did: DID of the target. 8890 * @rrq: Pointer to the rrq struct. 8891 * 8892 * Build an ELS RRQ command and send it to the target. If the issue_iocb is 8893 * successful, the completion handler will clear the RRQ.
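 *
 * The RRQ payload carries the originator exchange id (the local xritag
 * translated through phba->sli4_hba.xri_ids[]), the responder rxid from
 * the @rrq, and the local N_Port_ID (vport->fc_myDID), all converted to
 * big-endian before the command is issued.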
8894 * 8895 * Return codes 8896 * 0 - Successfully sent rrq els iocb. 8897 * 1 - Failed to send rrq els iocb. 8898 **/ 8899 static int 8900 lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 8901 uint32_t did, struct lpfc_node_rrq *rrq) 8902 { 8903 struct lpfc_hba *phba = vport->phba; 8904 struct RRQ *els_rrq; 8905 struct lpfc_iocbq *elsiocb; 8906 uint8_t *pcmd; 8907 uint16_t cmdsize; 8908 int ret; 8909 8910 if (!ndlp) 8911 return 1; 8912 8913 /* If ndlp is not NULL, we will bump the reference count on it */ 8914 cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ)); 8915 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did, 8916 ELS_CMD_RRQ); 8917 if (!elsiocb) 8918 return 1; 8919 8920 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 8921 8922 /* For RRQ request, remainder of payload is Exchange IDs */ 8923 *((uint32_t *) (pcmd)) = ELS_CMD_RRQ; 8924 pcmd += sizeof(uint32_t); 8925 els_rrq = (struct RRQ *) pcmd; 8926 8927 bf_set(rrq_oxid, els_rrq, phba->sli4_hba.xri_ids[rrq->xritag]); 8928 bf_set(rrq_rxid, els_rrq, rrq->rxid); 8929 bf_set(rrq_did, els_rrq, vport->fc_myDID); 8930 els_rrq->rrq = cpu_to_be32(els_rrq->rrq); 8931 els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg); 8932 8933 8934 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 8935 "Issue RRQ: did:x%x", 8936 did, rrq->xritag, rrq->rxid); 8937 elsiocb->context_un.rrq = rrq; 8938 elsiocb->cmd_cmpl = lpfc_cmpl_els_rrq; 8939 8940 elsiocb->ndlp = lpfc_nlp_get(ndlp); 8941 if (!elsiocb->ndlp) 8942 goto io_err; 8943 8944 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 8945 if (ret == IOCB_ERROR) { 8946 lpfc_nlp_put(ndlp); 8947 goto io_err; 8948 } 8949 return 0; 8950 8951 io_err: 8952 lpfc_els_free_iocb(phba, elsiocb); 8953 return 1; 8954 } 8955 8956 /** 8957 * lpfc_send_rrq - Sends ELS RRQ if needed. 8958 * @phba: pointer to lpfc hba data structure. 8959 * @rrq: pointer to the active rrq. 8960 * 8961 * This routine will call the lpfc_issue_els_rrq if the rrq is 8962 * still active for the xri. If this function returns a failure then 8963 * the caller needs to clean up the RRQ by calling lpfc_clr_active_rrq. 8964 * 8965 * Returns 0 Success. 8966 * 1 Failure. 8967 **/ 8968 int 8969 lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq) 8970 { 8971 struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport, 8972 rrq->nlp_DID); 8973 if (!ndlp) 8974 return 1; 8975 8976 if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag)) 8977 return lpfc_issue_els_rrq(rrq->vport, ndlp, 8978 rrq->nlp_DID, rrq); 8979 else 8980 return 1; 8981 } 8982 8983 /** 8984 * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command 8985 * @vport: pointer to a host virtual N_Port data structure. 8986 * @cmdsize: size of the ELS command. 8987 * @oldiocb: pointer to the original lpfc command iocb data structure. 8988 * @ndlp: pointer to a node-list data structure. 8989 * 8990 * This routine issuees an Accept (ACC) Read Port List (RPL) ELS command. 8991 * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL. 8992 * 8993 * Note that the ndlp reference count will be incremented by 1 for holding the 8994 * ndlp and the reference to ndlp will be stored into the ndlp field of 8995 * the IOCB for the completion callback function to the RPL Accept Response 8996 * ELS command. 
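 *
 * The first payload word carries ELS_CMD_ACC with the accepted payload
 * length folded into its second half-word, followed by a single-entry
 * RPL_RSP that describes the local port (index 0, port number 0, the
 * @vport's N_Port_ID and WWPN).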
8997 * 8998 * Return code 8999 * 0 - Successfully issued ACC RPL ELS command 9000 * 1 - Failed to issue ACC RPL ELS command 9001 **/ 9002 static int 9003 lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize, 9004 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 9005 { 9006 int rc = 0; 9007 struct lpfc_hba *phba = vport->phba; 9008 IOCB_t *icmd; 9009 union lpfc_wqe128 *wqe; 9010 RPL_RSP rpl_rsp; 9011 struct lpfc_iocbq *elsiocb; 9012 uint8_t *pcmd; 9013 u32 ulp_context; 9014 9015 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 9016 ndlp->nlp_DID, ELS_CMD_ACC); 9017 9018 if (!elsiocb) 9019 return 1; 9020 9021 ulp_context = get_job_ulpcontext(phba, elsiocb); 9022 if (phba->sli_rev == LPFC_SLI_REV4) { 9023 wqe = &elsiocb->wqe; 9024 /* Xri / rx_id */ 9025 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 9026 get_job_ulpcontext(phba, oldiocb)); 9027 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 9028 get_job_rcvoxid(phba, oldiocb)); 9029 } else { 9030 icmd = &elsiocb->iocb; 9031 icmd->ulpContext = get_job_ulpcontext(phba, oldiocb); 9032 icmd->unsli3.rcvsli3.ox_id = get_job_rcvoxid(phba, oldiocb); 9033 } 9034 9035 pcmd = elsiocb->cmd_dmabuf->virt; 9036 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 9037 pcmd += sizeof(uint16_t); 9038 *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize); 9039 pcmd += sizeof(uint16_t); 9040 9041 /* Setup the RPL ACC payload */ 9042 rpl_rsp.listLen = be32_to_cpu(1); 9043 rpl_rsp.index = 0; 9044 rpl_rsp.port_num_blk.portNum = 0; 9045 rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID); 9046 memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname, 9047 sizeof(struct lpfc_name)); 9048 memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t)); 9049 /* Xmit ELS RPL ACC response tag <ulpIoTag> */ 9050 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 9051 "0120 Xmit ELS RPL ACC response tag x%x " 9052 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " 9053 "rpi x%x\n", 9054 elsiocb->iotag, ulp_context, 9055 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 9056 ndlp->nlp_rpi); 9057 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 9058 phba->fc_stat.elsXmitACC++; 9059 elsiocb->ndlp = lpfc_nlp_get(ndlp); 9060 if (!elsiocb->ndlp) { 9061 lpfc_els_free_iocb(phba, elsiocb); 9062 return 1; 9063 } 9064 9065 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 9066 if (rc == IOCB_ERROR) { 9067 lpfc_els_free_iocb(phba, elsiocb); 9068 lpfc_nlp_put(ndlp); 9069 return 1; 9070 } 9071 9072 return 0; 9073 } 9074 9075 /** 9076 * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb 9077 * @vport: pointer to a host virtual N_Port data structure. 9078 * @cmdiocb: pointer to lpfc command iocb data structure. 9079 * @ndlp: pointer to a node-list data structure. 9080 * 9081 * This routine processes Read Port List (RPL) IOCB received as an ELS 9082 * unsolicited event. It first checks the remote port state. If the remote 9083 * port is not in NLP_STE_UNMAPPED_NODE and NLP_STE_MAPPED_NODE states, it 9084 * invokes the lpfc_els_rsp_reject() routine to send reject response. 9085 * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine 9086 * to accept the RPL. 
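 *
 * Only the local port is reported. If the request asks for index 0 and
 * its maxsize is either 0 or large enough to hold a full RPL_RSP, the
 * ACC carries a complete RPL_RSP; otherwise the response is limited to
 * maxsize words.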
9087 * 9088 * Return code 9089 * 0 - Successfully processed rpl iocb (currently always return 0) 9090 **/ 9091 static int 9092 lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 9093 struct lpfc_nodelist *ndlp) 9094 { 9095 struct lpfc_dmabuf *pcmd; 9096 uint32_t *lp; 9097 uint32_t maxsize; 9098 uint16_t cmdsize; 9099 RPL *rpl; 9100 struct ls_rjt stat; 9101 9102 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 9103 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { 9104 /* issue rejection response */ 9105 stat.un.b.lsRjtRsvd0 = 0; 9106 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 9107 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 9108 stat.un.b.vendorUnique = 0; 9109 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 9110 NULL); 9111 /* rejected the unsolicited RPL request and done with it */ 9112 return 0; 9113 } 9114 9115 pcmd = cmdiocb->cmd_dmabuf; 9116 lp = (uint32_t *) pcmd->virt; 9117 rpl = (RPL *) (lp + 1); 9118 maxsize = be32_to_cpu(rpl->maxsize); 9119 9120 /* We support only one port */ 9121 if ((rpl->index == 0) && 9122 ((maxsize == 0) || 9123 ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) { 9124 cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP); 9125 } else { 9126 cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t); 9127 } 9128 lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp); 9129 9130 return 0; 9131 } 9132 9133 /** 9134 * lpfc_els_rcv_farp - Process an unsolicited farp request els command 9135 * @vport: pointer to a virtual N_Port data structure. 9136 * @cmdiocb: pointer to lpfc command iocb data structure. 9137 * @ndlp: pointer to a node-list data structure. 9138 * 9139 * This routine processes Fibre Channel Address Resolution Protocol 9140 * (FARP) Request IOCB received as an ELS unsolicited event. Currently, 9141 * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such, 9142 * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the 9143 * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the 9144 * remote PortName is compared against the FC PortName stored in the @vport 9145 * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is 9146 * compared against the FC NodeName stored in the @vport data structure. 9147 * If any of these matches and the FARP_REQUEST_FARPR flag is set in the 9148 * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is 9149 * invoked to send out FARP Response to the remote node. Before sending the 9150 * FARP Response, however, the FARP_REQUEST_PLOGI flag is check in the FARP 9151 * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi() 9152 * routine is invoked to log into the remote port first. 
9153 * 9154 * Return code 9155 * 0 - Either the FARP Match Mode not supported or successfully processed 9156 **/ 9157 static int 9158 lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 9159 struct lpfc_nodelist *ndlp) 9160 { 9161 struct lpfc_dmabuf *pcmd; 9162 uint32_t *lp; 9163 FARP *fp; 9164 uint32_t cnt, did; 9165 9166 did = get_job_els_rsp64_did(vport->phba, cmdiocb); 9167 pcmd = cmdiocb->cmd_dmabuf; 9168 lp = (uint32_t *) pcmd->virt; 9169 9170 lp++; 9171 fp = (FARP *) lp; 9172 /* FARP-REQ received from DID <did> */ 9173 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 9174 "0601 FARP-REQ received from DID x%x\n", did); 9175 /* We will only support match on WWPN or WWNN */ 9176 if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) { 9177 return 0; 9178 } 9179 9180 cnt = 0; 9181 /* If this FARP command is searching for my portname */ 9182 if (fp->Mflags & FARP_MATCH_PORT) { 9183 if (memcmp(&fp->RportName, &vport->fc_portname, 9184 sizeof(struct lpfc_name)) == 0) 9185 cnt = 1; 9186 } 9187 9188 /* If this FARP command is searching for my nodename */ 9189 if (fp->Mflags & FARP_MATCH_NODE) { 9190 if (memcmp(&fp->RnodeName, &vport->fc_nodename, 9191 sizeof(struct lpfc_name)) == 0) 9192 cnt = 1; 9193 } 9194 9195 if (cnt) { 9196 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || 9197 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) { 9198 /* Log back into the node before sending the FARP. */ 9199 if (fp->Rflags & FARP_REQUEST_PLOGI) { 9200 ndlp->nlp_prev_state = ndlp->nlp_state; 9201 lpfc_nlp_set_state(vport, ndlp, 9202 NLP_STE_PLOGI_ISSUE); 9203 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 9204 } 9205 9206 /* Send a FARP response to that node */ 9207 if (fp->Rflags & FARP_REQUEST_FARPR) 9208 lpfc_issue_els_farpr(vport, did, 0); 9209 } 9210 } 9211 return 0; 9212 } 9213 9214 /** 9215 * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb 9216 * @vport: pointer to a host virtual N_Port data structure. 9217 * @cmdiocb: pointer to lpfc command iocb data structure. 9218 * @ndlp: pointer to a node-list data structure. 9219 * 9220 * This routine processes Fibre Channel Address Resolution Protocol 9221 * Response (FARPR) IOCB received as an ELS unsolicited event. It simply 9222 * invokes the lpfc_els_rsp_acc() routine to the remote node to accept 9223 * the FARP response request. 9224 * 9225 * Return code 9226 * 0 - Successfully processed FARPR IOCB (currently always return 0) 9227 **/ 9228 static int 9229 lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 9230 struct lpfc_nodelist *ndlp) 9231 { 9232 uint32_t did; 9233 9234 did = get_job_els_rsp64_did(vport->phba, cmdiocb); 9235 9236 /* FARP-RSP received from DID <did> */ 9237 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 9238 "0600 FARP-RSP received from DID x%x\n", did); 9239 /* ACCEPT the Farp resp request */ 9240 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 9241 9242 return 0; 9243 } 9244 9245 /** 9246 * lpfc_els_rcv_fan - Process an unsolicited fan iocb command 9247 * @vport: pointer to a host virtual N_Port data structure. 9248 * @cmdiocb: pointer to lpfc command iocb data structure. 9249 * @fan_ndlp: pointer to a node-list data structure. 9250 * 9251 * This routine processes a Fabric Address Notification (FAN) IOCB 9252 * command received as an ELS unsolicited event. The FAN ELS command will 9253 * only be processed on a physical port (i.e., the @vport represents the 9254 * physical port). 
The fabric NodeName and PortName from the FAN IOCB are 9255 * compared against those in the phba data structure. If either of them 9256 * differs, the port has switched fabrics and the lpfc_issue_init_vfi() 9257 * routine is invoked to restart Fabric Login (FLOGI) and the discovery. 9258 * Otherwise, if both are identical, the lpfc_issue_fabric_reglogin() routine 9259 * (or lpfc_issue_reg_vfi() on SLI-4) is invoked to register login to the fabric. 9260 * 9261 * Return code 9262 * 0 - Successfully processed fan iocb (currently always return 0). 9263 **/ 9264 static int 9265 lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 9266 struct lpfc_nodelist *fan_ndlp) 9267 { 9268 struct lpfc_hba *phba = vport->phba; 9269 uint32_t *lp; 9270 FAN *fp; 9271 9272 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n"); 9273 lp = (uint32_t *)cmdiocb->cmd_dmabuf->virt; 9274 fp = (FAN *) ++lp; 9275 /* FAN received; Fan does not have a reply sequence */ 9276 if ((vport == phba->pport) && 9277 (vport->port_state == LPFC_LOCAL_CFG_LINK)) { 9278 if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName, 9279 sizeof(struct lpfc_name))) || 9280 (memcmp(&phba->fc_fabparam.portName, &fp->FportName, 9281 sizeof(struct lpfc_name)))) { 9282 /* This port has switched fabrics. FLOGI is required */ 9283 lpfc_issue_init_vfi(vport); 9284 } else { 9285 /* FAN verified - skip FLOGI */ 9286 vport->fc_myDID = vport->fc_prevDID; 9287 if (phba->sli_rev < LPFC_SLI_REV4) 9288 lpfc_issue_fabric_reglogin(vport); 9289 else { 9290 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 9291 "3138 Need register VFI: (x%x/%x)\n", 9292 vport->fc_prevDID, vport->fc_myDID); 9293 lpfc_issue_reg_vfi(vport); 9294 } 9295 } 9296 } 9297 return 0; 9298 } 9299 9300 /** 9301 * lpfc_els_rcv_edc - Process an unsolicited EDC iocb 9302 * @vport: pointer to a host virtual N_Port data structure. 9303 * @cmdiocb: pointer to lpfc command iocb data structure. 9304 * @ndlp: pointer to a node-list data structure.
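 *
 * This routine processes an Exchange Diagnostic Capabilities (EDC) IOCB
 * received as an ELS unsolicited event. It walks the diagnostic
 * descriptor TLVs in the request, noting the Link Fault Capability and
 * Congestion Signaling Capability descriptors to settle the congestion
 * signal and FPIN registration settings, and then sends back an EDC
 * Accept (ACC) response by invoking lpfc_issue_els_edc_rsp().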
9305 * 9306 * Return code 9307 * 0 - Successfully processed echo iocb (currently always return 0) 9308 **/ 9309 static int 9310 lpfc_els_rcv_edc(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 9311 struct lpfc_nodelist *ndlp) 9312 { 9313 struct lpfc_hba *phba = vport->phba; 9314 struct fc_els_edc *edc_req; 9315 struct fc_tlv_desc *tlv; 9316 uint8_t *payload; 9317 uint32_t *ptr, dtag; 9318 const char *dtag_nm; 9319 int desc_cnt = 0, bytes_remain; 9320 struct fc_diag_lnkflt_desc *plnkflt; 9321 9322 payload = cmdiocb->cmd_dmabuf->virt; 9323 9324 edc_req = (struct fc_els_edc *)payload; 9325 bytes_remain = be32_to_cpu(edc_req->desc_len); 9326 9327 ptr = (uint32_t *)payload; 9328 lpfc_printf_vlog(vport, KERN_INFO, 9329 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, 9330 "3319 Rcv EDC payload len %d: x%x x%x x%x\n", 9331 bytes_remain, be32_to_cpu(*ptr), 9332 be32_to_cpu(*(ptr + 1)), be32_to_cpu(*(ptr + 2))); 9333 9334 /* No signal support unless there is a congestion descriptor */ 9335 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 9336 phba->cgn_sig_freq = 0; 9337 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; 9338 9339 if (bytes_remain <= 0) 9340 goto out; 9341 9342 tlv = edc_req->desc; 9343 9344 /* 9345 * cycle through EDC diagnostic descriptors to find the 9346 * congestion signaling capability descriptor 9347 */ 9348 while (bytes_remain) { 9349 if (bytes_remain < FC_TLV_DESC_HDR_SZ) { 9350 lpfc_printf_log(phba, KERN_WARNING, 9351 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, 9352 "6464 Truncated TLV hdr on " 9353 "Diagnostic descriptor[%d]\n", 9354 desc_cnt); 9355 goto out; 9356 } 9357 9358 dtag = be32_to_cpu(tlv->desc_tag); 9359 switch (dtag) { 9360 case ELS_DTAG_LNK_FAULT_CAP: 9361 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 9362 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 9363 sizeof(struct fc_diag_lnkflt_desc)) { 9364 lpfc_printf_log(phba, KERN_WARNING, 9365 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, 9366 "6465 Truncated Link Fault Diagnostic " 9367 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 9368 desc_cnt, bytes_remain, 9369 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 9370 sizeof(struct fc_diag_lnkflt_desc)); 9371 goto out; 9372 } 9373 plnkflt = (struct fc_diag_lnkflt_desc *)tlv; 9374 lpfc_printf_log(phba, KERN_INFO, 9375 LOG_ELS | LOG_LDS_EVENT, 9376 "4626 Link Fault Desc Data: x%08x len x%x " 9377 "da x%x dd x%x interval x%x\n", 9378 be32_to_cpu(plnkflt->desc_tag), 9379 be32_to_cpu(plnkflt->desc_len), 9380 be32_to_cpu( 9381 plnkflt->degrade_activate_threshold), 9382 be32_to_cpu( 9383 plnkflt->degrade_deactivate_threshold), 9384 be32_to_cpu(plnkflt->fec_degrade_interval)); 9385 break; 9386 case ELS_DTAG_CG_SIGNAL_CAP: 9387 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 9388 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 9389 sizeof(struct fc_diag_cg_sig_desc)) { 9390 lpfc_printf_log( 9391 phba, KERN_WARNING, LOG_CGN_MGMT, 9392 "6466 Truncated cgn signal Diagnostic " 9393 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 9394 desc_cnt, bytes_remain, 9395 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 9396 sizeof(struct fc_diag_cg_sig_desc)); 9397 goto out; 9398 } 9399 9400 phba->cgn_reg_fpin = phba->cgn_init_reg_fpin; 9401 phba->cgn_reg_signal = phba->cgn_init_reg_signal; 9402 9403 /* We start negotiation with lpfc_fabric_cgn_frequency. 9404 * When we process the EDC, we will settle on the 9405 * higher frequency. 
9406 */ 9407 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency; 9408 9409 lpfc_least_capable_settings( 9410 phba, (struct fc_diag_cg_sig_desc *)tlv); 9411 break; 9412 default: 9413 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 9414 lpfc_printf_log(phba, KERN_WARNING, 9415 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, 9416 "6467 unknown Diagnostic " 9417 "Descriptor[%d]: tag x%x (%s)\n", 9418 desc_cnt, dtag, dtag_nm); 9419 } 9420 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); 9421 tlv = fc_tlv_next_desc(tlv); 9422 desc_cnt++; 9423 } 9424 out: 9425 /* Need to send back an ACC */ 9426 lpfc_issue_els_edc_rsp(vport, cmdiocb, ndlp); 9427 9428 lpfc_config_cgn_signal(phba); 9429 return 0; 9430 } 9431 9432 /** 9433 * lpfc_els_timeout - Handler function for the els timer 9434 * @t: timer context used to obtain the vport. 9435 * 9436 * This routine is invoked by the ELS timer after timeout. It posts the ELS 9437 * timer timeout event by setting the WORKER_ELS_TMO bit in the work port 9438 * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake 9439 * up the worker thread. The worker thread then invokes the 9440 * lpfc_els_timeout_handler() routine to work on the posted WORKER_ELS_TMO event. 9441 **/ 9442 void 9443 lpfc_els_timeout(struct timer_list *t) 9444 { 9445 struct lpfc_vport *vport = from_timer(vport, t, els_tmofunc); 9446 struct lpfc_hba *phba = vport->phba; 9447 uint32_t tmo_posted; 9448 unsigned long iflag; 9449 9450 spin_lock_irqsave(&vport->work_port_lock, iflag); 9451 tmo_posted = vport->work_port_events & WORKER_ELS_TMO; 9452 if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING))) 9453 vport->work_port_events |= WORKER_ELS_TMO; 9454 spin_unlock_irqrestore(&vport->work_port_lock, iflag); 9455 9456 if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING))) 9457 lpfc_worker_wake_up(phba); 9458 return; 9459 } 9460 9461 9462 /** 9463 * lpfc_els_timeout_handler - Process an els timeout event 9464 * @vport: pointer to a virtual N_Port data structure. 9465 * 9466 * This routine is the actual handler function that processes an ELS timeout 9467 * event. It walks the ELS ring and aborts all timed-out IOCBs associated 9468 * with the @vport (except ABORT/CLOSE/FARP/FARPR/FDISC IOCBs) by 9469 * invoking the lpfc_sli_issue_abort_iotag() routine.
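 *
 * IOCBs whose driver timeout has not yet expired simply have it reduced
 * by the 2 * R_A_TOV timeout interval (clamping at zero); expired IOCBs
 * are collected on a local abort list and aborted after the scan locks
 * are dropped. If the txcmplq is still not empty afterwards, the ELS
 * timer is re-armed for another interval (unless the port is unloading).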
9470 **/ 9471 void 9472 lpfc_els_timeout_handler(struct lpfc_vport *vport) 9473 { 9474 struct lpfc_hba *phba = vport->phba; 9475 struct lpfc_sli_ring *pring; 9476 struct lpfc_iocbq *tmp_iocb, *piocb; 9477 IOCB_t *cmd = NULL; 9478 struct lpfc_dmabuf *pcmd; 9479 uint32_t els_command = 0; 9480 uint32_t timeout; 9481 uint32_t remote_ID = 0xffffffff; 9482 LIST_HEAD(abort_list); 9483 u32 ulp_command = 0, ulp_context = 0, did = 0, iotag = 0; 9484 9485 9486 timeout = (uint32_t)(phba->fc_ratov << 1); 9487 9488 pring = lpfc_phba_elsring(phba); 9489 if (unlikely(!pring)) 9490 return; 9491 9492 if (phba->pport->load_flag & FC_UNLOADING) 9493 return; 9494 9495 spin_lock_irq(&phba->hbalock); 9496 if (phba->sli_rev == LPFC_SLI_REV4) 9497 spin_lock(&pring->ring_lock); 9498 9499 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 9500 ulp_command = get_job_cmnd(phba, piocb); 9501 ulp_context = get_job_ulpcontext(phba, piocb); 9502 did = get_job_els_rsp64_did(phba, piocb); 9503 9504 if (phba->sli_rev == LPFC_SLI_REV4) { 9505 iotag = get_wqe_reqtag(piocb); 9506 } else { 9507 cmd = &piocb->iocb; 9508 iotag = cmd->ulpIoTag; 9509 } 9510 9511 if ((piocb->cmd_flag & LPFC_IO_LIBDFC) != 0 || 9512 ulp_command == CMD_ABORT_XRI_CX || 9513 ulp_command == CMD_ABORT_XRI_CN || 9514 ulp_command == CMD_CLOSE_XRI_CN) 9515 continue; 9516 9517 if (piocb->vport != vport) 9518 continue; 9519 9520 pcmd = piocb->cmd_dmabuf; 9521 if (pcmd) 9522 els_command = *(uint32_t *) (pcmd->virt); 9523 9524 if (els_command == ELS_CMD_FARP || 9525 els_command == ELS_CMD_FARPR || 9526 els_command == ELS_CMD_FDISC) 9527 continue; 9528 9529 if (piocb->drvrTimeout > 0) { 9530 if (piocb->drvrTimeout >= timeout) 9531 piocb->drvrTimeout -= timeout; 9532 else 9533 piocb->drvrTimeout = 0; 9534 continue; 9535 } 9536 9537 remote_ID = 0xffffffff; 9538 if (ulp_command != CMD_GEN_REQUEST64_CR) { 9539 remote_ID = did; 9540 } else { 9541 struct lpfc_nodelist *ndlp; 9542 ndlp = __lpfc_findnode_rpi(vport, ulp_context); 9543 if (ndlp) 9544 remote_ID = ndlp->nlp_DID; 9545 } 9546 list_add_tail(&piocb->dlist, &abort_list); 9547 } 9548 if (phba->sli_rev == LPFC_SLI_REV4) 9549 spin_unlock(&pring->ring_lock); 9550 spin_unlock_irq(&phba->hbalock); 9551 9552 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { 9553 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9554 "0127 ELS timeout Data: x%x x%x x%x " 9555 "x%x\n", els_command, 9556 remote_ID, ulp_command, iotag); 9557 9558 spin_lock_irq(&phba->hbalock); 9559 list_del_init(&piocb->dlist); 9560 lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL); 9561 spin_unlock_irq(&phba->hbalock); 9562 } 9563 9564 /* Make sure HBA is alive */ 9565 lpfc_issue_hb_tmo(phba); 9566 9567 if (!list_empty(&pring->txcmplq)) 9568 if (!(phba->pport->load_flag & FC_UNLOADING)) 9569 mod_timer(&vport->els_tmofunc, 9570 jiffies + msecs_to_jiffies(1000 * timeout)); 9571 } 9572 9573 /** 9574 * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport 9575 * @vport: pointer to a host virtual N_Port data structure. 9576 * 9577 * This routine is used to clean up all the outstanding ELS commands on a 9578 * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport() 9579 * routine. After that, it walks the ELS transmit queue to remove all the 9580 * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. 
For 9581 * the IOCBs with a non-NULL completion callback function, the callback 9582 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and 9583 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion 9584 * callback function, the IOCB will simply be released. Finally, it walks 9585 * the ELS transmit completion queue to issue an abort IOCB to any transmit 9586 * completion queue IOCB that is associated with the @vport and is not 9587 * an IOCB from libdfc (i.e., the management plane IOCBs that are not 9588 * part of the discovery state machine) out to HBA by invoking the 9589 * lpfc_sli_issue_abort_iotag() routine. Note that this function issues the 9590 * abort IOCB to any transmit completion queueed IOCB, it does not guarantee 9591 * the IOCBs are aborted when this function returns. 9592 **/ 9593 void 9594 lpfc_els_flush_cmd(struct lpfc_vport *vport) 9595 { 9596 LIST_HEAD(abort_list); 9597 struct lpfc_hba *phba = vport->phba; 9598 struct lpfc_sli_ring *pring; 9599 struct lpfc_iocbq *tmp_iocb, *piocb; 9600 u32 ulp_command; 9601 unsigned long iflags = 0; 9602 9603 lpfc_fabric_abort_vport(vport); 9604 9605 /* 9606 * For SLI3, only the hbalock is required. But SLI4 needs to coordinate 9607 * with the ring insert operation. Because lpfc_sli_issue_abort_iotag 9608 * ultimately grabs the ring_lock, the driver must splice the list into 9609 * a working list and release the locks before calling the abort. 9610 */ 9611 spin_lock_irqsave(&phba->hbalock, iflags); 9612 pring = lpfc_phba_elsring(phba); 9613 9614 /* Bail out if we've no ELS wq, like in PCI error recovery case. */ 9615 if (unlikely(!pring)) { 9616 spin_unlock_irqrestore(&phba->hbalock, iflags); 9617 return; 9618 } 9619 9620 if (phba->sli_rev == LPFC_SLI_REV4) 9621 spin_lock(&pring->ring_lock); 9622 9623 /* First we need to issue aborts to outstanding cmds on txcmpl */ 9624 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 9625 if (piocb->cmd_flag & LPFC_IO_LIBDFC) 9626 continue; 9627 9628 if (piocb->vport != vport) 9629 continue; 9630 9631 if (piocb->cmd_flag & LPFC_DRIVER_ABORTED) 9632 continue; 9633 9634 /* On the ELS ring we can have ELS_REQUESTs or 9635 * GEN_REQUESTs waiting for a response. 9636 */ 9637 ulp_command = get_job_cmnd(phba, piocb); 9638 if (ulp_command == CMD_ELS_REQUEST64_CR) { 9639 list_add_tail(&piocb->dlist, &abort_list); 9640 9641 /* If the link is down when flushing ELS commands 9642 * the firmware will not complete them till after 9643 * the link comes back up. This may confuse 9644 * discovery for the new link up, so we need to 9645 * change the compl routine to just clean up the iocb 9646 * and avoid any retry logic. 9647 */ 9648 if (phba->link_state == LPFC_LINK_DOWN) 9649 piocb->cmd_cmpl = lpfc_cmpl_els_link_down; 9650 } 9651 if (ulp_command == CMD_GEN_REQUEST64_CR) 9652 list_add_tail(&piocb->dlist, &abort_list); 9653 } 9654 9655 if (phba->sli_rev == LPFC_SLI_REV4) 9656 spin_unlock(&pring->ring_lock); 9657 spin_unlock_irqrestore(&phba->hbalock, iflags); 9658 9659 /* Abort each txcmpl iocb on aborted list and remove the dlist links. 
*/ 9660 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { 9661 spin_lock_irqsave(&phba->hbalock, iflags); 9662 list_del_init(&piocb->dlist); 9663 lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL); 9664 spin_unlock_irqrestore(&phba->hbalock, iflags); 9665 } 9666 /* Make sure HBA is alive */ 9667 lpfc_issue_hb_tmo(phba); 9668 9669 if (!list_empty(&abort_list)) 9670 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9671 "3387 abort list for txq not empty\n"); 9672 INIT_LIST_HEAD(&abort_list); 9673 9674 spin_lock_irqsave(&phba->hbalock, iflags); 9675 if (phba->sli_rev == LPFC_SLI_REV4) 9676 spin_lock(&pring->ring_lock); 9677 9678 /* No need to abort the txq list, 9679 * just queue them up for lpfc_sli_cancel_iocbs 9680 */ 9681 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) { 9682 ulp_command = get_job_cmnd(phba, piocb); 9683 9684 if (piocb->cmd_flag & LPFC_IO_LIBDFC) 9685 continue; 9686 9687 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */ 9688 if (ulp_command == CMD_QUE_RING_BUF_CN || 9689 ulp_command == CMD_QUE_RING_BUF64_CN || 9690 ulp_command == CMD_CLOSE_XRI_CN || 9691 ulp_command == CMD_ABORT_XRI_CN || 9692 ulp_command == CMD_ABORT_XRI_CX) 9693 continue; 9694 9695 if (piocb->vport != vport) 9696 continue; 9697 9698 list_del_init(&piocb->list); 9699 list_add_tail(&piocb->list, &abort_list); 9700 } 9701 9702 /* The same holds true for any FLOGI/FDISC on the fabric_iocb_list */ 9703 if (vport == phba->pport) { 9704 list_for_each_entry_safe(piocb, tmp_iocb, 9705 &phba->fabric_iocb_list, list) { 9706 list_del_init(&piocb->list); 9707 list_add_tail(&piocb->list, &abort_list); 9708 } 9709 } 9710 9711 if (phba->sli_rev == LPFC_SLI_REV4) 9712 spin_unlock(&pring->ring_lock); 9713 spin_unlock_irqrestore(&phba->hbalock, iflags); 9714 9715 /* Cancel all the IOCBs from the completions list */ 9716 lpfc_sli_cancel_iocbs(phba, &abort_list, 9717 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); 9718 9719 return; 9720 } 9721 9722 /** 9723 * lpfc_els_flush_all_cmd - Clean up all the outstanding els commands to a HBA 9724 * @phba: pointer to lpfc hba data structure. 9725 * 9726 * This routine is used to clean up all the outstanding ELS commands on a 9727 * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba() 9728 * routine. After that, it walks the ELS transmit queue to remove all the 9729 * IOCBs to the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For 9730 * the IOCBs with the completion callback function associated, the callback 9731 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and 9732 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without the completion 9733 * callback function associated, the IOCB will simply be released. Finally, 9734 * it walks the ELS transmit completion queue to issue an abort IOCB to any 9735 * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the 9736 * management plane IOCBs that are not part of the discovery state machine) 9737 * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine. 9738 **/ 9739 void 9740 lpfc_els_flush_all_cmd(struct lpfc_hba *phba) 9741 { 9742 struct lpfc_vport *vport; 9743 9744 spin_lock_irq(&phba->port_list_lock); 9745 list_for_each_entry(vport, &phba->port_list, listentry) 9746 lpfc_els_flush_cmd(vport); 9747 spin_unlock_irq(&phba->port_list_lock); 9748 9749 return; 9750 } 9751 9752 /** 9753 * lpfc_send_els_failure_event - Posts an ELS command failure event 9754 * @phba: Pointer to hba context object. 
9755 * @cmdiocbp: Pointer to command iocb which reported error. 9756 * @rspiocbp: Pointer to response iocb which reported error. 9757 * 9758 * This function sends an event when there is an ELS command 9759 * failure. 9760 **/ 9761 void 9762 lpfc_send_els_failure_event(struct lpfc_hba *phba, 9763 struct lpfc_iocbq *cmdiocbp, 9764 struct lpfc_iocbq *rspiocbp) 9765 { 9766 struct lpfc_vport *vport = cmdiocbp->vport; 9767 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 9768 struct lpfc_lsrjt_event lsrjt_event; 9769 struct lpfc_fabric_event_header fabric_event; 9770 struct ls_rjt stat; 9771 struct lpfc_nodelist *ndlp; 9772 uint32_t *pcmd; 9773 u32 ulp_status, ulp_word4; 9774 9775 ndlp = cmdiocbp->ndlp; 9776 if (!ndlp) 9777 return; 9778 9779 ulp_status = get_job_ulpstatus(phba, rspiocbp); 9780 ulp_word4 = get_job_word4(phba, rspiocbp); 9781 9782 if (ulp_status == IOSTAT_LS_RJT) { 9783 lsrjt_event.header.event_type = FC_REG_ELS_EVENT; 9784 lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV; 9785 memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname, 9786 sizeof(struct lpfc_name)); 9787 memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename, 9788 sizeof(struct lpfc_name)); 9789 pcmd = (uint32_t *)cmdiocbp->cmd_dmabuf->virt; 9790 lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0; 9791 stat.un.ls_rjt_error_be = cpu_to_be32(ulp_word4); 9792 lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode; 9793 lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp; 9794 fc_host_post_vendor_event(shost, 9795 fc_get_event_number(), 9796 sizeof(lsrjt_event), 9797 (char *)&lsrjt_event, 9798 LPFC_NL_VENDOR_ID); 9799 return; 9800 } 9801 if (ulp_status == IOSTAT_NPORT_BSY || 9802 ulp_status == IOSTAT_FABRIC_BSY) { 9803 fabric_event.event_type = FC_REG_FABRIC_EVENT; 9804 if (ulp_status == IOSTAT_NPORT_BSY) 9805 fabric_event.subcategory = LPFC_EVENT_PORT_BUSY; 9806 else 9807 fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY; 9808 memcpy(fabric_event.wwpn, &ndlp->nlp_portname, 9809 sizeof(struct lpfc_name)); 9810 memcpy(fabric_event.wwnn, &ndlp->nlp_nodename, 9811 sizeof(struct lpfc_name)); 9812 fc_host_post_vendor_event(shost, 9813 fc_get_event_number(), 9814 sizeof(fabric_event), 9815 (char *)&fabric_event, 9816 LPFC_NL_VENDOR_ID); 9817 return; 9818 } 9819 9820 } 9821 9822 /** 9823 * lpfc_send_els_event - Posts unsolicited els event 9824 * @vport: Pointer to vport object. 9825 * @ndlp: Pointer FC node object. 9826 * @payload: ELS command code type. 9827 * 9828 * This function posts an event when there is an incoming 9829 * unsolicited ELS command. 
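 *
 * Events are posted only for PLOGI, PRLO, ADISC and LOGO; for LOGO the
 * WWPN carried in the LOGO payload is copied into the event data. Other
 * ELS commands are ignored, and on an allocation failure a message is
 * logged and the event is dropped.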
9830 **/ 9831 static void 9832 lpfc_send_els_event(struct lpfc_vport *vport, 9833 struct lpfc_nodelist *ndlp, 9834 uint32_t *payload) 9835 { 9836 struct lpfc_els_event_header *els_data = NULL; 9837 struct lpfc_logo_event *logo_data = NULL; 9838 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 9839 9840 if (*payload == ELS_CMD_LOGO) { 9841 logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL); 9842 if (!logo_data) { 9843 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9844 "0148 Failed to allocate memory " 9845 "for LOGO event\n"); 9846 return; 9847 } 9848 els_data = &logo_data->header; 9849 } else { 9850 els_data = kmalloc(sizeof(struct lpfc_els_event_header), 9851 GFP_KERNEL); 9852 if (!els_data) { 9853 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9854 "0149 Failed to allocate memory " 9855 "for ELS event\n"); 9856 return; 9857 } 9858 } 9859 els_data->event_type = FC_REG_ELS_EVENT; 9860 switch (*payload) { 9861 case ELS_CMD_PLOGI: 9862 els_data->subcategory = LPFC_EVENT_PLOGI_RCV; 9863 break; 9864 case ELS_CMD_PRLO: 9865 els_data->subcategory = LPFC_EVENT_PRLO_RCV; 9866 break; 9867 case ELS_CMD_ADISC: 9868 els_data->subcategory = LPFC_EVENT_ADISC_RCV; 9869 break; 9870 case ELS_CMD_LOGO: 9871 els_data->subcategory = LPFC_EVENT_LOGO_RCV; 9872 /* Copy the WWPN in the LOGO payload */ 9873 memcpy(logo_data->logo_wwpn, &payload[2], 9874 sizeof(struct lpfc_name)); 9875 break; 9876 default: 9877 kfree(els_data); 9878 return; 9879 } 9880 memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name)); 9881 memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name)); 9882 if (*payload == ELS_CMD_LOGO) { 9883 fc_host_post_vendor_event(shost, 9884 fc_get_event_number(), 9885 sizeof(struct lpfc_logo_event), 9886 (char *)logo_data, 9887 LPFC_NL_VENDOR_ID); 9888 kfree(logo_data); 9889 } else { 9890 fc_host_post_vendor_event(shost, 9891 fc_get_event_number(), 9892 sizeof(struct lpfc_els_event_header), 9893 (char *)els_data, 9894 LPFC_NL_VENDOR_ID); 9895 kfree(els_data); 9896 } 9897 9898 return; 9899 } 9900 9901 9902 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_li_event_nm, fc_fpin_li_event_types, 9903 FC_FPIN_LI_EVT_TYPES_INIT); 9904 9905 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_deli_event_nm, fc_fpin_deli_event_types, 9906 FC_FPIN_DELI_EVT_TYPES_INIT); 9907 9908 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_event_nm, fc_fpin_congn_event_types, 9909 FC_FPIN_CONGN_EVT_TYPES_INIT); 9910 9911 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_severity_nm, 9912 fc_fpin_congn_severity_types, 9913 FC_FPIN_CONGN_SEVERITY_INIT); 9914 9915 9916 /** 9917 * lpfc_display_fpin_wwpn - Display WWPNs accessible by the attached port 9918 * @phba: Pointer to phba object. 9919 * @wwnlist: Pointer to list of WWPNs in FPIN payload 9920 * @cnt: count of WWPNs in FPIN payload 9921 * 9922 * This routine is called by LI and PC descriptors. 
9923 * Limit the number of WWPNs displayed to 6 log messages, 6 per log message 9924 */ 9925 static void 9926 lpfc_display_fpin_wwpn(struct lpfc_hba *phba, __be64 *wwnlist, u32 cnt) 9927 { 9928 char buf[LPFC_FPIN_WWPN_LINE_SZ]; 9929 __be64 wwn; 9930 u64 wwpn; 9931 int i, len; 9932 int line = 0; 9933 int wcnt = 0; 9934 bool endit = false; 9935 9936 len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ, "Accessible WWPNs:"); 9937 for (i = 0; i < cnt; i++) { 9938 /* Are we on the last WWPN */ 9939 if (i == (cnt - 1)) 9940 endit = true; 9941 9942 /* Extract the next WWPN from the payload */ 9943 wwn = *wwnlist++; 9944 wwpn = be64_to_cpu(wwn); 9945 len += scnprintf(buf + len, LPFC_FPIN_WWPN_LINE_SZ - len, 9946 " %016llx", wwpn); 9947 9948 /* Log a message if we are on the last WWPN 9949 * or if we hit the max allowed per message. 9950 */ 9951 wcnt++; 9952 if (wcnt == LPFC_FPIN_WWPN_LINE_CNT || endit) { 9953 buf[len] = 0; 9954 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9955 "4686 %s\n", buf); 9956 9957 /* Check if we reached the last WWPN */ 9958 if (endit) 9959 return; 9960 9961 /* Limit the number of log message displayed per FPIN */ 9962 line++; 9963 if (line == LPFC_FPIN_WWPN_NUM_LINE) { 9964 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9965 "4687 %d WWPNs Truncated\n", 9966 cnt - i - 1); 9967 return; 9968 } 9969 9970 /* Start over with next log message */ 9971 wcnt = 0; 9972 len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ, 9973 "Additional WWPNs:"); 9974 } 9975 } 9976 } 9977 9978 /** 9979 * lpfc_els_rcv_fpin_li - Process an FPIN Link Integrity Event. 9980 * @phba: Pointer to phba object. 9981 * @tlv: Pointer to the Link Integrity Notification Descriptor. 9982 * 9983 * This function processes a Link Integrity FPIN event by logging a message. 9984 **/ 9985 static void 9986 lpfc_els_rcv_fpin_li(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 9987 { 9988 struct fc_fn_li_desc *li = (struct fc_fn_li_desc *)tlv; 9989 const char *li_evt_str; 9990 u32 li_evt, cnt; 9991 9992 li_evt = be16_to_cpu(li->event_type); 9993 li_evt_str = lpfc_get_fpin_li_event_nm(li_evt); 9994 cnt = be32_to_cpu(li->pname_count); 9995 9996 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9997 "4680 FPIN Link Integrity %s (x%x) " 9998 "Detecting PN x%016llx Attached PN x%016llx " 9999 "Duration %d mSecs Count %d Port Cnt %d\n", 10000 li_evt_str, li_evt, 10001 be64_to_cpu(li->detecting_wwpn), 10002 be64_to_cpu(li->attached_wwpn), 10003 be32_to_cpu(li->event_threshold), 10004 be32_to_cpu(li->event_count), cnt); 10005 10006 lpfc_display_fpin_wwpn(phba, (__be64 *)&li->pname_list, cnt); 10007 } 10008 10009 /** 10010 * lpfc_els_rcv_fpin_del - Process an FPIN Delivery Event. 10011 * @phba: Pointer to hba object. 10012 * @tlv: Pointer to the Delivery Notification Descriptor TLV 10013 * 10014 * This function processes a Delivery FPIN event by logging a message. 
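 *
 * The logged message includes the detecting and attached port WWPNs, the
 * delivery reason code, and the first six words of the discarded frame
 * header that follow the descriptor fields.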
10015 **/ 10016 static void 10017 lpfc_els_rcv_fpin_del(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 10018 { 10019 struct fc_fn_deli_desc *del = (struct fc_fn_deli_desc *)tlv; 10020 const char *del_rsn_str; 10021 u32 del_rsn; 10022 __be32 *frame; 10023 10024 del_rsn = be16_to_cpu(del->deli_reason_code); 10025 del_rsn_str = lpfc_get_fpin_deli_event_nm(del_rsn); 10026 10027 /* Skip over desc_tag/desc_len header to payload */ 10028 frame = (__be32 *)(del + 1); 10029 10030 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 10031 "4681 FPIN Delivery %s (x%x) " 10032 "Detecting PN x%016llx Attached PN x%016llx " 10033 "DiscHdr0 x%08x " 10034 "DiscHdr1 x%08x DiscHdr2 x%08x DiscHdr3 x%08x " 10035 "DiscHdr4 x%08x DiscHdr5 x%08x\n", 10036 del_rsn_str, del_rsn, 10037 be64_to_cpu(del->detecting_wwpn), 10038 be64_to_cpu(del->attached_wwpn), 10039 be32_to_cpu(frame[0]), 10040 be32_to_cpu(frame[1]), 10041 be32_to_cpu(frame[2]), 10042 be32_to_cpu(frame[3]), 10043 be32_to_cpu(frame[4]), 10044 be32_to_cpu(frame[5])); 10045 } 10046 10047 /** 10048 * lpfc_els_rcv_fpin_peer_cgn - Process a FPIN Peer Congestion Event. 10049 * @phba: Pointer to hba object. 10050 * @tlv: Pointer to the Peer Congestion Notification Descriptor TLV 10051 * 10052 * This function processes a Peer Congestion FPIN event by logging a message. 10053 **/ 10054 static void 10055 lpfc_els_rcv_fpin_peer_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 10056 { 10057 struct fc_fn_peer_congn_desc *pc = (struct fc_fn_peer_congn_desc *)tlv; 10058 const char *pc_evt_str; 10059 u32 pc_evt, cnt; 10060 10061 pc_evt = be16_to_cpu(pc->event_type); 10062 pc_evt_str = lpfc_get_fpin_congn_event_nm(pc_evt); 10063 cnt = be32_to_cpu(pc->pname_count); 10064 10065 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_ELS, 10066 "4684 FPIN Peer Congestion %s (x%x) " 10067 "Duration %d mSecs " 10068 "Detecting PN x%016llx Attached PN x%016llx " 10069 "Impacted Port Cnt %d\n", 10070 pc_evt_str, pc_evt, 10071 be32_to_cpu(pc->event_period), 10072 be64_to_cpu(pc->detecting_wwpn), 10073 be64_to_cpu(pc->attached_wwpn), 10074 cnt); 10075 10076 lpfc_display_fpin_wwpn(phba, (__be64 *)&pc->pname_list, cnt); 10077 } 10078 10079 /** 10080 * lpfc_els_rcv_fpin_cgn - Process an FPIN Congestion notification 10081 * @phba: Pointer to hba object. 10082 * @tlv: Pointer to the Congestion Notification Descriptor TLV 10083 * 10084 * This function processes an FPIN Congestion Notifiction. The notification 10085 * could be an Alarm or Warning. This routine feeds that data into driver's 10086 * running congestion algorithm. It also processes the FPIN by 10087 * logging a message. It returns 1 to indicate deliver this message 10088 * to the upper layer or 0 to indicate don't deliver it. 10089 **/ 10090 static int 10091 lpfc_els_rcv_fpin_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 10092 { 10093 struct lpfc_cgn_info *cp; 10094 struct fc_fn_congn_desc *cgn = (struct fc_fn_congn_desc *)tlv; 10095 const char *cgn_evt_str; 10096 u32 cgn_evt; 10097 const char *cgn_sev_str; 10098 u32 cgn_sev; 10099 uint16_t value; 10100 u32 crc; 10101 bool nm_log = false; 10102 int rc = 1; 10103 10104 cgn_evt = be16_to_cpu(cgn->event_type); 10105 cgn_evt_str = lpfc_get_fpin_congn_event_nm(cgn_evt); 10106 cgn_sev = cgn->severity; 10107 cgn_sev_str = lpfc_get_fpin_congn_severity_nm(cgn_sev); 10108 10109 /* The driver only takes action on a Credit Stall or Oversubscription 10110 * event type to engage the IO algorithm. The driver prints an 10111 * unmaskable message only for Lost Credit and Credit Stall. 
10112 * TODO: Still need to have definition of host action on clear, 10113 * lost credit and device specific event types. 10114 */ 10115 switch (cgn_evt) { 10116 case FPIN_CONGN_LOST_CREDIT: 10117 nm_log = true; 10118 break; 10119 case FPIN_CONGN_CREDIT_STALL: 10120 nm_log = true; 10121 fallthrough; 10122 case FPIN_CONGN_OVERSUBSCRIPTION: 10123 if (cgn_evt == FPIN_CONGN_OVERSUBSCRIPTION) 10124 nm_log = false; 10125 switch (cgn_sev) { 10126 case FPIN_CONGN_SEVERITY_ERROR: 10127 /* Take action here for an Alarm event */ 10128 if (phba->cmf_active_mode != LPFC_CFG_OFF) { 10129 if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) { 10130 /* Track of alarm cnt for SYNC_WQE */ 10131 atomic_inc(&phba->cgn_sync_alarm_cnt); 10132 } 10133 /* Track alarm cnt for cgn_info regardless 10134 * of whether CMF is configured for Signals 10135 * or FPINs. 10136 */ 10137 atomic_inc(&phba->cgn_fabric_alarm_cnt); 10138 goto cleanup; 10139 } 10140 break; 10141 case FPIN_CONGN_SEVERITY_WARNING: 10142 /* Take action here for a Warning event */ 10143 if (phba->cmf_active_mode != LPFC_CFG_OFF) { 10144 if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) { 10145 /* Track of warning cnt for SYNC_WQE */ 10146 atomic_inc(&phba->cgn_sync_warn_cnt); 10147 } 10148 /* Track warning cnt and freq for cgn_info 10149 * regardless of whether CMF is configured for 10150 * Signals or FPINs. 10151 */ 10152 atomic_inc(&phba->cgn_fabric_warn_cnt); 10153 cleanup: 10154 /* Save frequency in ms */ 10155 phba->cgn_fpin_frequency = 10156 be32_to_cpu(cgn->event_period); 10157 value = phba->cgn_fpin_frequency; 10158 if (phba->cgn_i) { 10159 cp = (struct lpfc_cgn_info *) 10160 phba->cgn_i->virt; 10161 cp->cgn_alarm_freq = 10162 cpu_to_le16(value); 10163 cp->cgn_warn_freq = 10164 cpu_to_le16(value); 10165 crc = lpfc_cgn_calc_crc32 10166 (cp, 10167 LPFC_CGN_INFO_SZ, 10168 LPFC_CGN_CRC32_SEED); 10169 cp->cgn_info_crc = cpu_to_le32(crc); 10170 } 10171 10172 /* Don't deliver to upper layer since 10173 * driver took action on this tlv. 10174 */ 10175 rc = 0; 10176 } 10177 break; 10178 } 10179 break; 10180 } 10181 10182 /* Change the log level to unmaskable for the following event types. */ 10183 lpfc_printf_log(phba, (nm_log ? KERN_WARNING : KERN_INFO), 10184 LOG_CGN_MGMT | LOG_ELS, 10185 "4683 FPIN CONGESTION %s type %s (x%x) Event " 10186 "Duration %d mSecs\n", 10187 cgn_sev_str, cgn_evt_str, cgn_evt, 10188 be32_to_cpu(cgn->event_period)); 10189 return rc; 10190 } 10191 10192 void 10193 lpfc_els_rcv_fpin(struct lpfc_vport *vport, void *p, u32 fpin_length) 10194 { 10195 struct lpfc_hba *phba = vport->phba; 10196 struct fc_els_fpin *fpin = (struct fc_els_fpin *)p; 10197 struct fc_tlv_desc *tlv, *first_tlv, *current_tlv; 10198 const char *dtag_nm; 10199 int desc_cnt = 0, bytes_remain, cnt; 10200 u32 dtag, deliver = 0; 10201 int len; 10202 10203 /* FPINs handled only if we are in the right discovery state */ 10204 if (vport->port_state < LPFC_DISC_AUTH) 10205 return; 10206 10207 /* make sure there is the full fpin header */ 10208 if (fpin_length < sizeof(struct fc_els_fpin)) 10209 return; 10210 10211 /* Sanity check descriptor length. The desc_len value does not 10212 * include space for the ELS command and the desc_len fields. 
10213 */ 10214 len = be32_to_cpu(fpin->desc_len); 10215 if (fpin_length < len + sizeof(struct fc_els_fpin)) { 10216 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 10217 "4671 Bad ELS FPIN length %d: %d\n", 10218 len, fpin_length); 10219 return; 10220 } 10221 10222 tlv = (struct fc_tlv_desc *)&fpin->fpin_desc[0]; 10223 first_tlv = tlv; 10224 bytes_remain = fpin_length - offsetof(struct fc_els_fpin, fpin_desc); 10225 bytes_remain = min_t(u32, bytes_remain, be32_to_cpu(fpin->desc_len)); 10226 10227 /* process each descriptor separately */ 10228 while (bytes_remain >= FC_TLV_DESC_HDR_SZ && 10229 bytes_remain >= FC_TLV_DESC_SZ_FROM_LENGTH(tlv)) { 10230 dtag = be32_to_cpu(tlv->desc_tag); 10231 switch (dtag) { 10232 case ELS_DTAG_LNK_INTEGRITY: 10233 lpfc_els_rcv_fpin_li(phba, tlv); 10234 deliver = 1; 10235 break; 10236 case ELS_DTAG_DELIVERY: 10237 lpfc_els_rcv_fpin_del(phba, tlv); 10238 deliver = 1; 10239 break; 10240 case ELS_DTAG_PEER_CONGEST: 10241 lpfc_els_rcv_fpin_peer_cgn(phba, tlv); 10242 deliver = 1; 10243 break; 10244 case ELS_DTAG_CONGESTION: 10245 deliver = lpfc_els_rcv_fpin_cgn(phba, tlv); 10246 break; 10247 default: 10248 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 10249 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 10250 "4678 unknown FPIN descriptor[%d]: " 10251 "tag x%x (%s)\n", 10252 desc_cnt, dtag, dtag_nm); 10253 10254 /* If descriptor is bad, drop the rest of the data */ 10255 return; 10256 } 10257 lpfc_cgn_update_stat(phba, dtag); 10258 cnt = be32_to_cpu(tlv->desc_len); 10259 10260 /* Sanity check descriptor length. The desc_len value does not 10261 * include space for the desc_tag and the desc_len fields. 10262 */ 10263 len -= (cnt + sizeof(struct fc_tlv_desc)); 10264 if (len < 0) { 10265 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 10266 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 10267 "4672 Bad FPIN descriptor TLV length " 10268 "%d: %d %d %s\n", 10269 cnt, len, fpin_length, dtag_nm); 10270 return; 10271 } 10272 10273 current_tlv = tlv; 10274 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); 10275 tlv = fc_tlv_next_desc(tlv); 10276 10277 /* Format payload such that the FPIN delivered to the 10278 * upper layer is a single descriptor FPIN. 10279 */ 10280 if (desc_cnt) 10281 memcpy(first_tlv, current_tlv, 10282 (cnt + sizeof(struct fc_els_fpin))); 10283 10284 /* Adjust the length so that it only reflects a 10285 * single descriptor FPIN. 10286 */ 10287 fpin_length = cnt + sizeof(struct fc_els_fpin); 10288 fpin->desc_len = cpu_to_be32(fpin_length); 10289 fpin_length += sizeof(struct fc_els_fpin); /* the entire FPIN */ 10290 10291 /* Send every descriptor individually to the upper layer */ 10292 if (deliver) 10293 fc_host_fpin_rcv(lpfc_shost_from_vport(vport), 10294 fpin_length, (char *)fpin, 0); 10295 desc_cnt++; 10296 } 10297 } 10298 10299 /** 10300 * lpfc_els_unsol_buffer - Process an unsolicited event data buffer 10301 * @phba: pointer to lpfc hba data structure. 10302 * @pring: pointer to a SLI ring. 10303 * @vport: pointer to a host virtual N_Port data structure. 10304 * @elsiocb: pointer to lpfc els command iocb data structure. 10305 * 10306 * This routine is used for processing the IOCB associated with a unsolicited 10307 * event. It first determines whether there is an existing ndlp that matches 10308 * the DID from the unsolicited IOCB. If not, it will create a new one with 10309 * the DID from the unsolicited IOCB. 
The ELS command from the unsolicited 10310 * IOCB is then used to invoke the proper routine and to set up proper state 10311 * of the discovery state machine. 10312 **/ 10313 static void 10314 lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10315 struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb) 10316 { 10317 struct lpfc_nodelist *ndlp; 10318 struct ls_rjt stat; 10319 u32 *payload, payload_len; 10320 u32 cmd = 0, did = 0, newnode, status = 0; 10321 uint8_t rjt_exp, rjt_err = 0, init_link = 0; 10322 struct lpfc_wcqe_complete *wcqe_cmpl = NULL; 10323 LPFC_MBOXQ_t *mbox; 10324 10325 if (!vport || !elsiocb->cmd_dmabuf) 10326 goto dropit; 10327 10328 newnode = 0; 10329 wcqe_cmpl = &elsiocb->wcqe_cmpl; 10330 payload = elsiocb->cmd_dmabuf->virt; 10331 if (phba->sli_rev == LPFC_SLI_REV4) 10332 payload_len = wcqe_cmpl->total_data_placed; 10333 else 10334 payload_len = elsiocb->iocb.unsli3.rcvsli3.acc_len; 10335 status = get_job_ulpstatus(phba, elsiocb); 10336 cmd = *payload; 10337 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0) 10338 lpfc_sli3_post_buffer(phba, pring, 1); 10339 10340 did = get_job_els_rsp64_did(phba, elsiocb); 10341 if (status) { 10342 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10343 "RCV Unsol ELS: status:x%x/x%x did:x%x", 10344 status, get_job_word4(phba, elsiocb), did); 10345 goto dropit; 10346 } 10347 10348 /* Check to see if link went down during discovery */ 10349 if (lpfc_els_chk_latt(vport)) 10350 goto dropit; 10351 10352 /* Ignore traffic received during vport shutdown. */ 10353 if (vport->load_flag & FC_UNLOADING) 10354 goto dropit; 10355 10356 /* If NPort discovery is delayed drop incoming ELS */ 10357 if ((vport->fc_flag & FC_DISC_DELAYED) && 10358 (cmd != ELS_CMD_PLOGI)) 10359 goto dropit; 10360 10361 ndlp = lpfc_findnode_did(vport, did); 10362 if (!ndlp) { 10363 /* Cannot find existing Fabric ndlp, so allocate a new one */ 10364 ndlp = lpfc_nlp_init(vport, did); 10365 if (!ndlp) 10366 goto dropit; 10367 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 10368 newnode = 1; 10369 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 10370 ndlp->nlp_type |= NLP_FABRIC; 10371 } else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 10372 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 10373 newnode = 1; 10374 } 10375 10376 phba->fc_stat.elsRcvFrame++; 10377 10378 /* 10379 * Do not process any unsolicited ELS commands 10380 * if the ndlp is in DEV_LOSS 10381 */ 10382 spin_lock_irq(&ndlp->lock); 10383 if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) { 10384 spin_unlock_irq(&ndlp->lock); 10385 if (newnode) 10386 lpfc_nlp_put(ndlp); 10387 goto dropit; 10388 } 10389 spin_unlock_irq(&ndlp->lock); 10390 10391 elsiocb->ndlp = lpfc_nlp_get(ndlp); 10392 if (!elsiocb->ndlp) 10393 goto dropit; 10394 elsiocb->vport = vport; 10395 10396 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) { 10397 cmd &= ELS_CMD_MASK; 10398 } 10399 /* ELS command <elsCmd> received from NPORT <did> */ 10400 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 10401 "0112 ELS command x%x received from NPORT x%x " 10402 "refcnt %d Data: x%x x%x x%x x%x\n", 10403 cmd, did, kref_read(&ndlp->kref), vport->port_state, 10404 vport->fc_flag, vport->fc_myDID, vport->fc_prevDID); 10405 10406 /* reject till our FLOGI completes or PLOGI assigned DID via PT2PT */ 10407 if ((vport->port_state < LPFC_FABRIC_CFG_LINK) && 10408 (cmd != ELS_CMD_FLOGI) && 10409 !((cmd == ELS_CMD_PLOGI) && (vport->fc_flag & FC_PT2PT))) { 10410 rjt_err = LSRJT_LOGICAL_BSY; 10411 rjt_exp = LSEXP_NOTHING_MORE; 10412 goto lsrjt; 10413 
} 10414 10415 switch (cmd) { 10416 case ELS_CMD_PLOGI: 10417 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10418 "RCV PLOGI: did:x%x/ste:x%x flg:x%x", 10419 did, vport->port_state, ndlp->nlp_flag); 10420 10421 phba->fc_stat.elsRcvPLOGI++; 10422 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); 10423 if (phba->sli_rev == LPFC_SLI_REV4 && 10424 (phba->pport->fc_flag & FC_PT2PT)) { 10425 vport->fc_prevDID = vport->fc_myDID; 10426 /* Our DID needs to be updated before registering 10427 * the vfi. This is done in lpfc_rcv_plogi but 10428 * that is called after the reg_vfi. 10429 */ 10430 vport->fc_myDID = 10431 bf_get(els_rsp64_sid, 10432 &elsiocb->wqe.xmit_els_rsp); 10433 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 10434 "3312 Remote port assigned DID x%x " 10435 "%x\n", vport->fc_myDID, 10436 vport->fc_prevDID); 10437 } 10438 10439 lpfc_send_els_event(vport, ndlp, payload); 10440 10441 /* If Nport discovery is delayed, reject PLOGIs */ 10442 if (vport->fc_flag & FC_DISC_DELAYED) { 10443 rjt_err = LSRJT_UNABLE_TPC; 10444 rjt_exp = LSEXP_NOTHING_MORE; 10445 break; 10446 } 10447 10448 if (vport->port_state < LPFC_DISC_AUTH) { 10449 if (!(phba->pport->fc_flag & FC_PT2PT) || 10450 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { 10451 rjt_err = LSRJT_UNABLE_TPC; 10452 rjt_exp = LSEXP_NOTHING_MORE; 10453 break; 10454 } 10455 } 10456 10457 spin_lock_irq(&ndlp->lock); 10458 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE; 10459 spin_unlock_irq(&ndlp->lock); 10460 10461 lpfc_disc_state_machine(vport, ndlp, elsiocb, 10462 NLP_EVT_RCV_PLOGI); 10463 10464 break; 10465 case ELS_CMD_FLOGI: 10466 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10467 "RCV FLOGI: did:x%x/ste:x%x flg:x%x", 10468 did, vport->port_state, ndlp->nlp_flag); 10469 10470 phba->fc_stat.elsRcvFLOGI++; 10471 10472 /* If the driver believes fabric discovery is done and is ready, 10473 * bounce the link. There is some descrepancy. 
10474 */ 10475 if (vport->port_state >= LPFC_LOCAL_CFG_LINK && 10476 vport->fc_flag & FC_PT2PT && 10477 vport->rcv_flogi_cnt >= 1) { 10478 rjt_err = LSRJT_LOGICAL_BSY; 10479 rjt_exp = LSEXP_NOTHING_MORE; 10480 init_link++; 10481 goto lsrjt; 10482 } 10483 10484 lpfc_els_rcv_flogi(vport, elsiocb, ndlp); 10485 /* retain node if our response is deferred */ 10486 if (phba->defer_flogi_acc_flag) 10487 break; 10488 if (newnode) 10489 lpfc_disc_state_machine(vport, ndlp, NULL, 10490 NLP_EVT_DEVICE_RM); 10491 break; 10492 case ELS_CMD_LOGO: 10493 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10494 "RCV LOGO: did:x%x/ste:x%x flg:x%x", 10495 did, vport->port_state, ndlp->nlp_flag); 10496 10497 phba->fc_stat.elsRcvLOGO++; 10498 lpfc_send_els_event(vport, ndlp, payload); 10499 if (vport->port_state < LPFC_DISC_AUTH) { 10500 rjt_err = LSRJT_UNABLE_TPC; 10501 rjt_exp = LSEXP_NOTHING_MORE; 10502 break; 10503 } 10504 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO); 10505 if (newnode) 10506 lpfc_disc_state_machine(vport, ndlp, NULL, 10507 NLP_EVT_DEVICE_RM); 10508 break; 10509 case ELS_CMD_PRLO: 10510 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10511 "RCV PRLO: did:x%x/ste:x%x flg:x%x", 10512 did, vport->port_state, ndlp->nlp_flag); 10513 10514 phba->fc_stat.elsRcvPRLO++; 10515 lpfc_send_els_event(vport, ndlp, payload); 10516 if (vport->port_state < LPFC_DISC_AUTH) { 10517 rjt_err = LSRJT_UNABLE_TPC; 10518 rjt_exp = LSEXP_NOTHING_MORE; 10519 break; 10520 } 10521 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO); 10522 break; 10523 case ELS_CMD_LCB: 10524 phba->fc_stat.elsRcvLCB++; 10525 lpfc_els_rcv_lcb(vport, elsiocb, ndlp); 10526 break; 10527 case ELS_CMD_RDP: 10528 phba->fc_stat.elsRcvRDP++; 10529 lpfc_els_rcv_rdp(vport, elsiocb, ndlp); 10530 break; 10531 case ELS_CMD_RSCN: 10532 phba->fc_stat.elsRcvRSCN++; 10533 lpfc_els_rcv_rscn(vport, elsiocb, ndlp); 10534 if (newnode) 10535 lpfc_disc_state_machine(vport, ndlp, NULL, 10536 NLP_EVT_DEVICE_RM); 10537 break; 10538 case ELS_CMD_ADISC: 10539 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10540 "RCV ADISC: did:x%x/ste:x%x flg:x%x", 10541 did, vport->port_state, ndlp->nlp_flag); 10542 10543 lpfc_send_els_event(vport, ndlp, payload); 10544 phba->fc_stat.elsRcvADISC++; 10545 if (vport->port_state < LPFC_DISC_AUTH) { 10546 rjt_err = LSRJT_UNABLE_TPC; 10547 rjt_exp = LSEXP_NOTHING_MORE; 10548 break; 10549 } 10550 lpfc_disc_state_machine(vport, ndlp, elsiocb, 10551 NLP_EVT_RCV_ADISC); 10552 break; 10553 case ELS_CMD_PDISC: 10554 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10555 "RCV PDISC: did:x%x/ste:x%x flg:x%x", 10556 did, vport->port_state, ndlp->nlp_flag); 10557 10558 phba->fc_stat.elsRcvPDISC++; 10559 if (vport->port_state < LPFC_DISC_AUTH) { 10560 rjt_err = LSRJT_UNABLE_TPC; 10561 rjt_exp = LSEXP_NOTHING_MORE; 10562 break; 10563 } 10564 lpfc_disc_state_machine(vport, ndlp, elsiocb, 10565 NLP_EVT_RCV_PDISC); 10566 break; 10567 case ELS_CMD_FARPR: 10568 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10569 "RCV FARPR: did:x%x/ste:x%x flg:x%x", 10570 did, vport->port_state, ndlp->nlp_flag); 10571 10572 phba->fc_stat.elsRcvFARPR++; 10573 lpfc_els_rcv_farpr(vport, elsiocb, ndlp); 10574 break; 10575 case ELS_CMD_FARP: 10576 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10577 "RCV FARP: did:x%x/ste:x%x flg:x%x", 10578 did, vport->port_state, ndlp->nlp_flag); 10579 10580 phba->fc_stat.elsRcvFARP++; 10581 lpfc_els_rcv_farp(vport, elsiocb, ndlp); 10582 break; 10583 case ELS_CMD_FAN: 10584 
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10585 "RCV FAN: did:x%x/ste:x%x flg:x%x", 10586 did, vport->port_state, ndlp->nlp_flag); 10587 10588 phba->fc_stat.elsRcvFAN++; 10589 lpfc_els_rcv_fan(vport, elsiocb, ndlp); 10590 break; 10591 case ELS_CMD_PRLI: 10592 case ELS_CMD_NVMEPRLI: 10593 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10594 "RCV PRLI: did:x%x/ste:x%x flg:x%x", 10595 did, vport->port_state, ndlp->nlp_flag); 10596 10597 phba->fc_stat.elsRcvPRLI++; 10598 if ((vport->port_state < LPFC_DISC_AUTH) && 10599 (vport->fc_flag & FC_FABRIC)) { 10600 rjt_err = LSRJT_UNABLE_TPC; 10601 rjt_exp = LSEXP_NOTHING_MORE; 10602 break; 10603 } 10604 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI); 10605 break; 10606 case ELS_CMD_LIRR: 10607 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10608 "RCV LIRR: did:x%x/ste:x%x flg:x%x", 10609 did, vport->port_state, ndlp->nlp_flag); 10610 10611 phba->fc_stat.elsRcvLIRR++; 10612 lpfc_els_rcv_lirr(vport, elsiocb, ndlp); 10613 if (newnode) 10614 lpfc_disc_state_machine(vport, ndlp, NULL, 10615 NLP_EVT_DEVICE_RM); 10616 break; 10617 case ELS_CMD_RLS: 10618 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10619 "RCV RLS: did:x%x/ste:x%x flg:x%x", 10620 did, vport->port_state, ndlp->nlp_flag); 10621 10622 phba->fc_stat.elsRcvRLS++; 10623 lpfc_els_rcv_rls(vport, elsiocb, ndlp); 10624 if (newnode) 10625 lpfc_disc_state_machine(vport, ndlp, NULL, 10626 NLP_EVT_DEVICE_RM); 10627 break; 10628 case ELS_CMD_RPL: 10629 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10630 "RCV RPL: did:x%x/ste:x%x flg:x%x", 10631 did, vport->port_state, ndlp->nlp_flag); 10632 10633 phba->fc_stat.elsRcvRPL++; 10634 lpfc_els_rcv_rpl(vport, elsiocb, ndlp); 10635 if (newnode) 10636 lpfc_disc_state_machine(vport, ndlp, NULL, 10637 NLP_EVT_DEVICE_RM); 10638 break; 10639 case ELS_CMD_RNID: 10640 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10641 "RCV RNID: did:x%x/ste:x%x flg:x%x", 10642 did, vport->port_state, ndlp->nlp_flag); 10643 10644 phba->fc_stat.elsRcvRNID++; 10645 lpfc_els_rcv_rnid(vport, elsiocb, ndlp); 10646 if (newnode) 10647 lpfc_disc_state_machine(vport, ndlp, NULL, 10648 NLP_EVT_DEVICE_RM); 10649 break; 10650 case ELS_CMD_RTV: 10651 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10652 "RCV RTV: did:x%x/ste:x%x flg:x%x", 10653 did, vport->port_state, ndlp->nlp_flag); 10654 phba->fc_stat.elsRcvRTV++; 10655 lpfc_els_rcv_rtv(vport, elsiocb, ndlp); 10656 if (newnode) 10657 lpfc_disc_state_machine(vport, ndlp, NULL, 10658 NLP_EVT_DEVICE_RM); 10659 break; 10660 case ELS_CMD_RRQ: 10661 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10662 "RCV RRQ: did:x%x/ste:x%x flg:x%x", 10663 did, vport->port_state, ndlp->nlp_flag); 10664 10665 phba->fc_stat.elsRcvRRQ++; 10666 lpfc_els_rcv_rrq(vport, elsiocb, ndlp); 10667 if (newnode) 10668 lpfc_disc_state_machine(vport, ndlp, NULL, 10669 NLP_EVT_DEVICE_RM); 10670 break; 10671 case ELS_CMD_ECHO: 10672 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10673 "RCV ECHO: did:x%x/ste:x%x flg:x%x", 10674 did, vport->port_state, ndlp->nlp_flag); 10675 10676 phba->fc_stat.elsRcvECHO++; 10677 lpfc_els_rcv_echo(vport, elsiocb, ndlp); 10678 if (newnode) 10679 lpfc_disc_state_machine(vport, ndlp, NULL, 10680 NLP_EVT_DEVICE_RM); 10681 break; 10682 case ELS_CMD_REC: 10683 /* receive this due to exchange closed */ 10684 rjt_err = LSRJT_UNABLE_TPC; 10685 rjt_exp = LSEXP_INVALID_OX_RX; 10686 break; 10687 case ELS_CMD_FPIN: 10688 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 
10689 "RCV FPIN: did:x%x/ste:x%x flg:x%x", 10690 did, vport->port_state, ndlp->nlp_flag); 10691 10692 lpfc_els_rcv_fpin(vport, (struct fc_els_fpin *)payload, 10693 payload_len); 10694 10695 /* There are no replies, so no rjt codes */ 10696 break; 10697 case ELS_CMD_EDC: 10698 lpfc_els_rcv_edc(vport, elsiocb, ndlp); 10699 break; 10700 case ELS_CMD_RDF: 10701 phba->fc_stat.elsRcvRDF++; 10702 /* Accept RDF only from fabric controller */ 10703 if (did != Fabric_Cntl_DID) { 10704 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 10705 "1115 Received RDF from invalid DID " 10706 "x%x\n", did); 10707 rjt_err = LSRJT_PROTOCOL_ERR; 10708 rjt_exp = LSEXP_NOTHING_MORE; 10709 goto lsrjt; 10710 } 10711 10712 lpfc_els_rcv_rdf(vport, elsiocb, ndlp); 10713 break; 10714 default: 10715 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10716 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x", 10717 cmd, did, vport->port_state); 10718 10719 /* Unsupported ELS command, reject */ 10720 rjt_err = LSRJT_CMD_UNSUPPORTED; 10721 rjt_exp = LSEXP_NOTHING_MORE; 10722 10723 /* Unknown ELS command <elsCmd> received from NPORT <did> */ 10724 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10725 "0115 Unknown ELS command x%x " 10726 "received from NPORT x%x\n", cmd, did); 10727 if (newnode) 10728 lpfc_disc_state_machine(vport, ndlp, NULL, 10729 NLP_EVT_DEVICE_RM); 10730 break; 10731 } 10732 10733 lsrjt: 10734 /* check if need to LS_RJT received ELS cmd */ 10735 if (rjt_err) { 10736 memset(&stat, 0, sizeof(stat)); 10737 stat.un.b.lsRjtRsnCode = rjt_err; 10738 stat.un.b.lsRjtRsnCodeExp = rjt_exp; 10739 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp, 10740 NULL); 10741 /* Remove the reference from above for new nodes. */ 10742 if (newnode) 10743 lpfc_disc_state_machine(vport, ndlp, NULL, 10744 NLP_EVT_DEVICE_RM); 10745 } 10746 10747 /* Release the reference on this elsiocb, not the ndlp. */ 10748 lpfc_nlp_put(elsiocb->ndlp); 10749 elsiocb->ndlp = NULL; 10750 10751 /* Special case. Driver received an unsolicited command that 10752 * unsupportable given the driver's current state. Reset the 10753 * link and start over. 10754 */ 10755 if (init_link) { 10756 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10757 if (!mbox) 10758 return; 10759 lpfc_linkdown(phba); 10760 lpfc_init_link(phba, mbox, 10761 phba->cfg_topology, 10762 phba->cfg_link_speed); 10763 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; 10764 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 10765 mbox->vport = vport; 10766 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == 10767 MBX_NOT_FINISHED) 10768 mempool_free(mbox, phba->mbox_mem_pool); 10769 } 10770 10771 return; 10772 10773 dropit: 10774 if (vport && !(vport->load_flag & FC_UNLOADING)) 10775 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10776 "0111 Dropping received ELS cmd " 10777 "Data: x%x x%x x%x x%x\n", 10778 cmd, status, get_job_word4(phba, elsiocb), did); 10779 10780 phba->fc_stat.elsRcvDrop++; 10781 } 10782 10783 /** 10784 * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring 10785 * @phba: pointer to lpfc hba data structure. 10786 * @pring: pointer to a SLI ring. 10787 * @elsiocb: pointer to lpfc els iocb data structure. 10788 * 10789 * This routine is used to process an unsolicited event received from a SLI 10790 * (Service Level Interface) ring. 
The actual processing of the data buffer 10791 * associated with the unsolicited event is done by invoking the routine 10792 * lpfc_els_unsol_buffer() after properly set up the iocb buffer from the 10793 * SLI ring on which the unsolicited event was received. 10794 **/ 10795 void 10796 lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10797 struct lpfc_iocbq *elsiocb) 10798 { 10799 struct lpfc_vport *vport = elsiocb->vport; 10800 u32 ulp_command, status, parameter, bde_count = 0; 10801 IOCB_t *icmd; 10802 struct lpfc_wcqe_complete *wcqe_cmpl = NULL; 10803 struct lpfc_dmabuf *bdeBuf1 = elsiocb->cmd_dmabuf; 10804 struct lpfc_dmabuf *bdeBuf2 = elsiocb->bpl_dmabuf; 10805 dma_addr_t paddr; 10806 10807 elsiocb->cmd_dmabuf = NULL; 10808 elsiocb->rsp_dmabuf = NULL; 10809 elsiocb->bpl_dmabuf = NULL; 10810 10811 wcqe_cmpl = &elsiocb->wcqe_cmpl; 10812 ulp_command = get_job_cmnd(phba, elsiocb); 10813 status = get_job_ulpstatus(phba, elsiocb); 10814 parameter = get_job_word4(phba, elsiocb); 10815 if (phba->sli_rev == LPFC_SLI_REV4) 10816 bde_count = wcqe_cmpl->word3; 10817 else 10818 bde_count = elsiocb->iocb.ulpBdeCount; 10819 10820 if (status == IOSTAT_NEED_BUFFER) { 10821 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); 10822 } else if (status == IOSTAT_LOCAL_REJECT && 10823 (parameter & IOERR_PARAM_MASK) == 10824 IOERR_RCV_BUFFER_WAITING) { 10825 phba->fc_stat.NoRcvBuf++; 10826 /* Not enough posted buffers; Try posting more buffers */ 10827 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) 10828 lpfc_sli3_post_buffer(phba, pring, 0); 10829 return; 10830 } 10831 10832 if (phba->sli_rev == LPFC_SLI_REV3) { 10833 icmd = &elsiocb->iocb; 10834 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 10835 (ulp_command == CMD_IOCB_RCV_ELS64_CX || 10836 ulp_command == CMD_IOCB_RCV_SEQ64_CX)) { 10837 if (icmd->unsli3.rcvsli3.vpi == 0xffff) 10838 vport = phba->pport; 10839 else 10840 vport = lpfc_find_vport_by_vpid(phba, 10841 icmd->unsli3.rcvsli3.vpi); 10842 } 10843 } 10844 10845 /* If there are no BDEs associated 10846 * with this IOCB, there is nothing to do. 10847 */ 10848 if (bde_count == 0) 10849 return; 10850 10851 /* Account for SLI2 or SLI3 and later unsolicited buffering */ 10852 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 10853 elsiocb->cmd_dmabuf = bdeBuf1; 10854 if (bde_count == 2) 10855 elsiocb->bpl_dmabuf = bdeBuf2; 10856 } else { 10857 icmd = &elsiocb->iocb; 10858 paddr = getPaddr(icmd->un.cont64[0].addrHigh, 10859 icmd->un.cont64[0].addrLow); 10860 elsiocb->cmd_dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, 10861 paddr); 10862 if (bde_count == 2) { 10863 paddr = getPaddr(icmd->un.cont64[1].addrHigh, 10864 icmd->un.cont64[1].addrLow); 10865 elsiocb->bpl_dmabuf = lpfc_sli_ringpostbuf_get(phba, 10866 pring, 10867 paddr); 10868 } 10869 } 10870 10871 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb); 10872 /* 10873 * The different unsolicited event handlers would tell us 10874 * if they are done with "mp" by setting cmd_dmabuf to NULL. 10875 */ 10876 if (elsiocb->cmd_dmabuf) { 10877 lpfc_in_buf_free(phba, elsiocb->cmd_dmabuf); 10878 elsiocb->cmd_dmabuf = NULL; 10879 } 10880 10881 if (elsiocb->bpl_dmabuf) { 10882 lpfc_in_buf_free(phba, elsiocb->bpl_dmabuf); 10883 elsiocb->bpl_dmabuf = NULL; 10884 } 10885 10886 } 10887 10888 static void 10889 lpfc_start_fdmi(struct lpfc_vport *vport) 10890 { 10891 struct lpfc_nodelist *ndlp; 10892 10893 /* If this is the first time, allocate an ndlp and initialize 10894 * it. 
Otherwise, make sure the node is enabled and then do the 10895 * login. 10896 */ 10897 ndlp = lpfc_findnode_did(vport, FDMI_DID); 10898 if (!ndlp) { 10899 ndlp = lpfc_nlp_init(vport, FDMI_DID); 10900 if (ndlp) { 10901 ndlp->nlp_type |= NLP_FABRIC; 10902 } else { 10903 return; 10904 } 10905 } 10906 10907 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 10908 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 10909 } 10910 10911 /** 10912 * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr 10913 * @phba: pointer to lpfc hba data structure. 10914 * @vport: pointer to a virtual N_Port data structure. 10915 * 10916 * This routine issues a Port Login (PLOGI) to the Name Server with 10917 * State Change Request (SCR) for a @vport. This routine will create an 10918 * ndlp for the Name Server associated to the @vport if such node does 10919 * not already exist. The PLOGI to Name Server is issued by invoking the 10920 * lpfc_issue_els_plogi() routine. If Fabric-Device Management Interface 10921 * (FDMI) is configured to the @vport, a FDMI node will be created and 10922 * the PLOGI to FDMI is issued by invoking lpfc_issue_els_plogi() routine. 10923 **/ 10924 void 10925 lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) 10926 { 10927 struct lpfc_nodelist *ndlp; 10928 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 10929 10930 /* 10931 * If lpfc_delay_discovery parameter is set and the clean address 10932 * bit is cleared and fc fabric parameters chenged, delay FC NPort 10933 * discovery. 10934 */ 10935 spin_lock_irq(shost->host_lock); 10936 if (vport->fc_flag & FC_DISC_DELAYED) { 10937 spin_unlock_irq(shost->host_lock); 10938 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10939 "3334 Delay fc port discovery for %d secs\n", 10940 phba->fc_ratov); 10941 mod_timer(&vport->delayed_disc_tmo, 10942 jiffies + msecs_to_jiffies(1000 * phba->fc_ratov)); 10943 return; 10944 } 10945 spin_unlock_irq(shost->host_lock); 10946 10947 ndlp = lpfc_findnode_did(vport, NameServer_DID); 10948 if (!ndlp) { 10949 ndlp = lpfc_nlp_init(vport, NameServer_DID); 10950 if (!ndlp) { 10951 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 10952 lpfc_disc_start(vport); 10953 return; 10954 } 10955 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10956 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10957 "0251 NameServer login: no memory\n"); 10958 return; 10959 } 10960 } 10961 10962 ndlp->nlp_type |= NLP_FABRIC; 10963 10964 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 10965 10966 if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) { 10967 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10968 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10969 "0252 Cannot issue NameServer login\n"); 10970 return; 10971 } 10972 10973 if ((phba->cfg_enable_SmartSAN || 10974 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) && 10975 (vport->load_flag & FC_ALLOW_FDMI)) 10976 lpfc_start_fdmi(vport); 10977 } 10978 10979 /** 10980 * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport 10981 * @phba: pointer to lpfc hba data structure. 10982 * @pmb: pointer to the driver internal queue element for mailbox command. 10983 * 10984 * This routine is the completion callback function to register new vport 10985 * mailbox command. 
If the new vport mailbox command completes successfully, 10986 * the fabric registration login shall be performed on physical port (the 10987 * new vport created is actually a physical port, with VPI 0) or the port 10988 * login to Name Server for State Change Request (SCR) will be performed 10989 * on virtual port (real virtual port, with VPI greater than 0). 10990 **/ 10991 static void 10992 lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 10993 { 10994 struct lpfc_vport *vport = pmb->vport; 10995 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 10996 struct lpfc_nodelist *ndlp = pmb->ctx_ndlp; 10997 MAILBOX_t *mb = &pmb->u.mb; 10998 int rc; 10999 11000 spin_lock_irq(shost->host_lock); 11001 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 11002 spin_unlock_irq(shost->host_lock); 11003 11004 if (mb->mbxStatus) { 11005 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 11006 "0915 Register VPI failed : Status: x%x" 11007 " upd bit: x%x \n", mb->mbxStatus, 11008 mb->un.varRegVpi.upd); 11009 if (phba->sli_rev == LPFC_SLI_REV4 && 11010 mb->un.varRegVpi.upd) 11011 goto mbox_err_exit ; 11012 11013 switch (mb->mbxStatus) { 11014 case 0x11: /* unsupported feature */ 11015 case 0x9603: /* max_vpi exceeded */ 11016 case 0x9602: /* Link event since CLEAR_LA */ 11017 /* giving up on vport registration */ 11018 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 11019 spin_lock_irq(shost->host_lock); 11020 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 11021 spin_unlock_irq(shost->host_lock); 11022 lpfc_can_disctmo(vport); 11023 break; 11024 /* If reg_vpi fail with invalid VPI status, re-init VPI */ 11025 case 0x20: 11026 spin_lock_irq(shost->host_lock); 11027 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 11028 spin_unlock_irq(shost->host_lock); 11029 lpfc_init_vpi(phba, pmb, vport->vpi); 11030 pmb->vport = vport; 11031 pmb->mbox_cmpl = lpfc_init_vpi_cmpl; 11032 rc = lpfc_sli_issue_mbox(phba, pmb, 11033 MBX_NOWAIT); 11034 if (rc == MBX_NOT_FINISHED) { 11035 lpfc_printf_vlog(vport, KERN_ERR, 11036 LOG_TRACE_EVENT, 11037 "2732 Failed to issue INIT_VPI" 11038 " mailbox command\n"); 11039 } else { 11040 lpfc_nlp_put(ndlp); 11041 return; 11042 } 11043 fallthrough; 11044 default: 11045 /* Try to recover from this error */ 11046 if (phba->sli_rev == LPFC_SLI_REV4) 11047 lpfc_sli4_unreg_all_rpis(vport); 11048 lpfc_mbx_unreg_vpi(vport); 11049 spin_lock_irq(shost->host_lock); 11050 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 11051 spin_unlock_irq(shost->host_lock); 11052 if (mb->mbxStatus == MBX_NOT_FINISHED) 11053 break; 11054 if ((vport->port_type == LPFC_PHYSICAL_PORT) && 11055 !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) { 11056 if (phba->sli_rev == LPFC_SLI_REV4) 11057 lpfc_issue_init_vfi(vport); 11058 else 11059 lpfc_initial_flogi(vport); 11060 } else { 11061 lpfc_initial_fdisc(vport); 11062 } 11063 break; 11064 } 11065 } else { 11066 spin_lock_irq(shost->host_lock); 11067 vport->vpi_state |= LPFC_VPI_REGISTERED; 11068 spin_unlock_irq(shost->host_lock); 11069 if (vport == phba->pport) { 11070 if (phba->sli_rev < LPFC_SLI_REV4) 11071 lpfc_issue_fabric_reglogin(vport); 11072 else { 11073 /* 11074 * If the physical port is instantiated using 11075 * FDISC, do not start vport discovery. 
11076 */ 11077 if (vport->port_state != LPFC_FDISC) 11078 lpfc_start_fdiscs(phba); 11079 lpfc_do_scr_ns_plogi(phba, vport); 11080 } 11081 } else { 11082 lpfc_do_scr_ns_plogi(phba, vport); 11083 } 11084 } 11085 mbox_err_exit: 11086 /* Now, we decrement the ndlp reference count held for this 11087 * callback function 11088 */ 11089 lpfc_nlp_put(ndlp); 11090 11091 mempool_free(pmb, phba->mbox_mem_pool); 11092 return; 11093 } 11094 11095 /** 11096 * lpfc_register_new_vport - Register a new vport with a HBA 11097 * @phba: pointer to lpfc hba data structure. 11098 * @vport: pointer to a host virtual N_Port data structure. 11099 * @ndlp: pointer to a node-list data structure. 11100 * 11101 * This routine registers the @vport as a new virtual port with a HBA. 11102 * It is done through a registering vpi mailbox command. 11103 **/ 11104 void 11105 lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport, 11106 struct lpfc_nodelist *ndlp) 11107 { 11108 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 11109 LPFC_MBOXQ_t *mbox; 11110 11111 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 11112 if (mbox) { 11113 lpfc_reg_vpi(vport, mbox); 11114 mbox->vport = vport; 11115 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 11116 if (!mbox->ctx_ndlp) { 11117 mempool_free(mbox, phba->mbox_mem_pool); 11118 goto mbox_err_exit; 11119 } 11120 11121 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport; 11122 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 11123 == MBX_NOT_FINISHED) { 11124 /* mailbox command not success, decrement ndlp 11125 * reference count for this command 11126 */ 11127 lpfc_nlp_put(ndlp); 11128 mempool_free(mbox, phba->mbox_mem_pool); 11129 11130 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 11131 "0253 Register VPI: Can't send mbox\n"); 11132 goto mbox_err_exit; 11133 } 11134 } else { 11135 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 11136 "0254 Register VPI: no memory\n"); 11137 goto mbox_err_exit; 11138 } 11139 return; 11140 11141 mbox_err_exit: 11142 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 11143 spin_lock_irq(shost->host_lock); 11144 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 11145 spin_unlock_irq(shost->host_lock); 11146 return; 11147 } 11148 11149 /** 11150 * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer 11151 * @phba: pointer to lpfc hba data structure. 11152 * 11153 * This routine cancels the retry delay timers to all the vports. 11154 **/ 11155 void 11156 lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba) 11157 { 11158 struct lpfc_vport **vports; 11159 struct lpfc_nodelist *ndlp; 11160 uint32_t link_state; 11161 int i; 11162 11163 /* Treat this failure as linkdown for all vports */ 11164 link_state = phba->link_state; 11165 lpfc_linkdown(phba); 11166 phba->link_state = link_state; 11167 11168 vports = lpfc_create_vport_work_array(phba); 11169 11170 if (vports) { 11171 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 11172 ndlp = lpfc_findnode_did(vports[i], Fabric_DID); 11173 if (ndlp) 11174 lpfc_cancel_retry_delay_tmo(vports[i], ndlp); 11175 lpfc_els_flush_cmd(vports[i]); 11176 } 11177 lpfc_destroy_vport_work_array(phba, vports); 11178 } 11179 } 11180 11181 /** 11182 * lpfc_retry_pport_discovery - Start timer to retry FLOGI. 11183 * @phba: pointer to lpfc hba data structure. 11184 * 11185 * This routine abort all pending discovery commands and 11186 * start a timer to retry FLOGI for the physical port 11187 * discovery. 
11188 **/
11189 void
11190 lpfc_retry_pport_discovery(struct lpfc_hba *phba)
11191 {
11192 struct lpfc_nodelist *ndlp;
11193 
11194 /* Cancel all the vports' retry delay timers */
11195 lpfc_cancel_all_vport_retry_delay_timer(phba);
11196 
11197 /* If the fabric requires FLOGI, then re-instantiate the physical login */
11198 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
11199 if (!ndlp)
11200 return;
11201 
11202 mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
11203 spin_lock_irq(&ndlp->lock);
11204 ndlp->nlp_flag |= NLP_DELAY_TMO;
11205 spin_unlock_irq(&ndlp->lock);
11206 ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
11207 phba->pport->port_state = LPFC_FLOGI;
11208 return;
11209 }
11210 
11211 /**
11212 * lpfc_fabric_login_reqd - Check if FLOGI required.
11213 * @phba: pointer to lpfc hba data structure.
11214 * @cmdiocb: pointer to FDISC command iocb.
11215 * @rspiocb: pointer to FDISC response iocb.
11216 *
11217 * This routine checks if a FLOGI is required for FDISC
11218 * to succeed.
11219 **/
11220 static int
11221 lpfc_fabric_login_reqd(struct lpfc_hba *phba,
11222 struct lpfc_iocbq *cmdiocb,
11223 struct lpfc_iocbq *rspiocb)
11224 {
11225 u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
11226 u32 ulp_word4 = get_job_word4(phba, rspiocb);
11227 
11228 if (ulp_status != IOSTAT_FABRIC_RJT ||
11229 ulp_word4 != RJT_LOGIN_REQUIRED)
11230 return 0;
11231 else
11232 return 1;
11233 }
11234 
11235 /**
11236 * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command
11237 * @phba: pointer to lpfc hba data structure.
11238 * @cmdiocb: pointer to lpfc command iocb data structure.
11239 * @rspiocb: pointer to lpfc response iocb data structure.
11240 *
11241 * This routine is the completion callback function to a Fabric Discover
11242 * (FDISC) ELS command. Since all the FDISC ELS commands are issued
11243 * single threaded, each FDISC completion callback function will reset
11244 * the discovery timer for all vports such that the timers will not
11245 * time out unnecessarily. The function checks the FDISC IOCB status. If an
11246 * error is detected, the vport will be set to FC_VPORT_FAILED state. Otherwise,
11247 * the vport will be set to FC_VPORT_ACTIVE state. It then checks whether the DID
11248 * assigned to the vport has been changed with the completion of the FDISC
11249 * command. If so, both RPI (Remote Port Index) and VPI (Virtual Port Index)
11250 * are unregistered from the HBA, and then the lpfc_register_new_vport()
11251 * routine is invoked to register the new vport with the HBA. Otherwise, the
11252 * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name
11253 * Server for State Change Request (SCR).
11254 **/
11255 static void
11256 lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11257 struct lpfc_iocbq *rspiocb)
11258 {
11259 struct lpfc_vport *vport = cmdiocb->vport;
11260 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
11261 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
11262 struct lpfc_nodelist *np;
11263 struct lpfc_nodelist *next_np;
11264 struct lpfc_iocbq *piocb;
11265 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf, *prsp;
11266 struct serv_parm *sp;
11267 uint8_t fabric_param_changed;
11268 u32 ulp_status, ulp_word4;
11269 
11270 ulp_status = get_job_ulpstatus(phba, rspiocb);
11271 ulp_word4 = get_job_word4(phba, rspiocb);
11272 
11273 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
11274 "0123 FDISC completes. 
x%x/x%x prevDID: x%x\n", 11275 ulp_status, ulp_word4, 11276 vport->fc_prevDID); 11277 /* Since all FDISCs are being single threaded, we 11278 * must reset the discovery timer for ALL vports 11279 * waiting to send FDISC when one completes. 11280 */ 11281 list_for_each_entry(piocb, &phba->fabric_iocb_list, list) { 11282 lpfc_set_disctmo(piocb->vport); 11283 } 11284 11285 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 11286 "FDISC cmpl: status:x%x/x%x prevdid:x%x", 11287 ulp_status, ulp_word4, vport->fc_prevDID); 11288 11289 if (ulp_status) { 11290 11291 if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) { 11292 lpfc_retry_pport_discovery(phba); 11293 goto out; 11294 } 11295 11296 /* Check for retry */ 11297 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) 11298 goto out; 11299 /* FDISC failed */ 11300 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 11301 "0126 FDISC failed. (x%x/x%x)\n", 11302 ulp_status, ulp_word4); 11303 goto fdisc_failed; 11304 } 11305 11306 lpfc_check_nlp_post_devloss(vport, ndlp); 11307 11308 spin_lock_irq(shost->host_lock); 11309 vport->fc_flag &= ~FC_VPORT_CVL_RCVD; 11310 vport->fc_flag &= ~FC_VPORT_LOGO_RCVD; 11311 vport->fc_flag |= FC_FABRIC; 11312 if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP) 11313 vport->fc_flag |= FC_PUBLIC_LOOP; 11314 spin_unlock_irq(shost->host_lock); 11315 11316 vport->fc_myDID = ulp_word4 & Mask_DID; 11317 lpfc_vport_set_state(vport, FC_VPORT_ACTIVE); 11318 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 11319 if (!prsp) 11320 goto out; 11321 sp = prsp->virt + sizeof(uint32_t); 11322 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp); 11323 memcpy(&vport->fabric_portname, &sp->portName, 11324 sizeof(struct lpfc_name)); 11325 memcpy(&vport->fabric_nodename, &sp->nodeName, 11326 sizeof(struct lpfc_name)); 11327 if (fabric_param_changed && 11328 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 11329 /* If our NportID changed, we need to ensure all 11330 * remaining NPORTs get unreg_login'ed so we can 11331 * issue unreg_vpi. 11332 */ 11333 list_for_each_entry_safe(np, next_np, 11334 &vport->fc_nodes, nlp_listp) { 11335 if ((np->nlp_state != NLP_STE_NPR_NODE) || 11336 !(np->nlp_flag & NLP_NPR_ADISC)) 11337 continue; 11338 spin_lock_irq(&ndlp->lock); 11339 np->nlp_flag &= ~NLP_NPR_ADISC; 11340 spin_unlock_irq(&ndlp->lock); 11341 lpfc_unreg_rpi(vport, np); 11342 } 11343 lpfc_cleanup_pending_mbox(vport); 11344 11345 if (phba->sli_rev == LPFC_SLI_REV4) 11346 lpfc_sli4_unreg_all_rpis(vport); 11347 11348 lpfc_mbx_unreg_vpi(vport); 11349 spin_lock_irq(shost->host_lock); 11350 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 11351 if (phba->sli_rev == LPFC_SLI_REV4) 11352 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 11353 else 11354 vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG; 11355 spin_unlock_irq(shost->host_lock); 11356 } else if ((phba->sli_rev == LPFC_SLI_REV4) && 11357 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 11358 /* 11359 * Driver needs to re-reg VPI in order for f/w 11360 * to update the MAC address. 11361 */ 11362 lpfc_register_new_vport(phba, vport, ndlp); 11363 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 11364 goto out; 11365 } 11366 11367 if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI) 11368 lpfc_issue_init_vpi(vport); 11369 else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) 11370 lpfc_register_new_vport(phba, vport, ndlp); 11371 else 11372 lpfc_do_scr_ns_plogi(phba, vport); 11373 11374 /* The FDISC completed successfully. Move the fabric ndlp to 11375 * UNMAPPED state and register with the transport. 
11376 */ 11377 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 11378 goto out; 11379 11380 fdisc_failed: 11381 if (vport->fc_vport && 11382 (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS)) 11383 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 11384 /* Cancel discovery timer */ 11385 lpfc_can_disctmo(vport); 11386 out: 11387 lpfc_els_free_iocb(phba, cmdiocb); 11388 lpfc_nlp_put(ndlp); 11389 } 11390 11391 /** 11392 * lpfc_issue_els_fdisc - Issue a fdisc iocb command 11393 * @vport: pointer to a virtual N_Port data structure. 11394 * @ndlp: pointer to a node-list data structure. 11395 * @retry: number of retries to the command IOCB. 11396 * 11397 * This routine prepares and issues a Fabric Discover (FDISC) IOCB to 11398 * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb() 11399 * routine to issue the IOCB, which makes sure only one outstanding fabric 11400 * IOCB will be sent off HBA at any given time. 11401 * 11402 * Note that the ndlp reference count will be incremented by 1 for holding the 11403 * ndlp and the reference to ndlp will be stored into the ndlp field of 11404 * the IOCB for the completion callback function to the FDISC ELS command. 11405 * 11406 * Return code 11407 * 0 - Successfully issued fdisc iocb command 11408 * 1 - Failed to issue fdisc iocb command 11409 **/ 11410 static int 11411 lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 11412 uint8_t retry) 11413 { 11414 struct lpfc_hba *phba = vport->phba; 11415 IOCB_t *icmd; 11416 union lpfc_wqe128 *wqe = NULL; 11417 struct lpfc_iocbq *elsiocb; 11418 struct serv_parm *sp; 11419 uint8_t *pcmd; 11420 uint16_t cmdsize; 11421 int did = ndlp->nlp_DID; 11422 int rc; 11423 11424 vport->port_state = LPFC_FDISC; 11425 vport->fc_myDID = 0; 11426 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 11427 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 11428 ELS_CMD_FDISC); 11429 if (!elsiocb) { 11430 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 11431 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 11432 "0255 Issue FDISC: no IOCB\n"); 11433 return 1; 11434 } 11435 11436 if (phba->sli_rev == LPFC_SLI_REV4) { 11437 wqe = &elsiocb->wqe; 11438 bf_set(els_req64_sid, &wqe->els_req, 0); 11439 bf_set(els_req64_sp, &wqe->els_req, 1); 11440 } else { 11441 icmd = &elsiocb->iocb; 11442 icmd->un.elsreq64.myID = 0; 11443 icmd->un.elsreq64.fl = 1; 11444 icmd->ulpCt_h = 1; 11445 icmd->ulpCt_l = 0; 11446 } 11447 11448 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 11449 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC; 11450 pcmd += sizeof(uint32_t); /* CSP Word 1 */ 11451 memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm)); 11452 sp = (struct serv_parm *) pcmd; 11453 /* Setup CSPs accordingly for Fabric */ 11454 sp->cmn.e_d_tov = 0; 11455 sp->cmn.w2.r_a_tov = 0; 11456 sp->cmn.virtual_fabric_support = 0; 11457 sp->cls1.classValid = 0; 11458 sp->cls2.seqDelivery = 1; 11459 sp->cls3.seqDelivery = 1; 11460 11461 pcmd += sizeof(uint32_t); /* CSP Word 2 */ 11462 pcmd += sizeof(uint32_t); /* CSP Word 3 */ 11463 pcmd += sizeof(uint32_t); /* CSP Word 4 */ 11464 pcmd += sizeof(uint32_t); /* Port Name */ 11465 memcpy(pcmd, &vport->fc_portname, 8); 11466 pcmd += sizeof(uint32_t); /* Node Name */ 11467 pcmd += sizeof(uint32_t); /* Node Name */ 11468 memcpy(pcmd, &vport->fc_nodename, 8); 11469 sp->cmn.valid_vendor_ver_level = 0; 11470 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); 11471 lpfc_set_disctmo(vport); 11472 11473 phba->fc_stat.elsXmitFDISC++; 
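/* Completion handling for this FDISC, including freeing the iocb and
 * dropping the ndlp reference taken below, is done in lpfc_cmpl_els_fdisc().
 */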
11474 elsiocb->cmd_cmpl = lpfc_cmpl_els_fdisc;
11475 
11476 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
11477 "Issue FDISC: did:x%x",
11478 did, 0, 0);
11479 
11480 elsiocb->ndlp = lpfc_nlp_get(ndlp);
11481 if (!elsiocb->ndlp)
11482 goto err_out;
11483 
11484 rc = lpfc_issue_fabric_iocb(phba, elsiocb);
11485 if (rc == IOCB_ERROR) {
11486 lpfc_nlp_put(ndlp);
11487 goto err_out;
11488 }
11489 
11490 lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING);
11491 return 0;
11492 
11493 err_out:
11494 lpfc_els_free_iocb(phba, elsiocb);
11495 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
11496 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
11497 "0256 Issue FDISC: Cannot send IOCB\n");
11498 return 1;
11499 }
11500 
11501 /**
11502 * lpfc_cmpl_els_npiv_logo - Completion function with vport logo
11503 * @phba: pointer to lpfc hba data structure.
11504 * @cmdiocb: pointer to lpfc command iocb data structure.
11505 * @rspiocb: pointer to lpfc response iocb data structure.
11506 *
11507 * This routine is the completion callback function to the issuing of a LOGO
11508 * ELS command off a vport. It frees the command IOCB and then decrements the
11509 * reference count held on the ndlp for this completion function, indicating that
11510 * the reference to the ndlp is no longer needed. Note that the
11511 * lpfc_els_free_iocb() routine decrements the ndlp reference held for this
11512 * callback function, and an additional explicit ndlp reference decrement
11513 * will trigger the actual release of the ndlp.
11514 **/
11515 static void
11516 lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11517 struct lpfc_iocbq *rspiocb)
11518 {
11519 struct lpfc_vport *vport = cmdiocb->vport;
11520 IOCB_t *irsp;
11521 struct lpfc_nodelist *ndlp;
11522 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
11523 u32 ulp_status, ulp_word4, did, tmo;
11524 
11525 ndlp = cmdiocb->ndlp;
11526 
11527 ulp_status = get_job_ulpstatus(phba, rspiocb);
11528 ulp_word4 = get_job_word4(phba, rspiocb);
11529 
11530 if (phba->sli_rev == LPFC_SLI_REV4) {
11531 did = get_job_els_rsp64_did(phba, cmdiocb);
11532 tmo = get_wqe_tmo(cmdiocb);
11533 } else {
11534 irsp = &rspiocb->iocb;
11535 did = get_job_els_rsp64_did(phba, rspiocb);
11536 tmo = irsp->ulpTimeout;
11537 }
11538 
11539 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
11540 "LOGO npiv cmpl: status:x%x/x%x did:x%x",
11541 ulp_status, ulp_word4, did);
11542 
11543 /* NPIV LOGO completes to NPort <nlp_DID> */
11544 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
11545 "2928 NPIV LOGO completes to NPort x%x "
11546 "Data: x%x x%x x%x x%x x%x x%x x%x\n",
11547 ndlp->nlp_DID, ulp_status, ulp_word4,
11548 tmo, vport->num_disc_nodes,
11549 kref_read(&ndlp->kref), ndlp->nlp_flag,
11550 ndlp->fc4_xpt_flags);
11551 
11552 if (ulp_status == IOSTAT_SUCCESS) {
11553 spin_lock_irq(shost->host_lock);
11554 vport->fc_flag &= ~FC_NDISC_ACTIVE;
11555 vport->fc_flag &= ~FC_FABRIC;
11556 spin_unlock_irq(shost->host_lock);
11557 lpfc_can_disctmo(vport);
11558 }
11559 
11560 if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) {
11561 /* Wake up lpfc_vport_delete if waiting...*/
11562 if (ndlp->logo_waitq)
11563 wake_up(ndlp->logo_waitq);
11564 spin_lock_irq(&ndlp->lock);
11565 ndlp->nlp_flag &= ~(NLP_ISSUE_LOGO | NLP_LOGO_SND);
11566 ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO;
11567 spin_unlock_irq(&ndlp->lock);
11568 }
11569 
11570 /* Safe to release resources now. 
*/ 11571 lpfc_els_free_iocb(phba, cmdiocb); 11572 lpfc_nlp_put(ndlp); 11573 } 11574 11575 /** 11576 * lpfc_issue_els_npiv_logo - Issue a logo off a vport 11577 * @vport: pointer to a virtual N_Port data structure. 11578 * @ndlp: pointer to a node-list data structure. 11579 * 11580 * This routine issues a LOGO ELS command to an @ndlp off a @vport. 11581 * 11582 * Note that the ndlp reference count will be incremented by 1 for holding the 11583 * ndlp and the reference to ndlp will be stored into the ndlp field of 11584 * the IOCB for the completion callback function to the LOGO ELS command. 11585 * 11586 * Return codes 11587 * 0 - Successfully issued logo off the @vport 11588 * 1 - Failed to issue logo off the @vport 11589 **/ 11590 int 11591 lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 11592 { 11593 int rc = 0; 11594 struct lpfc_hba *phba = vport->phba; 11595 struct lpfc_iocbq *elsiocb; 11596 uint8_t *pcmd; 11597 uint16_t cmdsize; 11598 11599 cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name); 11600 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID, 11601 ELS_CMD_LOGO); 11602 if (!elsiocb) 11603 return 1; 11604 11605 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 11606 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 11607 pcmd += sizeof(uint32_t); 11608 11609 /* Fill in LOGO payload */ 11610 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 11611 pcmd += sizeof(uint32_t); 11612 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 11613 11614 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 11615 "Issue LOGO npiv did:x%x flg:x%x", 11616 ndlp->nlp_DID, ndlp->nlp_flag, 0); 11617 11618 elsiocb->cmd_cmpl = lpfc_cmpl_els_npiv_logo; 11619 spin_lock_irq(&ndlp->lock); 11620 ndlp->nlp_flag |= NLP_LOGO_SND; 11621 spin_unlock_irq(&ndlp->lock); 11622 elsiocb->ndlp = lpfc_nlp_get(ndlp); 11623 if (!elsiocb->ndlp) { 11624 lpfc_els_free_iocb(phba, elsiocb); 11625 goto err; 11626 } 11627 11628 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 11629 if (rc == IOCB_ERROR) { 11630 lpfc_els_free_iocb(phba, elsiocb); 11631 lpfc_nlp_put(ndlp); 11632 goto err; 11633 } 11634 return 0; 11635 11636 err: 11637 spin_lock_irq(&ndlp->lock); 11638 ndlp->nlp_flag &= ~NLP_LOGO_SND; 11639 spin_unlock_irq(&ndlp->lock); 11640 return 1; 11641 } 11642 11643 /** 11644 * lpfc_fabric_block_timeout - Handler function to the fabric block timer 11645 * @t: timer context used to obtain the lpfc hba. 11646 * 11647 * This routine is invoked by the fabric iocb block timer after 11648 * timeout. It posts the fabric iocb block timeout event by setting the 11649 * WORKER_FABRIC_BLOCK_TMO bit to work port event bitmap and then invokes 11650 * lpfc_worker_wake_up() routine to wake up the worker thread. It is for 11651 * the worker thread to invoke the lpfc_unblock_fabric_iocbs() on the 11652 * posted event WORKER_FABRIC_BLOCK_TMO. 
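* Note that the fabric_block_timer itself is armed for 100 ms by
* lpfc_block_fabric_iocbs() below whenever fabric iocbs are blocked.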
11653 **/ 11654 void 11655 lpfc_fabric_block_timeout(struct timer_list *t) 11656 { 11657 struct lpfc_hba *phba = from_timer(phba, t, fabric_block_timer); 11658 unsigned long iflags; 11659 uint32_t tmo_posted; 11660 11661 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 11662 tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO; 11663 if (!tmo_posted) 11664 phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO; 11665 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); 11666 11667 if (!tmo_posted) 11668 lpfc_worker_wake_up(phba); 11669 return; 11670 } 11671 11672 /** 11673 * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list 11674 * @phba: pointer to lpfc hba data structure. 11675 * 11676 * This routine issues one fabric iocb from the driver internal list to 11677 * the HBA. It first checks whether it's ready to issue one fabric iocb to 11678 * the HBA (whether there is no outstanding fabric iocb). If so, it shall 11679 * remove one pending fabric iocb from the driver internal list and invokes 11680 * lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA. 11681 **/ 11682 static void 11683 lpfc_resume_fabric_iocbs(struct lpfc_hba *phba) 11684 { 11685 struct lpfc_iocbq *iocb; 11686 unsigned long iflags; 11687 int ret; 11688 11689 repeat: 11690 iocb = NULL; 11691 spin_lock_irqsave(&phba->hbalock, iflags); 11692 /* Post any pending iocb to the SLI layer */ 11693 if (atomic_read(&phba->fabric_iocb_count) == 0) { 11694 list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb), 11695 list); 11696 if (iocb) 11697 /* Increment fabric iocb count to hold the position */ 11698 atomic_inc(&phba->fabric_iocb_count); 11699 } 11700 spin_unlock_irqrestore(&phba->hbalock, iflags); 11701 if (iocb) { 11702 iocb->fabric_cmd_cmpl = iocb->cmd_cmpl; 11703 iocb->cmd_cmpl = lpfc_cmpl_fabric_iocb; 11704 iocb->cmd_flag |= LPFC_IO_FABRIC; 11705 11706 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD, 11707 "Fabric sched1: ste:x%x", 11708 iocb->vport->port_state, 0, 0); 11709 11710 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); 11711 11712 if (ret == IOCB_ERROR) { 11713 iocb->cmd_cmpl = iocb->fabric_cmd_cmpl; 11714 iocb->fabric_cmd_cmpl = NULL; 11715 iocb->cmd_flag &= ~LPFC_IO_FABRIC; 11716 set_job_ulpstatus(iocb, IOSTAT_LOCAL_REJECT); 11717 iocb->wcqe_cmpl.parameter = IOERR_SLI_ABORTED; 11718 iocb->cmd_cmpl(phba, iocb, iocb); 11719 11720 atomic_dec(&phba->fabric_iocb_count); 11721 goto repeat; 11722 } 11723 } 11724 } 11725 11726 /** 11727 * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command 11728 * @phba: pointer to lpfc hba data structure. 11729 * 11730 * This routine unblocks the issuing fabric iocb command. The function 11731 * will clear the fabric iocb block bit and then invoke the routine 11732 * lpfc_resume_fabric_iocbs() to issue one of the pending fabric iocb 11733 * from the driver internal fabric iocb list. 11734 **/ 11735 void 11736 lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba) 11737 { 11738 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 11739 11740 lpfc_resume_fabric_iocbs(phba); 11741 return; 11742 } 11743 11744 /** 11745 * lpfc_block_fabric_iocbs - Block issuing fabric iocb command 11746 * @phba: pointer to lpfc hba data structure. 11747 * 11748 * This routine blocks the issuing fabric iocb for a specified amount of 11749 * time (currently 100 ms). This is done by set the fabric iocb block bit 11750 * and set up a timeout timer for 100ms. 
When the block bit is set, no more
11751 * fabric iocbs will be issued out of the HBA.
11752 **/
11753 static void
11754 lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
11755 {
11756 int blocked;
11757 
11758 blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
11759 /* Start a timer to unblock fabric iocbs after 100ms */
11760 if (!blocked)
11761 mod_timer(&phba->fabric_block_timer,
11762 jiffies + msecs_to_jiffies(100));
11763 
11764 return;
11765 }
11766 
11767 /**
11768 * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb
11769 * @phba: pointer to lpfc hba data structure.
11770 * @cmdiocb: pointer to lpfc command iocb data structure.
11771 * @rspiocb: pointer to lpfc response iocb data structure.
11772 *
11773 * This routine is the callback function that is put into the fabric iocb's
11774 * callback function pointer (iocb->cmd_cmpl). The original iocb's callback
11775 * function pointer has been stored in iocb->fabric_cmd_cmpl. This callback
11776 * function first restores and invokes the original iocb's callback function
11777 * and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next
11778 * fabric bound iocb from the driver internal fabric iocb list onto the wire.
11779 **/
11780 static void
11781 lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11782 struct lpfc_iocbq *rspiocb)
11783 {
11784 struct ls_rjt stat;
11785 u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
11786 u32 ulp_word4 = get_job_word4(phba, rspiocb);
11787 
11788 WARN_ON((cmdiocb->cmd_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC);
11789 
11790 switch (ulp_status) {
11791 case IOSTAT_NPORT_RJT:
11792 case IOSTAT_FABRIC_RJT:
11793 if (ulp_word4 & RJT_UNAVAIL_TEMP)
11794 lpfc_block_fabric_iocbs(phba);
11795 break;
11796 
11797 case IOSTAT_NPORT_BSY:
11798 case IOSTAT_FABRIC_BSY:
11799 lpfc_block_fabric_iocbs(phba);
11800 break;
11801 
11802 case IOSTAT_LS_RJT:
11803 stat.un.ls_rjt_error_be =
11804 cpu_to_be32(ulp_word4);
11805 if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
11806 (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
11807 lpfc_block_fabric_iocbs(phba);
11808 break;
11809 }
11810 
11811 BUG_ON(atomic_read(&phba->fabric_iocb_count) == 0);
11812 
11813 cmdiocb->cmd_cmpl = cmdiocb->fabric_cmd_cmpl;
11814 cmdiocb->fabric_cmd_cmpl = NULL;
11815 cmdiocb->cmd_flag &= ~LPFC_IO_FABRIC;
11816 cmdiocb->cmd_cmpl(phba, cmdiocb, rspiocb);
11817 
11818 atomic_dec(&phba->fabric_iocb_count);
11819 if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
11820 /* Post any pending iocbs to HBA */
11821 lpfc_resume_fabric_iocbs(phba);
11822 }
11823 }
11824 
11825 /**
11826 * lpfc_issue_fabric_iocb - Issue a fabric iocb command
11827 * @phba: pointer to lpfc hba data structure.
11828 * @iocb: pointer to lpfc command iocb data structure.
11829 *
11830 * This routine is used as the top-level API for issuing a fabric iocb command
11831 * such as FLOGI and FDISC. To accommodate certain switch fabrics, this driver
11832 * function makes sure that only one fabric bound iocb will be outstanding at
11833 * any given time. As such, this function will first check to see whether there
11834 * is already an outstanding fabric iocb on the wire. If so, it will put the
11835 * newly issued iocb onto the driver internal fabric iocb list, waiting to be
11836 * issued later. Otherwise, it will issue the iocb on the wire and update the
11837 * fabric iocb count to indicate that there is one fabric iocb on the wire.
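* When that outstanding fabric iocb completes, lpfc_cmpl_fabric_iocb() above
* restores its original completion routine and, unless fabric iocbs are
* blocked, calls lpfc_resume_fabric_iocbs() to post the next iocb from the
* internal list.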
11838 * 11839 * Note, this implementation has a potential sending out fabric IOCBs out of 11840 * order. The problem is caused by the construction of the "ready" boolen does 11841 * not include the condition that the internal fabric IOCB list is empty. As 11842 * such, it is possible a fabric IOCB issued by this routine might be "jump" 11843 * ahead of the fabric IOCBs in the internal list. 11844 * 11845 * Return code 11846 * IOCB_SUCCESS - either fabric iocb put on the list or issued successfully 11847 * IOCB_ERROR - failed to issue fabric iocb 11848 **/ 11849 static int 11850 lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb) 11851 { 11852 unsigned long iflags; 11853 int ready; 11854 int ret; 11855 11856 BUG_ON(atomic_read(&phba->fabric_iocb_count) > 1); 11857 11858 spin_lock_irqsave(&phba->hbalock, iflags); 11859 ready = atomic_read(&phba->fabric_iocb_count) == 0 && 11860 !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 11861 11862 if (ready) 11863 /* Increment fabric iocb count to hold the position */ 11864 atomic_inc(&phba->fabric_iocb_count); 11865 spin_unlock_irqrestore(&phba->hbalock, iflags); 11866 if (ready) { 11867 iocb->fabric_cmd_cmpl = iocb->cmd_cmpl; 11868 iocb->cmd_cmpl = lpfc_cmpl_fabric_iocb; 11869 iocb->cmd_flag |= LPFC_IO_FABRIC; 11870 11871 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD, 11872 "Fabric sched2: ste:x%x", 11873 iocb->vport->port_state, 0, 0); 11874 11875 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); 11876 11877 if (ret == IOCB_ERROR) { 11878 iocb->cmd_cmpl = iocb->fabric_cmd_cmpl; 11879 iocb->fabric_cmd_cmpl = NULL; 11880 iocb->cmd_flag &= ~LPFC_IO_FABRIC; 11881 atomic_dec(&phba->fabric_iocb_count); 11882 } 11883 } else { 11884 spin_lock_irqsave(&phba->hbalock, iflags); 11885 list_add_tail(&iocb->list, &phba->fabric_iocb_list); 11886 spin_unlock_irqrestore(&phba->hbalock, iflags); 11887 ret = IOCB_SUCCESS; 11888 } 11889 return ret; 11890 } 11891 11892 /** 11893 * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list 11894 * @vport: pointer to a virtual N_Port data structure. 11895 * 11896 * This routine aborts all the IOCBs associated with a @vport from the 11897 * driver internal fabric IOCB list. The list contains fabric IOCBs to be 11898 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB 11899 * list, removes each IOCB associated with the @vport off the list, set the 11900 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function 11901 * associated with the IOCB. 11902 **/ 11903 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport) 11904 { 11905 LIST_HEAD(completions); 11906 struct lpfc_hba *phba = vport->phba; 11907 struct lpfc_iocbq *tmp_iocb, *piocb; 11908 11909 spin_lock_irq(&phba->hbalock); 11910 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, 11911 list) { 11912 11913 if (piocb->vport != vport) 11914 continue; 11915 11916 list_move_tail(&piocb->list, &completions); 11917 } 11918 spin_unlock_irq(&phba->hbalock); 11919 11920 /* Cancel all the IOCBs from the completions list */ 11921 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 11922 IOERR_SLI_ABORTED); 11923 } 11924 11925 /** 11926 * lpfc_fabric_abort_nport - Abort a ndlp's iocbs from driver fabric iocb list 11927 * @ndlp: pointer to a node-list data structure. 11928 * 11929 * This routine aborts all the IOCBs associated with an @ndlp from the 11930 * driver internal fabric IOCB list. 
The list contains fabric IOCBs to be 11931 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB 11932 * list, removes each IOCB associated with the @ndlp off the list, sets the 11933 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function 11934 * associated with the IOCB. 11935 **/ 11936 void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp) 11937 { 11938 LIST_HEAD(completions); 11939 struct lpfc_hba *phba = ndlp->phba; 11940 struct lpfc_iocbq *tmp_iocb, *piocb; 11941 struct lpfc_sli_ring *pring; 11942 11943 pring = lpfc_phba_elsring(phba); 11944 11945 if (unlikely(!pring)) 11946 return; 11947 11948 spin_lock_irq(&phba->hbalock); 11949 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, 11950 list) { 11951 if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) { 11952 11953 list_move_tail(&piocb->list, &completions); 11954 } 11955 } 11956 spin_unlock_irq(&phba->hbalock); 11957 11958 /* Cancel all the IOCBs from the completions list */ 11959 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 11960 IOERR_SLI_ABORTED); 11961 } 11962
11963 /** 11964 * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list 11965 * @phba: pointer to lpfc hba data structure. 11966 * 11967 * This routine aborts all the IOCBs currently on the driver internal 11968 * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS 11969 * IOCB ring. This function takes the entire IOCB list off the fabric IOCB 11970 * list, removes IOCBs off the list, sets the status field to 11971 * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with 11972 * the IOCB. 11973 **/ 11974 void lpfc_fabric_abort_hba(struct lpfc_hba *phba) 11975 { 11976 LIST_HEAD(completions); 11977 11978 spin_lock_irq(&phba->hbalock); 11979 list_splice_init(&phba->fabric_iocb_list, &completions); 11980 spin_unlock_irq(&phba->hbalock); 11981 11982 /* Cancel all the IOCBs from the completions list */ 11983 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 11984 IOERR_SLI_ABORTED); 11985 } 11986
11987 /** 11988 * lpfc_sli4_vport_delete_els_xri_aborted - Remove all ndlp references for vport 11989 * @vport: pointer to lpfc vport data structure. 11990 * 11991 * This routine is invoked by the vport cleanup for deletions and the cleanup 11992 * for an ndlp on removal. 11993 **/ 11994 void 11995 lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport) 11996 { 11997 struct lpfc_hba *phba = vport->phba; 11998 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 11999 struct lpfc_nodelist *ndlp = NULL; 12000 unsigned long iflag = 0; 12001 12002 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag); 12003 list_for_each_entry_safe(sglq_entry, sglq_next, 12004 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { 12005 if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport) { 12006 lpfc_nlp_put(sglq_entry->ndlp); 12007 ndlp = sglq_entry->ndlp; 12008 sglq_entry->ndlp = NULL; 12009 12010 /* If the xri on the abts_els_sgl list is for the Fport 12011 * node and the vport is unloading, the xri aborted wcqe 12012 * likely isn't coming back. Just release the sgl.
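 * (Editorial note: the lpfc_nlp_put() call above has already dropped the
 * node reference that this sglq held, so only the sgl itself is recycled
 * here.)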
12013 */ 12014 if ((vport->load_flag & FC_UNLOADING) && 12015 ndlp->nlp_DID == Fabric_DID) { 12016 list_del(&sglq_entry->list); 12017 sglq_entry->state = SGL_FREED; 12018 list_add_tail(&sglq_entry->list, 12019 &phba->sli4_hba.lpfc_els_sgl_list); 12020 } 12021 } 12022 } 12023 spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag); 12024 return; 12025 } 12026 12027 /** 12028 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort 12029 * @phba: pointer to lpfc hba data structure. 12030 * @axri: pointer to the els xri abort wcqe structure. 12031 * 12032 * This routine is invoked by the worker thread to process a SLI4 slow-path 12033 * ELS aborted xri. 12034 **/ 12035 void 12036 lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba, 12037 struct sli4_wcqe_xri_aborted *axri) 12038 { 12039 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); 12040 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); 12041 uint16_t lxri = 0; 12042 12043 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 12044 unsigned long iflag = 0; 12045 struct lpfc_nodelist *ndlp; 12046 struct lpfc_sli_ring *pring; 12047 12048 pring = lpfc_phba_elsring(phba); 12049 12050 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag); 12051 list_for_each_entry_safe(sglq_entry, sglq_next, 12052 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { 12053 if (sglq_entry->sli4_xritag == xri) { 12054 list_del(&sglq_entry->list); 12055 ndlp = sglq_entry->ndlp; 12056 sglq_entry->ndlp = NULL; 12057 list_add_tail(&sglq_entry->list, 12058 &phba->sli4_hba.lpfc_els_sgl_list); 12059 sglq_entry->state = SGL_FREED; 12060 spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, 12061 iflag); 12062 12063 if (ndlp) { 12064 lpfc_set_rrq_active(phba, ndlp, 12065 sglq_entry->sli4_lxritag, 12066 rxid, 1); 12067 lpfc_nlp_put(ndlp); 12068 } 12069 12070 /* Check if TXQ queue needs to be serviced */ 12071 if (pring && !list_empty(&pring->txq)) 12072 lpfc_worker_wake_up(phba); 12073 return; 12074 } 12075 } 12076 spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag); 12077 lxri = lpfc_sli4_xri_inrange(phba, xri); 12078 if (lxri == NO_XRI) 12079 return; 12080 12081 spin_lock_irqsave(&phba->hbalock, iflag); 12082 sglq_entry = __lpfc_get_active_sglq(phba, lxri); 12083 if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) { 12084 spin_unlock_irqrestore(&phba->hbalock, iflag); 12085 return; 12086 } 12087 sglq_entry->state = SGL_XRI_ABORTED; 12088 spin_unlock_irqrestore(&phba->hbalock, iflag); 12089 return; 12090 } 12091 12092 /* lpfc_sli_abts_recover_port - Recover a port that failed a BLS_ABORT req. 12093 * @vport: pointer to virtual port object. 12094 * @ndlp: nodelist pointer for the impacted node. 12095 * 12096 * The driver calls this routine in response to an SLI4 XRI ABORT CQE 12097 * or an SLI3 ASYNC_STATUS_CN event from the port. For either event, 12098 * the driver is required to send a LOGO to the remote node before it 12099 * attempts to recover its login to the remote node. 12100 */ 12101 void 12102 lpfc_sli_abts_recover_port(struct lpfc_vport *vport, 12103 struct lpfc_nodelist *ndlp) 12104 { 12105 struct Scsi_Host *shost; 12106 struct lpfc_hba *phba; 12107 unsigned long flags = 0; 12108 12109 shost = lpfc_shost_from_vport(vport); 12110 phba = vport->phba; 12111 if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) { 12112 lpfc_printf_log(phba, KERN_INFO, 12113 LOG_SLI, "3093 No rport recovery needed. 
" 12114 "rport in state 0x%x\n", ndlp->nlp_state); 12115 return; 12116 } 12117 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 12118 "3094 Start rport recovery on shost id 0x%x " 12119 "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x " 12120 "flags 0x%x\n", 12121 shost->host_no, ndlp->nlp_DID, 12122 vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state, 12123 ndlp->nlp_flag); 12124 /* 12125 * The rport is not responding. Remove the FCP-2 flag to prevent 12126 * an ADISC in the follow-up recovery code. 12127 */ 12128 spin_lock_irqsave(&ndlp->lock, flags); 12129 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 12130 ndlp->nlp_flag |= NLP_ISSUE_LOGO; 12131 spin_unlock_irqrestore(&ndlp->lock, flags); 12132 lpfc_unreg_rpi(vport, ndlp); 12133 } 12134 12135 static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport) 12136 { 12137 bitmap_zero(vport->vmid_priority_range, LPFC_VMID_MAX_PRIORITY_RANGE); 12138 } 12139 12140 static void 12141 lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max) 12142 { 12143 u32 i; 12144 12145 if ((min > max) || (max > LPFC_VMID_MAX_PRIORITY_RANGE)) 12146 return; 12147 12148 for (i = min; i <= max; i++) 12149 set_bit(i, vport->vmid_priority_range); 12150 } 12151 12152 static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid) 12153 { 12154 set_bit(ctcl_vmid, vport->vmid_priority_range); 12155 } 12156 12157 u32 lpfc_vmid_get_cs_ctl(struct lpfc_vport *vport) 12158 { 12159 u32 i; 12160 12161 i = find_first_bit(vport->vmid_priority_range, 12162 LPFC_VMID_MAX_PRIORITY_RANGE); 12163 12164 if (i == LPFC_VMID_MAX_PRIORITY_RANGE) 12165 return 0; 12166 12167 clear_bit(i, vport->vmid_priority_range); 12168 return i; 12169 } 12170 12171 #define MAX_PRIORITY_DESC 255 12172 12173 static void 12174 lpfc_cmpl_els_qfpa(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 12175 struct lpfc_iocbq *rspiocb) 12176 { 12177 struct lpfc_vport *vport = cmdiocb->vport; 12178 struct priority_range_desc *desc; 12179 struct lpfc_dmabuf *prsp = NULL; 12180 struct lpfc_vmid_priority_range *vmid_range = NULL; 12181 u32 *data; 12182 struct lpfc_dmabuf *dmabuf = cmdiocb->cmd_dmabuf; 12183 u32 ulp_status = get_job_ulpstatus(phba, rspiocb); 12184 u32 ulp_word4 = get_job_word4(phba, rspiocb); 12185 u8 *pcmd, max_desc; 12186 u32 len, i; 12187 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 12188 12189 prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list); 12190 if (!prsp) 12191 goto out; 12192 12193 pcmd = prsp->virt; 12194 data = (u32 *)pcmd; 12195 if (data[0] == ELS_CMD_LS_RJT) { 12196 lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI, 12197 "3277 QFPA LS_RJT x%x x%x\n", 12198 data[0], data[1]); 12199 goto out; 12200 } 12201 if (ulp_status) { 12202 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI, 12203 "6529 QFPA failed with status x%x x%x\n", 12204 ulp_status, ulp_word4); 12205 goto out; 12206 } 12207 12208 if (!vport->qfpa_res) { 12209 max_desc = FCELSSIZE / sizeof(*vport->qfpa_res); 12210 vport->qfpa_res = kcalloc(max_desc, sizeof(*vport->qfpa_res), 12211 GFP_KERNEL); 12212 if (!vport->qfpa_res) 12213 goto out; 12214 } 12215 12216 len = *((u32 *)(pcmd + 4)); 12217 len = be32_to_cpu(len); 12218 memcpy(vport->qfpa_res, pcmd, len + 8); 12219 len = len / LPFC_PRIORITY_RANGE_DESC_SIZE; 12220 12221 desc = (struct priority_range_desc *)(pcmd + 8); 12222 vmid_range = vport->vmid_priority.vmid_range; 12223 if (!vmid_range) { 12224 vmid_range = kcalloc(MAX_PRIORITY_DESC, sizeof(*vmid_range), 12225 GFP_KERNEL); 12226 if (!vmid_range) { 12227 kfree(vport->qfpa_res); 12228 goto out; 12229 } 12230 
vport->vmid_priority.vmid_range = vmid_range; 12231 } 12232 vport->vmid_priority.num_descriptors = len; 12233 12234 for (i = 0; i < len; i++, vmid_range++, desc++) { 12235 lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS, 12236 "6539 vmid values low=%d, high=%d, qos=%d, " 12237 "local ve id=%d\n", desc->lo_range, 12238 desc->hi_range, desc->qos_priority, 12239 desc->local_ve_id); 12240 12241 vmid_range->low = desc->lo_range << 1; 12242 if (desc->local_ve_id == QFPA_ODD_ONLY) 12243 vmid_range->low++; 12244 if (desc->qos_priority) 12245 vport->vmid_flag |= LPFC_VMID_QOS_ENABLED; 12246 vmid_range->qos = desc->qos_priority; 12247 12248 vmid_range->high = desc->hi_range << 1; 12249 if ((desc->local_ve_id == QFPA_ODD_ONLY) || 12250 (desc->local_ve_id == QFPA_EVEN_ODD)) 12251 vmid_range->high++; 12252 } 12253 lpfc_init_cs_ctl_bitmap(vport); 12254 for (i = 0; i < vport->vmid_priority.num_descriptors; i++) { 12255 lpfc_vmid_set_cs_ctl_range(vport, 12256 vport->vmid_priority.vmid_range[i].low, 12257 vport->vmid_priority.vmid_range[i].high); 12258 } 12259 12260 vport->vmid_flag |= LPFC_VMID_QFPA_CMPL; 12261 out: 12262 lpfc_els_free_iocb(phba, cmdiocb); 12263 lpfc_nlp_put(ndlp); 12264 } 12265 12266 int lpfc_issue_els_qfpa(struct lpfc_vport *vport) 12267 { 12268 struct lpfc_hba *phba = vport->phba; 12269 struct lpfc_nodelist *ndlp; 12270 struct lpfc_iocbq *elsiocb; 12271 u8 *pcmd; 12272 int ret; 12273 12274 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); 12275 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 12276 return -ENXIO; 12277 12278 elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_QFPA_SIZE, 2, ndlp, 12279 ndlp->nlp_DID, ELS_CMD_QFPA); 12280 if (!elsiocb) 12281 return -ENOMEM; 12282 12283 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt; 12284 12285 *((u32 *)(pcmd)) = ELS_CMD_QFPA; 12286 pcmd += 4; 12287 12288 elsiocb->cmd_cmpl = lpfc_cmpl_els_qfpa; 12289 12290 elsiocb->ndlp = lpfc_nlp_get(ndlp); 12291 if (!elsiocb->ndlp) { 12292 lpfc_els_free_iocb(vport->phba, elsiocb); 12293 return -ENXIO; 12294 } 12295 12296 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 2); 12297 if (ret != IOCB_SUCCESS) { 12298 lpfc_els_free_iocb(phba, elsiocb); 12299 lpfc_nlp_put(ndlp); 12300 return -EIO; 12301 } 12302 vport->vmid_flag &= ~LPFC_VMID_QOS_ENABLED; 12303 return 0; 12304 } 12305 12306 int 12307 lpfc_vmid_uvem(struct lpfc_vport *vport, 12308 struct lpfc_vmid *vmid, bool instantiated) 12309 { 12310 struct lpfc_vem_id_desc *vem_id_desc; 12311 struct lpfc_nodelist *ndlp; 12312 struct lpfc_iocbq *elsiocb; 12313 struct instantiated_ve_desc *inst_desc; 12314 struct lpfc_vmid_context *vmid_context; 12315 u8 *pcmd; 12316 u32 *len; 12317 int ret = 0; 12318 12319 ndlp = lpfc_findnode_did(vport, Fabric_DID); 12320 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 12321 return -ENXIO; 12322 12323 vmid_context = kmalloc(sizeof(*vmid_context), GFP_KERNEL); 12324 if (!vmid_context) 12325 return -ENOMEM; 12326 elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_UVEM_SIZE, 2, 12327 ndlp, Fabric_DID, ELS_CMD_UVEM); 12328 if (!elsiocb) 12329 goto out; 12330 12331 lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS, 12332 "3427 Host vmid %s %d\n", 12333 vmid->host_vmid, instantiated); 12334 vmid_context->vmp = vmid; 12335 vmid_context->nlp = ndlp; 12336 vmid_context->instantiated = instantiated; 12337 elsiocb->vmid_tag.vmid_context = vmid_context; 12338 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt; 12339 12340 if (uuid_is_null((uuid_t *)vport->lpfc_vmid_host_uuid)) 12341 memcpy(vport->lpfc_vmid_host_uuid, vmid->host_vmid, 12342 
LPFC_COMPRESS_VMID_SIZE); 12343 12344 *((u32 *)(pcmd)) = ELS_CMD_UVEM; 12345 len = (u32 *)(pcmd + 4); 12346 *len = cpu_to_be32(LPFC_UVEM_SIZE - 8); 12347 12348 vem_id_desc = (struct lpfc_vem_id_desc *)(pcmd + 8); 12349 vem_id_desc->tag = be32_to_cpu(VEM_ID_DESC_TAG); 12350 vem_id_desc->length = be32_to_cpu(LPFC_UVEM_VEM_ID_DESC_SIZE); 12351 memcpy(vem_id_desc->vem_id, vport->lpfc_vmid_host_uuid, 12352 LPFC_COMPRESS_VMID_SIZE); 12353 12354 inst_desc = (struct instantiated_ve_desc *)(pcmd + 32); 12355 inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG); 12356 inst_desc->length = be32_to_cpu(LPFC_UVEM_VE_MAP_DESC_SIZE); 12357 memcpy(inst_desc->global_vem_id, vmid->host_vmid, 12358 LPFC_COMPRESS_VMID_SIZE); 12359 12360 bf_set(lpfc_instantiated_nport_id, inst_desc, vport->fc_myDID); 12361 bf_set(lpfc_instantiated_local_id, inst_desc, 12362 vmid->un.cs_ctl_vmid); 12363 if (instantiated) { 12364 inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG); 12365 } else { 12366 inst_desc->tag = be32_to_cpu(DEINSTANTIATED_VE_DESC_TAG); 12367 lpfc_vmid_put_cs_ctl(vport, vmid->un.cs_ctl_vmid); 12368 } 12369 inst_desc->word6 = cpu_to_be32(inst_desc->word6); 12370 12371 elsiocb->cmd_cmpl = lpfc_cmpl_els_uvem; 12372 12373 elsiocb->ndlp = lpfc_nlp_get(ndlp); 12374 if (!elsiocb->ndlp) { 12375 lpfc_els_free_iocb(vport->phba, elsiocb); 12376 goto out; 12377 } 12378 12379 ret = lpfc_sli_issue_iocb(vport->phba, LPFC_ELS_RING, elsiocb, 0); 12380 if (ret != IOCB_SUCCESS) { 12381 lpfc_els_free_iocb(vport->phba, elsiocb); 12382 lpfc_nlp_put(ndlp); 12383 goto out; 12384 } 12385 12386 return 0; 12387 out: 12388 kfree(vmid_context); 12389 return -EIO; 12390 } 12391 12392 static void 12393 lpfc_cmpl_els_uvem(struct lpfc_hba *phba, struct lpfc_iocbq *icmdiocb, 12394 struct lpfc_iocbq *rspiocb) 12395 { 12396 struct lpfc_vport *vport = icmdiocb->vport; 12397 struct lpfc_dmabuf *prsp = NULL; 12398 struct lpfc_vmid_context *vmid_context = 12399 icmdiocb->vmid_tag.vmid_context; 12400 struct lpfc_nodelist *ndlp = icmdiocb->ndlp; 12401 u8 *pcmd; 12402 u32 *data; 12403 u32 ulp_status = get_job_ulpstatus(phba, rspiocb); 12404 u32 ulp_word4 = get_job_word4(phba, rspiocb); 12405 struct lpfc_dmabuf *dmabuf = icmdiocb->cmd_dmabuf; 12406 struct lpfc_vmid *vmid; 12407 12408 vmid = vmid_context->vmp; 12409 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 12410 ndlp = NULL; 12411 12412 prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list); 12413 if (!prsp) 12414 goto out; 12415 pcmd = prsp->virt; 12416 data = (u32 *)pcmd; 12417 if (data[0] == ELS_CMD_LS_RJT) { 12418 lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI, 12419 "4532 UVEM LS_RJT %x %x\n", data[0], data[1]); 12420 goto out; 12421 } 12422 if (ulp_status) { 12423 lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI, 12424 "4533 UVEM error status %x: %x\n", 12425 ulp_status, ulp_word4); 12426 goto out; 12427 } 12428 spin_lock(&phba->hbalock); 12429 /* Set IN USE flag */ 12430 vport->vmid_flag |= LPFC_VMID_IN_USE; 12431 phba->pport->vmid_flag |= LPFC_VMID_IN_USE; 12432 spin_unlock(&phba->hbalock); 12433 12434 if (vmid_context->instantiated) { 12435 write_lock(&vport->vmid_lock); 12436 vmid->flag |= LPFC_VMID_REGISTERED; 12437 vmid->flag &= ~LPFC_VMID_REQ_REGISTER; 12438 write_unlock(&vport->vmid_lock); 12439 } 12440 12441 out: 12442 kfree(vmid_context); 12443 lpfc_els_free_iocb(phba, icmdiocb); 12444 lpfc_nlp_put(ndlp); 12445 } 12446
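
/*
 * Editorial sketch of the VMID priority-tagging flow implied by the helpers
 * above (illustrative only; this is not code from the driver, and "vmid"
 * stands for a struct lpfc_vmid entry the caller already owns):
 *
 *	if (!lpfc_issue_els_qfpa(vport)) {
 *		-- once lpfc_cmpl_els_qfpa() has set LPFC_VMID_QFPA_CMPL --
 *		u32 cs_ctl = lpfc_vmid_get_cs_ctl(vport);
 *
 *		vmid->un.cs_ctl_vmid = cs_ctl;
 *		if (lpfc_vmid_uvem(vport, vmid, true))
 *			lpfc_vmid_put_cs_ctl(vport, cs_ctl);
 *	}
 *
 * In this sketch, a failed UVEM returns the CS_CTL value to the
 * priority-range bitmap via lpfc_vmid_put_cs_ctl() so it can be handed out
 * again; on a successful exchange lpfc_cmpl_els_uvem() marks the entry
 * LPFC_VMID_REGISTERED.
 */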