/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
/* See Fibre Channel protocol T11 FC-LS for details */
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
			  struct lpfc_iocbq *);
static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
			  struct lpfc_iocbq *);
static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
				struct lpfc_nodelist *ndlp, uint8_t retry);
static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
				  struct lpfc_iocbq *iocb);
static void lpfc_cmpl_els_edc(struct lpfc_hba *phba,
			      struct lpfc_iocbq *cmdiocb,
			      struct lpfc_iocbq *rspiocb);
static void lpfc_cmpl_els_uvem(struct lpfc_hba *, struct lpfc_iocbq *,
			       struct lpfc_iocbq *);

static int lpfc_max_els_tries = 3;

static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport);
static void lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max);
static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid);

/**
 * lpfc_els_chk_latt - Check host link attention event for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine checks whether there is an outstanding host link
 * attention event during the discovery process with the @vport. It is done
 * by reading the HBA's Host Attention (HA) register.
 * If there are any host link attention events during this @vport's discovery
 * process, the @vport shall be marked as FC_ABORT_DISCOVERY, a host link
 * attention clear shall be issued if the link state is not already in host
 * link cleared state, and a return code shall indicate whether the host link
 * attention event had happened.
 *
 * Note that, if either the host link is in state LPFC_LINK_DOWN or the
 * @vport state is LPFC_VPORT_READY, the request for checking host link
 * attention event will be ignored and a return code shall indicate no host
 * link attention event had happened.
 *
 * Return codes
 *   0 - no host link attention event happened
 *   1 - host link attention event happened
 **/
int
lpfc_els_chk_latt(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	uint32_t ha_copy;

	if (vport->port_state >= LPFC_VPORT_READY ||
	    phba->link_state == LPFC_LINK_DOWN ||
	    phba->sli_rev > LPFC_SLI_REV3)
		return 0;

	/* Read the HBA Host Attention Register */
	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		return 1;

	if (!(ha_copy & HA_LATT))
		return 0;

	/* Pending Link Event during Discovery */
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0237 Pending Link Event during "
			 "Discovery: State x%x\n",
			 phba->pport->port_state);

	/* CLEAR_LA should re-enable link attention events and
	 * we should then immediately take a LATT event. The
	 * LATT processing should call lpfc_linkdown() which
	 * will cleanup any left over in-progress discovery
	 * events.
	 */
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_ABORT_DISCOVERY;
	spin_unlock_irq(shost->host_lock);

	if (phba->link_state != LPFC_CLEAR_LA)
		lpfc_issue_clear_la(phba, vport);

	return 1;
}

/**
 * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
 * @vport: pointer to a host virtual N_Port data structure.
 * @expect_rsp: flag indicating whether response is expected.
 * @cmd_size: size of the ELS command.
 * @retry: number of retries to the command when it fails.
 * @ndlp: pointer to a node-list data structure.
 * @did: destination identifier.
 * @elscmd: the ELS command code.
 *
 * This routine is used to allocate a lpfc-IOCB data structure from the
 * driver lpfc-IOCB free-list and to prepare the IOCB with the parameters
 * passed into the routine, so the discovery state machine can issue an
 * Extended Link Service (ELS) command. It is a generic lpfc-IOCB allocation
 * and preparation routine used by all the discovery state machine routines;
 * the ELS command-specific fields are set up later by the individual
 * discovery machine routines after calling this routine to allocate and
 * prepare a generic IOCB data structure. It fills in the Buffer Descriptor
 * Entries (BDEs) and allocates buffers for both the command payload and the
 * response payload (if expected). The reference count on the ndlp is
 * incremented by 1 and the reference to the ndlp is put into the ndlp field
 * of the IOCB data structure for this IOCB to hold the ndlp reference for
 * the command's callback function to access later.
 *
 * Return code
 *   Pointer to the newly allocated/prepared els iocb data structure
 *   NULL - when els iocb data structure allocation/preparation failed
 **/
struct lpfc_iocbq *
lpfc_prep_els_iocb(struct lpfc_vport *vport, u8 expect_rsp,
		   u16 cmd_size, u8 retry,
		   struct lpfc_nodelist *ndlp, u32 did,
		   u32 elscmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_dmabuf *pcmd, *prsp, *pbuflist, *bmp;
	struct ulp_bde64_le *bpl;
	u32 timeout = 0;

	if (!lpfc_is_link_up(phba))
		return NULL;

	/* Allocate buffer for command iocb */
	elsiocb = lpfc_sli_get_iocbq(phba);
	if (!elsiocb)
		return NULL;

	/*
	 * If this command is for fabric controller and HBA running
	 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
	 */
	if ((did == Fabric_DID) &&
	    (phba->hba_flag & HBA_FIP_SUPPORT) &&
	    ((elscmd == ELS_CMD_FLOGI) ||
	     (elscmd == ELS_CMD_FDISC) ||
	     (elscmd == ELS_CMD_LOGO)))
		switch (elscmd) {
		case ELS_CMD_FLOGI:
			elsiocb->cmd_flag |=
				((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		case ELS_CMD_FDISC:
			elsiocb->cmd_flag |=
				((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		case ELS_CMD_LOGO:
			elsiocb->cmd_flag |=
				((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		}
	else
		elsiocb->cmd_flag &= ~LPFC_FIP_ELS_ID_MASK;

	/* fill in BDEs for command */
	/* Allocate buffer for command payload */
	pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL);
	if (pcmd)
		pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
	if (!pcmd || !pcmd->virt)
		goto els_iocb_free_pcmb_exit;

	INIT_LIST_HEAD(&pcmd->list);

	/* Allocate buffer for response payload */
	if (expect_rsp) {
		prsp = kmalloc(sizeof(*prsp), GFP_KERNEL);
		if (prsp)
			prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
						     &prsp->phys);
		if (!prsp || !prsp->virt)
			goto els_iocb_free_prsp_exit;
		INIT_LIST_HEAD(&prsp->list);
	} else {
		prsp = NULL;
	}

	/* Allocate buffer for Buffer ptr list */
	pbuflist = kmalloc(sizeof(*pbuflist), GFP_KERNEL);
	if (pbuflist)
		pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
						 &pbuflist->phys);
	if (!pbuflist || !pbuflist->virt)
		goto els_iocb_free_pbuf_exit;

	INIT_LIST_HEAD(&pbuflist->list);

	if (expect_rsp) {
		switch (elscmd) {
		case ELS_CMD_FLOGI:
			timeout = FF_DEF_RATOV * 2;
			break;
		case ELS_CMD_LOGO:
			timeout = phba->fc_ratov;
			break;
		default:
			timeout = phba->fc_ratov * 2;
		}

		/* Fill SGE for the num bde count */
		elsiocb->num_bdes = 2;
	}

	if (phba->sli_rev == LPFC_SLI_REV4)
		bmp = pcmd;
	else
		bmp = pbuflist;

	lpfc_sli_prep_els_req_rsp(phba, elsiocb, vport, bmp, cmd_size, did,
				  elscmd, timeout, expect_rsp);

	bpl = (struct ulp_bde64_le *)pbuflist->virt;
	bpl->addr_low = cpu_to_le32(putPaddrLow(pcmd->phys));
	bpl->addr_high = cpu_to_le32(putPaddrHigh(pcmd->phys));
	bpl->type_size = cpu_to_le32(cmd_size);
	bpl->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);

	if (expect_rsp) {
		bpl++;
		bpl->addr_low = cpu_to_le32(putPaddrLow(prsp->phys));
		bpl->addr_high = cpu_to_le32(putPaddrHigh(prsp->phys));
		bpl->type_size = cpu_to_le32(FCELSSIZE);
		bpl->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
	}

	elsiocb->cmd_dmabuf = pcmd;
	elsiocb->bpl_dmabuf = pbuflist;
	elsiocb->retry = retry;
	elsiocb->vport = vport;
	elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;

	if (prsp)
		list_add(&prsp->list, &pcmd->list);
	if (expect_rsp) {
		/* Xmit ELS command <elsCmd> to remote NPORT <did> */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "0116 Xmit ELS command x%x to remote "
				 "NPORT x%x I/O tag: x%x, port state:x%x "
				 "rpi x%x fc_flag:x%x\n",
				 elscmd, did, elsiocb->iotag,
				 vport->port_state, ndlp->nlp_rpi,
				 vport->fc_flag);
	} else {
		/* Xmit ELS response <elsCmd> to remote NPORT <did> */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "0117 Xmit ELS response x%x to remote "
				 "NPORT x%x I/O tag: x%x, size: x%x "
				 "port_state x%x rpi x%x fc_flag x%x\n",
				 elscmd, ndlp->nlp_DID, elsiocb->iotag,
				 cmd_size, vport->port_state,
				 ndlp->nlp_rpi, vport->fc_flag);
	}

	return elsiocb;

els_iocb_free_pbuf_exit:
	if (expect_rsp)
		lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
	kfree(pbuflist);

els_iocb_free_prsp_exit:
	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
	kfree(prsp);

els_iocb_free_pcmb_exit:
	kfree(pcmd);
	lpfc_sli_release_iocbq(phba, elsiocb);
	return NULL;
}

/**
 * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a fabric registration login for a @vport. An
 * active ndlp node with Fabric_DID must already exist for this @vport.
 * The routine invokes two mailbox commands to carry out fabric registration
 * login through the HBA firmware: the first mailbox command requests the
 * HBA to perform link configuration for the @vport; and the second mailbox
 * command requests the HBA to perform the actual fabric registration login
 * with the @vport.
 *
 * Return code
 *   0 - successfully issued fabric registration login for @vport
 *   -ENXIO -- failed to issue fabric registration login for @vport
 **/
int
lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_nodelist *ndlp;
	struct serv_parm *sp;
	int rc;
	int err = 0;

	sp = &phba->fc_fabparam;
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		err = 1;
		goto fail;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		err = 2;
		goto fail;
	}

	vport->port_state = LPFC_FABRIC_CFG_LINK;
	lpfc_config_link(phba, mbox);
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->vport = vport;

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		err = 3;
		goto fail_free_mbox;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		err = 4;
		goto fail;
	}
	rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
			  ndlp->nlp_rpi);
	if (rc) {
		err = 5;
		goto fail_free_mbox;
	}

	mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
	mbox->vport = vport;
	/* increment the reference count on ndlp to hold reference
	 * for the callback routine.
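	 * The reference is dropped by the mailbox completion handler, or in
	 * the error path below if the mailbox cannot be issued.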
	 */
	mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
	if (!mbox->ctx_ndlp) {
		err = 6;
		goto fail_free_mbox;
	}

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		err = 7;
		goto fail_issue_reg_login;
	}

	return 0;

fail_issue_reg_login:
	/* decrement the reference count on ndlp just incremented
	 * for the failed mbox command.
	 */
	lpfc_nlp_put(ndlp);
fail_free_mbox:
	lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
fail:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0249 Cannot issue Register Fabric login: Err %d\n",
			 err);
	return -ENXIO;
}

/**
 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
 * the @vport. This mailbox command is necessary for SLI4 port only.
 *
 * Return code
 *   0 - successfully issued REG_VFI for @vport
 *   A failure code otherwise.
 **/
int
lpfc_issue_reg_vfi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mboxq = NULL;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *dmabuf = NULL;
	int rc = 0;

	/* move forward in case of SLI4 FC port loopback test and pt2pt mode */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    !(phba->link_flag & LS_LOOPBACK_MODE) &&
	    !(vport->fc_flag & FC_PT2PT)) {
		ndlp = lpfc_findnode_did(vport, Fabric_DID);
		if (!ndlp) {
			rc = -ENODEV;
			goto fail;
		}
	}

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		rc = -ENOMEM;
		goto fail;
	}

	/* Supply CSP's only if we are fabric connect or pt-to-pt connect */
	if ((vport->fc_flag & FC_FABRIC) || (vport->fc_flag & FC_PT2PT)) {
		rc = lpfc_mbox_rsrc_prep(phba, mboxq);
		if (rc) {
			rc = -ENOMEM;
			goto fail_mbox;
		}
		dmabuf = mboxq->ctx_buf;
		memcpy(dmabuf->virt, &phba->fc_fabparam,
		       sizeof(struct serv_parm));
	}

	vport->port_state = LPFC_FABRIC_CFG_LINK;
	if (dmabuf) {
		lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
		/* lpfc_reg_vfi memsets the mailbox. Restore the ctx_buf. */
		mboxq->ctx_buf = dmabuf;
	} else {
		lpfc_reg_vfi(mboxq, vport, 0);
	}

	mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
	mboxq->vport = vport;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = -ENXIO;
		goto fail_mbox;
	}
	return 0;

fail_mbox:
	lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
fail:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0289 Issue Register VFI failed: Err %d\n", rc);
	return rc;
}

/**
 * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for
 * the @vport. This mailbox command is necessary for SLI4 port only.
 *
 * Return code
 *   0 - successfully issued UNREG_VFI for @vport
 *   A failure code otherwise.
 **/
int
lpfc_issue_unreg_vfi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct Scsi_Host *shost;
	LPFC_MBOXQ_t *mboxq;
	int rc;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2556 UNREG_VFI mbox allocation failed "
				"HBA state x%x\n", phba->pport->port_state);
		return -ENOMEM;
	}

	lpfc_unreg_vfi(mboxq, vport);
	mboxq->vport = vport;
	mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl;

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2557 UNREG_VFI issue mbox failed rc x%x "
				"HBA state x%x\n",
				rc, phba->pport->port_state);
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EIO;
	}

	shost = lpfc_shost_from_vport(vport);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VFI_REGISTERED;
	spin_unlock_irq(shost->host_lock);
	return 0;
}

/**
 * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
 * @vport: pointer to a host virtual N_Port data structure.
 * @sp: pointer to service parameter data structure.
 *
 * This routine is called from the FLOGI/FDISC completion handler functions.
 * lpfc_check_clean_addr_bit returns 1 when the FCID, Fabric portname, or
 * Fabric nodename has changed in the completion service parameters;
 * otherwise it returns 0. This function also sets a flag in the vport data
 * structure to delay N_Port discovery after the FLOGI/FDISC completion if
 * the Clean Address bit in the FLOGI/FDISC response is cleared and the
 * FCID, Fabric portname, or Fabric nodename has changed in the completion
 * service parameters.
 *
 * Return code
 *   0 - FCID and Fabric Nodename and Fabric portname are not changed.
 *   1 - FCID or Fabric Nodename or Fabric portname is changed.
 *
 **/
static uint8_t
lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
			  struct serv_parm *sp)
{
	struct lpfc_hba *phba = vport->phba;
	uint8_t fabric_param_changed = 0;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if ((vport->fc_prevDID != vport->fc_myDID) ||
	    memcmp(&vport->fabric_portname, &sp->portName,
		   sizeof(struct lpfc_name)) ||
	    memcmp(&vport->fabric_nodename, &sp->nodeName,
		   sizeof(struct lpfc_name)) ||
	    (vport->vport_flag & FAWWPN_PARAM_CHG)) {
		fabric_param_changed = 1;
		vport->vport_flag &= ~FAWWPN_PARAM_CHG;
	}
	/*
	 * Word 1 Bit 31 in common service parameter is overloaded.
	 * Word 1 Bit 31 in FLOGI request is multiple NPort request
	 * Word 1 Bit 31 in FLOGI response is clean address bit
	 *
	 * If fabric parameter is changed and clean address bit is
	 * cleared delay nport discovery if
	 * - vport->fc_prevDID != 0 (not initial discovery) OR
	 * - lpfc_delay_discovery module parameter is set.
	 */
	if (fabric_param_changed && !sp->cmn.clean_address_bit &&
	    (vport->fc_prevDID || phba->cfg_delay_discovery)) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_DISC_DELAYED;
		spin_unlock_irq(shost->host_lock);
	}

	return fabric_param_changed;
}


/**
 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 * @ulp_word4: command response value
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into a fabric
 * port in a fabric topology. It properly sets up the parameters to the @ndlp
 * from the IOCB response. It also checks the newly assigned N_Port ID for the
 * @vport against the previously assigned N_Port ID. If it is different from
 * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine
 * is invoked on all the remaining nodes with the @vport to unregister the
 * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin()
 * is invoked to register login to the fabric.
 *
 * Return code
 *   0 - Success (currently, always return 0)
 **/
static int
lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   struct serv_parm *sp, uint32_t ulp_word4)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *np;
	struct lpfc_nodelist *next_np;
	uint8_t fabric_param_changed;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_FABRIC;
	spin_unlock_irq(shost->host_lock);

	phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
	if (sp->cmn.edtovResolution)	/* E_D_TOV ticks are in nanoseconds */
		phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;

	phba->fc_edtovResol = sp->cmn.edtovResolution;
	phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;

	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_PUBLIC_LOOP;
		spin_unlock_irq(shost->host_lock);
	}

	vport->fc_myDID = ulp_word4 & Mask_DID;
	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
	ndlp->nlp_class_sup = 0;
	if (sp->cls1.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS1;
	if (sp->cls2.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS2;
	if (sp->cls3.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS3;
	if (sp->cls4.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS4;
	ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
			     sp->cmn.bbRcvSizeLsb;

	fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
	if (fabric_param_changed) {
		/* Reset FDMI attribute masks based on config parameter */
		if (phba->cfg_enable_SmartSAN ||
		    (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
			/* Setup appropriate attribute masks */
			vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
			if (phba->cfg_enable_SmartSAN)
				vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
			else
				vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
		} else {
			vport->fdmi_hba_mask = 0;
			vport->fdmi_port_mask = 0;
		}

	}
	memcpy(&vport->fabric_portname, &sp->portName,
	       sizeof(struct lpfc_name));
	memcpy(&vport->fabric_nodename, &sp->nodeName,
	       sizeof(struct lpfc_name));
	memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));

	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
		if (sp->cmn.response_multiple_NPort) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_ELS | LOG_VPORT,
					 "1816 FLOGI NPIV supported, "
					 "response data 0x%x\n",
					 sp->cmn.response_multiple_NPort);
			spin_lock_irq(&phba->hbalock);
			phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
			spin_unlock_irq(&phba->hbalock);
		} else {
			/* Because we asked f/w for NPIV it still expects us
			   to call reg_vnpid at least for the physical host */
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_ELS | LOG_VPORT,
					 "1817 Fabric does not support NPIV "
					 "- configuring single port mode.\n");
			spin_lock_irq(&phba->hbalock);
			phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
			spin_unlock_irq(&phba->hbalock);
		}
	}

	/*
	 * For FC we need to do some special processing because of the SLI
	 * Port's default settings of the Common Service Parameters.
	 */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)) {
		/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
		if (fabric_param_changed)
			lpfc_unregister_fcf_prep(phba);

		/* This should just update the VFI CSPs*/
		if (vport->fc_flag & FC_VFI_REGISTERED)
			lpfc_issue_reg_vfi(vport);
	}

	if (fabric_param_changed &&
	    !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {

		/* If our NportID changed, we need to ensure all
		 * remaining NPORTs get unreg_login'ed.
		 */
		list_for_each_entry_safe(np, next_np,
					 &vport->fc_nodes, nlp_listp) {
			if ((np->nlp_state != NLP_STE_NPR_NODE) ||
			    !(np->nlp_flag & NLP_NPR_ADISC))
				continue;
			spin_lock_irq(&np->lock);
			np->nlp_flag &= ~NLP_NPR_ADISC;
			spin_unlock_irq(&np->lock);
			lpfc_unreg_rpi(vport, np);
		}
		lpfc_cleanup_pending_mbox(vport);

		if (phba->sli_rev == LPFC_SLI_REV4) {
			lpfc_sli4_unreg_all_rpis(vport);
			lpfc_mbx_unreg_vpi(vport);
			spin_lock_irq(shost->host_lock);
			vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
			spin_unlock_irq(shost->host_lock);
		}

		/*
		 * For SLI3 and SLI4, the VPI needs to be reregistered in
		 * response to this fabric parameter change event.
		 */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
		   !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
		/*
		 * Driver needs to re-reg VPI in order for f/w
		 * to update the MAC address.
		 */
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		lpfc_register_new_vport(phba, vport, ndlp);
		return 0;
	}

	if (phba->sli_rev < LPFC_SLI_REV4) {
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
		    vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
			lpfc_register_new_vport(phba, vport, ndlp);
		else
			lpfc_issue_fabric_reglogin(vport);
	} else {
		ndlp->nlp_type |= NLP_FABRIC;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
		    (vport->vpi_state & LPFC_VPI_REGISTERED)) {
			lpfc_start_fdiscs(phba);
			lpfc_do_scr_ns_plogi(phba, vport);
		} else if (vport->fc_flag & FC_VFI_REGISTERED)
			lpfc_issue_init_vpi(vport);
		else {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
					 "3135 Need register VFI: (x%x/%x)\n",
					 vport->fc_prevDID, vport->fc_myDID);
			lpfc_issue_reg_vfi(vport);
		}
	}
	return 0;
}

/**
 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into an N_Port
 * in a point-to-point topology. First, the @vport's N_Port Name is compared
 * with the received N_Port Name: if the @vport's N_Port Name is greater than
 * the received N_Port Name lexicographically, this node shall assign local
 * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and
 * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise,
 * this node shall just wait for the remote node to issue PLOGI and assign
 * N_Port IDs.
 *
 * Return code
 *   0 - Success
 *   -ENXIO - Fail
 **/
static int
lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  struct serv_parm *sp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
	vport->fc_flag |= FC_PT2PT;
	spin_unlock_irq(shost->host_lock);

	/* If we are pt2pt with another NPort, force NPIV off! */
	phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

	/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
	if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
		lpfc_unregister_fcf_prep(phba);

		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_VFI_REGISTERED;
		spin_unlock_irq(shost->host_lock);
		phba->fc_topology_changed = 0;
	}

	rc = memcmp(&vport->fc_portname, &sp->portName,
		    sizeof(vport->fc_portname));

	if (rc >= 0) {
		/* This side will initiate the PLOGI */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_PT2PT_PLOGI;
		spin_unlock_irq(shost->host_lock);

		/*
		 * N_Port ID cannot be 0, set our Id to LocalID
		 * the other side will be RemoteID.
		 */

		/* not equal */
		if (rc)
			vport->fc_myDID = PT2PT_LocalID;

		/* If not registered with a transport, decrement ndlp reference
		 * count indicating that ndlp can be safely released when other
		 * references are removed.
		 */
		if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)))
			lpfc_nlp_put(ndlp);

		ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
		if (!ndlp) {
			/*
			 * Cannot find existing Fabric ndlp, so allocate a
			 * new one
			 */
			ndlp = lpfc_nlp_init(vport, PT2PT_RemoteID);
			if (!ndlp)
				goto fail;
		}

		memcpy(&ndlp->nlp_portname, &sp->portName,
		       sizeof(struct lpfc_name));
		memcpy(&ndlp->nlp_nodename, &sp->nodeName,
		       sizeof(struct lpfc_name));
		/* Set state will put ndlp onto node list if not already done */
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(&ndlp->lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(&ndlp->lock);

		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!mbox)
			goto fail;

		lpfc_config_link(phba, mbox);

		mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
		mbox->vport = vport;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
			goto fail;
		}
	} else {
		/* This side will wait for the PLOGI. If not registered with
		 * a transport, decrement node reference count indicating that
		 * ndlp can be released when other references are removed.
		 */
		if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)))
			lpfc_nlp_put(ndlp);

		/* Start discovery - this should just do CLEAR_LA */
		lpfc_disc_start(vport);
	}

	return 0;
fail:
	return -ENXIO;
}

/**
 * lpfc_cmpl_els_flogi - Completion callback function for flogi
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is the top-level completion callback function for issuing
 * a Fabric Login (FLOGI) command. If the response IOCB reported an error,
 * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If a
 * retry has been made (either immediately or delayed, with lpfc_els_retry()
 * returning 1), the command IOCB is released and the function returns.
 * If the retry attempt has been given up (possibly after reaching the
 * maximum number of retries), one additional decrement of the ndlp
 * reference count shall be made before going out after releasing the
 * command IOCB. This will actually release the remote node (note that
 * lpfc_els_free_iocb() will also make one decrement of the ndlp reference
 * count). If no error is reported in the IOCB status, the command Port ID
 * field is used to determine whether this is a point-to-point topology or
 * a fabric topology: if the Port ID field is assigned, it is a fabric
 * topology; otherwise, it is a point-to-point topology. The routine
 * lpfc_cmpl_els_flogi_fabric() or lpfc_cmpl_els_flogi_nport() shall be
 * invoked accordingly to handle the topology-specific completion
 * conditions.
 **/
static void
lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		    struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
	IOCB_t *irsp;
	struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf, *prsp;
	struct serv_parm *sp;
	uint16_t fcf_index;
	int rc;
	u32 ulp_status, ulp_word4, tmo;
	bool flogi_in_retry = false;

	/* Check to see if link went down during discovery */
	if (lpfc_els_chk_latt(vport)) {
		/* One additional decrement on node reference count to
		 * trigger the release of the node
		 */
		if (!(ndlp->fc4_xpt_flags & SCSI_XPT_REGD))
			lpfc_nlp_put(ndlp);
		goto out;
	}

	ulp_status = get_job_ulpstatus(phba, rspiocb);
	ulp_word4 = get_job_word4(phba, rspiocb);

	if (phba->sli_rev == LPFC_SLI_REV4) {
		tmo = get_wqe_tmo(cmdiocb);
	} else {
		irsp = &rspiocb->iocb;
		tmo = irsp->ulpTimeout;
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			      "FLOGI cmpl: status:x%x/x%x state:x%x",
			      ulp_status, ulp_word4,
			      vport->port_state);

	if (ulp_status) {
		/*
		 * In case of FIP mode, perform roundrobin FCF failover
		 * due to new FCF discovery
		 */
		if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
		    (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
			if (phba->link_state < LPFC_LINK_UP)
				goto stop_rr_fcf_flogi;
			if ((phba->fcoe_cvl_eventtag_attn ==
			     phba->fcoe_cvl_eventtag) &&
			    (ulp_status == IOSTAT_LOCAL_REJECT) &&
			    ((ulp_word4 & IOERR_PARAM_MASK) ==
			     IOERR_SLI_ABORTED))
				goto stop_rr_fcf_flogi;
			else
				phba->fcoe_cvl_eventtag_attn =
					phba->fcoe_cvl_eventtag;
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
					"2611 FLOGI failed on FCF (x%x), "
					"status:x%x/x%x, tmo:x%x, perform "
					"roundrobin FCF failover\n",
					phba->fcf.current_rec.fcf_indx,
					ulp_status, ulp_word4, tmo);
			lpfc_sli4_set_fcf_flogi_fail(phba,
					phba->fcf.current_rec.fcf_indx);
			fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
			rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
			if (rc)
				goto out;
		}

stop_rr_fcf_flogi:
		/* FLOGI failure */
		if (!(ulp_status == IOSTAT_LOCAL_REJECT &&
		      ((ulp_word4 & IOERR_PARAM_MASK) ==
		       IOERR_LOOP_OPEN_FAILURE)))
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "2858 FLOGI failure Status:x%x/x%x TMO"
					 ":x%x Data x%x x%x\n",
					 ulp_status, ulp_word4, tmo,
					 phba->hba_flag, phba->fcf.fcf_flag);

		/* Check for retry */
		if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
			/* Address a timing race with dev_loss. If dev_loss
			 * is active on this FPort node, put the initial ref
			 * count back to stop premature node release actions.
			 */
			lpfc_check_nlp_post_devloss(vport, ndlp);
			flogi_in_retry = true;
			goto out;
		}

		/* The FLOGI will not be retried. If the FPort node is not
		 * registered with the SCSI transport, remove the initial
		 * reference to trigger node release.
		 */
		if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS) &&
		    !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD))
			lpfc_nlp_put(ndlp);

		lpfc_printf_vlog(vport, KERN_WARNING, LOG_TRACE_EVENT,
				 "0150 FLOGI failure Status:x%x/x%x "
				 "xri x%x TMO:x%x refcnt %d\n",
				 ulp_status, ulp_word4, cmdiocb->sli4_xritag,
				 tmo, kref_read(&ndlp->kref));

		/* If this is not a loop open failure, bail out */
		if (!(ulp_status == IOSTAT_LOCAL_REJECT &&
		      ((ulp_word4 & IOERR_PARAM_MASK) ==
		       IOERR_LOOP_OPEN_FAILURE))) {
			/* FLOGI failure */
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "0100 FLOGI failure Status:x%x/x%x "
					 "TMO:x%x\n",
					 ulp_status, ulp_word4, tmo);
			goto flogifail;
		}

		/* FLOGI failed, so there is no fabric */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP |
				    FC_PT2PT_NO_NVME);
		spin_unlock_irq(shost->host_lock);

		/* If private loop, then allow max outstanding els to be
		 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
		 * alpa map would take too long otherwise.
		 */
		if (phba->alpa_map[0] == 0)
			vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
		if ((phba->sli_rev == LPFC_SLI_REV4) &&
		    (!(vport->fc_flag & FC_VFI_REGISTERED) ||
		     (vport->fc_prevDID != vport->fc_myDID) ||
		     phba->fc_topology_changed)) {
			if (vport->fc_flag & FC_VFI_REGISTERED) {
				if (phba->fc_topology_changed) {
					lpfc_unregister_fcf_prep(phba);
					spin_lock_irq(shost->host_lock);
					vport->fc_flag &= ~FC_VFI_REGISTERED;
					spin_unlock_irq(shost->host_lock);
					phba->fc_topology_changed = 0;
				} else {
					lpfc_sli4_unreg_all_rpis(vport);
				}
			}

			/* Do not register VFI if the driver aborted FLOGI */
			if (!lpfc_error_lost_link(ulp_status, ulp_word4))
				lpfc_issue_reg_vfi(vport);

			lpfc_nlp_put(ndlp);
			goto out;
		}
		goto flogifail;
	}
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
	vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
	spin_unlock_irq(shost->host_lock);

	/*
	 * The FLOGI succeeded. Sync the data for the CPU before
	 * accessing it.
	 */
	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
	if (!prsp)
		goto out;
	sp = prsp->virt + sizeof(uint32_t);

	/* FLOGI completes successfully */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0101 FLOGI completes successfully, I/O tag:x%x "
			 "xri x%x Data: x%x x%x x%x x%x x%x x%x x%x %d\n",
			 cmdiocb->iotag, cmdiocb->sli4_xritag,
			 ulp_word4, sp->cmn.e_d_tov,
			 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
			 vport->port_state, vport->fc_flag,
			 sp->cmn.priority_tagging, kref_read(&ndlp->kref));

	if (sp->cmn.priority_tagging)
		vport->phba->pport->vmid_flag |= (LPFC_VMID_ISSUE_QFPA |
						  LPFC_VMID_TYPE_PRIO);

	/*
	 * Address a timing race with dev_loss. If dev_loss is active on
	 * this FPort node, put the initial ref count back to stop premature
	 * node release actions.
	 */
	lpfc_check_nlp_post_devloss(vport, ndlp);
	if (vport->port_state == LPFC_FLOGI) {
		/*
		 * If Common Service Parameters indicate Nport
		 * we are point to point, if Fport we are Fabric.
		 */
		if (sp->cmn.fPort)
			rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp,
							ulp_word4);
		else if (!(phba->hba_flag & HBA_FCOE_MODE))
			rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
		else {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				"2831 FLOGI response with cleared Fabric "
				"bit fcf_index 0x%x "
				"Switch Name %02x%02x%02x%02x%02x%02x%02x%02x "
				"Fabric Name "
				"%02x%02x%02x%02x%02x%02x%02x%02x\n",
				phba->fcf.current_rec.fcf_indx,
				phba->fcf.current_rec.switch_name[0],
				phba->fcf.current_rec.switch_name[1],
				phba->fcf.current_rec.switch_name[2],
				phba->fcf.current_rec.switch_name[3],
				phba->fcf.current_rec.switch_name[4],
				phba->fcf.current_rec.switch_name[5],
				phba->fcf.current_rec.switch_name[6],
				phba->fcf.current_rec.switch_name[7],
				phba->fcf.current_rec.fabric_name[0],
				phba->fcf.current_rec.fabric_name[1],
				phba->fcf.current_rec.fabric_name[2],
				phba->fcf.current_rec.fabric_name[3],
				phba->fcf.current_rec.fabric_name[4],
				phba->fcf.current_rec.fabric_name[5],
				phba->fcf.current_rec.fabric_name[6],
				phba->fcf.current_rec.fabric_name[7]);

			lpfc_nlp_put(ndlp);
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
			spin_unlock_irq(&phba->hbalock);
			phba->fcf.fcf_redisc_attempted = 0; /* reset */
			goto out;
		}
		if (!rc) {
			/* Mark the FCF discovery process done */
			if (phba->hba_flag & HBA_FIP_SUPPORT)
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
						 LOG_ELS,
						 "2769 FLOGI to FCF (x%x) "
						 "completed successfully\n",
						 phba->fcf.current_rec.fcf_indx);
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
			spin_unlock_irq(&phba->hbalock);
			phba->fcf.fcf_redisc_attempted = 0; /* reset */
			goto out;
		}
	} else if (vport->port_state > LPFC_FLOGI &&
		   vport->fc_flag & FC_PT2PT) {
		/*
		 * In a p2p topology, it is possible that discovery has
		 * already progressed, and this completion can be ignored.
		 * Recheck the indicated topology.
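		 * If the F_Port bit is still clear, the earlier point-to-point
		 * handling stands and this completion is simply ignored.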
		 */
		if (!sp->cmn.fPort)
			goto out;
	}

flogifail:
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
	spin_unlock_irq(&phba->hbalock);

	if (!lpfc_error_lost_link(ulp_status, ulp_word4)) {
		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(vport);

		/* Start discovery */
		lpfc_disc_start(vport);
	} else if (((ulp_status != IOSTAT_LOCAL_REJECT) ||
		    (((ulp_word4 & IOERR_PARAM_MASK) !=
		      IOERR_SLI_ABORTED) &&
		     ((ulp_word4 & IOERR_PARAM_MASK) !=
		      IOERR_SLI_DOWN))) &&
		   (phba->link_state != LPFC_CLEAR_LA)) {
		/* If FLOGI failed enable link interrupt. */
		lpfc_issue_clear_la(phba, vport);
	}
out:
	if (!flogi_in_retry)
		phba->hba_flag &= ~HBA_FLOGI_OUTSTANDING;

	lpfc_els_free_iocb(phba, cmdiocb);
	lpfc_nlp_put(ndlp);
}

/**
 * lpfc_cmpl_els_link_down - Completion callback function for ELS command
 *                           aborted during a link down
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 */
static void
lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	uint32_t *pcmd;
	uint32_t cmd;
	u32 ulp_status, ulp_word4;

	pcmd = (uint32_t *)cmdiocb->cmd_dmabuf->virt;
	cmd = *pcmd;

	ulp_status = get_job_ulpstatus(phba, rspiocb);
	ulp_word4 = get_job_word4(phba, rspiocb);

	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"6445 ELS completes after LINK_DOWN: "
			" Status %x/%x cmd x%x flg x%x\n",
			ulp_status, ulp_word4, cmd,
			cmdiocb->cmd_flag);

	if (cmdiocb->cmd_flag & LPFC_IO_FABRIC) {
		cmdiocb->cmd_flag &= ~LPFC_IO_FABRIC;
		atomic_dec(&phba->fabric_iocb_count);
	}
	lpfc_els_free_iocb(phba, cmdiocb);
}

/**
 * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @retry: number of retries to the command IOCB.
 *
 * This routine issues a Fabric Login (FLOGI) Request ELS command
 * for a @vport. The initiator service parameters are put into the payload
 * of the FLOGI Request IOCB and the top-level callback function pointer
 * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback
 * function field. The lpfc_issue_fabric_iocb routine is invoked to send
 * out the FLOGI ELS command with one outstanding fabric IOCB at a time.
 *
 * Note that the ndlp reference count will be incremented by 1 for holding the
 * ndlp and the reference to ndlp will be stored into the ndlp field of
 * the IOCB for the completion callback function to the FLOGI ELS command.
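 * Additionally, if an earlier received FLOGI has its ACC deferred
 * (defer_flogi_acc_flag is set), the deferred FLOGI ACC is transmitted once
 * this FLOGI request has been issued.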
 *
 * Return code
 *   0 - successfully issued flogi iocb for @vport
 *   1 - failed to issue flogi iocb for @vport
 **/
static int
lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		     uint8_t retry)
{
	struct lpfc_hba *phba = vport->phba;
	struct serv_parm *sp;
	union lpfc_wqe128 *wqe = NULL;
	IOCB_t *icmd = NULL;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_iocbq defer_flogi_acc;
	u8 *pcmd, ct;
	uint16_t cmdsize;
	uint32_t tmo, did;
	int rc;

	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
				     ndlp->nlp_DID, ELS_CMD_FLOGI);

	if (!elsiocb)
		return 1;

	wqe = &elsiocb->wqe;
	pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
	icmd = &elsiocb->iocb;

	/* For FLOGI request, remainder of payload is service parameters */
	*((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
	pcmd += sizeof(uint32_t);
	memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
	sp = (struct serv_parm *) pcmd;

	/* Setup CSPs accordingly for Fabric */
	sp->cmn.e_d_tov = 0;
	sp->cmn.w2.r_a_tov = 0;
	sp->cmn.virtual_fabric_support = 0;
	sp->cls1.classValid = 0;
	if (sp->cmn.fcphLow < FC_PH3)
		sp->cmn.fcphLow = FC_PH3;
	if (sp->cmn.fcphHigh < FC_PH3)
		sp->cmn.fcphHigh = FC_PH3;

	/* Determine if switch supports priority tagging */
	if (phba->cfg_vmid_priority_tagging) {
		sp->cmn.priority_tagging = 1;
		/* lpfc_vmid_host_uuid is combination of wwpn and wwnn */
		if (uuid_is_null((uuid_t *)vport->lpfc_vmid_host_uuid)) {
			memcpy(vport->lpfc_vmid_host_uuid, phba->wwpn,
			       sizeof(phba->wwpn));
			memcpy(&vport->lpfc_vmid_host_uuid[8], phba->wwnn,
			       sizeof(phba->wwnn));
		}
	}

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
		    LPFC_SLI_INTF_IF_TYPE_0) {
			/* FLOGI needs to be 3 for WQE FCFI */
			ct = SLI4_CT_FCFI;
			bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);

			/* Set the fcfi to the fcfi we registered with */
			bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
			       phba->fcf.fcfi);
		}

		/* Can't do SLI4 class2 without support sequence coalescing */
		sp->cls2.classValid = 0;
		sp->cls2.seqDelivery = 0;
	} else {
		/* Historical, setting sequential-delivery bit for SLI3 */
		sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0;
		sp->cls3.seqDelivery = (sp->cls3.classValid) ? 1 : 0;
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
			sp->cmn.request_multiple_Nport = 1;
			/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
			icmd->ulpCt_h = 1;
			icmd->ulpCt_l = 0;
		} else {
			sp->cmn.request_multiple_Nport = 0;
		}

		if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
			icmd->un.elsreq64.myID = 0;
			icmd->un.elsreq64.fl = 1;
		}
	}

	tmo = phba->fc_ratov;
	phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
	lpfc_set_disctmo(vport);
	phba->fc_ratov = tmo;

	phba->fc_stat.elsXmitFLOGI++;
	elsiocb->cmd_cmpl = lpfc_cmpl_els_flogi;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			      "Issue FLOGI: opt:x%x",
			      phba->sli3_options, 0, 0);

	elsiocb->ndlp = lpfc_nlp_get(ndlp);
	if (!elsiocb->ndlp) {
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
	}

	/* Avoid race with FLOGI completion and hba_flags.
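	 * Set both flags before the IOCB is issued so the completion handler
	 * observes them.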
	 */
	phba->hba_flag |= (HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING);

	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
	if (rc == IOCB_ERROR) {
		phba->hba_flag &= ~(HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING);
		lpfc_els_free_iocb(phba, elsiocb);
		lpfc_nlp_put(ndlp);
		return 1;
	}

	/* Clear external loopback plug detected flag */
	phba->link_flag &= ~LS_EXTERNAL_LOOPBACK;

	/* Check for a deferred FLOGI ACC condition */
	if (phba->defer_flogi_acc_flag) {
		/* lookup ndlp for received FLOGI */
		ndlp = lpfc_findnode_did(vport, 0);
		if (!ndlp)
			return 0;

		did = vport->fc_myDID;
		vport->fc_myDID = Fabric_DID;

		memset(&defer_flogi_acc, 0, sizeof(struct lpfc_iocbq));

		if (phba->sli_rev == LPFC_SLI_REV4) {
			bf_set(wqe_ctxt_tag,
			       &defer_flogi_acc.wqe.xmit_els_rsp.wqe_com,
			       phba->defer_flogi_acc_rx_id);
			bf_set(wqe_rcvoxid,
			       &defer_flogi_acc.wqe.xmit_els_rsp.wqe_com,
			       phba->defer_flogi_acc_ox_id);
		} else {
			icmd = &defer_flogi_acc.iocb;
			icmd->ulpContext = phba->defer_flogi_acc_rx_id;
			icmd->unsli3.rcvsli3.ox_id =
				phba->defer_flogi_acc_ox_id;
		}

		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "3354 Xmit deferred FLOGI ACC: rx_id: x%x,"
				 " ox_id: x%x, hba_flag x%x\n",
				 phba->defer_flogi_acc_rx_id,
				 phba->defer_flogi_acc_ox_id, phba->hba_flag);

		/* Send deferred FLOGI ACC */
		lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, &defer_flogi_acc,
				 ndlp, NULL);

		phba->defer_flogi_acc_flag = false;
		vport->fc_myDID = did;

		/* Decrement ndlp reference count to indicate the node can be
		 * released when other references are removed.
		 */
		lpfc_nlp_put(ndlp);
	}

	return 0;
}

/**
 * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs
 * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq
 * list and issues an abort IOCB command on each outstanding IOCB that
 * contains an active Fabric_DID ndlp. Note that this function is to issue
 * the abort IOCB command on all the outstanding IOCBs, thus when this
 * function returns, it does not guarantee all the IOCBs are actually aborted.
 *
 * Return code
 *   0 - Successfully issued abort iocb on all outstanding flogis (Always 0)
 **/
int
lpfc_els_abort_flogi(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	struct lpfc_nodelist *ndlp;
	u32 ulp_command;

	/* Abort outstanding I/O on NPort <nlp_DID> */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"0201 Abort outstanding I/O on NPort x%x\n",
			Fabric_DID);

	pring = lpfc_phba_elsring(phba);
	if (unlikely(!pring))
		return -EIO;

	/*
	 * Check the txcmplq for an iocb that matches the nport the driver is
	 * searching for.
	 */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		ulp_command = get_job_cmnd(phba, iocb);
		if (ulp_command == CMD_ELS_REQUEST64_CR) {
			ndlp = iocb->ndlp;
			if (ndlp && ndlp->nlp_DID == Fabric_DID) {
				if ((phba->pport->fc_flag & FC_PT2PT) &&
				    !(phba->pport->fc_flag & FC_PT2PT_PLOGI))
					iocb->fabric_cmd_cmpl =
						lpfc_ignore_els_cmpl;
				lpfc_sli_issue_abort_iotag(phba, pring, iocb,
							   NULL);
			}
		}
	}
	/* Make sure HBA is alive */
	lpfc_issue_hb_tmo(phba);

	spin_unlock_irq(&phba->hbalock);

	return 0;
}

/**
 * lpfc_initial_flogi - Issue an initial fabric login for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues an initial Fabric Login (FLOGI) for the @vport
 * specified. It first searches for the ndlp with the Fabric_DID (0xfffffe)
 * on the @vport's ndlp list. If no such ndlp is found, it will create an
 * ndlp and put it into the @vport's ndlp list. If an inactive ndlp is found
 * on the list, it will just be enabled and made active. The
 * lpfc_issue_els_flogi() routine is then invoked with the @vport and the
 * ndlp to perform the FLOGI for the @vport.
 *
 * Return code
 *   0 - failed to issue initial flogi for @vport
 *   1 - successfully issued initial flogi for @vport
 **/
int
lpfc_initial_flogi(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	vport->port_state = LPFC_FLOGI;
	lpfc_set_disctmo(vport);

	/* First look for the Fabric ndlp */
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = lpfc_nlp_init(vport, Fabric_DID);
		if (!ndlp)
			return 0;
		/* Set the node type */
		ndlp->nlp_type |= NLP_FABRIC;

		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	}

	/* Reset the Fabric flag, topology change may have happened */
	vport->fc_flag &= ~FC_FABRIC;
	if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
		/* A node reference should be retained while registered with a
		 * transport or dev-loss-evt work is pending.
		 * Otherwise, decrement node reference to trigger release.
		 */
		if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) &&
		    !(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
			lpfc_nlp_put(ndlp);
		return 0;
	}
	return 1;
}

/**
 * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues an initial Fabric Discover (FDISC) for the @vport
 * specified. It first searches for the ndlp with the Fabric_DID (0xfffffe)
 * on the @vport's ndlp list. If no such ndlp is found, it will create an
 * ndlp and put it into the @vport's ndlp list. If an inactive ndlp is found
 * on the list, it will just be enabled and made active. The
 * lpfc_issue_els_fdisc() routine is then invoked with the @vport and the
 * ndlp to perform the FDISC for the @vport.
 *
 * Return code
 *   0 - failed to issue initial fdisc for @vport
 *   1 - successfully issued initial fdisc for @vport
 **/
int
lpfc_initial_fdisc(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	/* First look for the Fabric ndlp */
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = lpfc_nlp_init(vport, Fabric_DID);
		if (!ndlp)
			return 0;

		/* NPIV is only supported in Fabrics. */
		ndlp->nlp_type |= NLP_FABRIC;

		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	}

	if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
		/* A node reference should be retained while registered with a
		 * transport or dev-loss-evt work is pending.
		 * Otherwise, decrement node reference to trigger release.
		 */
		if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) &&
		    !(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
			lpfc_nlp_put(ndlp);
		return 0;
	}
	return 1;
}

/**
 * lpfc_more_plogi - Check and issue remaining plogis for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine checks whether there are more remaining Port Logins
 * (PLOGI) to be issued for the @vport. If so, it will invoke the routine
 * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes
 * to issue ELS PLOGIs up to the configured discover threads with the
 * @vport (@vport->cfg_discovery_threads). The function also decrements
 * the @vport's num_disc_nodes by 1 if it is not already 0.
 **/
void
lpfc_more_plogi(struct lpfc_vport *vport)
{
	if (vport->num_disc_nodes)
		vport->num_disc_nodes--;

	/* Continue discovery with <num_disc_nodes> PLOGIs to go */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0232 Continue discovery with %d PLOGIs to go "
			 "Data: x%x x%x x%x\n",
			 vport->num_disc_nodes, vport->fc_plogi_cnt,
			 vport->fc_flag, vport->port_state);
	/* Check to see if there are more PLOGIs to be sent */
	if (vport->fc_flag & FC_NLP_MORE)
		/* go thru NPR nodes and issue any remaining ELS PLOGIs */
		lpfc_els_disc_plogi(vport);

	return;
}

/**
 * lpfc_plogi_confirm_nport - Confirm plogi wwpn matches stored ndlp
 * @phba: pointer to lpfc hba data structure.
 * @prsp: pointer to response IOCB payload.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine checks and indicates whether the WWPN of an N_Port, retrieved
 * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_Port.
 * The following cases are considered N_Port confirmed:
 * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on the vport list and
 * matches the WWPN of the N_Port logged into; 3) The @ndlp is not on the
 * vport list but it does not have a WWPN assigned either. If the WWPN is
 * confirmed, the pointer to the @ndlp will be returned. If the WWPN is not
 * confirmed: 1) if there is a node on the vport list other than the @ndlp
 * with the same WWPN as the N_Port PLOGI logged into, lpfc_unreg_rpi() will
 * be invoked on that node to release the RPI associated with the node; 2) if
 * there is no node found on the vport list with the same WWPN as the N_Port
 * PLOGI logged into, a new node shall be allocated (or activated).
In either case, the 1663 * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall 1664 * be released and the new_ndlp shall be put on to the vport node list and 1665 * its pointer returned as the confirmed node. 1666 * 1667 * Note that before the @ndlp got "released", the keepDID from not-matching 1668 * or inactive "new_ndlp" on the vport node list is assigned to the nlp_DID 1669 * of the @ndlp. This is because the release of @ndlp is actually to put it 1670 * into an inactive state on the vport node list and the vport node list 1671 * management algorithm does not allow two node with a same DID. 1672 * 1673 * Return code 1674 * pointer to the PLOGI N_Port @ndlp 1675 **/ 1676 static struct lpfc_nodelist * 1677 lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, 1678 struct lpfc_nodelist *ndlp) 1679 { 1680 struct lpfc_vport *vport = ndlp->vport; 1681 struct lpfc_nodelist *new_ndlp; 1682 struct serv_parm *sp; 1683 uint8_t name[sizeof(struct lpfc_name)]; 1684 uint32_t keepDID = 0, keep_nlp_flag = 0; 1685 uint32_t keep_new_nlp_flag = 0; 1686 uint16_t keep_nlp_state; 1687 u32 keep_nlp_fc4_type = 0; 1688 struct lpfc_nvme_rport *keep_nrport = NULL; 1689 unsigned long *active_rrqs_xri_bitmap = NULL; 1690 1691 /* Fabric nodes can have the same WWPN so we don't bother searching 1692 * by WWPN. Just return the ndlp that was given to us. 1693 */ 1694 if (ndlp->nlp_type & NLP_FABRIC) 1695 return ndlp; 1696 1697 sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t)); 1698 memset(name, 0, sizeof(struct lpfc_name)); 1699 1700 /* Now we find out if the NPort we are logging into, matches the WWPN 1701 * we have for that ndlp. If not, we have some work to do. 1702 */ 1703 new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName); 1704 1705 /* return immediately if the WWPN matches ndlp */ 1706 if (!new_ndlp || (new_ndlp == ndlp)) 1707 return ndlp; 1708 1709 /* 1710 * Unregister from backend if not done yet. Could have been skipped 1711 * due to ADISC 1712 */ 1713 lpfc_nlp_unreg_node(vport, new_ndlp); 1714 1715 if (phba->sli_rev == LPFC_SLI_REV4) { 1716 active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool, 1717 GFP_KERNEL); 1718 if (active_rrqs_xri_bitmap) 1719 memset(active_rrqs_xri_bitmap, 0, 1720 phba->cfg_rrq_xri_bitmap_sz); 1721 } 1722 1723 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE, 1724 "3178 PLOGI confirm: ndlp x%x x%x x%x: " 1725 "new_ndlp x%x x%x x%x\n", 1726 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_fc4_type, 1727 (new_ndlp ? new_ndlp->nlp_DID : 0), 1728 (new_ndlp ? new_ndlp->nlp_flag : 0), 1729 (new_ndlp ? new_ndlp->nlp_fc4_type : 0)); 1730 1731 keepDID = new_ndlp->nlp_DID; 1732 1733 if (phba->sli_rev == LPFC_SLI_REV4 && active_rrqs_xri_bitmap) 1734 memcpy(active_rrqs_xri_bitmap, new_ndlp->active_rrqs_xri_bitmap, 1735 phba->cfg_rrq_xri_bitmap_sz); 1736 1737 /* At this point in this routine, we know new_ndlp will be 1738 * returned. however, any previous GID_FTs that were done 1739 * would have updated nlp_fc4_type in ndlp, so we must ensure 1740 * new_ndlp has the right value. 
1741 */ 1742 if (vport->fc_flag & FC_FABRIC) { 1743 keep_nlp_fc4_type = new_ndlp->nlp_fc4_type; 1744 new_ndlp->nlp_fc4_type = ndlp->nlp_fc4_type; 1745 } 1746 1747 lpfc_unreg_rpi(vport, new_ndlp); 1748 new_ndlp->nlp_DID = ndlp->nlp_DID; 1749 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state; 1750 if (phba->sli_rev == LPFC_SLI_REV4) 1751 memcpy(new_ndlp->active_rrqs_xri_bitmap, 1752 ndlp->active_rrqs_xri_bitmap, 1753 phba->cfg_rrq_xri_bitmap_sz); 1754 1755 /* Lock both ndlps */ 1756 spin_lock_irq(&ndlp->lock); 1757 spin_lock_irq(&new_ndlp->lock); 1758 keep_new_nlp_flag = new_ndlp->nlp_flag; 1759 keep_nlp_flag = ndlp->nlp_flag; 1760 new_ndlp->nlp_flag = ndlp->nlp_flag; 1761 1762 /* if new_ndlp had NLP_UNREG_INP set, keep it */ 1763 if (keep_new_nlp_flag & NLP_UNREG_INP) 1764 new_ndlp->nlp_flag |= NLP_UNREG_INP; 1765 else 1766 new_ndlp->nlp_flag &= ~NLP_UNREG_INP; 1767 1768 /* if new_ndlp had NLP_RPI_REGISTERED set, keep it */ 1769 if (keep_new_nlp_flag & NLP_RPI_REGISTERED) 1770 new_ndlp->nlp_flag |= NLP_RPI_REGISTERED; 1771 else 1772 new_ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; 1773 1774 /* 1775 * Retain the DROPPED flag. This will take care of the init 1776 * refcount when affecting the state change 1777 */ 1778 if (keep_new_nlp_flag & NLP_DROPPED) 1779 new_ndlp->nlp_flag |= NLP_DROPPED; 1780 else 1781 new_ndlp->nlp_flag &= ~NLP_DROPPED; 1782 1783 ndlp->nlp_flag = keep_new_nlp_flag; 1784 1785 /* if ndlp had NLP_UNREG_INP set, keep it */ 1786 if (keep_nlp_flag & NLP_UNREG_INP) 1787 ndlp->nlp_flag |= NLP_UNREG_INP; 1788 else 1789 ndlp->nlp_flag &= ~NLP_UNREG_INP; 1790 1791 /* if ndlp had NLP_RPI_REGISTERED set, keep it */ 1792 if (keep_nlp_flag & NLP_RPI_REGISTERED) 1793 ndlp->nlp_flag |= NLP_RPI_REGISTERED; 1794 else 1795 ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; 1796 1797 /* 1798 * Retain the DROPPED flag. This will take care of the init 1799 * refcount when affecting the state change 1800 */ 1801 if (keep_nlp_flag & NLP_DROPPED) 1802 ndlp->nlp_flag |= NLP_DROPPED; 1803 else 1804 ndlp->nlp_flag &= ~NLP_DROPPED; 1805 1806 spin_unlock_irq(&new_ndlp->lock); 1807 spin_unlock_irq(&ndlp->lock); 1808 1809 /* Set nlp_states accordingly */ 1810 keep_nlp_state = new_ndlp->nlp_state; 1811 lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state); 1812 1813 /* interchange the nvme remoteport structs */ 1814 keep_nrport = new_ndlp->nrport; 1815 new_ndlp->nrport = ndlp->nrport; 1816 1817 /* Move this back to NPR state */ 1818 if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) { 1819 /* The ndlp doesn't have a portname yet, but does have an 1820 * NPort ID. The new_ndlp portname matches the Rport's 1821 * portname. Reinstantiate the new_ndlp and reset the ndlp. 1822 */ 1823 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1824 "3179 PLOGI confirm NEW: %x %x\n", 1825 new_ndlp->nlp_DID, keepDID); 1826 1827 /* Two ndlps cannot have the same did on the nodelist. 1828 * The KeepDID and keep_nlp_fc4_type need to be swapped 1829 * because ndlp is inflight with no WWPN. 
1830 */ 1831 ndlp->nlp_DID = keepDID; 1832 ndlp->nlp_fc4_type = keep_nlp_fc4_type; 1833 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state); 1834 if (phba->sli_rev == LPFC_SLI_REV4 && 1835 active_rrqs_xri_bitmap) 1836 memcpy(ndlp->active_rrqs_xri_bitmap, 1837 active_rrqs_xri_bitmap, 1838 phba->cfg_rrq_xri_bitmap_sz); 1839 1840 } else { 1841 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1842 "3180 PLOGI confirm SWAP: %x %x\n", 1843 new_ndlp->nlp_DID, keepDID); 1844 1845 lpfc_unreg_rpi(vport, ndlp); 1846 1847 /* The ndlp and new_ndlp both have WWPNs but are swapping 1848 * NPort Ids and attributes. 1849 */ 1850 ndlp->nlp_DID = keepDID; 1851 ndlp->nlp_fc4_type = keep_nlp_fc4_type; 1852 1853 if (phba->sli_rev == LPFC_SLI_REV4 && 1854 active_rrqs_xri_bitmap) 1855 memcpy(ndlp->active_rrqs_xri_bitmap, 1856 active_rrqs_xri_bitmap, 1857 phba->cfg_rrq_xri_bitmap_sz); 1858 1859 /* Since we are switching over to the new_ndlp, 1860 * reset the old ndlp state 1861 */ 1862 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || 1863 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) 1864 keep_nlp_state = NLP_STE_NPR_NODE; 1865 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state); 1866 ndlp->nrport = keep_nrport; 1867 } 1868 1869 /* 1870 * If ndlp is not associated with any rport we can drop it here else 1871 * let dev_loss_tmo_callbk trigger DEVICE_RM event 1872 */ 1873 if (!ndlp->rport && (ndlp->nlp_state == NLP_STE_NPR_NODE)) 1874 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 1875 1876 if (phba->sli_rev == LPFC_SLI_REV4 && 1877 active_rrqs_xri_bitmap) 1878 mempool_free(active_rrqs_xri_bitmap, 1879 phba->active_rrq_pool); 1880 1881 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE, 1882 "3173 PLOGI confirm exit: new_ndlp x%x x%x x%x\n", 1883 new_ndlp->nlp_DID, new_ndlp->nlp_flag, 1884 new_ndlp->nlp_fc4_type); 1885 1886 return new_ndlp; 1887 } 1888 1889 /** 1890 * lpfc_end_rscn - Check and handle more rscn for a vport 1891 * @vport: pointer to a host virtual N_Port data structure. 1892 * 1893 * This routine checks whether more Registration State Change 1894 * Notifications (RSCNs) came in while the discovery state machine was in 1895 * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be 1896 * invoked to handle the additional RSCNs for the @vport. Otherwise, the 1897 * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of 1898 * handling the RSCNs. 1899 **/ 1900 void 1901 lpfc_end_rscn(struct lpfc_vport *vport) 1902 { 1903 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1904 1905 if (vport->fc_flag & FC_RSCN_MODE) { 1906 /* 1907 * Check to see if more RSCNs came in while we were 1908 * processing this one. 1909 */ 1910 if (vport->fc_rscn_id_cnt || 1911 (vport->fc_flag & FC_RSCN_DISCOVERY) != 0) 1912 lpfc_els_handle_rscn(vport); 1913 else { 1914 spin_lock_irq(shost->host_lock); 1915 vport->fc_flag &= ~FC_RSCN_MODE; 1916 spin_unlock_irq(shost->host_lock); 1917 } 1918 } 1919 } 1920 1921 /** 1922 * lpfc_cmpl_els_rrq - Completion handled for els RRQs. 1923 * @phba: pointer to lpfc hba data structure. 1924 * @cmdiocb: pointer to lpfc command iocb data structure. 1925 * @rspiocb: pointer to lpfc response iocb data structure. 1926 * 1927 * This routine will call the clear rrq function to free the rrq and 1928 * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not 1929 * exist then the clear_rrq is still called because the rrq needs to 1930 * be freed. 
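 *
 * For reference, the issuing side (assumed to be lpfc_issue_els_rrq(), not
 * shown here) is expected to stash the rrq context and install this routine
 * roughly as follows (sketch only):
 *
 *   elsiocb->context_un.rrq = rrq;
 *   elsiocb->cmd_cmpl = lpfc_cmpl_els_rrq;
 *   elsiocb->ndlp = lpfc_nlp_get(ndlp);
 *   rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);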
1931 **/ 1932 1933 static void 1934 lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1935 struct lpfc_iocbq *rspiocb) 1936 { 1937 struct lpfc_vport *vport = cmdiocb->vport; 1938 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 1939 struct lpfc_node_rrq *rrq; 1940 u32 ulp_status = get_job_ulpstatus(phba, rspiocb); 1941 u32 ulp_word4 = get_job_word4(phba, rspiocb); 1942 1943 /* we pass cmdiocb to state machine which needs rspiocb as well */ 1944 rrq = cmdiocb->context_un.rrq; 1945 cmdiocb->rsp_iocb = rspiocb; 1946 1947 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1948 "RRQ cmpl: status:x%x/x%x did:x%x", 1949 ulp_status, ulp_word4, 1950 get_job_els_rsp64_did(phba, cmdiocb)); 1951 1952 1953 /* rrq completes to NPort <nlp_DID> */ 1954 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1955 "2880 RRQ completes to DID x%x " 1956 "Data: x%x x%x x%x x%x x%x\n", 1957 ndlp->nlp_DID, ulp_status, ulp_word4, 1958 get_wqe_tmo(cmdiocb), rrq->xritag, rrq->rxid); 1959 1960 if (ulp_status) { 1961 /* Check for retry */ 1962 /* RRQ failed. Don't print the vport to vport rjts */ 1963 if (ulp_status != IOSTAT_LS_RJT || 1964 (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) && 1965 ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) || 1966 (phba)->pport->cfg_log_verbose & LOG_ELS) 1967 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1968 "2881 RRQ failure DID:%06X Status:" 1969 "x%x/x%x\n", 1970 ndlp->nlp_DID, ulp_status, 1971 ulp_word4); 1972 } 1973 1974 lpfc_clr_rrq_active(phba, rrq->xritag, rrq); 1975 lpfc_els_free_iocb(phba, cmdiocb); 1976 lpfc_nlp_put(ndlp); 1977 return; 1978 } 1979 /** 1980 * lpfc_cmpl_els_plogi - Completion callback function for plogi 1981 * @phba: pointer to lpfc hba data structure. 1982 * @cmdiocb: pointer to lpfc command iocb data structure. 1983 * @rspiocb: pointer to lpfc response iocb data structure. 1984 * 1985 * This routine is the completion callback function for issuing the Port 1986 * Login (PLOGI) command. For PLOGI completion, there must be an active 1987 * ndlp on the vport node list that matches the remote node ID from the 1988 * PLOGI response IOCB. If no such ndlp exists, the PLOGI is simply 1989 * ignored and the command IOCB is released. The PLOGI response IOCB status is 1990 * checked for error conditions. If an error status is reported, PLOGI 1991 * retry shall be attempted by invoking the lpfc_els_retry() routine. 1992 * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on 1993 * the ndlp and the NLP_EVT_CMPL_PLOGI event is sent to the Discover State 1994 * Machine (DSM) for this PLOGI completion. Finally, it checks whether 1995 * there are additional N_Port nodes with the vport that need to perform 1996 * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue additional 1997 * PLOGIs.
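 *
 * For reference, lpfc_issue_els_plogi() installs this routine as the
 * completion handler roughly as follows (abridged from that routine):
 *
 *   phba->fc_stat.elsXmitPLOGI++;
 *   elsiocb->cmd_cmpl = lpfc_cmpl_els_plogi;
 *   elsiocb->ndlp = lpfc_nlp_get(ndlp);
 *   ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);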
1998 **/ 1999 static void 2000 lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2001 struct lpfc_iocbq *rspiocb) 2002 { 2003 struct lpfc_vport *vport = cmdiocb->vport; 2004 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2005 IOCB_t *irsp; 2006 struct lpfc_nodelist *ndlp, *free_ndlp; 2007 struct lpfc_dmabuf *prsp; 2008 int disc; 2009 struct serv_parm *sp = NULL; 2010 u32 ulp_status, ulp_word4, did, iotag; 2011 bool release_node = false; 2012 2013 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2014 cmdiocb->rsp_iocb = rspiocb; 2015 2016 ulp_status = get_job_ulpstatus(phba, rspiocb); 2017 ulp_word4 = get_job_word4(phba, rspiocb); 2018 did = get_job_els_rsp64_did(phba, cmdiocb); 2019 2020 if (phba->sli_rev == LPFC_SLI_REV4) { 2021 iotag = get_wqe_reqtag(cmdiocb); 2022 } else { 2023 irsp = &rspiocb->iocb; 2024 iotag = irsp->ulpIoTag; 2025 } 2026 2027 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2028 "PLOGI cmpl: status:x%x/x%x did:x%x", 2029 ulp_status, ulp_word4, did); 2030 2031 ndlp = lpfc_findnode_did(vport, did); 2032 if (!ndlp) { 2033 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2034 "0136 PLOGI completes to NPort x%x " 2035 "with no ndlp. Data: x%x x%x x%x\n", 2036 did, ulp_status, ulp_word4, iotag); 2037 goto out_freeiocb; 2038 } 2039 2040 /* Since ndlp can be freed in the disc state machine, note if this node 2041 * is being used during discovery. 2042 */ 2043 spin_lock_irq(&ndlp->lock); 2044 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 2045 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2046 spin_unlock_irq(&ndlp->lock); 2047 2048 /* PLOGI completes to NPort <nlp_DID> */ 2049 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2050 "0102 PLOGI completes to NPort x%06x " 2051 "Data: x%x x%x x%x x%x x%x\n", 2052 ndlp->nlp_DID, ndlp->nlp_fc4_type, 2053 ulp_status, ulp_word4, 2054 disc, vport->num_disc_nodes); 2055 2056 /* Check to see if link went down during discovery */ 2057 if (lpfc_els_chk_latt(vport)) { 2058 spin_lock_irq(&ndlp->lock); 2059 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2060 spin_unlock_irq(&ndlp->lock); 2061 goto out; 2062 } 2063 2064 if (ulp_status) { 2065 /* Check for retry */ 2066 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2067 /* ELS command is being retried */ 2068 if (disc) { 2069 spin_lock_irq(&ndlp->lock); 2070 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2071 spin_unlock_irq(&ndlp->lock); 2072 } 2073 goto out; 2074 } 2075 /* PLOGI failed Don't print the vport to vport rjts */ 2076 if (ulp_status != IOSTAT_LS_RJT || 2077 (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) && 2078 ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) || 2079 (phba)->pport->cfg_log_verbose & LOG_ELS) 2080 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2081 "2753 PLOGI failure DID:%06X " 2082 "Status:x%x/x%x\n", 2083 ndlp->nlp_DID, ulp_status, 2084 ulp_word4); 2085 2086 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2087 if (!lpfc_error_lost_link(ulp_status, ulp_word4)) 2088 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2089 NLP_EVT_CMPL_PLOGI); 2090 2091 /* If a PLOGI collision occurred, the node needs to continue 2092 * with the reglogin process. 2093 */ 2094 spin_lock_irq(&ndlp->lock); 2095 if ((ndlp->nlp_flag & (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI)) && 2096 ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) { 2097 spin_unlock_irq(&ndlp->lock); 2098 goto out; 2099 } 2100 2101 /* No PLOGI collision and the node is not registered with the 2102 * scsi or nvme transport. It is no longer an active node. Just 2103 * start the device remove process. 
2104 */ 2105 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { 2106 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2107 if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) 2108 release_node = true; 2109 } 2110 spin_unlock_irq(&ndlp->lock); 2111 2112 if (release_node) 2113 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2114 NLP_EVT_DEVICE_RM); 2115 } else { 2116 /* Good status, call state machine */ 2117 prsp = list_entry(cmdiocb->cmd_dmabuf->list.next, 2118 struct lpfc_dmabuf, list); 2119 ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp); 2120 2121 sp = (struct serv_parm *)((u8 *)prsp->virt + 2122 sizeof(u32)); 2123 2124 ndlp->vmid_support = 0; 2125 if ((phba->cfg_vmid_app_header && sp->cmn.app_hdr_support) || 2126 (phba->cfg_vmid_priority_tagging && 2127 sp->cmn.priority_tagging)) { 2128 lpfc_printf_log(phba, KERN_DEBUG, LOG_ELS, 2129 "4018 app_hdr_support %d tagging %d DID x%x\n", 2130 sp->cmn.app_hdr_support, 2131 sp->cmn.priority_tagging, 2132 ndlp->nlp_DID); 2133 /* if the dest port supports VMID, mark it in ndlp */ 2134 ndlp->vmid_support = 1; 2135 } 2136 2137 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2138 NLP_EVT_CMPL_PLOGI); 2139 } 2140 2141 if (disc && vport->num_disc_nodes) { 2142 /* Check to see if there are more PLOGIs to be sent */ 2143 lpfc_more_plogi(vport); 2144 2145 if (vport->num_disc_nodes == 0) { 2146 spin_lock_irq(shost->host_lock); 2147 vport->fc_flag &= ~FC_NDISC_ACTIVE; 2148 spin_unlock_irq(shost->host_lock); 2149 2150 lpfc_can_disctmo(vport); 2151 lpfc_end_rscn(vport); 2152 } 2153 } 2154 2155 out: 2156 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, 2157 "PLOGI Cmpl PUT: did:x%x refcnt %d", 2158 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2159 2160 out_freeiocb: 2161 /* Release the reference on the original I/O request. */ 2162 free_ndlp = cmdiocb->ndlp; 2163 2164 lpfc_els_free_iocb(phba, cmdiocb); 2165 lpfc_nlp_put(free_ndlp); 2166 return; 2167 } 2168 2169 /** 2170 * lpfc_issue_els_plogi - Issue an plogi iocb command for a vport 2171 * @vport: pointer to a host virtual N_Port data structure. 2172 * @did: destination port identifier. 2173 * @retry: number of retries to the command IOCB. 2174 * 2175 * This routine issues a Port Login (PLOGI) command to a remote N_Port 2176 * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port, 2177 * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list. 2178 * This routine constructs the proper fields of the PLOGI IOCB and invokes 2179 * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command. 2180 * 2181 * Note that the ndlp reference count will be incremented by 1 for holding 2182 * the ndlp and the reference to ndlp will be stored into the ndlp field 2183 * of the IOCB for the completion callback function to the PLOGI ELS command. 2184 * 2185 * Return code 2186 * 0 - Successfully issued a plogi for @vport 2187 * 1 - failed to issue a plogi for @vport 2188 **/ 2189 int 2190 lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) 2191 { 2192 struct lpfc_hba *phba = vport->phba; 2193 struct serv_parm *sp; 2194 struct lpfc_nodelist *ndlp; 2195 struct lpfc_iocbq *elsiocb; 2196 uint8_t *pcmd; 2197 uint16_t cmdsize; 2198 int ret; 2199 2200 ndlp = lpfc_findnode_did(vport, did); 2201 if (!ndlp) 2202 return 1; 2203 2204 /* Defer the processing of the issue PLOGI until after the 2205 * outstanding UNREG_RPI mbox command completes, unless we 2206 * are going offline. 
This logic does not apply for Fabric DIDs 2207 */ 2208 if ((ndlp->nlp_flag & NLP_UNREG_INP) && 2209 ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) && 2210 !(vport->fc_flag & FC_OFFLINE_MODE)) { 2211 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2212 "4110 Issue PLOGI x%x deferred " 2213 "on NPort x%x rpi x%x Data: x%px\n", 2214 ndlp->nlp_defer_did, ndlp->nlp_DID, 2215 ndlp->nlp_rpi, ndlp); 2216 2217 /* We can only defer 1st PLOGI */ 2218 if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING) 2219 ndlp->nlp_defer_did = did; 2220 return 0; 2221 } 2222 2223 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 2224 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 2225 ELS_CMD_PLOGI); 2226 if (!elsiocb) 2227 return 1; 2228 2229 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 2230 2231 /* For PLOGI request, remainder of payload is service parameters */ 2232 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI; 2233 pcmd += sizeof(uint32_t); 2234 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); 2235 sp = (struct serv_parm *) pcmd; 2236 2237 /* 2238 * If we are a N-port connected to a Fabric, fix-up paramm's so logins 2239 * to device on remote loops work. 2240 */ 2241 if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP)) 2242 sp->cmn.altBbCredit = 1; 2243 2244 if (sp->cmn.fcphLow < FC_PH_4_3) 2245 sp->cmn.fcphLow = FC_PH_4_3; 2246 2247 if (sp->cmn.fcphHigh < FC_PH3) 2248 sp->cmn.fcphHigh = FC_PH3; 2249 2250 sp->cmn.valid_vendor_ver_level = 0; 2251 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); 2252 sp->cmn.bbRcvSizeMsb &= 0xF; 2253 2254 /* Check if the destination port supports VMID */ 2255 ndlp->vmid_support = 0; 2256 if (vport->vmid_priority_tagging) 2257 sp->cmn.priority_tagging = 1; 2258 else if (phba->cfg_vmid_app_header && 2259 bf_get(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags)) 2260 sp->cmn.app_hdr_support = 1; 2261 2262 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2263 "Issue PLOGI: did:x%x", 2264 did, 0, 0); 2265 2266 /* If our firmware supports this feature, convey that 2267 * information to the target using the vendor specific field. 2268 */ 2269 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) { 2270 sp->cmn.valid_vendor_ver_level = 1; 2271 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID); 2272 sp->un.vv.flags = cpu_to_be32(LPFC_VV_SUPPRESS_RSP); 2273 } 2274 2275 phba->fc_stat.elsXmitPLOGI++; 2276 elsiocb->cmd_cmpl = lpfc_cmpl_els_plogi; 2277 2278 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2279 "Issue PLOGI: did:x%x refcnt %d", 2280 did, kref_read(&ndlp->kref), 0); 2281 elsiocb->ndlp = lpfc_nlp_get(ndlp); 2282 if (!elsiocb->ndlp) { 2283 lpfc_els_free_iocb(phba, elsiocb); 2284 return 1; 2285 } 2286 2287 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2288 if (ret) { 2289 lpfc_els_free_iocb(phba, elsiocb); 2290 lpfc_nlp_put(ndlp); 2291 return 1; 2292 } 2293 2294 return 0; 2295 } 2296 2297 /** 2298 * lpfc_cmpl_els_prli - Completion callback function for prli 2299 * @phba: pointer to lpfc hba data structure. 2300 * @cmdiocb: pointer to lpfc command iocb data structure. 2301 * @rspiocb: pointer to lpfc response iocb data structure. 2302 * 2303 * This routine is the completion callback function for a Process Login 2304 * (PRLI) ELS command. The PRLI response IOCB status is checked for error 2305 * status. If there is error status reported, PRLI retry shall be attempted 2306 * by invoking the lpfc_els_retry() routine. 
Otherwise, the state 2307 * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this 2308 * ndlp to mark the PRLI completion. 2309 **/ 2310 static void 2311 lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2312 struct lpfc_iocbq *rspiocb) 2313 { 2314 struct lpfc_vport *vport = cmdiocb->vport; 2315 struct lpfc_nodelist *ndlp; 2316 char *mode; 2317 u32 loglevel; 2318 u32 ulp_status; 2319 u32 ulp_word4; 2320 bool release_node = false; 2321 2322 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2323 cmdiocb->rsp_iocb = rspiocb; 2324 2325 ndlp = cmdiocb->ndlp; 2326 2327 ulp_status = get_job_ulpstatus(phba, rspiocb); 2328 ulp_word4 = get_job_word4(phba, rspiocb); 2329 2330 spin_lock_irq(&ndlp->lock); 2331 ndlp->nlp_flag &= ~NLP_PRLI_SND; 2332 2333 /* Driver supports multiple FC4 types. Counters matter. */ 2334 vport->fc_prli_sent--; 2335 ndlp->fc4_prli_sent--; 2336 spin_unlock_irq(&ndlp->lock); 2337 2338 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2339 "PRLI cmpl: status:x%x/x%x did:x%x", 2340 ulp_status, ulp_word4, 2341 ndlp->nlp_DID); 2342 2343 /* PRLI completes to NPort <nlp_DID> */ 2344 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2345 "0103 PRLI completes to NPort x%06x " 2346 "Data: x%x x%x x%x x%x\n", 2347 ndlp->nlp_DID, ulp_status, ulp_word4, 2348 vport->num_disc_nodes, ndlp->fc4_prli_sent); 2349 2350 /* Check to see if link went down during discovery */ 2351 if (lpfc_els_chk_latt(vport)) 2352 goto out; 2353 2354 if (ulp_status) { 2355 /* Check for retry */ 2356 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2357 /* ELS command is being retried */ 2358 goto out; 2359 } 2360 2361 /* If we don't send GFT_ID to Fabric, a PRLI error 2362 * could be expected. 2363 */ 2364 if ((vport->fc_flag & FC_FABRIC) || 2365 (vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH)) { 2366 mode = KERN_ERR; 2367 loglevel = LOG_TRACE_EVENT; 2368 } else { 2369 mode = KERN_INFO; 2370 loglevel = LOG_ELS; 2371 } 2372 2373 /* PRLI failed */ 2374 lpfc_printf_vlog(vport, mode, loglevel, 2375 "2754 PRLI failure DID:%06X Status:x%x/x%x, " 2376 "data: x%x\n", 2377 ndlp->nlp_DID, ulp_status, 2378 ulp_word4, ndlp->fc4_prli_sent); 2379 2380 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2381 if (!lpfc_error_lost_link(ulp_status, ulp_word4)) 2382 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2383 NLP_EVT_CMPL_PRLI); 2384 2385 /* 2386 * For P2P topology, retain the node so that PLOGI can be 2387 * attempted on it again. 2388 */ 2389 if (vport->fc_flag & FC_PT2PT) 2390 goto out; 2391 2392 /* As long as this node is not registered with the SCSI 2393 * or NVMe transport and no other PRLIs are outstanding, 2394 * it is no longer an active node. Otherwise devloss 2395 * handles the final cleanup. 2396 */ 2397 spin_lock_irq(&ndlp->lock); 2398 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && 2399 !ndlp->fc4_prli_sent) { 2400 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2401 if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) 2402 release_node = true; 2403 } 2404 spin_unlock_irq(&ndlp->lock); 2405 2406 if (release_node) 2407 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2408 NLP_EVT_DEVICE_RM); 2409 } else { 2410 /* Good status, call state machine. However, if another 2411 * PRLI is outstanding, don't call the state machine 2412 * because final disposition to Mapped or Unmapped is 2413 * completed there. 
2414 */ 2415 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2416 NLP_EVT_CMPL_PRLI); 2417 } 2418 2419 out: 2420 lpfc_els_free_iocb(phba, cmdiocb); 2421 lpfc_nlp_put(ndlp); 2422 return; 2423 } 2424 2425 /** 2426 * lpfc_issue_els_prli - Issue a prli iocb command for a vport 2427 * @vport: pointer to a host virtual N_Port data structure. 2428 * @ndlp: pointer to a node-list data structure. 2429 * @retry: number of retries to the command IOCB. 2430 * 2431 * This routine issues a Process Login (PRLI) ELS command for the 2432 * @vport. The PRLI service parameters are set up in the payload of the 2433 * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine 2434 * is put to the IOCB completion callback func field before invoking the 2435 * routine lpfc_sli_issue_iocb() to send out PRLI command. 2436 * 2437 * Note that the ndlp reference count will be incremented by 1 for holding the 2438 * ndlp and the reference to ndlp will be stored into the ndlp field of 2439 * the IOCB for the completion callback function to the PRLI ELS command. 2440 * 2441 * Return code 2442 * 0 - successfully issued prli iocb command for @vport 2443 * 1 - failed to issue prli iocb command for @vport 2444 **/ 2445 int 2446 lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2447 uint8_t retry) 2448 { 2449 int rc = 0; 2450 struct lpfc_hba *phba = vport->phba; 2451 PRLI *npr; 2452 struct lpfc_nvme_prli *npr_nvme; 2453 struct lpfc_iocbq *elsiocb; 2454 uint8_t *pcmd; 2455 uint16_t cmdsize; 2456 u32 local_nlp_type, elscmd; 2457 2458 /* 2459 * If we are in RSCN mode, the FC4 types supported from a 2460 * previous GFT_ID command may not be accurate. So, if we 2461 * are a NVME Initiator, always look for the possibility of 2462 * the remote NPort beng a NVME Target. 2463 */ 2464 if (phba->sli_rev == LPFC_SLI_REV4 && 2465 vport->fc_flag & FC_RSCN_MODE && 2466 vport->nvmei_support) 2467 ndlp->nlp_fc4_type |= NLP_FC4_NVME; 2468 local_nlp_type = ndlp->nlp_fc4_type; 2469 2470 /* This routine will issue 1 or 2 PRLIs, so zero all the ndlp 2471 * fields here before any of them can complete. 2472 */ 2473 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); 2474 ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR); 2475 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 2476 ndlp->nlp_flag &= ~(NLP_FIRSTBURST | NLP_NPR_2B_DISC); 2477 ndlp->nvme_fb_size = 0; 2478 2479 send_next_prli: 2480 if (local_nlp_type & NLP_FC4_FCP) { 2481 /* Payload is 4 + 16 = 20 x14 bytes. */ 2482 cmdsize = (sizeof(uint32_t) + sizeof(PRLI)); 2483 elscmd = ELS_CMD_PRLI; 2484 } else if (local_nlp_type & NLP_FC4_NVME) { 2485 /* Payload is 4 + 20 = 24 x18 bytes. */ 2486 cmdsize = (sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli)); 2487 elscmd = ELS_CMD_NVMEPRLI; 2488 } else { 2489 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2490 "3083 Unknown FC_TYPE x%x ndlp x%06x\n", 2491 ndlp->nlp_fc4_type, ndlp->nlp_DID); 2492 return 1; 2493 } 2494 2495 /* SLI3 ports don't support NVME. If this rport is a strict NVME 2496 * FC4 type, implicitly LOGO. 
2497 */ 2498 if (phba->sli_rev == LPFC_SLI_REV3 && 2499 ndlp->nlp_fc4_type == NLP_FC4_NVME) { 2500 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2501 "3088 Rport fc4 type 0x%x not supported by SLI3 adapter\n", 2502 ndlp->nlp_type); 2503 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 2504 return 1; 2505 } 2506 2507 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2508 ndlp->nlp_DID, elscmd); 2509 if (!elsiocb) 2510 return 1; 2511 2512 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 2513 2514 /* For PRLI request, remainder of payload is service parameters */ 2515 memset(pcmd, 0, cmdsize); 2516 2517 if (local_nlp_type & NLP_FC4_FCP) { 2518 /* Remainder of payload is FCP PRLI parameter page. 2519 * Note: this data structure is defined as 2520 * BE/LE in the structure definition so no 2521 * byte swap call is made. 2522 */ 2523 *((uint32_t *)(pcmd)) = ELS_CMD_PRLI; 2524 pcmd += sizeof(uint32_t); 2525 npr = (PRLI *)pcmd; 2526 2527 /* 2528 * If our firmware version is 3.20 or later, 2529 * set the following bits for FC-TAPE support. 2530 */ 2531 if (phba->vpd.rev.feaLevelHigh >= 0x02) { 2532 npr->ConfmComplAllowed = 1; 2533 npr->Retry = 1; 2534 npr->TaskRetryIdReq = 1; 2535 } 2536 npr->estabImagePair = 1; 2537 npr->readXferRdyDis = 1; 2538 if (vport->cfg_first_burst_size) 2539 npr->writeXferRdyDis = 1; 2540 2541 /* For FCP support */ 2542 npr->prliType = PRLI_FCP_TYPE; 2543 npr->initiatorFunc = 1; 2544 elsiocb->cmd_flag |= LPFC_PRLI_FCP_REQ; 2545 2546 /* Remove FCP type - processed. */ 2547 local_nlp_type &= ~NLP_FC4_FCP; 2548 } else if (local_nlp_type & NLP_FC4_NVME) { 2549 /* Remainder of payload is NVME PRLI parameter page. 2550 * This data structure is the newer definition that 2551 * uses bf macros so a byte swap is required. 2552 */ 2553 *((uint32_t *)(pcmd)) = ELS_CMD_NVMEPRLI; 2554 pcmd += sizeof(uint32_t); 2555 npr_nvme = (struct lpfc_nvme_prli *)pcmd; 2556 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); 2557 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ 2558 if (phba->nsler) { 2559 bf_set(prli_nsler, npr_nvme, 1); 2560 bf_set(prli_conf, npr_nvme, 1); 2561 } 2562 2563 /* Only initiators request first burst. */ 2564 if ((phba->cfg_nvme_enable_fb) && 2565 !phba->nvmet_support) 2566 bf_set(prli_fba, npr_nvme, 1); 2567 2568 if (phba->nvmet_support) { 2569 bf_set(prli_tgt, npr_nvme, 1); 2570 bf_set(prli_disc, npr_nvme, 1); 2571 } else { 2572 bf_set(prli_init, npr_nvme, 1); 2573 bf_set(prli_conf, npr_nvme, 1); 2574 } 2575 2576 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); 2577 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); 2578 elsiocb->cmd_flag |= LPFC_PRLI_NVME_REQ; 2579 2580 /* Remove NVME type - processed. */ 2581 local_nlp_type &= ~NLP_FC4_NVME; 2582 } 2583 2584 phba->fc_stat.elsXmitPRLI++; 2585 elsiocb->cmd_cmpl = lpfc_cmpl_els_prli; 2586 2587 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2588 "Issue PRLI: did:x%x refcnt %d", 2589 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2590 elsiocb->ndlp = lpfc_nlp_get(ndlp); 2591 if (!elsiocb->ndlp) { 2592 lpfc_els_free_iocb(phba, elsiocb); 2593 return 1; 2594 } 2595 2596 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2597 if (rc == IOCB_ERROR) { 2598 lpfc_els_free_iocb(phba, elsiocb); 2599 lpfc_nlp_put(ndlp); 2600 return 1; 2601 } 2602 2603 /* The vport counters are used for lpfc_scan_finished, but 2604 * the ndlp is used to track outstanding PRLIs for different 2605 * FC4 types. 
2606 */ 2607 spin_lock_irq(&ndlp->lock); 2608 ndlp->nlp_flag |= NLP_PRLI_SND; 2609 vport->fc_prli_sent++; 2610 ndlp->fc4_prli_sent++; 2611 spin_unlock_irq(&ndlp->lock); 2612 2613 /* The driver supports 2 FC4 types. Make sure 2614 * a PRLI is issued for all types before exiting. 2615 */ 2616 if (phba->sli_rev == LPFC_SLI_REV4 && 2617 local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME)) 2618 goto send_next_prli; 2619 else 2620 return 0; 2621 } 2622 2623 /** 2624 * lpfc_rscn_disc - Perform rscn discovery for a vport 2625 * @vport: pointer to a host virtual N_Port data structure. 2626 * 2627 * This routine performs Registration State Change Notification (RSCN) 2628 * discovery for a @vport. If the @vport's node port recovery count is not 2629 * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all 2630 * the nodes that need recovery. If none of the PLOGI were needed through 2631 * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be 2632 * invoked to check and handle possible more RSCN came in during the period 2633 * of processing the current ones. 2634 **/ 2635 static void 2636 lpfc_rscn_disc(struct lpfc_vport *vport) 2637 { 2638 lpfc_can_disctmo(vport); 2639 2640 /* RSCN discovery */ 2641 /* go thru NPR nodes and issue ELS PLOGIs */ 2642 if (vport->fc_npr_cnt) 2643 if (lpfc_els_disc_plogi(vport)) 2644 return; 2645 2646 lpfc_end_rscn(vport); 2647 } 2648 2649 /** 2650 * lpfc_adisc_done - Complete the adisc phase of discovery 2651 * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs. 2652 * 2653 * This function is called when the final ADISC is completed during discovery. 2654 * This function handles clearing link attention or issuing reg_vpi depending 2655 * on whether npiv is enabled. This function also kicks off the PLOGI phase of 2656 * discovery. 2657 * This function is called with no locks held. 2658 **/ 2659 static void 2660 lpfc_adisc_done(struct lpfc_vport *vport) 2661 { 2662 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2663 struct lpfc_hba *phba = vport->phba; 2664 2665 /* 2666 * For NPIV, cmpl_reg_vpi will set port_state to READY, 2667 * and continue discovery. 2668 */ 2669 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 2670 !(vport->fc_flag & FC_RSCN_MODE) && 2671 (phba->sli_rev < LPFC_SLI_REV4)) { 2672 2673 /* 2674 * If link is down, clear_la and reg_vpi will be done after 2675 * flogi following a link up event 2676 */ 2677 if (!lpfc_is_link_up(phba)) 2678 return; 2679 2680 /* The ADISCs are complete. Doesn't matter if they 2681 * succeeded or failed because the ADISC completion 2682 * routine guarantees to call the state machine and 2683 * the RPI is either unregistered (failed ADISC response) 2684 * or the RPI is still valid and the node is marked 2685 * mapped for a target. The exchanges should be in the 2686 * correct state. This code is specific to SLI3. 2687 */ 2688 lpfc_issue_clear_la(phba, vport); 2689 lpfc_issue_reg_vpi(phba, vport); 2690 return; 2691 } 2692 /* 2693 * For SLI2, we need to set port_state to READY 2694 * and continue discovery. 
2695 */ 2696 if (vport->port_state < LPFC_VPORT_READY) { 2697 /* If we get here, there is nothing to ADISC */ 2698 lpfc_issue_clear_la(phba, vport); 2699 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) { 2700 vport->num_disc_nodes = 0; 2701 /* go thru NPR list, issue ELS PLOGIs */ 2702 if (vport->fc_npr_cnt) 2703 lpfc_els_disc_plogi(vport); 2704 if (!vport->num_disc_nodes) { 2705 spin_lock_irq(shost->host_lock); 2706 vport->fc_flag &= ~FC_NDISC_ACTIVE; 2707 spin_unlock_irq(shost->host_lock); 2708 lpfc_can_disctmo(vport); 2709 lpfc_end_rscn(vport); 2710 } 2711 } 2712 vport->port_state = LPFC_VPORT_READY; 2713 } else 2714 lpfc_rscn_disc(vport); 2715 } 2716 2717 /** 2718 * lpfc_more_adisc - Issue more adisc as needed 2719 * @vport: pointer to a host virtual N_Port data structure. 2720 * 2721 * This routine determines whether there are more ndlps on a @vport's 2722 * node list that need to have Address Discover (ADISC) issued. If so, it will 2723 * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's 2724 * remaining nodes which need to have ADISC sent. 2725 **/ 2726 void 2727 lpfc_more_adisc(struct lpfc_vport *vport) 2728 { 2729 if (vport->num_disc_nodes) 2730 vport->num_disc_nodes--; 2731 /* Continue discovery with <num_disc_nodes> ADISCs to go */ 2732 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2733 "0210 Continue discovery with %d ADISCs to go " 2734 "Data: x%x x%x x%x\n", 2735 vport->num_disc_nodes, vport->fc_adisc_cnt, 2736 vport->fc_flag, vport->port_state); 2737 /* Check to see if there are more ADISCs to be sent */ 2738 if (vport->fc_flag & FC_NLP_MORE) { 2739 lpfc_set_disctmo(vport); 2740 /* go thru NPR nodes and issue any remaining ELS ADISCs */ 2741 lpfc_els_disc_adisc(vport); 2742 } 2743 if (!vport->num_disc_nodes) 2744 lpfc_adisc_done(vport); 2745 return; 2746 } 2747 2748 /** 2749 * lpfc_cmpl_els_adisc - Completion callback function for adisc 2750 * @phba: pointer to lpfc hba data structure. 2751 * @cmdiocb: pointer to lpfc command iocb data structure. 2752 * @rspiocb: pointer to lpfc response iocb data structure. 2753 * 2754 * This routine is the completion function for issuing the Address Discover 2755 * (ADISC) command. It first checks to see whether link went down during 2756 * the discovery process. If so, the node will be marked for node port 2757 * recovery so that the discover IOCB can be issued by the link attention 2758 * handler, and the routine exits. Otherwise, the response status is checked. 2759 * If an error was reported in the response status, the ADISC command shall be 2760 * retried by invoking the lpfc_els_retry() routine. Otherwise, if no error was 2761 * reported in the response status, the state machine is invoked to set transition 2762 * with respect to the NLP_EVT_CMPL_ADISC event.
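 *
 * For reference, lpfc_issue_els_adisc() installs this routine as the
 * completion handler roughly as follows (abridged; the NLP_ADISC_SND update
 * is performed under ndlp->lock in the real routine):
 *
 *   phba->fc_stat.elsXmitADISC++;
 *   elsiocb->cmd_cmpl = lpfc_cmpl_els_adisc;
 *   ndlp->nlp_flag |= NLP_ADISC_SND;
 *   elsiocb->ndlp = lpfc_nlp_get(ndlp);
 *   rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);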
2763 **/ 2764 static void 2765 lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2766 struct lpfc_iocbq *rspiocb) 2767 { 2768 struct lpfc_vport *vport = cmdiocb->vport; 2769 IOCB_t *irsp; 2770 struct lpfc_nodelist *ndlp; 2771 int disc; 2772 u32 ulp_status, ulp_word4, tmo; 2773 bool release_node = false; 2774 2775 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2776 cmdiocb->rsp_iocb = rspiocb; 2777 2778 ndlp = cmdiocb->ndlp; 2779 2780 ulp_status = get_job_ulpstatus(phba, rspiocb); 2781 ulp_word4 = get_job_word4(phba, rspiocb); 2782 2783 if (phba->sli_rev == LPFC_SLI_REV4) { 2784 tmo = get_wqe_tmo(cmdiocb); 2785 } else { 2786 irsp = &rspiocb->iocb; 2787 tmo = irsp->ulpTimeout; 2788 } 2789 2790 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2791 "ADISC cmpl: status:x%x/x%x did:x%x", 2792 ulp_status, ulp_word4, 2793 ndlp->nlp_DID); 2794 2795 /* Since ndlp can be freed in the disc state machine, note if this node 2796 * is being used during discovery. 2797 */ 2798 spin_lock_irq(&ndlp->lock); 2799 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 2800 ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC); 2801 spin_unlock_irq(&ndlp->lock); 2802 /* ADISC completes to NPort <nlp_DID> */ 2803 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2804 "0104 ADISC completes to NPort x%x " 2805 "Data: x%x x%x x%x x%x x%x\n", 2806 ndlp->nlp_DID, ulp_status, ulp_word4, 2807 tmo, disc, vport->num_disc_nodes); 2808 /* Check to see if link went down during discovery */ 2809 if (lpfc_els_chk_latt(vport)) { 2810 spin_lock_irq(&ndlp->lock); 2811 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2812 spin_unlock_irq(&ndlp->lock); 2813 goto out; 2814 } 2815 2816 if (ulp_status) { 2817 /* Check for retry */ 2818 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2819 /* ELS command is being retried */ 2820 if (disc) { 2821 spin_lock_irq(&ndlp->lock); 2822 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2823 spin_unlock_irq(&ndlp->lock); 2824 lpfc_set_disctmo(vport); 2825 } 2826 goto out; 2827 } 2828 /* ADISC failed */ 2829 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2830 "2755 ADISC failure DID:%06X Status:x%x/x%x\n", 2831 ndlp->nlp_DID, ulp_status, 2832 ulp_word4); 2833 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2834 NLP_EVT_CMPL_ADISC); 2835 2836 /* As long as this node is not registered with the SCSI or NVMe 2837 * transport, it is no longer an active node. Otherwise 2838 * devloss handles the final cleanup. 2839 */ 2840 spin_lock_irq(&ndlp->lock); 2841 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { 2842 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2843 if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) 2844 release_node = true; 2845 } 2846 spin_unlock_irq(&ndlp->lock); 2847 2848 if (release_node) 2849 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2850 NLP_EVT_DEVICE_RM); 2851 } else 2852 /* Good status, call state machine */ 2853 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2854 NLP_EVT_CMPL_ADISC); 2855 2856 /* Check to see if there are more ADISCs to be sent */ 2857 if (disc && vport->num_disc_nodes) 2858 lpfc_more_adisc(vport); 2859 out: 2860 lpfc_els_free_iocb(phba, cmdiocb); 2861 lpfc_nlp_put(ndlp); 2862 return; 2863 } 2864 2865 /** 2866 * lpfc_issue_els_adisc - Issue an address discover iocb to an node on a vport 2867 * @vport: pointer to a virtual N_Port data structure. 2868 * @ndlp: pointer to a node-list data structure. 2869 * @retry: number of retries to the command IOCB. 2870 * 2871 * This routine issues an Address Discover (ADISC) for an @ndlp on a 2872 * @vport. 
It prepares the payload of the ADISC ELS command, updates the 2873 * and states of the ndlp, and invokes the lpfc_sli_issue_iocb() routine 2874 * to issue the ADISC ELS command. 2875 * 2876 * Note that the ndlp reference count will be incremented by 1 for holding the 2877 * ndlp and the reference to ndlp will be stored into the ndlp field of 2878 * the IOCB for the completion callback function to the ADISC ELS command. 2879 * 2880 * Return code 2881 * 0 - successfully issued adisc 2882 * 1 - failed to issue adisc 2883 **/ 2884 int 2885 lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2886 uint8_t retry) 2887 { 2888 int rc = 0; 2889 struct lpfc_hba *phba = vport->phba; 2890 ADISC *ap; 2891 struct lpfc_iocbq *elsiocb; 2892 uint8_t *pcmd; 2893 uint16_t cmdsize; 2894 2895 cmdsize = (sizeof(uint32_t) + sizeof(ADISC)); 2896 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2897 ndlp->nlp_DID, ELS_CMD_ADISC); 2898 if (!elsiocb) 2899 return 1; 2900 2901 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 2902 2903 /* For ADISC request, remainder of payload is service parameters */ 2904 *((uint32_t *) (pcmd)) = ELS_CMD_ADISC; 2905 pcmd += sizeof(uint32_t); 2906 2907 /* Fill in ADISC payload */ 2908 ap = (ADISC *) pcmd; 2909 ap->hardAL_PA = phba->fc_pref_ALPA; 2910 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 2911 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 2912 ap->DID = be32_to_cpu(vport->fc_myDID); 2913 2914 phba->fc_stat.elsXmitADISC++; 2915 elsiocb->cmd_cmpl = lpfc_cmpl_els_adisc; 2916 spin_lock_irq(&ndlp->lock); 2917 ndlp->nlp_flag |= NLP_ADISC_SND; 2918 spin_unlock_irq(&ndlp->lock); 2919 elsiocb->ndlp = lpfc_nlp_get(ndlp); 2920 if (!elsiocb->ndlp) { 2921 lpfc_els_free_iocb(phba, elsiocb); 2922 goto err; 2923 } 2924 2925 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2926 "Issue ADISC: did:x%x refcnt %d", 2927 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2928 2929 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2930 if (rc == IOCB_ERROR) { 2931 lpfc_els_free_iocb(phba, elsiocb); 2932 lpfc_nlp_put(ndlp); 2933 goto err; 2934 } 2935 2936 return 0; 2937 2938 err: 2939 spin_lock_irq(&ndlp->lock); 2940 ndlp->nlp_flag &= ~NLP_ADISC_SND; 2941 spin_unlock_irq(&ndlp->lock); 2942 return 1; 2943 } 2944 2945 /** 2946 * lpfc_cmpl_els_logo - Completion callback function for logo 2947 * @phba: pointer to lpfc hba data structure. 2948 * @cmdiocb: pointer to lpfc command iocb data structure. 2949 * @rspiocb: pointer to lpfc response iocb data structure. 2950 * 2951 * This routine is the completion function for issuing the ELS Logout (LOGO) 2952 * command. If no error status was reported from the LOGO response, the 2953 * state machine of the associated ndlp shall be invoked for transition with 2954 * respect to NLP_EVT_CMPL_LOGO event. 
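 *
 * For reference, lpfc_issue_els_logo() installs this routine as the
 * completion handler roughly as follows (abridged; the NLP_LOGO_SND update
 * is performed under ndlp->lock in the real routine):
 *
 *   phba->fc_stat.elsXmitLOGO++;
 *   elsiocb->cmd_cmpl = lpfc_cmpl_els_logo;
 *   ndlp->nlp_flag |= NLP_LOGO_SND;
 *   elsiocb->ndlp = lpfc_nlp_get(ndlp);
 *   rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);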
2955 **/ 2956 static void 2957 lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2958 struct lpfc_iocbq *rspiocb) 2959 { 2960 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 2961 struct lpfc_vport *vport = ndlp->vport; 2962 IOCB_t *irsp; 2963 unsigned long flags; 2964 uint32_t skip_recovery = 0; 2965 int wake_up_waiter = 0; 2966 u32 ulp_status; 2967 u32 ulp_word4; 2968 u32 tmo; 2969 2970 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2971 cmdiocb->rsp_iocb = rspiocb; 2972 2973 ulp_status = get_job_ulpstatus(phba, rspiocb); 2974 ulp_word4 = get_job_word4(phba, rspiocb); 2975 2976 if (phba->sli_rev == LPFC_SLI_REV4) { 2977 tmo = get_wqe_tmo(cmdiocb); 2978 } else { 2979 irsp = &rspiocb->iocb; 2980 tmo = irsp->ulpTimeout; 2981 } 2982 2983 spin_lock_irq(&ndlp->lock); 2984 ndlp->nlp_flag &= ~NLP_LOGO_SND; 2985 if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) { 2986 wake_up_waiter = 1; 2987 ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO; 2988 } 2989 spin_unlock_irq(&ndlp->lock); 2990 2991 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2992 "LOGO cmpl: status:x%x/x%x did:x%x", 2993 ulp_status, ulp_word4, 2994 ndlp->nlp_DID); 2995 2996 /* LOGO completes to NPort <nlp_DID> */ 2997 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2998 "0105 LOGO completes to NPort x%x " 2999 "refcnt %d nflags x%x Data: x%x x%x x%x x%x\n", 3000 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag, 3001 ulp_status, ulp_word4, 3002 tmo, vport->num_disc_nodes); 3003 3004 if (lpfc_els_chk_latt(vport)) { 3005 skip_recovery = 1; 3006 goto out; 3007 } 3008 3009 /* The LOGO will not be retried on failure. A LOGO was 3010 * issued to the remote rport and a ACC or RJT or no Answer are 3011 * all acceptable. Note the failure and move forward with 3012 * discovery. The PLOGI will retry. 3013 */ 3014 if (ulp_status) { 3015 /* LOGO failed */ 3016 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 3017 "2756 LOGO failure, No Retry DID:%06X " 3018 "Status:x%x/x%x\n", 3019 ndlp->nlp_DID, ulp_status, 3020 ulp_word4); 3021 3022 if (lpfc_error_lost_link(ulp_status, ulp_word4)) { 3023 skip_recovery = 1; 3024 goto out; 3025 } 3026 } 3027 3028 /* Call state machine. This will unregister the rpi if needed. */ 3029 lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO); 3030 3031 /* The driver sets this flag for an NPIV instance that doesn't want to 3032 * log into the remote port. 3033 */ 3034 if (ndlp->nlp_flag & NLP_TARGET_REMOVE) { 3035 spin_lock_irq(&ndlp->lock); 3036 if (phba->sli_rev == LPFC_SLI_REV4) 3037 ndlp->nlp_flag |= NLP_RELEASE_RPI; 3038 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 3039 spin_unlock_irq(&ndlp->lock); 3040 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 3041 NLP_EVT_DEVICE_RM); 3042 goto out_rsrc_free; 3043 } 3044 3045 out: 3046 /* At this point, the LOGO processing is complete. NOTE: For a 3047 * pt2pt topology, we are assuming the NPortID will only change 3048 * on link up processing. For a LOGO / PLOGI initiated by the 3049 * Initiator, we are assuming the NPortID is not going to change. 3050 */ 3051 3052 if (wake_up_waiter && ndlp->logo_waitq) 3053 wake_up(ndlp->logo_waitq); 3054 /* 3055 * If the node is a target, the handling attempts to recover the port. 3056 * For any other port type, the rpi is unregistered as an implicit 3057 * LOGO. 
3058 */ 3059 if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) && 3060 skip_recovery == 0) { 3061 lpfc_cancel_retry_delay_tmo(vport, ndlp); 3062 spin_lock_irqsave(&ndlp->lock, flags); 3063 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 3064 spin_unlock_irqrestore(&ndlp->lock, flags); 3065 3066 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3067 "3187 LOGO completes to NPort x%x: Start " 3068 "Recovery Data: x%x x%x x%x x%x\n", 3069 ndlp->nlp_DID, ulp_status, 3070 ulp_word4, tmo, 3071 vport->num_disc_nodes); 3072 3073 lpfc_els_free_iocb(phba, cmdiocb); 3074 lpfc_nlp_put(ndlp); 3075 3076 lpfc_disc_start(vport); 3077 return; 3078 } 3079 3080 /* Cleanup path for failed REG_RPI handling. If REG_RPI fails, the 3081 * driver sends a LOGO to the rport to clean up. For fabric and 3082 * initiator ports, clean up the node as long as the node is not 3083 * registered with the transport. 3084 */ 3085 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { 3086 spin_lock_irq(&ndlp->lock); 3087 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 3088 spin_unlock_irq(&ndlp->lock); 3089 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 3090 NLP_EVT_DEVICE_RM); 3091 } 3092 out_rsrc_free: 3093 /* Driver is done with the I/O. */ 3094 lpfc_els_free_iocb(phba, cmdiocb); 3095 lpfc_nlp_put(ndlp); 3096 } 3097 3098 /** 3099 * lpfc_issue_els_logo - Issue a logo to a node on a vport 3100 * @vport: pointer to a virtual N_Port data structure. 3101 * @ndlp: pointer to a node-list data structure. 3102 * @retry: number of retries to the command IOCB. 3103 * 3104 * This routine constructs and issues an ELS Logout (LOGO) iocb command 3105 * to a remote node, referred to by an @ndlp on a @vport. It constructs the 3106 * payload of the IOCB, properly sets up the @ndlp state, and invokes the 3107 * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command. 3108 * 3109 * Note that the ndlp reference count will be incremented by 1 for holding the 3110 * ndlp and the reference to ndlp will be stored into the ndlp field of 3111 * the IOCB for the completion callback function to the LOGO ELS command.
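 *
 * A minimal caller sketch (illustrative only; error handling elided, and
 * note the RPI expectation stated below):
 *
 *   lpfc_unreg_rpi(vport, ndlp);
 *   if (lpfc_issue_els_logo(vport, ndlp, 0))
 *       lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 *                        "LOGO not issued to DID x%x\n", ndlp->nlp_DID);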
3112 * 3113 * Callers of this routine are expected to unregister the RPI first 3114 * 3115 * Return code 3116 * 0 - successfully issued logo 3117 * 1 - failed to issue logo 3118 **/ 3119 int 3120 lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 3121 uint8_t retry) 3122 { 3123 struct lpfc_hba *phba = vport->phba; 3124 struct lpfc_iocbq *elsiocb; 3125 uint8_t *pcmd; 3126 uint16_t cmdsize; 3127 int rc; 3128 3129 spin_lock_irq(&ndlp->lock); 3130 if (ndlp->nlp_flag & NLP_LOGO_SND) { 3131 spin_unlock_irq(&ndlp->lock); 3132 return 0; 3133 } 3134 spin_unlock_irq(&ndlp->lock); 3135 3136 cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name); 3137 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3138 ndlp->nlp_DID, ELS_CMD_LOGO); 3139 if (!elsiocb) 3140 return 1; 3141 3142 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 3143 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 3144 pcmd += sizeof(uint32_t); 3145 3146 /* Fill in LOGO payload */ 3147 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 3148 pcmd += sizeof(uint32_t); 3149 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 3150 3151 phba->fc_stat.elsXmitLOGO++; 3152 elsiocb->cmd_cmpl = lpfc_cmpl_els_logo; 3153 spin_lock_irq(&ndlp->lock); 3154 ndlp->nlp_flag |= NLP_LOGO_SND; 3155 ndlp->nlp_flag &= ~NLP_ISSUE_LOGO; 3156 spin_unlock_irq(&ndlp->lock); 3157 elsiocb->ndlp = lpfc_nlp_get(ndlp); 3158 if (!elsiocb->ndlp) { 3159 lpfc_els_free_iocb(phba, elsiocb); 3160 goto err; 3161 } 3162 3163 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3164 "Issue LOGO: did:x%x refcnt %d", 3165 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3166 3167 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3168 if (rc == IOCB_ERROR) { 3169 lpfc_els_free_iocb(phba, elsiocb); 3170 lpfc_nlp_put(ndlp); 3171 goto err; 3172 } 3173 3174 spin_lock_irq(&ndlp->lock); 3175 ndlp->nlp_prev_state = ndlp->nlp_state; 3176 spin_unlock_irq(&ndlp->lock); 3177 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 3178 return 0; 3179 3180 err: 3181 spin_lock_irq(&ndlp->lock); 3182 ndlp->nlp_flag &= ~NLP_LOGO_SND; 3183 spin_unlock_irq(&ndlp->lock); 3184 return 1; 3185 } 3186 3187 /** 3188 * lpfc_cmpl_els_cmd - Completion callback function for generic els command 3189 * @phba: pointer to lpfc hba data structure. 3190 * @cmdiocb: pointer to lpfc command iocb data structure. 3191 * @rspiocb: pointer to lpfc response iocb data structure. 3192 * 3193 * This routine is a generic completion callback function for ELS commands. 3194 * Specifically, it is the callback function which does not need to perform 3195 * any command specific operations. It is currently used by the ELS command 3196 * issuing routines for RSCN, lpfc_issue_els_rscn, and the ELS Fibre Channel 3197 * Address Resolution Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). 3198 * Other than certain debug loggings, this callback function simply invokes the 3199 * lpfc_els_chk_latt() routine to check whether link went down during the 3200 * discovery process. 
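 *
 * For reference, lpfc_issue_els_rscn() installs this routine as the
 * completion handler roughly as follows (abridged from that routine):
 *
 *   phba->fc_stat.elsXmitRSCN++;
 *   elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd;
 *   elsiocb->ndlp = lpfc_nlp_get(ndlp);
 *   rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);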
3201 **/ 3202 static void 3203 lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3204 struct lpfc_iocbq *rspiocb) 3205 { 3206 struct lpfc_vport *vport = cmdiocb->vport; 3207 struct lpfc_nodelist *free_ndlp; 3208 IOCB_t *irsp; 3209 u32 ulp_status, ulp_word4, tmo, did, iotag; 3210 3211 ulp_status = get_job_ulpstatus(phba, rspiocb); 3212 ulp_word4 = get_job_word4(phba, rspiocb); 3213 did = get_job_els_rsp64_did(phba, cmdiocb); 3214 3215 if (phba->sli_rev == LPFC_SLI_REV4) { 3216 tmo = get_wqe_tmo(cmdiocb); 3217 iotag = get_wqe_reqtag(cmdiocb); 3218 } else { 3219 irsp = &rspiocb->iocb; 3220 tmo = irsp->ulpTimeout; 3221 iotag = irsp->ulpIoTag; 3222 } 3223 3224 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3225 "ELS cmd cmpl: status:x%x/x%x did:x%x", 3226 ulp_status, ulp_word4, did); 3227 3228 /* ELS cmd tag <ulpIoTag> completes */ 3229 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3230 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n", 3231 iotag, ulp_status, ulp_word4, tmo); 3232 3233 /* Check to see if link went down during discovery */ 3234 lpfc_els_chk_latt(vport); 3235 3236 free_ndlp = cmdiocb->ndlp; 3237 3238 lpfc_els_free_iocb(phba, cmdiocb); 3239 lpfc_nlp_put(free_ndlp); 3240 } 3241 3242 /** 3243 * lpfc_reg_fab_ctrl_node - RPI register the fabric controller node. 3244 * @vport: pointer to lpfc_vport data structure. 3245 * @fc_ndlp: pointer to the fabric controller (0xfffffd) node. 3246 * 3247 * This routine registers the rpi assigned to the fabric controller 3248 * NPort_ID (0xfffffd) with the port and moves the node to UNMAPPED 3249 * state triggering a registration with the SCSI transport. 3250 * 3251 * This routine is single out because the fabric controller node 3252 * does not receive a PLOGI. This routine is consumed by the 3253 * SCR and RDF ELS commands. Callers are expected to qualify 3254 * with SLI4 first. 3255 **/ 3256 static int 3257 lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp) 3258 { 3259 int rc = 0; 3260 struct lpfc_hba *phba = vport->phba; 3261 struct lpfc_nodelist *ns_ndlp; 3262 LPFC_MBOXQ_t *mbox; 3263 3264 if (fc_ndlp->nlp_flag & NLP_RPI_REGISTERED) 3265 return rc; 3266 3267 ns_ndlp = lpfc_findnode_did(vport, NameServer_DID); 3268 if (!ns_ndlp) 3269 return -ENODEV; 3270 3271 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 3272 "0935 %s: Reg FC RPI x%x on FC DID x%x NSSte: x%x\n", 3273 __func__, fc_ndlp->nlp_rpi, fc_ndlp->nlp_DID, 3274 ns_ndlp->nlp_state); 3275 if (ns_ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 3276 return -ENODEV; 3277 3278 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3279 if (!mbox) { 3280 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3281 "0936 %s: no memory for reg_login " 3282 "Data: x%x x%x x%x x%x\n", __func__, 3283 fc_ndlp->nlp_DID, fc_ndlp->nlp_state, 3284 fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi); 3285 return -ENOMEM; 3286 } 3287 rc = lpfc_reg_rpi(phba, vport->vpi, fc_ndlp->nlp_DID, 3288 (u8 *)&vport->fc_sparam, mbox, fc_ndlp->nlp_rpi); 3289 if (rc) { 3290 rc = -EACCES; 3291 goto out; 3292 } 3293 3294 fc_ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; 3295 mbox->mbox_cmpl = lpfc_mbx_cmpl_fc_reg_login; 3296 mbox->ctx_ndlp = lpfc_nlp_get(fc_ndlp); 3297 if (!mbox->ctx_ndlp) { 3298 rc = -ENOMEM; 3299 goto out; 3300 } 3301 3302 mbox->vport = vport; 3303 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 3304 if (rc == MBX_NOT_FINISHED) { 3305 rc = -ENODEV; 3306 lpfc_nlp_put(fc_ndlp); 3307 goto out; 3308 } 3309 /* Success path. Exit. 
*/ 3310 lpfc_nlp_set_state(vport, fc_ndlp, 3311 NLP_STE_REG_LOGIN_ISSUE); 3312 return 0; 3313 3314 out: 3315 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 3316 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3317 "0938 %s: failed to format reg_login " 3318 "Data: x%x x%x x%x x%x\n", __func__, 3319 fc_ndlp->nlp_DID, fc_ndlp->nlp_state, 3320 fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi); 3321 return rc; 3322 } 3323 3324 /** 3325 * lpfc_cmpl_els_disc_cmd - Completion callback function for Discovery ELS cmd 3326 * @phba: pointer to lpfc hba data structure. 3327 * @cmdiocb: pointer to lpfc command iocb data structure. 3328 * @rspiocb: pointer to lpfc response iocb data structure. 3329 * 3330 * This routine is a generic completion callback function for Discovery ELS cmd. 3331 * Currently used by the ELS command issuing routines for the ELS State Change 3332 * Request (SCR), lpfc_issue_els_scr() and the ELS RDF, lpfc_issue_els_rdf(). 3333 * These commands will be retried once only for ELS timeout errors. 3334 **/ 3335 static void 3336 lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3337 struct lpfc_iocbq *rspiocb) 3338 { 3339 struct lpfc_vport *vport = cmdiocb->vport; 3340 IOCB_t *irsp; 3341 struct lpfc_els_rdf_rsp *prdf; 3342 struct lpfc_dmabuf *pcmd, *prsp; 3343 u32 *pdata; 3344 u32 cmd; 3345 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 3346 u32 ulp_status, ulp_word4, tmo, did, iotag; 3347 3348 ulp_status = get_job_ulpstatus(phba, rspiocb); 3349 ulp_word4 = get_job_word4(phba, rspiocb); 3350 did = get_job_els_rsp64_did(phba, cmdiocb); 3351 3352 if (phba->sli_rev == LPFC_SLI_REV4) { 3353 tmo = get_wqe_tmo(cmdiocb); 3354 iotag = get_wqe_reqtag(cmdiocb); 3355 } else { 3356 irsp = &rspiocb->iocb; 3357 tmo = irsp->ulpTimeout; 3358 iotag = irsp->ulpIoTag; 3359 } 3360 3361 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3362 "ELS cmd cmpl: status:x%x/x%x did:x%x", 3363 ulp_status, ulp_word4, did); 3364 3365 /* ELS cmd tag <ulpIoTag> completes */ 3366 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3367 "0217 ELS cmd tag x%x completes Data: x%x x%x x%x x%x\n", 3368 iotag, ulp_status, ulp_word4, tmo, cmdiocb->retry); 3369 3370 pcmd = cmdiocb->cmd_dmabuf; 3371 if (!pcmd) 3372 goto out; 3373 3374 pdata = (u32 *)pcmd->virt; 3375 if (!pdata) 3376 goto out; 3377 cmd = *pdata; 3378 3379 /* Only 1 retry for ELS Timeout only */ 3380 if (ulp_status == IOSTAT_LOCAL_REJECT && 3381 ((ulp_word4 & IOERR_PARAM_MASK) == 3382 IOERR_SEQUENCE_TIMEOUT)) { 3383 cmdiocb->retry++; 3384 if (cmdiocb->retry <= 1) { 3385 switch (cmd) { 3386 case ELS_CMD_SCR: 3387 lpfc_issue_els_scr(vport, cmdiocb->retry); 3388 break; 3389 case ELS_CMD_EDC: 3390 lpfc_issue_els_edc(vport, cmdiocb->retry); 3391 break; 3392 case ELS_CMD_RDF: 3393 lpfc_issue_els_rdf(vport, cmdiocb->retry); 3394 break; 3395 } 3396 goto out; 3397 } 3398 phba->fc_stat.elsRetryExceeded++; 3399 } 3400 if (cmd == ELS_CMD_EDC) { 3401 /* must be called before checking uplStatus and returning */ 3402 lpfc_cmpl_els_edc(phba, cmdiocb, rspiocb); 3403 return; 3404 } 3405 if (ulp_status) { 3406 /* ELS discovery cmd completes with error */ 3407 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT, 3408 "4203 ELS cmd x%x error: x%x x%X\n", cmd, 3409 ulp_status, ulp_word4); 3410 goto out; 3411 } 3412 3413 /* The RDF response doesn't have any impact on the running driver 3414 * but the notification descriptors are dumped here for support. 
3415 */ 3416 if (cmd == ELS_CMD_RDF) { 3417 int i; 3418 3419 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 3420 if (!prsp) 3421 goto out; 3422 3423 prdf = (struct lpfc_els_rdf_rsp *)prsp->virt; 3424 if (!prdf) 3425 goto out; 3426 3427 for (i = 0; i < ELS_RDF_REG_TAG_CNT && 3428 i < be32_to_cpu(prdf->reg_d1.reg_desc.count); i++) 3429 lpfc_printf_vlog(vport, KERN_INFO, 3430 LOG_ELS | LOG_CGN_MGMT, 3431 "4677 Fabric RDF Notification Grant " 3432 "Data: 0x%08x Reg: %x %x\n", 3433 be32_to_cpu( 3434 prdf->reg_d1.desc_tags[i]), 3435 phba->cgn_reg_signal, 3436 phba->cgn_reg_fpin); 3437 } 3438 3439 out: 3440 /* Check to see if link went down during discovery */ 3441 lpfc_els_chk_latt(vport); 3442 lpfc_els_free_iocb(phba, cmdiocb); 3443 lpfc_nlp_put(ndlp); 3444 return; 3445 } 3446 3447 /** 3448 * lpfc_issue_els_scr - Issue a scr to an node on a vport 3449 * @vport: pointer to a host virtual N_Port data structure. 3450 * @retry: retry counter for the command IOCB. 3451 * 3452 * This routine issues a State Change Request (SCR) to a fabric node 3453 * on a @vport. The remote node is Fabric Controller (0xfffffd). It 3454 * first search the @vport node list to find the matching ndlp. If no such 3455 * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An 3456 * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb() 3457 * routine is invoked to send the SCR IOCB. 3458 * 3459 * Note that the ndlp reference count will be incremented by 1 for holding the 3460 * ndlp and the reference to ndlp will be stored into the ndlp field of 3461 * the IOCB for the completion callback function to the SCR ELS command. 3462 * 3463 * Return code 3464 * 0 - Successfully issued scr command 3465 * 1 - Failed to issue scr command 3466 **/ 3467 int 3468 lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry) 3469 { 3470 int rc = 0; 3471 struct lpfc_hba *phba = vport->phba; 3472 struct lpfc_iocbq *elsiocb; 3473 uint8_t *pcmd; 3474 uint16_t cmdsize; 3475 struct lpfc_nodelist *ndlp; 3476 3477 cmdsize = (sizeof(uint32_t) + sizeof(SCR)); 3478 3479 ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID); 3480 if (!ndlp) { 3481 ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID); 3482 if (!ndlp) 3483 return 1; 3484 lpfc_enqueue_node(vport, ndlp); 3485 } 3486 3487 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3488 ndlp->nlp_DID, ELS_CMD_SCR); 3489 if (!elsiocb) 3490 return 1; 3491 3492 if (phba->sli_rev == LPFC_SLI_REV4) { 3493 rc = lpfc_reg_fab_ctrl_node(vport, ndlp); 3494 if (rc) { 3495 lpfc_els_free_iocb(phba, elsiocb); 3496 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3497 "0937 %s: Failed to reg fc node, rc %d\n", 3498 __func__, rc); 3499 return 1; 3500 } 3501 } 3502 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 3503 3504 *((uint32_t *) (pcmd)) = ELS_CMD_SCR; 3505 pcmd += sizeof(uint32_t); 3506 3507 /* For SCR, remainder of payload is SCR parameter page */ 3508 memset(pcmd, 0, sizeof(SCR)); 3509 ((SCR *) pcmd)->Function = SCR_FUNC_FULL; 3510 3511 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3512 "Issue SCR: did:x%x", 3513 ndlp->nlp_DID, 0, 0); 3514 3515 phba->fc_stat.elsXmitSCR++; 3516 elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd; 3517 elsiocb->ndlp = lpfc_nlp_get(ndlp); 3518 if (!elsiocb->ndlp) { 3519 lpfc_els_free_iocb(phba, elsiocb); 3520 return 1; 3521 } 3522 3523 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3524 "Issue SCR: did:x%x refcnt %d", 3525 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3526 3527 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3528 if 
(rc == IOCB_ERROR) { 3529 lpfc_els_free_iocb(phba, elsiocb); 3530 lpfc_nlp_put(ndlp); 3531 return 1; 3532 } 3533 3534 return 0; 3535 } 3536 3537 /** 3538 * lpfc_issue_els_rscn - Issue an RSCN to the Fabric Controller (Fabric) 3539 * or the other nport (pt2pt). 3540 * @vport: pointer to a host virtual N_Port data structure. 3541 * @retry: number of retries to the command IOCB. 3542 * 3543 * This routine issues a RSCN to the Fabric Controller (DID 0xFFFFFD) 3544 * when connected to a fabric, or to the remote port when connected 3545 * in point-to-point mode. When sent to the Fabric Controller, it will 3546 * replay the RSCN to registered recipients. 3547 * 3548 * Note that the ndlp reference count will be incremented by 1 for holding the 3549 * ndlp and the reference to ndlp will be stored into the ndlp field of 3550 * the IOCB for the completion callback function to the RSCN ELS command. 3551 * 3552 * Return code 3553 * 0 - Successfully issued RSCN command 3554 * 1 - Failed to issue RSCN command 3555 **/ 3556 int 3557 lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry) 3558 { 3559 int rc = 0; 3560 struct lpfc_hba *phba = vport->phba; 3561 struct lpfc_iocbq *elsiocb; 3562 struct lpfc_nodelist *ndlp; 3563 struct { 3564 struct fc_els_rscn rscn; 3565 struct fc_els_rscn_page portid; 3566 } *event; 3567 uint32_t nportid; 3568 uint16_t cmdsize = sizeof(*event); 3569 3570 /* Not supported for private loop */ 3571 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP && 3572 !(vport->fc_flag & FC_PUBLIC_LOOP)) 3573 return 1; 3574 3575 if (vport->fc_flag & FC_PT2PT) { 3576 /* find any mapped nport - that would be the other nport */ 3577 ndlp = lpfc_findnode_mapped(vport); 3578 if (!ndlp) 3579 return 1; 3580 } else { 3581 nportid = FC_FID_FCTRL; 3582 /* find the fabric controller node */ 3583 ndlp = lpfc_findnode_did(vport, nportid); 3584 if (!ndlp) { 3585 /* if one didn't exist, make one */ 3586 ndlp = lpfc_nlp_init(vport, nportid); 3587 if (!ndlp) 3588 return 1; 3589 lpfc_enqueue_node(vport, ndlp); 3590 } 3591 } 3592 3593 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3594 ndlp->nlp_DID, ELS_CMD_RSCN_XMT); 3595 3596 if (!elsiocb) 3597 return 1; 3598 3599 event = elsiocb->cmd_dmabuf->virt; 3600 3601 event->rscn.rscn_cmd = ELS_RSCN; 3602 event->rscn.rscn_page_len = sizeof(struct fc_els_rscn_page); 3603 event->rscn.rscn_plen = cpu_to_be16(cmdsize); 3604 3605 nportid = vport->fc_myDID; 3606 /* appears that page flags must be 0 for fabric to broadcast RSCN */ 3607 event->portid.rscn_page_flags = 0; 3608 event->portid.rscn_fid[0] = (nportid & 0x00FF0000) >> 16; 3609 event->portid.rscn_fid[1] = (nportid & 0x0000FF00) >> 8; 3610 event->portid.rscn_fid[2] = nportid & 0x000000FF; 3611 3612 phba->fc_stat.elsXmitRSCN++; 3613 elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd; 3614 elsiocb->ndlp = lpfc_nlp_get(ndlp); 3615 if (!elsiocb->ndlp) { 3616 lpfc_els_free_iocb(phba, elsiocb); 3617 return 1; 3618 } 3619 3620 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3621 "Issue RSCN: did:x%x", 3622 ndlp->nlp_DID, 0, 0); 3623 3624 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3625 if (rc == IOCB_ERROR) { 3626 lpfc_els_free_iocb(phba, elsiocb); 3627 lpfc_nlp_put(ndlp); 3628 return 1; 3629 } 3630 3631 return 0; 3632 } 3633 3634 /** 3635 * lpfc_issue_els_farpr - Issue a farp to an node on a vport 3636 * @vport: pointer to a host virtual N_Port data structure. 3637 * @nportid: N_Port identifier to the remote node. 3638 * @retry: number of retries to the command IOCB. 
3639 * 3640 * This routine issues a Fibre Channel Address Resolution Response 3641 * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid) 3642 * is passed into the function. It first search the @vport node list to find 3643 * the matching ndlp. If no such ndlp is found, a new ndlp shall be created 3644 * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the 3645 * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command. 3646 * 3647 * Note that the ndlp reference count will be incremented by 1 for holding the 3648 * ndlp and the reference to ndlp will be stored into the ndlp field of 3649 * the IOCB for the completion callback function to the FARPR ELS command. 3650 * 3651 * Return code 3652 * 0 - Successfully issued farpr command 3653 * 1 - Failed to issue farpr command 3654 **/ 3655 static int 3656 lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) 3657 { 3658 int rc = 0; 3659 struct lpfc_hba *phba = vport->phba; 3660 struct lpfc_iocbq *elsiocb; 3661 FARP *fp; 3662 uint8_t *pcmd; 3663 uint32_t *lp; 3664 uint16_t cmdsize; 3665 struct lpfc_nodelist *ondlp; 3666 struct lpfc_nodelist *ndlp; 3667 3668 cmdsize = (sizeof(uint32_t) + sizeof(FARP)); 3669 3670 ndlp = lpfc_findnode_did(vport, nportid); 3671 if (!ndlp) { 3672 ndlp = lpfc_nlp_init(vport, nportid); 3673 if (!ndlp) 3674 return 1; 3675 lpfc_enqueue_node(vport, ndlp); 3676 } 3677 3678 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3679 ndlp->nlp_DID, ELS_CMD_FARPR); 3680 if (!elsiocb) 3681 return 1; 3682 3683 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 3684 3685 *((uint32_t *) (pcmd)) = ELS_CMD_FARPR; 3686 pcmd += sizeof(uint32_t); 3687 3688 /* Fill in FARPR payload */ 3689 fp = (FARP *) (pcmd); 3690 memset(fp, 0, sizeof(FARP)); 3691 lp = (uint32_t *) pcmd; 3692 *lp++ = be32_to_cpu(nportid); 3693 *lp++ = be32_to_cpu(vport->fc_myDID); 3694 fp->Rflags = 0; 3695 fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE); 3696 3697 memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name)); 3698 memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 3699 ondlp = lpfc_findnode_did(vport, nportid); 3700 if (ondlp) { 3701 memcpy(&fp->OportName, &ondlp->nlp_portname, 3702 sizeof(struct lpfc_name)); 3703 memcpy(&fp->OnodeName, &ondlp->nlp_nodename, 3704 sizeof(struct lpfc_name)); 3705 } 3706 3707 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3708 "Issue FARPR: did:x%x", 3709 ndlp->nlp_DID, 0, 0); 3710 3711 phba->fc_stat.elsXmitFARPR++; 3712 elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd; 3713 elsiocb->ndlp = lpfc_nlp_get(ndlp); 3714 if (!elsiocb->ndlp) { 3715 lpfc_els_free_iocb(phba, elsiocb); 3716 return 1; 3717 } 3718 3719 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3720 if (rc == IOCB_ERROR) { 3721 /* The additional lpfc_nlp_put will cause the following 3722 * lpfc_els_free_iocb routine to trigger the release of 3723 * the node. 3724 */ 3725 lpfc_els_free_iocb(phba, elsiocb); 3726 lpfc_nlp_put(ndlp); 3727 return 1; 3728 } 3729 /* This will cause the callback-function lpfc_cmpl_els_cmd to 3730 * trigger the release of the node. 3731 */ 3732 /* Don't release reference count as RDF is likely outstanding */ 3733 return 0; 3734 } 3735 3736 /** 3737 * lpfc_issue_els_rdf - Register for diagnostic functions from the fabric. 3738 * @vport: pointer to a host virtual N_Port data structure. 3739 * @retry: retry counter for the command IOCB. 
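 *         A single retry is attempted by lpfc_cmpl_els_disc_cmd() if the
 *         RDF fails with an ELS sequence timeout.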
3740 * 3741 * This routine issues an ELS RDF to the Fabric Controller to register 3742 * for diagnostic functions. 3743 * 3744 * Note that the ndlp reference count will be incremented by 1 for holding the 3745 * ndlp and the reference to ndlp will be stored into the ndlp field of 3746 * the IOCB for the completion callback function to the RDF ELS command. 3747 * 3748 * Return code 3749 * 0 - Successfully issued rdf command 3750 * 1 - Failed to issue rdf command 3751 **/ 3752 int 3753 lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry) 3754 { 3755 struct lpfc_hba *phba = vport->phba; 3756 struct lpfc_iocbq *elsiocb; 3757 struct lpfc_els_rdf_req *prdf; 3758 struct lpfc_nodelist *ndlp; 3759 uint16_t cmdsize; 3760 int rc; 3761 3762 cmdsize = sizeof(*prdf); 3763 3764 ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID); 3765 if (!ndlp) { 3766 ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID); 3767 if (!ndlp) 3768 return -ENODEV; 3769 lpfc_enqueue_node(vport, ndlp); 3770 } 3771 3772 /* RDF ELS is not required on an NPIV VN_Port. */ 3773 if (vport->port_type == LPFC_NPIV_PORT) 3774 return -EACCES; 3775 3776 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3777 ndlp->nlp_DID, ELS_CMD_RDF); 3778 if (!elsiocb) 3779 return -ENOMEM; 3780 3781 /* Configure the payload for the supported FPIN events. */ 3782 prdf = (struct lpfc_els_rdf_req *)elsiocb->cmd_dmabuf->virt; 3783 memset(prdf, 0, cmdsize); 3784 prdf->rdf.fpin_cmd = ELS_RDF; 3785 prdf->rdf.desc_len = cpu_to_be32(sizeof(struct lpfc_els_rdf_req) - 3786 sizeof(struct fc_els_rdf)); 3787 prdf->reg_d1.reg_desc.desc_tag = cpu_to_be32(ELS_DTAG_FPIN_REGISTER); 3788 prdf->reg_d1.reg_desc.desc_len = cpu_to_be32( 3789 FC_TLV_DESC_LENGTH_FROM_SZ(prdf->reg_d1)); 3790 prdf->reg_d1.reg_desc.count = cpu_to_be32(ELS_RDF_REG_TAG_CNT); 3791 prdf->reg_d1.desc_tags[0] = cpu_to_be32(ELS_DTAG_LNK_INTEGRITY); 3792 prdf->reg_d1.desc_tags[1] = cpu_to_be32(ELS_DTAG_DELIVERY); 3793 prdf->reg_d1.desc_tags[2] = cpu_to_be32(ELS_DTAG_PEER_CONGEST); 3794 prdf->reg_d1.desc_tags[3] = cpu_to_be32(ELS_DTAG_CONGESTION); 3795 3796 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3797 "6444 Xmit RDF to remote NPORT x%x Reg: %x %x\n", 3798 ndlp->nlp_DID, phba->cgn_reg_signal, 3799 phba->cgn_reg_fpin); 3800 3801 phba->cgn_fpin_frequency = LPFC_FPIN_INIT_FREQ; 3802 elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd; 3803 elsiocb->ndlp = lpfc_nlp_get(ndlp); 3804 if (!elsiocb->ndlp) { 3805 lpfc_els_free_iocb(phba, elsiocb); 3806 return -EIO; 3807 } 3808 3809 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3810 "Issue RDF: did:x%x refcnt %d", 3811 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3812 3813 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3814 if (rc == IOCB_ERROR) { 3815 lpfc_els_free_iocb(phba, elsiocb); 3816 lpfc_nlp_put(ndlp); 3817 return -EIO; 3818 } 3819 return 0; 3820 } 3821 3822 /** 3823 * lpfc_els_rcv_rdf - Receive RDF ELS request from the fabric. 3824 * @vport: pointer to a host virtual N_Port data structure. 3825 * @cmdiocb: pointer to lpfc command iocb data structure. 3826 * @ndlp: pointer to a node-list data structure. 3827 * 3828 * A received RDF implies a possible change to fabric supported diagnostic 3829 * functions. This routine sends LS_ACC and then has the Nx_Port issue a new 3830 * RDF request to reregister for supported diagnostic functions. 
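 * If either the LS_ACC or the new RDF cannot be issued, the failure is
 * logged and -EIO is returned.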
3831 * 3832 * Return code 3833 * 0 - Success 3834 * -EIO - Failed to process received RDF 3835 **/ 3836 static int 3837 lpfc_els_rcv_rdf(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 3838 struct lpfc_nodelist *ndlp) 3839 { 3840 /* Send LS_ACC */ 3841 if (lpfc_els_rsp_acc(vport, ELS_CMD_RDF, cmdiocb, ndlp, NULL)) { 3842 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3843 "1623 Failed to RDF_ACC from x%x for x%x\n", 3844 ndlp->nlp_DID, vport->fc_myDID); 3845 return -EIO; 3846 } 3847 3848 /* Issue new RDF for reregistering */ 3849 if (lpfc_issue_els_rdf(vport, 0)) { 3850 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3851 "2623 Failed to re register RDF for x%x\n", 3852 vport->fc_myDID); 3853 return -EIO; 3854 } 3855 3856 return 0; 3857 } 3858 3859 /** 3860 * lpfc_least_capable_settings - helper function for EDC rsp processing 3861 * @phba: pointer to lpfc hba data structure. 3862 * @pcgd: pointer to congestion detection descriptor in EDC rsp. 3863 * 3864 * This helper routine determines the least capable setting for 3865 * congestion signals, signal freq, including scale, from the 3866 * congestion detection descriptor in the EDC rsp. The routine 3867 * sets @phba values in preparation for a set_featues mailbox. 3868 **/ 3869 static void 3870 lpfc_least_capable_settings(struct lpfc_hba *phba, 3871 struct fc_diag_cg_sig_desc *pcgd) 3872 { 3873 u32 rsp_sig_cap = 0, drv_sig_cap = 0; 3874 u32 rsp_sig_freq_cyc = 0, rsp_sig_freq_scale = 0; 3875 3876 /* Get rsp signal and frequency capabilities. */ 3877 rsp_sig_cap = be32_to_cpu(pcgd->xmt_signal_capability); 3878 rsp_sig_freq_cyc = be16_to_cpu(pcgd->xmt_signal_frequency.count); 3879 rsp_sig_freq_scale = be16_to_cpu(pcgd->xmt_signal_frequency.units); 3880 3881 /* If the Fport does not support signals. Set FPIN only */ 3882 if (rsp_sig_cap == EDC_CG_SIG_NOTSUPPORTED) 3883 goto out_no_support; 3884 3885 /* Apply the xmt scale to the xmt cycle to get the correct frequency. 3886 * Adapter default is 100 millisSeconds. Convert all xmt cycle values 3887 * to milliSeconds. 3888 */ 3889 switch (rsp_sig_freq_scale) { 3890 case EDC_CG_SIGFREQ_SEC: 3891 rsp_sig_freq_cyc *= MSEC_PER_SEC; 3892 break; 3893 case EDC_CG_SIGFREQ_MSEC: 3894 rsp_sig_freq_cyc = 1; 3895 break; 3896 default: 3897 goto out_no_support; 3898 } 3899 3900 /* Convenient shorthand. */ 3901 drv_sig_cap = phba->cgn_reg_signal; 3902 3903 /* Choose the least capable frequency. */ 3904 if (rsp_sig_freq_cyc > phba->cgn_sig_freq) 3905 phba->cgn_sig_freq = rsp_sig_freq_cyc; 3906 3907 /* Should be some common signals support. Settle on least capable 3908 * signal and adjust FPIN values. Initialize defaults to ease the 3909 * decision. 
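 * Start from FPIN-only (no signal support) and then raise the registration
 * to the highest signal capability that both the driver and the Fport
 * support, clearing the matching FPIN bits as signals take over.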
3910 */ 3911 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM; 3912 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 3913 if (rsp_sig_cap == EDC_CG_SIG_WARN_ONLY && 3914 (drv_sig_cap == EDC_CG_SIG_WARN_ONLY || 3915 drv_sig_cap == EDC_CG_SIG_WARN_ALARM)) { 3916 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY; 3917 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN; 3918 } 3919 if (rsp_sig_cap == EDC_CG_SIG_WARN_ALARM) { 3920 if (drv_sig_cap == EDC_CG_SIG_WARN_ALARM) { 3921 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ALARM; 3922 phba->cgn_reg_fpin = LPFC_CGN_FPIN_NONE; 3923 } 3924 if (drv_sig_cap == EDC_CG_SIG_WARN_ONLY) { 3925 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY; 3926 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN; 3927 } 3928 } 3929 3930 /* We are NOT recording signal frequency in congestion info buffer */ 3931 return; 3932 3933 out_no_support: 3934 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 3935 phba->cgn_sig_freq = 0; 3936 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; 3937 } 3938 3939 DECLARE_ENUM2STR_LOOKUP(lpfc_get_tlv_dtag_nm, fc_ls_tlv_dtag, 3940 FC_LS_TLV_DTAG_INIT); 3941 3942 /** 3943 * lpfc_cmpl_els_edc - Completion callback function for EDC 3944 * @phba: pointer to lpfc hba data structure. 3945 * @cmdiocb: pointer to lpfc command iocb data structure. 3946 * @rspiocb: pointer to lpfc response iocb data structure. 3947 * 3948 * This routine is the completion callback function for issuing the Exchange 3949 * Diagnostic Capabilities (EDC) command. The driver issues an EDC to 3950 * notify the FPort of its Congestion and Link Fault capabilities. This 3951 * routine parses the FPort's response and decides on the least common 3952 * values applicable to both FPort and NPort for Warnings and Alarms that 3953 * are communicated via hardware signals. 3954 **/ 3955 static void 3956 lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3957 struct lpfc_iocbq *rspiocb) 3958 { 3959 IOCB_t *irsp_iocb; 3960 struct fc_els_edc_resp *edc_rsp; 3961 struct fc_tlv_desc *tlv; 3962 struct fc_diag_cg_sig_desc *pcgd; 3963 struct fc_diag_lnkflt_desc *plnkflt; 3964 struct lpfc_dmabuf *pcmd, *prsp; 3965 const char *dtag_nm; 3966 u32 *pdata, dtag; 3967 int desc_cnt = 0, bytes_remain; 3968 bool rcv_cap_desc = false; 3969 struct lpfc_nodelist *ndlp; 3970 u32 ulp_status, ulp_word4, tmo, did, iotag; 3971 3972 ndlp = cmdiocb->ndlp; 3973 3974 ulp_status = get_job_ulpstatus(phba, rspiocb); 3975 ulp_word4 = get_job_word4(phba, rspiocb); 3976 did = get_job_els_rsp64_did(phba, rspiocb); 3977 3978 if (phba->sli_rev == LPFC_SLI_REV4) { 3979 tmo = get_wqe_tmo(rspiocb); 3980 iotag = get_wqe_reqtag(rspiocb); 3981 } else { 3982 irsp_iocb = &rspiocb->iocb; 3983 tmo = irsp_iocb->ulpTimeout; 3984 iotag = irsp_iocb->ulpIoTag; 3985 } 3986 3987 lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD, 3988 "EDC cmpl: status:x%x/x%x did:x%x", 3989 ulp_status, ulp_word4, did); 3990 3991 /* ELS cmd tag <ulpIoTag> completes */ 3992 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3993 "4201 EDC cmd tag x%x completes Data: x%x x%x x%x\n", 3994 iotag, ulp_status, ulp_word4, tmo); 3995 3996 pcmd = cmdiocb->cmd_dmabuf; 3997 if (!pcmd) 3998 goto out; 3999 4000 pdata = (u32 *)pcmd->virt; 4001 if (!pdata) 4002 goto out; 4003 4004 /* Need to clear signal values, send features MB and RDF with FPIN. 
*/ 4005 if (ulp_status) 4006 goto out; 4007 4008 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 4009 if (!prsp) 4010 goto out; 4011 4012 edc_rsp = prsp->virt; 4013 if (!edc_rsp) 4014 goto out; 4015 4016 /* ELS cmd tag <ulpIoTag> completes */ 4017 lpfc_printf_log(phba, KERN_INFO, 4018 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, 4019 "4676 Fabric EDC Rsp: " 4020 "0x%02x, 0x%08x\n", 4021 edc_rsp->acc_hdr.la_cmd, 4022 be32_to_cpu(edc_rsp->desc_list_len)); 4023 4024 /* 4025 * Payload length in bytes is the response descriptor list 4026 * length minus the 12 bytes of Link Service Request 4027 * Information descriptor in the reply. 4028 */ 4029 bytes_remain = be32_to_cpu(edc_rsp->desc_list_len) - 4030 sizeof(struct fc_els_lsri_desc); 4031 if (bytes_remain <= 0) 4032 goto out; 4033 4034 tlv = edc_rsp->desc; 4035 4036 /* 4037 * cycle through EDC diagnostic descriptors to find the 4038 * congestion signaling capability descriptor 4039 */ 4040 while (bytes_remain) { 4041 if (bytes_remain < FC_TLV_DESC_HDR_SZ) { 4042 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 4043 "6461 Truncated TLV hdr on " 4044 "Diagnostic descriptor[%d]\n", 4045 desc_cnt); 4046 goto out; 4047 } 4048 4049 dtag = be32_to_cpu(tlv->desc_tag); 4050 switch (dtag) { 4051 case ELS_DTAG_LNK_FAULT_CAP: 4052 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 4053 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 4054 sizeof(struct fc_diag_lnkflt_desc)) { 4055 lpfc_printf_log(phba, KERN_WARNING, 4056 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, 4057 "6462 Truncated Link Fault Diagnostic " 4058 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 4059 desc_cnt, bytes_remain, 4060 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 4061 sizeof(struct fc_diag_lnkflt_desc)); 4062 goto out; 4063 } 4064 plnkflt = (struct fc_diag_lnkflt_desc *)tlv; 4065 lpfc_printf_log(phba, KERN_INFO, 4066 LOG_ELS | LOG_LDS_EVENT, 4067 "4617 Link Fault Desc Data: 0x%08x 0x%08x " 4068 "0x%08x 0x%08x 0x%08x\n", 4069 be32_to_cpu(plnkflt->desc_tag), 4070 be32_to_cpu(plnkflt->desc_len), 4071 be32_to_cpu( 4072 plnkflt->degrade_activate_threshold), 4073 be32_to_cpu( 4074 plnkflt->degrade_deactivate_threshold), 4075 be32_to_cpu(plnkflt->fec_degrade_interval)); 4076 break; 4077 case ELS_DTAG_CG_SIGNAL_CAP: 4078 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 4079 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 4080 sizeof(struct fc_diag_cg_sig_desc)) { 4081 lpfc_printf_log( 4082 phba, KERN_WARNING, LOG_CGN_MGMT, 4083 "6463 Truncated Cgn Signal Diagnostic " 4084 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 4085 desc_cnt, bytes_remain, 4086 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 4087 sizeof(struct fc_diag_cg_sig_desc)); 4088 goto out; 4089 } 4090 4091 pcgd = (struct fc_diag_cg_sig_desc *)tlv; 4092 lpfc_printf_log( 4093 phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 4094 "4616 CGN Desc Data: 0x%08x 0x%08x " 4095 "0x%08x 0x%04x 0x%04x 0x%08x 0x%04x 0x%04x\n", 4096 be32_to_cpu(pcgd->desc_tag), 4097 be32_to_cpu(pcgd->desc_len), 4098 be32_to_cpu(pcgd->xmt_signal_capability), 4099 be16_to_cpu(pcgd->xmt_signal_frequency.count), 4100 be16_to_cpu(pcgd->xmt_signal_frequency.units), 4101 be32_to_cpu(pcgd->rcv_signal_capability), 4102 be16_to_cpu(pcgd->rcv_signal_frequency.count), 4103 be16_to_cpu(pcgd->rcv_signal_frequency.units)); 4104 4105 /* Compare driver and Fport capabilities and choose 4106 * least common. 
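 * lpfc_least_capable_settings() updates cgn_reg_signal, cgn_reg_fpin and
 * cgn_sig_freq based on this descriptor.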
4107 */ 4108 lpfc_least_capable_settings(phba, pcgd); 4109 rcv_cap_desc = true; 4110 break; 4111 default: 4112 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 4113 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 4114 "4919 unknown Diagnostic " 4115 "Descriptor[%d]: tag x%x (%s)\n", 4116 desc_cnt, dtag, dtag_nm); 4117 } 4118 4119 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); 4120 tlv = fc_tlv_next_desc(tlv); 4121 desc_cnt++; 4122 } 4123 4124 out: 4125 if (!rcv_cap_desc) { 4126 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; 4127 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 4128 phba->cgn_sig_freq = 0; 4129 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT, 4130 "4202 EDC rsp error - sending RDF " 4131 "for FPIN only.\n"); 4132 } 4133 4134 lpfc_config_cgn_signal(phba); 4135 4136 /* Check to see if link went down during discovery */ 4137 lpfc_els_chk_latt(phba->pport); 4138 lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD, 4139 "EDC Cmpl: did:x%x refcnt %d", 4140 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 4141 lpfc_els_free_iocb(phba, cmdiocb); 4142 lpfc_nlp_put(ndlp); 4143 } 4144 4145 static void 4146 lpfc_format_edc_lft_desc(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 4147 { 4148 struct fc_diag_lnkflt_desc *lft = (struct fc_diag_lnkflt_desc *)tlv; 4149 4150 lft->desc_tag = cpu_to_be32(ELS_DTAG_LNK_FAULT_CAP); 4151 lft->desc_len = cpu_to_be32( 4152 FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_diag_lnkflt_desc)); 4153 4154 lft->degrade_activate_threshold = 4155 cpu_to_be32(phba->degrade_activate_threshold); 4156 lft->degrade_deactivate_threshold = 4157 cpu_to_be32(phba->degrade_deactivate_threshold); 4158 lft->fec_degrade_interval = cpu_to_be32(phba->fec_degrade_interval); 4159 } 4160 4161 static void 4162 lpfc_format_edc_cgn_desc(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 4163 { 4164 struct fc_diag_cg_sig_desc *cgd = (struct fc_diag_cg_sig_desc *)tlv; 4165 4166 /* We are assuming cgd was zero'ed before calling this routine */ 4167 4168 /* Configure the congestion detection capability */ 4169 cgd->desc_tag = cpu_to_be32(ELS_DTAG_CG_SIGNAL_CAP); 4170 4171 /* Descriptor len doesn't include the tag or len fields. */ 4172 cgd->desc_len = cpu_to_be32( 4173 FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_diag_cg_sig_desc)); 4174 4175 /* xmt_signal_capability already set to EDC_CG_SIG_NOTSUPPORTED. 4176 * xmt_signal_frequency.count already set to 0. 4177 * xmt_signal_frequency.units already set to 0. 4178 */ 4179 4180 if (phba->cmf_active_mode == LPFC_CFG_OFF) { 4181 /* rcv_signal_capability already set to EDC_CG_SIG_NOTSUPPORTED. 4182 * rcv_signal_frequency.count already set to 0. 4183 * rcv_signal_frequency.units already set to 0. 4184 */ 4185 phba->cgn_sig_freq = 0; 4186 return; 4187 } 4188 switch (phba->cgn_reg_signal) { 4189 case EDC_CG_SIG_WARN_ONLY: 4190 cgd->rcv_signal_capability = cpu_to_be32(EDC_CG_SIG_WARN_ONLY); 4191 break; 4192 case EDC_CG_SIG_WARN_ALARM: 4193 cgd->rcv_signal_capability = cpu_to_be32(EDC_CG_SIG_WARN_ALARM); 4194 break; 4195 default: 4196 /* rcv_signal_capability left 0 thus no support */ 4197 break; 4198 } 4199 4200 /* We start negotiation with lpfc_fabric_cgn_frequency, after 4201 * the completion we settle on the higher frequency. 
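 * (When the EDC response arrives, lpfc_least_capable_settings() keeps the
 * larger of the two interval values, i.e. the less frequent signal.)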
4202 */ 4203 cgd->rcv_signal_frequency.count = 4204 cpu_to_be16(lpfc_fabric_cgn_frequency); 4205 cgd->rcv_signal_frequency.units = 4206 cpu_to_be16(EDC_CG_SIGFREQ_MSEC); 4207 } 4208 4209 static bool 4210 lpfc_link_is_lds_capable(struct lpfc_hba *phba) 4211 { 4212 if (!(phba->lmt & LMT_64Gb)) 4213 return false; 4214 if (phba->sli_rev != LPFC_SLI_REV4) 4215 return false; 4216 4217 if (phba->sli4_hba.conf_trunk) { 4218 if (phba->trunk_link.phy_lnk_speed == LPFC_USER_LINK_SPEED_64G) 4219 return true; 4220 } else if (phba->fc_linkspeed == LPFC_LINK_SPEED_64GHZ) { 4221 return true; 4222 } 4223 return false; 4224 } 4225 4226 /** 4227 * lpfc_issue_els_edc - Exchange Diagnostic Capabilities with the fabric. 4228 * @vport: pointer to a host virtual N_Port data structure. 4229 * @retry: retry counter for the command iocb. 4230 * 4231 * This routine issues an ELS EDC to the F-Port Controller to communicate 4232 * this N_Port's support of hardware signals in its Congestion 4233 * Capabilities Descriptor. 4234 * 4235 * Note: This routine does not check if one or more signals are 4236 * set in the cgn_reg_signal parameter. The caller makes the 4237 * decision to enforce cgn_reg_signal as nonzero or zero depending 4238 * on the conditions. During Fabric requests, the driver 4239 * requires cgn_reg_signals to be nonzero. But a dynamic request 4240 * to set the congestion mode to OFF from Monitor or Manage 4241 * would correctly issue an EDC with no signals enabled to 4242 * turn off switch functionality and then update the FW. 4243 * 4244 * Return code 4245 * 0 - Successfully issued edc command 4246 * 1 - Failed to issue edc command 4247 **/ 4248 int 4249 lpfc_issue_els_edc(struct lpfc_vport *vport, uint8_t retry) 4250 { 4251 struct lpfc_hba *phba = vport->phba; 4252 struct lpfc_iocbq *elsiocb; 4253 struct fc_els_edc *edc_req; 4254 struct fc_tlv_desc *tlv; 4255 u16 cmdsize; 4256 struct lpfc_nodelist *ndlp; 4257 u8 *pcmd = NULL; 4258 u32 cgn_desc_size, lft_desc_size; 4259 int rc; 4260 4261 if (vport->port_type == LPFC_NPIV_PORT) 4262 return -EACCES; 4263 4264 ndlp = lpfc_findnode_did(vport, Fabric_DID); 4265 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 4266 return -ENODEV; 4267 4268 cgn_desc_size = (phba->cgn_init_reg_signal) ? 4269 sizeof(struct fc_diag_cg_sig_desc) : 0; 4270 lft_desc_size = (lpfc_link_is_lds_capable(phba)) ? 4271 sizeof(struct fc_diag_lnkflt_desc) : 0; 4272 cmdsize = cgn_desc_size + lft_desc_size; 4273 4274 /* Skip EDC if no applicable descriptors */ 4275 if (!cmdsize) 4276 goto try_rdf; 4277 4278 cmdsize += sizeof(struct fc_els_edc); 4279 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 4280 ndlp->nlp_DID, ELS_CMD_EDC); 4281 if (!elsiocb) 4282 goto try_rdf; 4283 4284 /* Configure the payload for the supported Diagnostics capabilities. 
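 * The congestion descriptor is only built when cgn_init_reg_signal is set,
 * and the link-fault descriptor only on LDS-capable links
 * (see lpfc_link_is_lds_capable()).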
*/ 4285 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt; 4286 memset(pcmd, 0, cmdsize); 4287 edc_req = (struct fc_els_edc *)pcmd; 4288 edc_req->desc_len = cpu_to_be32(cgn_desc_size + lft_desc_size); 4289 edc_req->edc_cmd = ELS_EDC; 4290 tlv = edc_req->desc; 4291 4292 if (cgn_desc_size) { 4293 lpfc_format_edc_cgn_desc(phba, tlv); 4294 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency; 4295 tlv = fc_tlv_next_desc(tlv); 4296 } 4297 4298 if (lft_desc_size) 4299 lpfc_format_edc_lft_desc(phba, tlv); 4300 4301 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 4302 "4623 Xmit EDC to remote " 4303 "NPORT x%x reg_sig x%x reg_fpin:x%x\n", 4304 ndlp->nlp_DID, phba->cgn_reg_signal, 4305 phba->cgn_reg_fpin); 4306 4307 elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd; 4308 elsiocb->ndlp = lpfc_nlp_get(ndlp); 4309 if (!elsiocb->ndlp) { 4310 lpfc_els_free_iocb(phba, elsiocb); 4311 return -EIO; 4312 } 4313 4314 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 4315 "Issue EDC: did:x%x refcnt %d", 4316 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 4317 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 4318 if (rc == IOCB_ERROR) { 4319 /* The additional lpfc_nlp_put will cause the following 4320 * lpfc_els_free_iocb routine to trigger the release of 4321 * the node. 4322 */ 4323 lpfc_els_free_iocb(phba, elsiocb); 4324 lpfc_nlp_put(ndlp); 4325 goto try_rdf; 4326 } 4327 return 0; 4328 try_rdf: 4329 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM; 4330 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 4331 rc = lpfc_issue_els_rdf(vport, 0); 4332 return rc; 4333 } 4334 4335 /** 4336 * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry 4337 * @vport: pointer to a host virtual N_Port data structure. 4338 * @nlp: pointer to a node-list data structure. 4339 * 4340 * This routine cancels the timer with a delayed IOCB-command retry for 4341 * a @vport's @nlp. It stops the timer for the delayed function retry and 4342 * removes the ELS retry event if one is pending. In addition, if the 4343 * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB 4344 * commands are sent for the @vport's nodes that require issuing discovery 4345 * ADISC.
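 * When no discovery nodes remain after lpfc_more_plogi(), FC_NDISC_ACTIVE
 * is cleared, the discovery timer is cancelled and any outstanding RSCN
 * processing is ended.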
4346 **/ 4347 void 4348 lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) 4349 { 4350 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4351 struct lpfc_work_evt *evtp; 4352 4353 if (!(nlp->nlp_flag & NLP_DELAY_TMO)) 4354 return; 4355 spin_lock_irq(&nlp->lock); 4356 nlp->nlp_flag &= ~NLP_DELAY_TMO; 4357 spin_unlock_irq(&nlp->lock); 4358 del_timer_sync(&nlp->nlp_delayfunc); 4359 nlp->nlp_last_elscmd = 0; 4360 if (!list_empty(&nlp->els_retry_evt.evt_listp)) { 4361 list_del_init(&nlp->els_retry_evt.evt_listp); 4362 /* Decrement nlp reference count held for the delayed retry */ 4363 evtp = &nlp->els_retry_evt; 4364 lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1); 4365 } 4366 if (nlp->nlp_flag & NLP_NPR_2B_DISC) { 4367 spin_lock_irq(&nlp->lock); 4368 nlp->nlp_flag &= ~NLP_NPR_2B_DISC; 4369 spin_unlock_irq(&nlp->lock); 4370 if (vport->num_disc_nodes) { 4371 if (vport->port_state < LPFC_VPORT_READY) { 4372 /* Check if there are more ADISCs to be sent */ 4373 lpfc_more_adisc(vport); 4374 } else { 4375 /* Check if there are more PLOGIs to be sent */ 4376 lpfc_more_plogi(vport); 4377 if (vport->num_disc_nodes == 0) { 4378 spin_lock_irq(shost->host_lock); 4379 vport->fc_flag &= ~FC_NDISC_ACTIVE; 4380 spin_unlock_irq(shost->host_lock); 4381 lpfc_can_disctmo(vport); 4382 lpfc_end_rscn(vport); 4383 } 4384 } 4385 } 4386 } 4387 return; 4388 } 4389 4390 /** 4391 * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer 4392 * @t: pointer to the timer function associated data (ndlp). 4393 * 4394 * This routine is invoked by the ndlp delayed-function timer to check 4395 * whether there is any pending ELS retry event(s) with the node. If not, it 4396 * simply returns. Otherwise, if there is at least one ELS delayed event, it 4397 * adds the delayed events to the HBA work list and invokes the 4398 * lpfc_worker_wake_up() routine to wake up worker thread to process the 4399 * event. Note that lpfc_nlp_get() is called before posting the event to 4400 * the work list to hold reference count of ndlp so that it guarantees the 4401 * reference to ndlp will still be available when the worker thread gets 4402 * to the event associated with the ndlp. 4403 **/ 4404 void 4405 lpfc_els_retry_delay(struct timer_list *t) 4406 { 4407 struct lpfc_nodelist *ndlp = from_timer(ndlp, t, nlp_delayfunc); 4408 struct lpfc_vport *vport = ndlp->vport; 4409 struct lpfc_hba *phba = vport->phba; 4410 unsigned long flags; 4411 struct lpfc_work_evt *evtp = &ndlp->els_retry_evt; 4412 4413 spin_lock_irqsave(&phba->hbalock, flags); 4414 if (!list_empty(&evtp->evt_listp)) { 4415 spin_unlock_irqrestore(&phba->hbalock, flags); 4416 return; 4417 } 4418 4419 /* We need to hold the node by incrementing the reference 4420 * count until the queued work is done 4421 */ 4422 evtp->evt_arg1 = lpfc_nlp_get(ndlp); 4423 if (evtp->evt_arg1) { 4424 evtp->evt = LPFC_EVT_ELS_RETRY; 4425 list_add_tail(&evtp->evt_listp, &phba->work_list); 4426 lpfc_worker_wake_up(phba); 4427 } 4428 spin_unlock_irqrestore(&phba->hbalock, flags); 4429 return; 4430 } 4431 4432 /** 4433 * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function 4434 * @ndlp: pointer to a node-list data structure. 4435 * 4436 * This routine is the worker-thread handler for processing the @ndlp delayed 4437 * event(s), posted by the lpfc_els_retry_delay() routine. 
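 * Before reissuing, NLP_DELAY_TMO is cleared and the nlp_delayfunc timer is
 * cancelled in case a discovery event re-armed it.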
It simply retrieves 4438 * the last ELS command from the associated ndlp and invokes the proper ELS 4439 * function according to the delayed ELS command to retry the command. 4440 **/ 4441 void 4442 lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) 4443 { 4444 struct lpfc_vport *vport = ndlp->vport; 4445 uint32_t cmd, retry; 4446 4447 spin_lock_irq(&ndlp->lock); 4448 cmd = ndlp->nlp_last_elscmd; 4449 ndlp->nlp_last_elscmd = 0; 4450 4451 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { 4452 spin_unlock_irq(&ndlp->lock); 4453 return; 4454 } 4455 4456 ndlp->nlp_flag &= ~NLP_DELAY_TMO; 4457 spin_unlock_irq(&ndlp->lock); 4458 /* 4459 * If a discovery event readded nlp_delayfunc after timer 4460 * firing and before processing the timer, cancel the 4461 * nlp_delayfunc. 4462 */ 4463 del_timer_sync(&ndlp->nlp_delayfunc); 4464 retry = ndlp->nlp_retry; 4465 ndlp->nlp_retry = 0; 4466 4467 switch (cmd) { 4468 case ELS_CMD_FLOGI: 4469 lpfc_issue_els_flogi(vport, ndlp, retry); 4470 break; 4471 case ELS_CMD_PLOGI: 4472 if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) { 4473 ndlp->nlp_prev_state = ndlp->nlp_state; 4474 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 4475 } 4476 break; 4477 case ELS_CMD_ADISC: 4478 if (!lpfc_issue_els_adisc(vport, ndlp, retry)) { 4479 ndlp->nlp_prev_state = ndlp->nlp_state; 4480 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 4481 } 4482 break; 4483 case ELS_CMD_PRLI: 4484 case ELS_CMD_NVMEPRLI: 4485 if (!lpfc_issue_els_prli(vport, ndlp, retry)) { 4486 ndlp->nlp_prev_state = ndlp->nlp_state; 4487 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 4488 } 4489 break; 4490 case ELS_CMD_LOGO: 4491 if (!lpfc_issue_els_logo(vport, ndlp, retry)) { 4492 ndlp->nlp_prev_state = ndlp->nlp_state; 4493 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 4494 } 4495 break; 4496 case ELS_CMD_FDISC: 4497 if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)) 4498 lpfc_issue_els_fdisc(vport, ndlp, retry); 4499 break; 4500 } 4501 return; 4502 } 4503 4504 /** 4505 * lpfc_link_reset - Issue link reset 4506 * @vport: pointer to a virtual N_Port data structure. 4507 * 4508 * This routine performs link reset by sending INIT_LINK mailbox command. 4509 * For SLI-3 adapter, link attention interrupt is enabled before issuing 4510 * INIT_LINK mailbox command. 
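 * The INIT_LINK mailbox is issued MBX_NOWAIT and completes through
 * lpfc_sli_def_mbox_cmpl(), so no completion handling is required from
 * the caller.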
4511 * 4512 * Return code 4513 * 0 - Link reset initiated successfully 4514 * 1 - Failed to initiate link reset 4515 **/ 4516 int 4517 lpfc_link_reset(struct lpfc_vport *vport) 4518 { 4519 struct lpfc_hba *phba = vport->phba; 4520 LPFC_MBOXQ_t *mbox; 4521 uint32_t control; 4522 int rc; 4523 4524 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 4525 "2851 Attempt link reset\n"); 4526 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4527 if (!mbox) { 4528 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4529 "2852 Failed to allocate mbox memory"); 4530 return 1; 4531 } 4532 4533 /* Enable Link attention interrupts */ 4534 if (phba->sli_rev <= LPFC_SLI_REV3) { 4535 spin_lock_irq(&phba->hbalock); 4536 phba->sli.sli_flag |= LPFC_PROCESS_LA; 4537 control = readl(phba->HCregaddr); 4538 control |= HC_LAINT_ENA; 4539 writel(control, phba->HCregaddr); 4540 readl(phba->HCregaddr); /* flush */ 4541 spin_unlock_irq(&phba->hbalock); 4542 } 4543 4544 lpfc_init_link(phba, mbox, phba->cfg_topology, 4545 phba->cfg_link_speed); 4546 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4547 mbox->vport = vport; 4548 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 4549 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 4550 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4551 "2853 Failed to issue INIT_LINK " 4552 "mbox command, rc:x%x\n", rc); 4553 mempool_free(mbox, phba->mbox_mem_pool); 4554 return 1; 4555 } 4556 4557 return 0; 4558 } 4559 4560 /** 4561 * lpfc_els_retry - Make retry decision on an els command iocb 4562 * @phba: pointer to lpfc hba data structure. 4563 * @cmdiocb: pointer to lpfc command iocb data structure. 4564 * @rspiocb: pointer to lpfc response iocb data structure. 4565 * 4566 * This routine makes a retry decision on an ELS command IOCB, which has 4567 * failed. The following ELS IOCBs use this function for retrying the command 4568 * when the previously issued command responded with an error status: FLOGI, PLOGI, 4569 * PRLI, ADISC and FDISC. Based on the ELS command type and the 4570 * returned error status, it makes the decision whether a retry shall be 4571 * issued for the command, and whether a retry shall be made immediately or 4572 * delayed. In the former case, the corresponding ELS command issuing-function 4573 * is called to retry the command. In the latter case, the ELS command shall 4574 * be posted to the ndlp delayed event and the delayed function timer set on the 4575 * ndlp for the delayed command issuing. 4576 * 4577 * Return code 4578 * 0 - No retry of els command is made 4579 * 1 - Immediate or delayed retry of els command is made 4580 **/ 4581 static int 4582 lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 4583 struct lpfc_iocbq *rspiocb) 4584 { 4585 struct lpfc_vport *vport = cmdiocb->vport; 4586 union lpfc_wqe128 *irsp = &rspiocb->wqe; 4587 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 4588 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf; 4589 uint32_t *elscmd; 4590 struct ls_rjt stat; 4591 int retry = 0, maxretry = lpfc_max_els_tries, delay = 0; 4592 int logerr = 0; 4593 uint32_t cmd = 0; 4594 uint32_t did; 4595 int link_reset = 0, rc; 4596 u32 ulp_status = get_job_ulpstatus(phba, rspiocb); 4597 u32 ulp_word4 = get_job_word4(phba, rspiocb); 4598 4599 4600 /* Note: cmd_dmabuf may be 0 for internal driver abort 4601 * of a delayed ELS command.
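 * In that case cmd remains 0 and the retry decision is based purely on
 * the completion status.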
4602 */ 4603 4604 if (pcmd && pcmd->virt) { 4605 elscmd = (uint32_t *) (pcmd->virt); 4606 cmd = *elscmd++; 4607 } 4608 4609 if (ndlp) 4610 did = ndlp->nlp_DID; 4611 else { 4612 /* We should only hit this case for retrying PLOGI */ 4613 did = get_job_els_rsp64_did(phba, rspiocb); 4614 ndlp = lpfc_findnode_did(vport, did); 4615 if (!ndlp && (cmd != ELS_CMD_PLOGI)) 4616 return 0; 4617 } 4618 4619 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 4620 "Retry ELS: wd7:x%x wd4:x%x did:x%x", 4621 *(((uint32_t *)irsp) + 7), ulp_word4, did); 4622 4623 switch (ulp_status) { 4624 case IOSTAT_FCP_RSP_ERROR: 4625 break; 4626 case IOSTAT_REMOTE_STOP: 4627 if (phba->sli_rev == LPFC_SLI_REV4) { 4628 /* This IO was aborted by the target, we don't 4629 * know the rxid and because we did not send the 4630 * ABTS we cannot generate and RRQ. 4631 */ 4632 lpfc_set_rrq_active(phba, ndlp, 4633 cmdiocb->sli4_lxritag, 0, 0); 4634 } 4635 break; 4636 case IOSTAT_LOCAL_REJECT: 4637 switch ((ulp_word4 & IOERR_PARAM_MASK)) { 4638 case IOERR_LOOP_OPEN_FAILURE: 4639 if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0) 4640 delay = 1000; 4641 retry = 1; 4642 break; 4643 4644 case IOERR_ILLEGAL_COMMAND: 4645 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4646 "0124 Retry illegal cmd x%x " 4647 "retry:x%x delay:x%x\n", 4648 cmd, cmdiocb->retry, delay); 4649 retry = 1; 4650 /* All command's retry policy */ 4651 maxretry = 8; 4652 if (cmdiocb->retry > 2) 4653 delay = 1000; 4654 break; 4655 4656 case IOERR_NO_RESOURCES: 4657 logerr = 1; /* HBA out of resources */ 4658 retry = 1; 4659 if (cmdiocb->retry > 100) 4660 delay = 100; 4661 maxretry = 250; 4662 break; 4663 4664 case IOERR_ILLEGAL_FRAME: 4665 delay = 100; 4666 retry = 1; 4667 break; 4668 4669 case IOERR_INVALID_RPI: 4670 if (cmd == ELS_CMD_PLOGI && 4671 did == NameServer_DID) { 4672 /* Continue forever if plogi to */ 4673 /* the nameserver fails */ 4674 maxretry = 0; 4675 delay = 100; 4676 } 4677 retry = 1; 4678 break; 4679 4680 case IOERR_SEQUENCE_TIMEOUT: 4681 if (cmd == ELS_CMD_PLOGI && 4682 did == NameServer_DID && 4683 (cmdiocb->retry + 1) == maxretry) { 4684 /* Reset the Link */ 4685 link_reset = 1; 4686 break; 4687 } 4688 retry = 1; 4689 delay = 100; 4690 break; 4691 case IOERR_SLI_ABORTED: 4692 /* Retry ELS PLOGI command? 4693 * Possibly the rport just wasn't ready. 
4694 */ 4695 if (cmd == ELS_CMD_PLOGI) { 4696 /* No retry if state change */ 4697 if (ndlp && 4698 ndlp->nlp_state != NLP_STE_PLOGI_ISSUE) 4699 goto out_retry; 4700 retry = 1; 4701 maxretry = 2; 4702 } 4703 break; 4704 } 4705 break; 4706 4707 case IOSTAT_NPORT_RJT: 4708 case IOSTAT_FABRIC_RJT: 4709 if (ulp_word4 & RJT_UNAVAIL_TEMP) { 4710 retry = 1; 4711 break; 4712 } 4713 break; 4714 4715 case IOSTAT_NPORT_BSY: 4716 case IOSTAT_FABRIC_BSY: 4717 logerr = 1; /* Fabric / Remote NPort out of resources */ 4718 retry = 1; 4719 break; 4720 4721 case IOSTAT_LS_RJT: 4722 stat.un.ls_rjt_error_be = cpu_to_be32(ulp_word4); 4723 /* Added for Vendor specifc support 4724 * Just keep retrying for these Rsn / Exp codes 4725 */ 4726 if ((vport->fc_flag & FC_PT2PT) && 4727 cmd == ELS_CMD_NVMEPRLI) { 4728 switch (stat.un.b.lsRjtRsnCode) { 4729 case LSRJT_UNABLE_TPC: 4730 case LSRJT_INVALID_CMD: 4731 case LSRJT_LOGICAL_ERR: 4732 case LSRJT_CMD_UNSUPPORTED: 4733 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 4734 "0168 NVME PRLI LS_RJT " 4735 "reason %x port doesn't " 4736 "support NVME, disabling NVME\n", 4737 stat.un.b.lsRjtRsnCode); 4738 retry = 0; 4739 vport->fc_flag |= FC_PT2PT_NO_NVME; 4740 goto out_retry; 4741 } 4742 } 4743 switch (stat.un.b.lsRjtRsnCode) { 4744 case LSRJT_UNABLE_TPC: 4745 /* Special case for PRLI LS_RJTs. Recall that lpfc 4746 * uses a single routine to issue both PRLI FC4 types. 4747 * If the PRLI is rejected because that FC4 type 4748 * isn't really supported, don't retry and cause 4749 * multiple transport registrations. Otherwise, parse 4750 * the reason code/reason code explanation and take the 4751 * appropriate action. 4752 */ 4753 lpfc_printf_vlog(vport, KERN_INFO, 4754 LOG_DISCOVERY | LOG_ELS | LOG_NODE, 4755 "0153 ELS cmd x%x LS_RJT by x%x. " 4756 "RsnCode x%x RsnCodeExp x%x\n", 4757 cmd, did, stat.un.b.lsRjtRsnCode, 4758 stat.un.b.lsRjtRsnCodeExp); 4759 4760 switch (stat.un.b.lsRjtRsnCodeExp) { 4761 case LSEXP_CANT_GIVE_DATA: 4762 case LSEXP_CMD_IN_PROGRESS: 4763 if (cmd == ELS_CMD_PLOGI) { 4764 delay = 1000; 4765 maxretry = 48; 4766 } 4767 retry = 1; 4768 break; 4769 case LSEXP_REQ_UNSUPPORTED: 4770 case LSEXP_NO_RSRC_ASSIGN: 4771 /* These explanation codes get no retry. */ 4772 if (cmd == ELS_CMD_PRLI || 4773 cmd == ELS_CMD_NVMEPRLI) 4774 break; 4775 fallthrough; 4776 default: 4777 /* Limit the delay and retry action to a limited 4778 * cmd set. There are other ELS commands where 4779 * a retry is not expected. 4780 */ 4781 if (cmd == ELS_CMD_PLOGI || 4782 cmd == ELS_CMD_PRLI || 4783 cmd == ELS_CMD_NVMEPRLI) { 4784 delay = 1000; 4785 maxretry = lpfc_max_els_tries + 1; 4786 retry = 1; 4787 } 4788 break; 4789 } 4790 4791 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 4792 (cmd == ELS_CMD_FDISC) && 4793 (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){ 4794 lpfc_printf_vlog(vport, KERN_ERR, 4795 LOG_TRACE_EVENT, 4796 "0125 FDISC Failed (x%x). 
" 4797 "Fabric out of resources\n", 4798 stat.un.lsRjtError); 4799 lpfc_vport_set_state(vport, 4800 FC_VPORT_NO_FABRIC_RSCS); 4801 } 4802 break; 4803 4804 case LSRJT_LOGICAL_BSY: 4805 if ((cmd == ELS_CMD_PLOGI) || 4806 (cmd == ELS_CMD_PRLI) || 4807 (cmd == ELS_CMD_NVMEPRLI)) { 4808 delay = 1000; 4809 maxretry = 48; 4810 } else if (cmd == ELS_CMD_FDISC) { 4811 /* FDISC retry policy */ 4812 maxretry = 48; 4813 if (cmdiocb->retry >= 32) 4814 delay = 1000; 4815 } 4816 retry = 1; 4817 break; 4818 4819 case LSRJT_LOGICAL_ERR: 4820 /* There are some cases where switches return this 4821 * error when they are not ready and should be returning 4822 * Logical Busy. We should delay every time. 4823 */ 4824 if (cmd == ELS_CMD_FDISC && 4825 stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) { 4826 maxretry = 3; 4827 delay = 1000; 4828 retry = 1; 4829 } else if (cmd == ELS_CMD_FLOGI && 4830 stat.un.b.lsRjtRsnCodeExp == 4831 LSEXP_NOTHING_MORE) { 4832 vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf; 4833 retry = 1; 4834 lpfc_printf_vlog(vport, KERN_ERR, 4835 LOG_TRACE_EVENT, 4836 "0820 FLOGI Failed (x%x). " 4837 "BBCredit Not Supported\n", 4838 stat.un.lsRjtError); 4839 } 4840 break; 4841 4842 case LSRJT_PROTOCOL_ERR: 4843 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 4844 (cmd == ELS_CMD_FDISC) && 4845 ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) || 4846 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID)) 4847 ) { 4848 lpfc_printf_vlog(vport, KERN_ERR, 4849 LOG_TRACE_EVENT, 4850 "0122 FDISC Failed (x%x). " 4851 "Fabric Detected Bad WWN\n", 4852 stat.un.lsRjtError); 4853 lpfc_vport_set_state(vport, 4854 FC_VPORT_FABRIC_REJ_WWN); 4855 } 4856 break; 4857 case LSRJT_VENDOR_UNIQUE: 4858 if ((stat.un.b.vendorUnique == 0x45) && 4859 (cmd == ELS_CMD_FLOGI)) { 4860 goto out_retry; 4861 } 4862 break; 4863 case LSRJT_CMD_UNSUPPORTED: 4864 /* lpfc nvmet returns this type of LS_RJT when it 4865 * receives an FCP PRLI because lpfc nvmet only 4866 * support NVME. ELS request is terminated for FCP4 4867 * on this rport. 4868 */ 4869 if (stat.un.b.lsRjtRsnCodeExp == 4870 LSEXP_REQ_UNSUPPORTED) { 4871 if (cmd == ELS_CMD_PRLI) 4872 goto out_retry; 4873 } 4874 break; 4875 } 4876 break; 4877 4878 case IOSTAT_INTERMED_RSP: 4879 case IOSTAT_BA_RJT: 4880 break; 4881 4882 default: 4883 break; 4884 } 4885 4886 if (link_reset) { 4887 rc = lpfc_link_reset(vport); 4888 if (rc) { 4889 /* Do not give up. Retry PLOGI one more time and attempt 4890 * link reset if PLOGI fails again. 
4891 */ 4892 retry = 1; 4893 delay = 100; 4894 goto out_retry; 4895 } 4896 return 1; 4897 } 4898 4899 if (did == FDMI_DID) 4900 retry = 1; 4901 4902 if ((cmd == ELS_CMD_FLOGI) && 4903 (phba->fc_topology != LPFC_TOPOLOGY_LOOP) && 4904 !lpfc_error_lost_link(ulp_status, ulp_word4)) { 4905 /* FLOGI retry policy */ 4906 retry = 1; 4907 /* retry FLOGI forever */ 4908 if (phba->link_flag != LS_LOOPBACK_MODE) 4909 maxretry = 0; 4910 else 4911 maxretry = 2; 4912 4913 if (cmdiocb->retry >= 100) 4914 delay = 5000; 4915 else if (cmdiocb->retry >= 32) 4916 delay = 1000; 4917 } else if ((cmd == ELS_CMD_FDISC) && 4918 !lpfc_error_lost_link(ulp_status, ulp_word4)) { 4919 /* retry FDISCs every second up to devloss */ 4920 retry = 1; 4921 maxretry = vport->cfg_devloss_tmo; 4922 delay = 1000; 4923 } 4924 4925 cmdiocb->retry++; 4926 if (maxretry && (cmdiocb->retry >= maxretry)) { 4927 phba->fc_stat.elsRetryExceeded++; 4928 retry = 0; 4929 } 4930 4931 if ((vport->load_flag & FC_UNLOADING) != 0) 4932 retry = 0; 4933 4934 out_retry: 4935 if (retry) { 4936 if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) { 4937 /* Stop retrying PLOGI and FDISC if in FCF discovery */ 4938 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 4939 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4940 "2849 Stop retry ELS command " 4941 "x%x to remote NPORT x%x, " 4942 "Data: x%x x%x\n", cmd, did, 4943 cmdiocb->retry, delay); 4944 return 0; 4945 } 4946 } 4947 4948 /* Retry ELS command <elsCmd> to remote NPORT <did> */ 4949 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4950 "0107 Retry ELS command x%x to remote " 4951 "NPORT x%x Data: x%x x%x\n", 4952 cmd, did, cmdiocb->retry, delay); 4953 4954 if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) && 4955 ((ulp_status != IOSTAT_LOCAL_REJECT) || 4956 ((ulp_word4 & IOERR_PARAM_MASK) != 4957 IOERR_NO_RESOURCES))) { 4958 /* Don't reset timer for no resources */ 4959 4960 /* If discovery / RSCN timer is running, reset it */ 4961 if (timer_pending(&vport->fc_disctmo) || 4962 (vport->fc_flag & FC_RSCN_MODE)) 4963 lpfc_set_disctmo(vport); 4964 } 4965 4966 phba->fc_stat.elsXmitRetry++; 4967 if (ndlp && delay) { 4968 phba->fc_stat.elsDelayRetry++; 4969 ndlp->nlp_retry = cmdiocb->retry; 4970 4971 /* delay is specified in milliseconds */ 4972 mod_timer(&ndlp->nlp_delayfunc, 4973 jiffies + msecs_to_jiffies(delay)); 4974 spin_lock_irq(&ndlp->lock); 4975 ndlp->nlp_flag |= NLP_DELAY_TMO; 4976 spin_unlock_irq(&ndlp->lock); 4977 4978 ndlp->nlp_prev_state = ndlp->nlp_state; 4979 if ((cmd == ELS_CMD_PRLI) || 4980 (cmd == ELS_CMD_NVMEPRLI)) 4981 lpfc_nlp_set_state(vport, ndlp, 4982 NLP_STE_PRLI_ISSUE); 4983 else if (cmd != ELS_CMD_ADISC) 4984 lpfc_nlp_set_state(vport, ndlp, 4985 NLP_STE_NPR_NODE); 4986 ndlp->nlp_last_elscmd = cmd; 4987 4988 return 1; 4989 } 4990 switch (cmd) { 4991 case ELS_CMD_FLOGI: 4992 lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry); 4993 return 1; 4994 case ELS_CMD_FDISC: 4995 lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry); 4996 return 1; 4997 case ELS_CMD_PLOGI: 4998 if (ndlp) { 4999 ndlp->nlp_prev_state = ndlp->nlp_state; 5000 lpfc_nlp_set_state(vport, ndlp, 5001 NLP_STE_PLOGI_ISSUE); 5002 } 5003 lpfc_issue_els_plogi(vport, did, cmdiocb->retry); 5004 return 1; 5005 case ELS_CMD_ADISC: 5006 ndlp->nlp_prev_state = ndlp->nlp_state; 5007 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 5008 lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry); 5009 return 1; 5010 case ELS_CMD_PRLI: 5011 case ELS_CMD_NVMEPRLI: 5012 ndlp->nlp_prev_state = ndlp->nlp_state; 5013 lpfc_nlp_set_state(vport, ndlp, 
NLP_STE_PRLI_ISSUE); 5014 lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry); 5015 return 1; 5016 case ELS_CMD_LOGO: 5017 ndlp->nlp_prev_state = ndlp->nlp_state; 5018 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 5019 lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry); 5020 return 1; 5021 } 5022 } 5023 /* No retry ELS command <elsCmd> to remote NPORT <did> */ 5024 if (logerr) { 5025 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5026 "0137 No retry ELS command x%x to remote " 5027 "NPORT x%x: Out of Resources: Error:x%x/%x\n", 5028 cmd, did, ulp_status, 5029 ulp_word4); 5030 } 5031 else { 5032 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5033 "0108 No retry ELS command x%x to remote " 5034 "NPORT x%x Retried:%d Error:x%x/%x\n", 5035 cmd, did, cmdiocb->retry, ulp_status, 5036 ulp_word4); 5037 } 5038 return 0; 5039 } 5040 5041 /** 5042 * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb 5043 * @phba: pointer to lpfc hba data structure. 5044 * @buf_ptr1: pointer to the lpfc DMA buffer data structure. 5045 * 5046 * This routine releases the lpfc DMA (Direct Memory Access) buffer(s) 5047 * associated with a command IOCB back to the lpfc DMA buffer pool. It first 5048 * checks to see whether there is a lpfc DMA buffer associated with the 5049 * response of the command IOCB. If so, it will be released before releasing 5050 * the lpfc DMA buffer associated with the IOCB itself. 5051 * 5052 * Return code 5053 * 0 - Successfully released lpfc DMA buffer (currently, always return 0) 5054 **/ 5055 static int 5056 lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1) 5057 { 5058 struct lpfc_dmabuf *buf_ptr; 5059 5060 /* Free the response before processing the command. */ 5061 if (!list_empty(&buf_ptr1->list)) { 5062 list_remove_head(&buf_ptr1->list, buf_ptr, 5063 struct lpfc_dmabuf, 5064 list); 5065 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 5066 kfree(buf_ptr); 5067 } 5068 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys); 5069 kfree(buf_ptr1); 5070 return 0; 5071 } 5072 5073 /** 5074 * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl 5075 * @phba: pointer to lpfc hba data structure. 5076 * @buf_ptr: pointer to the lpfc dma buffer data structure. 5077 * 5078 * This routine releases the lpfc Direct Memory Access (DMA) buffer 5079 * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer 5080 * pool. 5081 * 5082 * Return code 5083 * 0 - Successfully released lpfc DMA buffer (currently, always return 0) 5084 **/ 5085 static int 5086 lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr) 5087 { 5088 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 5089 kfree(buf_ptr); 5090 return 0; 5091 } 5092 5093 /** 5094 * lpfc_els_free_iocb - Free a command iocb and its associated resources 5095 * @phba: pointer to lpfc hba data structure. 5096 * @elsiocb: pointer to lpfc els command iocb data structure. 5097 * 5098 * This routine frees a command IOCB and its associated resources. The 5099 * command IOCB data structure contains the reference to various associated 5100 * resources, these fields must be set to NULL if the associated reference 5101 * not present: 5102 * cmd_dmabuf - reference to cmd. 5103 * cmd_dmabuf->next - reference to rsp 5104 * rsp_dmabuf - unused 5105 * bpl_dmabuf - reference to bpl 5106 * 5107 * It first properly decrements the reference count held on ndlp for the 5108 * IOCB completion callback function. 
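 * (In the current code elsiocb->ndlp is simply cleared here; callers drop
 * their own ndlp reference with lpfc_nlp_put() as needed.)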
If LPFC_DELAY_MEM_FREE flag is not 5109 * set, it invokes the lpfc_els_free_data() routine to release the Direct 5110 * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it 5111 * adds the DMA buffer the @phba data structure for the delayed release. 5112 * If reference to the Buffer Pointer List (BPL) is present, the 5113 * lpfc_els_free_bpl() routine is invoked to release the DMA memory 5114 * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is 5115 * invoked to release the IOCB data structure back to @phba IOCBQ list. 5116 * 5117 * Return code 5118 * 0 - Success (currently, always return 0) 5119 **/ 5120 int 5121 lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb) 5122 { 5123 struct lpfc_dmabuf *buf_ptr, *buf_ptr1; 5124 5125 /* The I/O iocb is complete. Clear the node and first dmbuf */ 5126 elsiocb->ndlp = NULL; 5127 5128 /* cmd_dmabuf = cmd, cmd_dmabuf->next = rsp, bpl_dmabuf = bpl */ 5129 if (elsiocb->cmd_dmabuf) { 5130 if (elsiocb->cmd_flag & LPFC_DELAY_MEM_FREE) { 5131 /* Firmware could still be in progress of DMAing 5132 * payload, so don't free data buffer till after 5133 * a hbeat. 5134 */ 5135 elsiocb->cmd_flag &= ~LPFC_DELAY_MEM_FREE; 5136 buf_ptr = elsiocb->cmd_dmabuf; 5137 elsiocb->cmd_dmabuf = NULL; 5138 if (buf_ptr) { 5139 buf_ptr1 = NULL; 5140 spin_lock_irq(&phba->hbalock); 5141 if (!list_empty(&buf_ptr->list)) { 5142 list_remove_head(&buf_ptr->list, 5143 buf_ptr1, struct lpfc_dmabuf, 5144 list); 5145 INIT_LIST_HEAD(&buf_ptr1->list); 5146 list_add_tail(&buf_ptr1->list, 5147 &phba->elsbuf); 5148 phba->elsbuf_cnt++; 5149 } 5150 INIT_LIST_HEAD(&buf_ptr->list); 5151 list_add_tail(&buf_ptr->list, &phba->elsbuf); 5152 phba->elsbuf_cnt++; 5153 spin_unlock_irq(&phba->hbalock); 5154 } 5155 } else { 5156 buf_ptr1 = elsiocb->cmd_dmabuf; 5157 lpfc_els_free_data(phba, buf_ptr1); 5158 elsiocb->cmd_dmabuf = NULL; 5159 } 5160 } 5161 5162 if (elsiocb->bpl_dmabuf) { 5163 buf_ptr = elsiocb->bpl_dmabuf; 5164 lpfc_els_free_bpl(phba, buf_ptr); 5165 elsiocb->bpl_dmabuf = NULL; 5166 } 5167 lpfc_sli_release_iocbq(phba, elsiocb); 5168 return 0; 5169 } 5170 5171 /** 5172 * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response 5173 * @phba: pointer to lpfc hba data structure. 5174 * @cmdiocb: pointer to lpfc command iocb data structure. 5175 * @rspiocb: pointer to lpfc response iocb data structure. 5176 * 5177 * This routine is the completion callback function to the Logout (LOGO) 5178 * Accept (ACC) Response ELS command. This routine is invoked to indicate 5179 * the completion of the LOGO process. It invokes the lpfc_nlp_not_used() to 5180 * release the ndlp if it has the last reference remaining (reference count 5181 * is 1). If succeeded (meaning ndlp released), it sets the iocb ndlp 5182 * field to NULL to inform the following lpfc_els_free_iocb() routine no 5183 * ndlp reference count needs to be decremented. Otherwise, the ndlp 5184 * reference use-count shall be decremented by the lpfc_els_free_iocb() 5185 * routine. Finally, the lpfc_els_free_iocb() is invoked to release the 5186 * IOCB data structure. 
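 * For fabric-type nodes that are not well-known addresses the unreg_rpi
 * step is skipped, since some fabrics send RDP requests after logging out.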
5187 **/ 5188 static void 5189 lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 5190 struct lpfc_iocbq *rspiocb) 5191 { 5192 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 5193 struct lpfc_vport *vport = cmdiocb->vport; 5194 u32 ulp_status, ulp_word4; 5195 5196 ulp_status = get_job_ulpstatus(phba, rspiocb); 5197 ulp_word4 = get_job_word4(phba, rspiocb); 5198 5199 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5200 "ACC LOGO cmpl: status:x%x/x%x did:x%x", 5201 ulp_status, ulp_word4, ndlp->nlp_DID); 5202 /* ACC to LOGO completes to NPort <nlp_DID> */ 5203 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5204 "0109 ACC to LOGO completes to NPort x%x refcnt %d " 5205 "Data: x%x x%x x%x\n", 5206 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag, 5207 ndlp->nlp_state, ndlp->nlp_rpi); 5208 5209 /* This clause allows the LOGO ACC to complete and free resources 5210 * for the Fabric Domain Controller. It does deliberately skip 5211 * the unreg_rpi and release rpi because some fabrics send RDP 5212 * requests after logging out from the initiator. 5213 */ 5214 if (ndlp->nlp_type & NLP_FABRIC && 5215 ((ndlp->nlp_DID & WELL_KNOWN_DID_MASK) != WELL_KNOWN_DID_MASK)) 5216 goto out; 5217 5218 if (ndlp->nlp_state == NLP_STE_NPR_NODE) { 5219 /* If PLOGI is being retried, PLOGI completion will cleanup the 5220 * node. The NLP_NPR_2B_DISC flag needs to be retained to make 5221 * progress on nodes discovered from last RSCN. 5222 */ 5223 if ((ndlp->nlp_flag & NLP_DELAY_TMO) && 5224 (ndlp->nlp_last_elscmd == ELS_CMD_PLOGI)) 5225 goto out; 5226 5227 /* NPort Recovery mode or node is just allocated */ 5228 if (!lpfc_nlp_not_used(ndlp)) { 5229 /* A LOGO is completing and the node is in NPR state. 5230 * Just unregister the RPI because the node is still 5231 * required. 5232 */ 5233 lpfc_unreg_rpi(vport, ndlp); 5234 } else { 5235 /* Indicate the node has already released, should 5236 * not reference to it from within lpfc_els_free_iocb. 5237 */ 5238 cmdiocb->ndlp = NULL; 5239 } 5240 } 5241 out: 5242 /* 5243 * The driver received a LOGO from the rport and has ACK'd it. 5244 * At this point, the driver is done so release the IOCB 5245 */ 5246 lpfc_els_free_iocb(phba, cmdiocb); 5247 lpfc_nlp_put(ndlp); 5248 } 5249 5250 /** 5251 * lpfc_mbx_cmpl_dflt_rpi - Completion callbk func for unreg dflt rpi mbox cmd 5252 * @phba: pointer to lpfc hba data structure. 5253 * @pmb: pointer to the driver internal queue element for mailbox command. 5254 * 5255 * This routine is the completion callback function for unregister default 5256 * RPI (Remote Port Index) mailbox command to the @phba. It simply releases 5257 * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and 5258 * decrements the ndlp reference count held for this completion callback 5259 * function. After that, it invokes the lpfc_nlp_not_used() to check 5260 * whether there is only one reference left on the ndlp. If so, it will 5261 * perform one more decrement and trigger the release of the ndlp. 
5262 **/
5263 void
5264 lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5265 {
5266 struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
5267 u32 mbx_flag = pmb->mbox_flag;
5268 u32 mbx_cmd = pmb->u.mb.mbxCommand;
5269 
5270 if (ndlp) {
5271 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
5272 "0006 rpi x%x DID:%x flg:%x %d x%px "
5273 "mbx_cmd x%x mbx_flag x%x x%px\n",
5274 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
5275 kref_read(&ndlp->kref), ndlp, mbx_cmd,
5276 mbx_flag, pmb);
5277 
5278 /* This ends the default/temporary RPI cleanup logic for this
5279 * ndlp, and the node and rpi need to be released. Free the rpi
5280 * first on an UNREG_LOGIN and then release the final
5281 * references.
5282 */
5283 spin_lock_irq(&ndlp->lock);
5284 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
5285 if (mbx_cmd == MBX_UNREG_LOGIN)
5286 ndlp->nlp_flag &= ~NLP_UNREG_INP;
5287 spin_unlock_irq(&ndlp->lock);
5288 lpfc_nlp_put(ndlp);
5289 lpfc_drop_node(ndlp->vport, ndlp);
5290 }
5291 
5292 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
5293 }
5294 
5295 /**
5296 * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd
5297 * @phba: pointer to lpfc hba data structure.
5298 * @cmdiocb: pointer to lpfc command iocb data structure.
5299 * @rspiocb: pointer to lpfc response iocb data structure.
5300 *
5301 * This routine is the completion callback function for an ELS Response IOCB
5302 * command. In the normal case, this callback function simply updates the
5303 * nlp_flag bitmap in the ndlp data structure; if the mbox command reference
5304 * field in the command IOCB is not NULL, the referenced mailbox command is
5305 * sent out, and then the lpfc_els_free_iocb() routine is invoked to release
5306 * the IOCB.
5307 **/
5308 static void
5309 lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
5310 struct lpfc_iocbq *rspiocb)
5311 {
5312 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
5313 struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
5314 struct Scsi_Host *shost = vport ?
lpfc_shost_from_vport(vport) : NULL; 5315 IOCB_t *irsp; 5316 LPFC_MBOXQ_t *mbox = NULL; 5317 u32 ulp_status, ulp_word4, tmo, did, iotag; 5318 5319 if (!vport) { 5320 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5321 "3177 ELS response failed\n"); 5322 goto out; 5323 } 5324 if (cmdiocb->context_un.mbox) 5325 mbox = cmdiocb->context_un.mbox; 5326 5327 ulp_status = get_job_ulpstatus(phba, rspiocb); 5328 ulp_word4 = get_job_word4(phba, rspiocb); 5329 did = get_job_els_rsp64_did(phba, cmdiocb); 5330 5331 if (phba->sli_rev == LPFC_SLI_REV4) { 5332 tmo = get_wqe_tmo(cmdiocb); 5333 iotag = get_wqe_reqtag(cmdiocb); 5334 } else { 5335 irsp = &rspiocb->iocb; 5336 tmo = irsp->ulpTimeout; 5337 iotag = irsp->ulpIoTag; 5338 } 5339 5340 /* Check to see if link went down during discovery */ 5341 if (!ndlp || lpfc_els_chk_latt(vport)) { 5342 if (mbox) 5343 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 5344 goto out; 5345 } 5346 5347 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5348 "ELS rsp cmpl: status:x%x/x%x did:x%x", 5349 ulp_status, ulp_word4, did); 5350 /* ELS response tag <ulpIoTag> completes */ 5351 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5352 "0110 ELS response tag x%x completes " 5353 "Data: x%x x%x x%x x%x x%x x%x x%x x%x %p %p\n", 5354 iotag, ulp_status, ulp_word4, tmo, 5355 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5356 ndlp->nlp_rpi, kref_read(&ndlp->kref), mbox, ndlp); 5357 if (mbox) { 5358 if (ulp_status == 0 5359 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { 5360 if (!lpfc_unreg_rpi(vport, ndlp) && 5361 (!(vport->fc_flag & FC_PT2PT))) { 5362 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || 5363 ndlp->nlp_state == 5364 NLP_STE_REG_LOGIN_ISSUE) { 5365 lpfc_printf_vlog(vport, KERN_INFO, 5366 LOG_DISCOVERY, 5367 "0314 PLOGI recov " 5368 "DID x%x " 5369 "Data: x%x x%x x%x\n", 5370 ndlp->nlp_DID, 5371 ndlp->nlp_state, 5372 ndlp->nlp_rpi, 5373 ndlp->nlp_flag); 5374 goto out_free_mbox; 5375 } 5376 } 5377 5378 /* Increment reference count to ndlp to hold the 5379 * reference to ndlp for the callback function. 5380 */ 5381 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 5382 if (!mbox->ctx_ndlp) 5383 goto out_free_mbox; 5384 5385 mbox->vport = vport; 5386 if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) { 5387 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG; 5388 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 5389 } 5390 else { 5391 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; 5392 ndlp->nlp_prev_state = ndlp->nlp_state; 5393 lpfc_nlp_set_state(vport, ndlp, 5394 NLP_STE_REG_LOGIN_ISSUE); 5395 } 5396 5397 ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; 5398 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 5399 != MBX_NOT_FINISHED) 5400 goto out; 5401 5402 /* Decrement the ndlp reference count we 5403 * set for this failed mailbox command. 5404 */ 5405 lpfc_nlp_put(ndlp); 5406 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 5407 5408 /* ELS rsp: Cannot issue reg_login for <NPortid> */ 5409 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5410 "0138 ELS rsp: Cannot issue reg_login for x%x " 5411 "Data: x%x x%x x%x\n", 5412 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5413 ndlp->nlp_rpi); 5414 } 5415 out_free_mbox: 5416 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 5417 } 5418 out: 5419 if (ndlp && shost) { 5420 spin_lock_irq(&ndlp->lock); 5421 if (mbox) 5422 ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN; 5423 ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI; 5424 spin_unlock_irq(&ndlp->lock); 5425 } 5426 5427 /* An SLI4 NPIV instance wants to drop the node at this point under 5428 * these conditions and release the RPI. 
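 * Specifically: the node must not be registered with the SCSI transport
 * (SCSI_XPT_REGD clear), NLP_RELEASE_RPI must be set, and the node must not
 * be in PLOGI_ISSUE or REG_LOGIN_ISSUE state.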
5429 */ 5430 if (phba->sli_rev == LPFC_SLI_REV4 && 5431 (vport && vport->port_type == LPFC_NPIV_PORT) && 5432 !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD) && 5433 ndlp->nlp_flag & NLP_RELEASE_RPI) { 5434 if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE && 5435 ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) { 5436 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); 5437 spin_lock_irq(&ndlp->lock); 5438 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; 5439 ndlp->nlp_flag &= ~NLP_RELEASE_RPI; 5440 spin_unlock_irq(&ndlp->lock); 5441 lpfc_drop_node(vport, ndlp); 5442 } 5443 } 5444 5445 /* Release the originating I/O reference. */ 5446 lpfc_els_free_iocb(phba, cmdiocb); 5447 lpfc_nlp_put(ndlp); 5448 return; 5449 } 5450 5451 /** 5452 * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command 5453 * @vport: pointer to a host virtual N_Port data structure. 5454 * @flag: the els command code to be accepted. 5455 * @oldiocb: pointer to the original lpfc command iocb data structure. 5456 * @ndlp: pointer to a node-list data structure. 5457 * @mbox: pointer to the driver internal queue element for mailbox command. 5458 * 5459 * This routine prepares and issues an Accept (ACC) response IOCB 5460 * command. It uses the @flag to properly set up the IOCB field for the 5461 * specific ACC response command to be issued and invokes the 5462 * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a 5463 * @mbox pointer is passed in, it will be put into the context_un.mbox 5464 * field of the IOCB for the completion callback function to issue the 5465 * mailbox command to the HBA later when callback is invoked. 5466 * 5467 * Note that the ndlp reference count will be incremented by 1 for holding the 5468 * ndlp and the reference to ndlp will be stored into the ndlp field of 5469 * the IOCB for the completion callback function to the corresponding 5470 * response ELS IOCB command. 
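 * The @flag values handled here are ELS_CMD_ACC, ELS_CMD_FLOGI,
 * ELS_CMD_PLOGI, ELS_CMD_PRLO and ELS_CMD_RDF; any other value causes the
 * routine to return 1 without issuing a response.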
5471 * 5472 * Return code 5473 * 0 - Successfully issued acc response 5474 * 1 - Failed to issue acc response 5475 **/ 5476 int 5477 lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, 5478 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 5479 LPFC_MBOXQ_t *mbox) 5480 { 5481 struct lpfc_hba *phba = vport->phba; 5482 IOCB_t *icmd; 5483 IOCB_t *oldcmd; 5484 union lpfc_wqe128 *wqe; 5485 union lpfc_wqe128 *oldwqe = &oldiocb->wqe; 5486 struct lpfc_iocbq *elsiocb; 5487 uint8_t *pcmd; 5488 struct serv_parm *sp; 5489 uint16_t cmdsize; 5490 int rc; 5491 ELS_PKT *els_pkt_ptr; 5492 struct fc_els_rdf_resp *rdf_resp; 5493 5494 switch (flag) { 5495 case ELS_CMD_ACC: 5496 cmdsize = sizeof(uint32_t); 5497 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5498 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5499 if (!elsiocb) { 5500 spin_lock_irq(&ndlp->lock); 5501 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 5502 spin_unlock_irq(&ndlp->lock); 5503 return 1; 5504 } 5505 5506 if (phba->sli_rev == LPFC_SLI_REV4) { 5507 wqe = &elsiocb->wqe; 5508 /* XRI / rx_id */ 5509 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 5510 bf_get(wqe_ctxt_tag, 5511 &oldwqe->xmit_els_rsp.wqe_com)); 5512 5513 /* oxid */ 5514 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5515 bf_get(wqe_rcvoxid, 5516 &oldwqe->xmit_els_rsp.wqe_com)); 5517 } else { 5518 icmd = &elsiocb->iocb; 5519 oldcmd = &oldiocb->iocb; 5520 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5521 icmd->unsli3.rcvsli3.ox_id = 5522 oldcmd->unsli3.rcvsli3.ox_id; 5523 } 5524 5525 pcmd = elsiocb->cmd_dmabuf->virt; 5526 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5527 pcmd += sizeof(uint32_t); 5528 5529 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5530 "Issue ACC: did:x%x flg:x%x", 5531 ndlp->nlp_DID, ndlp->nlp_flag, 0); 5532 break; 5533 case ELS_CMD_FLOGI: 5534 case ELS_CMD_PLOGI: 5535 cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t)); 5536 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5537 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5538 if (!elsiocb) 5539 return 1; 5540 5541 if (phba->sli_rev == LPFC_SLI_REV4) { 5542 wqe = &elsiocb->wqe; 5543 /* XRI / rx_id */ 5544 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 5545 bf_get(wqe_ctxt_tag, 5546 &oldwqe->xmit_els_rsp.wqe_com)); 5547 5548 /* oxid */ 5549 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5550 bf_get(wqe_rcvoxid, 5551 &oldwqe->xmit_els_rsp.wqe_com)); 5552 } else { 5553 icmd = &elsiocb->iocb; 5554 oldcmd = &oldiocb->iocb; 5555 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5556 icmd->unsli3.rcvsli3.ox_id = 5557 oldcmd->unsli3.rcvsli3.ox_id; 5558 } 5559 5560 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt; 5561 5562 if (mbox) 5563 elsiocb->context_un.mbox = mbox; 5564 5565 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5566 pcmd += sizeof(uint32_t); 5567 sp = (struct serv_parm *)pcmd; 5568 5569 if (flag == ELS_CMD_FLOGI) { 5570 /* Copy the received service parameters back */ 5571 memcpy(sp, &phba->fc_fabparam, 5572 sizeof(struct serv_parm)); 5573 5574 /* Clear the F_Port bit */ 5575 sp->cmn.fPort = 0; 5576 5577 /* Mark all class service parameters as invalid */ 5578 sp->cls1.classValid = 0; 5579 sp->cls2.classValid = 0; 5580 sp->cls3.classValid = 0; 5581 sp->cls4.classValid = 0; 5582 5583 /* Copy our worldwide names */ 5584 memcpy(&sp->portName, &vport->fc_sparam.portName, 5585 sizeof(struct lpfc_name)); 5586 memcpy(&sp->nodeName, &vport->fc_sparam.nodeName, 5587 sizeof(struct lpfc_name)); 5588 } else { 5589 memcpy(pcmd, &vport->fc_sparam, 5590 sizeof(struct serv_parm)); 5591 5592 
sp->cmn.valid_vendor_ver_level = 0; 5593 memset(sp->un.vendorVersion, 0, 5594 sizeof(sp->un.vendorVersion)); 5595 sp->cmn.bbRcvSizeMsb &= 0xF; 5596 5597 /* If our firmware supports this feature, convey that 5598 * info to the target using the vendor specific field. 5599 */ 5600 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) { 5601 sp->cmn.valid_vendor_ver_level = 1; 5602 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID); 5603 sp->un.vv.flags = 5604 cpu_to_be32(LPFC_VV_SUPPRESS_RSP); 5605 } 5606 } 5607 5608 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5609 "Issue ACC FLOGI/PLOGI: did:x%x flg:x%x", 5610 ndlp->nlp_DID, ndlp->nlp_flag, 0); 5611 break; 5612 case ELS_CMD_PRLO: 5613 cmdsize = sizeof(uint32_t) + sizeof(PRLO); 5614 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5615 ndlp, ndlp->nlp_DID, ELS_CMD_PRLO); 5616 if (!elsiocb) 5617 return 1; 5618 5619 if (phba->sli_rev == LPFC_SLI_REV4) { 5620 wqe = &elsiocb->wqe; 5621 /* XRI / rx_id */ 5622 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 5623 bf_get(wqe_ctxt_tag, 5624 &oldwqe->xmit_els_rsp.wqe_com)); 5625 5626 /* oxid */ 5627 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5628 bf_get(wqe_rcvoxid, 5629 &oldwqe->xmit_els_rsp.wqe_com)); 5630 } else { 5631 icmd = &elsiocb->iocb; 5632 oldcmd = &oldiocb->iocb; 5633 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5634 icmd->unsli3.rcvsli3.ox_id = 5635 oldcmd->unsli3.rcvsli3.ox_id; 5636 } 5637 5638 pcmd = (u8 *) elsiocb->cmd_dmabuf->virt; 5639 5640 memcpy(pcmd, oldiocb->cmd_dmabuf->virt, 5641 sizeof(uint32_t) + sizeof(PRLO)); 5642 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC; 5643 els_pkt_ptr = (ELS_PKT *) pcmd; 5644 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED; 5645 5646 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5647 "Issue ACC PRLO: did:x%x flg:x%x", 5648 ndlp->nlp_DID, ndlp->nlp_flag, 0); 5649 break; 5650 case ELS_CMD_RDF: 5651 cmdsize = sizeof(*rdf_resp); 5652 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5653 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5654 if (!elsiocb) 5655 return 1; 5656 5657 if (phba->sli_rev == LPFC_SLI_REV4) { 5658 wqe = &elsiocb->wqe; 5659 /* XRI / rx_id */ 5660 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 5661 bf_get(wqe_ctxt_tag, 5662 &oldwqe->xmit_els_rsp.wqe_com)); 5663 5664 /* oxid */ 5665 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5666 bf_get(wqe_rcvoxid, 5667 &oldwqe->xmit_els_rsp.wqe_com)); 5668 } else { 5669 icmd = &elsiocb->iocb; 5670 oldcmd = &oldiocb->iocb; 5671 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5672 icmd->unsli3.rcvsli3.ox_id = 5673 oldcmd->unsli3.rcvsli3.ox_id; 5674 } 5675 5676 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt; 5677 rdf_resp = (struct fc_els_rdf_resp *)pcmd; 5678 memset(rdf_resp, 0, sizeof(*rdf_resp)); 5679 rdf_resp->acc_hdr.la_cmd = ELS_LS_ACC; 5680 5681 /* FC-LS-5 specifies desc_list_len shall be set to 12 */ 5682 rdf_resp->desc_list_len = cpu_to_be32(12); 5683 5684 /* FC-LS-5 specifies LS REQ Information descriptor */ 5685 rdf_resp->lsri.desc_tag = cpu_to_be32(1); 5686 rdf_resp->lsri.desc_len = cpu_to_be32(sizeof(u32)); 5687 rdf_resp->lsri.rqst_w0.cmd = ELS_RDF; 5688 break; 5689 default: 5690 return 1; 5691 } 5692 if (ndlp->nlp_flag & NLP_LOGO_ACC) { 5693 spin_lock_irq(&ndlp->lock); 5694 if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED || 5695 ndlp->nlp_flag & NLP_REG_LOGIN_SEND)) 5696 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 5697 spin_unlock_irq(&ndlp->lock); 5698 elsiocb->cmd_cmpl = lpfc_cmpl_els_logo_acc; 5699 } else { 5700 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 5701 } 
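/* The node reference taken by lpfc_nlp_get() below is held for the
 * completion handler, which releases it via lpfc_els_free_iocb() and
 * lpfc_nlp_put(); on an issue failure the reference is dropped here.
 */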
5702 5703 phba->fc_stat.elsXmitACC++; 5704 elsiocb->ndlp = lpfc_nlp_get(ndlp); 5705 if (!elsiocb->ndlp) { 5706 lpfc_els_free_iocb(phba, elsiocb); 5707 return 1; 5708 } 5709 5710 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5711 if (rc == IOCB_ERROR) { 5712 lpfc_els_free_iocb(phba, elsiocb); 5713 lpfc_nlp_put(ndlp); 5714 return 1; 5715 } 5716 5717 /* Xmit ELS ACC response tag <ulpIoTag> */ 5718 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5719 "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, " 5720 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " 5721 "RPI: x%x, fc_flag x%x refcnt %d\n", 5722 rc, elsiocb->iotag, elsiocb->sli4_xritag, 5723 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5724 ndlp->nlp_rpi, vport->fc_flag, kref_read(&ndlp->kref)); 5725 return 0; 5726 } 5727 5728 /** 5729 * lpfc_els_rsp_reject - Prepare and issue a rjt response iocb command 5730 * @vport: pointer to a virtual N_Port data structure. 5731 * @rejectError: reject response to issue 5732 * @oldiocb: pointer to the original lpfc command iocb data structure. 5733 * @ndlp: pointer to a node-list data structure. 5734 * @mbox: pointer to the driver internal queue element for mailbox command. 5735 * 5736 * This routine prepares and issue an Reject (RJT) response IOCB 5737 * command. If a @mbox pointer is passed in, it will be put into the 5738 * context_un.mbox field of the IOCB for the completion callback function 5739 * to issue to the HBA later. 5740 * 5741 * Note that the ndlp reference count will be incremented by 1 for holding the 5742 * ndlp and the reference to ndlp will be stored into the ndlp field of 5743 * the IOCB for the completion callback function to the reject response 5744 * ELS IOCB command. 5745 * 5746 * Return code 5747 * 0 - Successfully issued reject response 5748 * 1 - Failed to issue reject response 5749 **/ 5750 int 5751 lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, 5752 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 5753 LPFC_MBOXQ_t *mbox) 5754 { 5755 int rc; 5756 struct lpfc_hba *phba = vport->phba; 5757 IOCB_t *icmd; 5758 IOCB_t *oldcmd; 5759 union lpfc_wqe128 *wqe; 5760 struct lpfc_iocbq *elsiocb; 5761 uint8_t *pcmd; 5762 uint16_t cmdsize; 5763 5764 cmdsize = 2 * sizeof(uint32_t); 5765 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 5766 ndlp->nlp_DID, ELS_CMD_LS_RJT); 5767 if (!elsiocb) 5768 return 1; 5769 5770 if (phba->sli_rev == LPFC_SLI_REV4) { 5771 wqe = &elsiocb->wqe; 5772 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 5773 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */ 5774 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5775 get_job_rcvoxid(phba, oldiocb)); 5776 } else { 5777 icmd = &elsiocb->iocb; 5778 oldcmd = &oldiocb->iocb; 5779 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5780 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 5781 } 5782 5783 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 5784 5785 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; 5786 pcmd += sizeof(uint32_t); 5787 *((uint32_t *) (pcmd)) = rejectError; 5788 5789 if (mbox) 5790 elsiocb->context_un.mbox = mbox; 5791 5792 /* Xmit ELS RJT <err> response tag <ulpIoTag> */ 5793 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5794 "0129 Xmit ELS RJT x%x response tag x%x " 5795 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " 5796 "rpi x%x\n", 5797 rejectError, elsiocb->iotag, 5798 get_job_ulpcontext(phba, elsiocb), ndlp->nlp_DID, 5799 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); 5800 lpfc_debugfs_disc_trc(vport, 
LPFC_DISC_TRC_ELS_RSP, 5801 "Issue LS_RJT: did:x%x flg:x%x err:x%x", 5802 ndlp->nlp_DID, ndlp->nlp_flag, rejectError); 5803 5804 phba->fc_stat.elsXmitLSRJT++; 5805 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 5806 elsiocb->ndlp = lpfc_nlp_get(ndlp); 5807 if (!elsiocb->ndlp) { 5808 lpfc_els_free_iocb(phba, elsiocb); 5809 return 1; 5810 } 5811 5812 /* The NPIV instance is rejecting this unsolicited ELS. Make sure the 5813 * node's assigned RPI gets released provided this node is not already 5814 * registered with the transport. 5815 */ 5816 if (phba->sli_rev == LPFC_SLI_REV4 && 5817 vport->port_type == LPFC_NPIV_PORT && 5818 !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) { 5819 spin_lock_irq(&ndlp->lock); 5820 ndlp->nlp_flag |= NLP_RELEASE_RPI; 5821 spin_unlock_irq(&ndlp->lock); 5822 } 5823 5824 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5825 if (rc == IOCB_ERROR) { 5826 lpfc_els_free_iocb(phba, elsiocb); 5827 lpfc_nlp_put(ndlp); 5828 return 1; 5829 } 5830 5831 return 0; 5832 } 5833 5834 /** 5835 * lpfc_issue_els_edc_rsp - Exchange Diagnostic Capabilities with the fabric. 5836 * @vport: pointer to a host virtual N_Port data structure. 5837 * @cmdiocb: pointer to the original lpfc command iocb data structure. 5838 * @ndlp: NPort to where rsp is directed 5839 * 5840 * This routine issues an EDC ACC RSP to the F-Port Controller to communicate 5841 * this N_Port's support of hardware signals in its Congestion 5842 * Capabilities Descriptor. 5843 * 5844 * Return code 5845 * 0 - Successfully issued edc rsp command 5846 * 1 - Failed to issue edc rsp command 5847 **/ 5848 static int 5849 lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5850 struct lpfc_nodelist *ndlp) 5851 { 5852 struct lpfc_hba *phba = vport->phba; 5853 struct fc_els_edc_resp *edc_rsp; 5854 struct fc_tlv_desc *tlv; 5855 struct lpfc_iocbq *elsiocb; 5856 IOCB_t *icmd, *cmd; 5857 union lpfc_wqe128 *wqe; 5858 u32 cgn_desc_size, lft_desc_size; 5859 u16 cmdsize; 5860 uint8_t *pcmd; 5861 int rc; 5862 5863 cmdsize = sizeof(struct fc_els_edc_resp); 5864 cgn_desc_size = sizeof(struct fc_diag_cg_sig_desc); 5865 lft_desc_size = (lpfc_link_is_lds_capable(phba)) ? 
5866 sizeof(struct fc_diag_lnkflt_desc) : 0; 5867 cmdsize += cgn_desc_size + lft_desc_size; 5868 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, cmdiocb->retry, 5869 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5870 if (!elsiocb) 5871 return 1; 5872 5873 if (phba->sli_rev == LPFC_SLI_REV4) { 5874 wqe = &elsiocb->wqe; 5875 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 5876 get_job_ulpcontext(phba, cmdiocb)); /* Xri / rx_id */ 5877 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5878 get_job_rcvoxid(phba, cmdiocb)); 5879 } else { 5880 icmd = &elsiocb->iocb; 5881 cmd = &cmdiocb->iocb; 5882 icmd->ulpContext = cmd->ulpContext; /* Xri / rx_id */ 5883 icmd->unsli3.rcvsli3.ox_id = cmd->unsli3.rcvsli3.ox_id; 5884 } 5885 5886 pcmd = elsiocb->cmd_dmabuf->virt; 5887 memset(pcmd, 0, cmdsize); 5888 5889 edc_rsp = (struct fc_els_edc_resp *)pcmd; 5890 edc_rsp->acc_hdr.la_cmd = ELS_LS_ACC; 5891 edc_rsp->desc_list_len = cpu_to_be32(sizeof(struct fc_els_lsri_desc) + 5892 cgn_desc_size + lft_desc_size); 5893 edc_rsp->lsri.desc_tag = cpu_to_be32(ELS_DTAG_LS_REQ_INFO); 5894 edc_rsp->lsri.desc_len = cpu_to_be32( 5895 FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_els_lsri_desc)); 5896 edc_rsp->lsri.rqst_w0.cmd = ELS_EDC; 5897 tlv = edc_rsp->desc; 5898 lpfc_format_edc_cgn_desc(phba, tlv); 5899 tlv = fc_tlv_next_desc(tlv); 5900 if (lft_desc_size) 5901 lpfc_format_edc_lft_desc(phba, tlv); 5902 5903 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5904 "Issue EDC ACC: did:x%x flg:x%x refcnt %d", 5905 ndlp->nlp_DID, ndlp->nlp_flag, 5906 kref_read(&ndlp->kref)); 5907 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 5908 5909 phba->fc_stat.elsXmitACC++; 5910 elsiocb->ndlp = lpfc_nlp_get(ndlp); 5911 if (!elsiocb->ndlp) { 5912 lpfc_els_free_iocb(phba, elsiocb); 5913 return 1; 5914 } 5915 5916 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5917 if (rc == IOCB_ERROR) { 5918 lpfc_els_free_iocb(phba, elsiocb); 5919 lpfc_nlp_put(ndlp); 5920 return 1; 5921 } 5922 5923 /* Xmit ELS ACC response tag <ulpIoTag> */ 5924 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5925 "0152 Xmit EDC ACC response Status: x%x, IoTag: x%x, " 5926 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " 5927 "RPI: x%x, fc_flag x%x\n", 5928 rc, elsiocb->iotag, elsiocb->sli4_xritag, 5929 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5930 ndlp->nlp_rpi, vport->fc_flag); 5931 5932 return 0; 5933 } 5934 5935 /** 5936 * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd 5937 * @vport: pointer to a virtual N_Port data structure. 5938 * @oldiocb: pointer to the original lpfc command iocb data structure. 5939 * @ndlp: pointer to a node-list data structure. 5940 * 5941 * This routine prepares and issues an Accept (ACC) response to Address 5942 * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB 5943 * and invokes the lpfc_sli_issue_iocb() routine to send out the command. 5944 * 5945 * Note that the ndlp reference count will be incremented by 1 for holding the 5946 * ndlp and the reference to ndlp will be stored into the ndlp field of 5947 * the IOCB for the completion callback function to the ADISC Accept response 5948 * ELS IOCB command. 
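 * The ACC payload returns this port's hard AL_PA, port name, node name and
 * the @vport D_ID.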
5949 * 5950 * Return code 5951 * 0 - Successfully issued acc adisc response 5952 * 1 - Failed to issue adisc acc response 5953 **/ 5954 int 5955 lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 5956 struct lpfc_nodelist *ndlp) 5957 { 5958 struct lpfc_hba *phba = vport->phba; 5959 ADISC *ap; 5960 IOCB_t *icmd, *oldcmd; 5961 union lpfc_wqe128 *wqe; 5962 struct lpfc_iocbq *elsiocb; 5963 uint8_t *pcmd; 5964 uint16_t cmdsize; 5965 int rc; 5966 u32 ulp_context; 5967 5968 cmdsize = sizeof(uint32_t) + sizeof(ADISC); 5969 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 5970 ndlp->nlp_DID, ELS_CMD_ACC); 5971 if (!elsiocb) 5972 return 1; 5973 5974 if (phba->sli_rev == LPFC_SLI_REV4) { 5975 wqe = &elsiocb->wqe; 5976 /* XRI / rx_id */ 5977 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 5978 get_job_ulpcontext(phba, oldiocb)); 5979 ulp_context = get_job_ulpcontext(phba, elsiocb); 5980 /* oxid */ 5981 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5982 get_job_rcvoxid(phba, oldiocb)); 5983 } else { 5984 icmd = &elsiocb->iocb; 5985 oldcmd = &oldiocb->iocb; 5986 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5987 ulp_context = elsiocb->iocb.ulpContext; 5988 icmd->unsli3.rcvsli3.ox_id = 5989 oldcmd->unsli3.rcvsli3.ox_id; 5990 } 5991 5992 /* Xmit ADISC ACC response tag <ulpIoTag> */ 5993 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5994 "0130 Xmit ADISC ACC response iotag x%x xri: " 5995 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n", 5996 elsiocb->iotag, ulp_context, 5997 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5998 ndlp->nlp_rpi); 5999 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 6000 6001 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 6002 pcmd += sizeof(uint32_t); 6003 6004 ap = (ADISC *) (pcmd); 6005 ap->hardAL_PA = phba->fc_pref_ALPA; 6006 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 6007 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 6008 ap->DID = be32_to_cpu(vport->fc_myDID); 6009 6010 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6011 "Issue ACC ADISC: did:x%x flg:x%x refcnt %d", 6012 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 6013 6014 phba->fc_stat.elsXmitACC++; 6015 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 6016 elsiocb->ndlp = lpfc_nlp_get(ndlp); 6017 if (!elsiocb->ndlp) { 6018 lpfc_els_free_iocb(phba, elsiocb); 6019 return 1; 6020 } 6021 6022 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6023 if (rc == IOCB_ERROR) { 6024 lpfc_els_free_iocb(phba, elsiocb); 6025 lpfc_nlp_put(ndlp); 6026 return 1; 6027 } 6028 6029 return 0; 6030 } 6031 6032 /** 6033 * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd 6034 * @vport: pointer to a virtual N_Port data structure. 6035 * @oldiocb: pointer to the original lpfc command iocb data structure. 6036 * @ndlp: pointer to a node-list data structure. 6037 * 6038 * This routine prepares and issues an Accept (ACC) response to Process 6039 * Login (PRLI) ELS command. It simply prepares the payload of the IOCB 6040 * and invokes the lpfc_sli_issue_iocb() routine to send out the command. 6041 * 6042 * Note that the ndlp reference count will be incremented by 1 for holding the 6043 * ndlp and the reference to ndlp will be stored into the ndlp field of 6044 * the IOCB for the completion callback function to the PRLI Accept response 6045 * ELS IOCB command. 
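 * Word 1 of the received PRLI is examined to decide whether an FCP or an
 * NVMe PRLI accept is built; any other FC4 type causes the routine to
 * return 1 without sending a response.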
6046 * 6047 * Return code 6048 * 0 - Successfully issued acc prli response 6049 * 1 - Failed to issue acc prli response 6050 **/ 6051 int 6052 lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 6053 struct lpfc_nodelist *ndlp) 6054 { 6055 struct lpfc_hba *phba = vport->phba; 6056 PRLI *npr; 6057 struct lpfc_nvme_prli *npr_nvme; 6058 lpfc_vpd_t *vpd; 6059 IOCB_t *icmd; 6060 IOCB_t *oldcmd; 6061 union lpfc_wqe128 *wqe; 6062 struct lpfc_iocbq *elsiocb; 6063 uint8_t *pcmd; 6064 uint16_t cmdsize; 6065 uint32_t prli_fc4_req, *req_payload; 6066 struct lpfc_dmabuf *req_buf; 6067 int rc; 6068 u32 elsrspcmd, ulp_context; 6069 6070 /* Need the incoming PRLI payload to determine if the ACC is for an 6071 * FC4 or NVME PRLI type. The PRLI type is at word 1. 6072 */ 6073 req_buf = oldiocb->cmd_dmabuf; 6074 req_payload = (((uint32_t *)req_buf->virt) + 1); 6075 6076 /* PRLI type payload is at byte 3 for FCP or NVME. */ 6077 prli_fc4_req = be32_to_cpu(*req_payload); 6078 prli_fc4_req = (prli_fc4_req >> 24) & 0xff; 6079 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6080 "6127 PRLI_ACC: Req Type x%x, Word1 x%08x\n", 6081 prli_fc4_req, *((uint32_t *)req_payload)); 6082 6083 if (prli_fc4_req == PRLI_FCP_TYPE) { 6084 cmdsize = sizeof(uint32_t) + sizeof(PRLI); 6085 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)); 6086 } else if (prli_fc4_req == PRLI_NVME_TYPE) { 6087 cmdsize = sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli); 6088 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_NVMEPRLI & ~ELS_RSP_MASK)); 6089 } else { 6090 return 1; 6091 } 6092 6093 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 6094 ndlp->nlp_DID, elsrspcmd); 6095 if (!elsiocb) 6096 return 1; 6097 6098 if (phba->sli_rev == LPFC_SLI_REV4) { 6099 wqe = &elsiocb->wqe; 6100 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 6101 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */ 6102 ulp_context = get_job_ulpcontext(phba, elsiocb); 6103 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 6104 get_job_rcvoxid(phba, oldiocb)); 6105 } else { 6106 icmd = &elsiocb->iocb; 6107 oldcmd = &oldiocb->iocb; 6108 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 6109 ulp_context = elsiocb->iocb.ulpContext; 6110 icmd->unsli3.rcvsli3.ox_id = 6111 oldcmd->unsli3.rcvsli3.ox_id; 6112 } 6113 6114 /* Xmit PRLI ACC response tag <ulpIoTag> */ 6115 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6116 "0131 Xmit PRLI ACC response tag x%x xri x%x, " 6117 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 6118 elsiocb->iotag, ulp_context, 6119 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 6120 ndlp->nlp_rpi); 6121 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 6122 memset(pcmd, 0, cmdsize); 6123 6124 *((uint32_t *)(pcmd)) = elsrspcmd; 6125 pcmd += sizeof(uint32_t); 6126 6127 /* For PRLI, remainder of payload is PRLI parameter page */ 6128 vpd = &phba->vpd; 6129 6130 if (prli_fc4_req == PRLI_FCP_TYPE) { 6131 /* 6132 * If the remote port is a target and our firmware version 6133 * is 3.20 or later, set the following bits for FC-TAPE 6134 * support. 
6135 */ 6136 npr = (PRLI *) pcmd; 6137 if ((ndlp->nlp_type & NLP_FCP_TARGET) && 6138 (vpd->rev.feaLevelHigh >= 0x02)) { 6139 npr->ConfmComplAllowed = 1; 6140 npr->Retry = 1; 6141 npr->TaskRetryIdReq = 1; 6142 } 6143 npr->acceptRspCode = PRLI_REQ_EXECUTED; 6144 npr->estabImagePair = 1; 6145 npr->readXferRdyDis = 1; 6146 npr->ConfmComplAllowed = 1; 6147 npr->prliType = PRLI_FCP_TYPE; 6148 npr->initiatorFunc = 1; 6149 } else if (prli_fc4_req == PRLI_NVME_TYPE) { 6150 /* Respond with an NVME PRLI Type */ 6151 npr_nvme = (struct lpfc_nvme_prli *) pcmd; 6152 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); 6153 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ 6154 bf_set(prli_acc_rsp_code, npr_nvme, PRLI_REQ_EXECUTED); 6155 if (phba->nvmet_support) { 6156 bf_set(prli_tgt, npr_nvme, 1); 6157 bf_set(prli_disc, npr_nvme, 1); 6158 if (phba->cfg_nvme_enable_fb) { 6159 bf_set(prli_fba, npr_nvme, 1); 6160 6161 /* TBD. Target mode needs to post buffers 6162 * that support the configured first burst 6163 * byte size. 6164 */ 6165 bf_set(prli_fb_sz, npr_nvme, 6166 phba->cfg_nvmet_fb_size); 6167 } 6168 } else { 6169 bf_set(prli_init, npr_nvme, 1); 6170 } 6171 6172 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 6173 "6015 NVME issue PRLI ACC word1 x%08x " 6174 "word4 x%08x word5 x%08x flag x%x, " 6175 "fcp_info x%x nlp_type x%x\n", 6176 npr_nvme->word1, npr_nvme->word4, 6177 npr_nvme->word5, ndlp->nlp_flag, 6178 ndlp->nlp_fcp_info, ndlp->nlp_type); 6179 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); 6180 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); 6181 npr_nvme->word5 = cpu_to_be32(npr_nvme->word5); 6182 } else 6183 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6184 "6128 Unknown FC_TYPE x%x x%x ndlp x%06x\n", 6185 prli_fc4_req, ndlp->nlp_fc4_type, 6186 ndlp->nlp_DID); 6187 6188 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6189 "Issue ACC PRLI: did:x%x flg:x%x", 6190 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 6191 6192 phba->fc_stat.elsXmitACC++; 6193 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 6194 elsiocb->ndlp = lpfc_nlp_get(ndlp); 6195 if (!elsiocb->ndlp) { 6196 lpfc_els_free_iocb(phba, elsiocb); 6197 return 1; 6198 } 6199 6200 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6201 if (rc == IOCB_ERROR) { 6202 lpfc_els_free_iocb(phba, elsiocb); 6203 lpfc_nlp_put(ndlp); 6204 return 1; 6205 } 6206 6207 return 0; 6208 } 6209 6210 /** 6211 * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command 6212 * @vport: pointer to a virtual N_Port data structure. 6213 * @format: rnid command format. 6214 * @oldiocb: pointer to the original lpfc command iocb data structure. 6215 * @ndlp: pointer to a node-list data structure. 6216 * 6217 * This routine issues a Request Node Identification Data (RNID) Accept 6218 * (ACC) response. It constructs the RNID ACC response command according to 6219 * the proper @format and then calls the lpfc_sli_issue_iocb() routine to 6220 * issue the response. 6221 * 6222 * Note that the ndlp reference count will be incremented by 1 for holding the 6223 * ndlp and the reference to ndlp will be stored into the ndlp field of 6224 * the IOCB for the completion callback function. 
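 * When @format is RNID_TOPOLOGY_DISC, a topology discovery data block is
 * appended to the common identification data; an unrecognized format
 * results in zero-length common and specific data.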
6225 * 6226 * Return code 6227 * 0 - Successfully issued acc rnid response 6228 * 1 - Failed to issue acc rnid response 6229 **/ 6230 static int 6231 lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, 6232 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 6233 { 6234 struct lpfc_hba *phba = vport->phba; 6235 RNID *rn; 6236 IOCB_t *icmd, *oldcmd; 6237 union lpfc_wqe128 *wqe; 6238 struct lpfc_iocbq *elsiocb; 6239 uint8_t *pcmd; 6240 uint16_t cmdsize; 6241 int rc; 6242 u32 ulp_context; 6243 6244 cmdsize = sizeof(uint32_t) + sizeof(uint32_t) 6245 + (2 * sizeof(struct lpfc_name)); 6246 if (format) 6247 cmdsize += sizeof(RNID_TOP_DISC); 6248 6249 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 6250 ndlp->nlp_DID, ELS_CMD_ACC); 6251 if (!elsiocb) 6252 return 1; 6253 6254 if (phba->sli_rev == LPFC_SLI_REV4) { 6255 wqe = &elsiocb->wqe; 6256 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 6257 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */ 6258 ulp_context = get_job_ulpcontext(phba, elsiocb); 6259 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 6260 get_job_rcvoxid(phba, oldiocb)); 6261 } else { 6262 icmd = &elsiocb->iocb; 6263 oldcmd = &oldiocb->iocb; 6264 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 6265 ulp_context = elsiocb->iocb.ulpContext; 6266 icmd->unsli3.rcvsli3.ox_id = 6267 oldcmd->unsli3.rcvsli3.ox_id; 6268 } 6269 6270 /* Xmit RNID ACC response tag <ulpIoTag> */ 6271 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6272 "0132 Xmit RNID ACC response tag x%x xri x%x\n", 6273 elsiocb->iotag, ulp_context); 6274 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 6275 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 6276 pcmd += sizeof(uint32_t); 6277 6278 memset(pcmd, 0, sizeof(RNID)); 6279 rn = (RNID *) (pcmd); 6280 rn->Format = format; 6281 rn->CommonLen = (2 * sizeof(struct lpfc_name)); 6282 memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 6283 memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 6284 switch (format) { 6285 case 0: 6286 rn->SpecificLen = 0; 6287 break; 6288 case RNID_TOPOLOGY_DISC: 6289 rn->SpecificLen = sizeof(RNID_TOP_DISC); 6290 memcpy(&rn->un.topologyDisc.portName, 6291 &vport->fc_portname, sizeof(struct lpfc_name)); 6292 rn->un.topologyDisc.unitType = RNID_HBA; 6293 rn->un.topologyDisc.physPort = 0; 6294 rn->un.topologyDisc.attachedNodes = 0; 6295 break; 6296 default: 6297 rn->CommonLen = 0; 6298 rn->SpecificLen = 0; 6299 break; 6300 } 6301 6302 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6303 "Issue ACC RNID: did:x%x flg:x%x refcnt %d", 6304 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 6305 6306 phba->fc_stat.elsXmitACC++; 6307 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 6308 elsiocb->ndlp = lpfc_nlp_get(ndlp); 6309 if (!elsiocb->ndlp) { 6310 lpfc_els_free_iocb(phba, elsiocb); 6311 return 1; 6312 } 6313 6314 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6315 if (rc == IOCB_ERROR) { 6316 lpfc_els_free_iocb(phba, elsiocb); 6317 lpfc_nlp_put(ndlp); 6318 return 1; 6319 } 6320 6321 return 0; 6322 } 6323 6324 /** 6325 * lpfc_els_clear_rrq - Clear the rq that this rrq describes. 6326 * @vport: pointer to a virtual N_Port data structure. 6327 * @iocb: pointer to the lpfc command iocb data structure. 6328 * @ndlp: pointer to a node-list data structure. 
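 *
 * This routine parses the RRQ payload carried in @iocb, compares the port ID
 * in the RRQ with this port's D_ID to choose between the originator (OX_ID)
 * and responder (RX_ID) exchange IDs, and clears the matching active RRQ,
 * if any, for @ndlp.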
6329 * 6330 * Return 6331 **/ 6332 static void 6333 lpfc_els_clear_rrq(struct lpfc_vport *vport, 6334 struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp) 6335 { 6336 struct lpfc_hba *phba = vport->phba; 6337 uint8_t *pcmd; 6338 struct RRQ *rrq; 6339 uint16_t rxid; 6340 uint16_t xri; 6341 struct lpfc_node_rrq *prrq; 6342 6343 6344 pcmd = (uint8_t *)iocb->cmd_dmabuf->virt; 6345 pcmd += sizeof(uint32_t); 6346 rrq = (struct RRQ *)pcmd; 6347 rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg); 6348 rxid = bf_get(rrq_rxid, rrq); 6349 6350 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6351 "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x" 6352 " x%x x%x\n", 6353 be32_to_cpu(bf_get(rrq_did, rrq)), 6354 bf_get(rrq_oxid, rrq), 6355 rxid, 6356 get_wqe_reqtag(iocb), 6357 get_job_ulpcontext(phba, iocb)); 6358 6359 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6360 "Clear RRQ: did:x%x flg:x%x exchg:x%.08x", 6361 ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg); 6362 if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq))) 6363 xri = bf_get(rrq_oxid, rrq); 6364 else 6365 xri = rxid; 6366 prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID); 6367 if (prrq) 6368 lpfc_clr_rrq_active(phba, xri, prrq); 6369 return; 6370 } 6371 6372 /** 6373 * lpfc_els_rsp_echo_acc - Issue echo acc response 6374 * @vport: pointer to a virtual N_Port data structure. 6375 * @data: pointer to echo data to return in the accept. 6376 * @oldiocb: pointer to the original lpfc command iocb data structure. 6377 * @ndlp: pointer to a node-list data structure. 6378 * 6379 * Return code 6380 * 0 - Successfully issued acc echo response 6381 * 1 - Failed to issue acc echo response 6382 **/ 6383 static int 6384 lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data, 6385 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 6386 { 6387 struct lpfc_hba *phba = vport->phba; 6388 IOCB_t *icmd, *oldcmd; 6389 union lpfc_wqe128 *wqe; 6390 struct lpfc_iocbq *elsiocb; 6391 uint8_t *pcmd; 6392 uint16_t cmdsize; 6393 int rc; 6394 u32 ulp_context; 6395 6396 if (phba->sli_rev == LPFC_SLI_REV4) 6397 cmdsize = oldiocb->wcqe_cmpl.total_data_placed; 6398 else 6399 cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len; 6400 6401 /* The accumulated length can exceed the BPL_SIZE. 
For
6402 * now, use this as the limit
6403 */
6404 if (cmdsize > LPFC_BPL_SIZE)
6405 cmdsize = LPFC_BPL_SIZE;
6406 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
6407 ndlp->nlp_DID, ELS_CMD_ACC);
6408 if (!elsiocb)
6409 return 1;
6410 
6411 if (phba->sli_rev == LPFC_SLI_REV4) {
6412 wqe = &elsiocb->wqe;
6413 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
6414 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */
6415 ulp_context = get_job_ulpcontext(phba, elsiocb);
6416 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
6417 get_job_rcvoxid(phba, oldiocb));
6418 } else {
6419 icmd = &elsiocb->iocb;
6420 oldcmd = &oldiocb->iocb;
6421 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
6422 ulp_context = elsiocb->iocb.ulpContext;
6423 icmd->unsli3.rcvsli3.ox_id =
6424 oldcmd->unsli3.rcvsli3.ox_id;
6425 }
6426 
6427 /* Xmit ECHO ACC response tag <ulpIoTag> */
6428 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6429 "2876 Xmit ECHO ACC response tag x%x xri x%x\n",
6430 elsiocb->iotag, ulp_context);
6431 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
6432 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
6433 pcmd += sizeof(uint32_t);
6434 memcpy(pcmd, data, cmdsize - sizeof(uint32_t));
6435 
6436 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
6437 "Issue ACC ECHO: did:x%x flg:x%x refcnt %d",
6438 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
6439 
6440 phba->fc_stat.elsXmitACC++;
6441 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
6442 elsiocb->ndlp = lpfc_nlp_get(ndlp);
6443 if (!elsiocb->ndlp) {
6444 lpfc_els_free_iocb(phba, elsiocb);
6445 return 1;
6446 }
6447 
6448 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
6449 if (rc == IOCB_ERROR) {
6450 lpfc_els_free_iocb(phba, elsiocb);
6451 lpfc_nlp_put(ndlp);
6452 return 1;
6453 }
6454 
6455 return 0;
6456 }
6457 
6458 /**
6459 * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport
6460 * @vport: pointer to a host virtual N_Port data structure.
6461 *
6462 * This routine issues Address Discover (ADISC) ELS commands to those
6463 * N_Ports of the @vport that are in node port recovery state and for which
6464 * ADISC has not yet been issued. Each time an ELS ADISC IOCB is issued by
6465 * invoking the lpfc_issue_els_adisc() routine, the per-@vport discover
6466 * count (num_disc_nodes) is incremented. If num_disc_nodes reaches the
6467 * pre-configured threshold (cfg_discovery_threads), the @vport fc_flag is
6468 * marked with the FC_NLP_MORE bit and issuing of the remaining ADISC
6469 * IOCBs is deferred for a later pass. On the other hand, if the walk
6470 * through all the ndlps of the @vport issues no ADISC IOCB, the
6471 * FC_NLP_MORE bit is cleared from the @vport fc_flag, indicating there are
6472 * no more ADISCs to be sent.
6473 *
6474 * Return code
6475 * The number of N_Ports with adisc issued.
6476 **/
6477 int
6478 lpfc_els_disc_adisc(struct lpfc_vport *vport)
6479 {
6480 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6481 struct lpfc_nodelist *ndlp, *next_ndlp;
6482 int sentadisc = 0;
6483 
6484 /* go thru NPR nodes and issue any remaining ELS ADISCs */
6485 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
6486 
6487 if (ndlp->nlp_state != NLP_STE_NPR_NODE ||
6488 !(ndlp->nlp_flag & NLP_NPR_ADISC))
6489 continue;
6490 
6491 spin_lock_irq(&ndlp->lock);
6492 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
6493 spin_unlock_irq(&ndlp->lock);
6494 
6495 if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
6496 /* This node was marked for ADISC but was not picked
6497 * for discovery. This is possible if the node was
6498 * missing in the gidft response.
6499 *
6500 * At the time of marking the node for ADISC, we skipped
6501 * the unreg from the backend.
6502 */
6503 lpfc_nlp_unreg_node(vport, ndlp);
6504 lpfc_unreg_rpi(vport, ndlp);
6505 continue;
6506 }
6507 
6508 ndlp->nlp_prev_state = ndlp->nlp_state;
6509 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
6510 lpfc_issue_els_adisc(vport, ndlp, 0);
6511 sentadisc++;
6512 vport->num_disc_nodes++;
6513 if (vport->num_disc_nodes >=
6514 vport->cfg_discovery_threads) {
6515 spin_lock_irq(shost->host_lock);
6516 vport->fc_flag |= FC_NLP_MORE;
6517 spin_unlock_irq(shost->host_lock);
6518 break;
6519 }
6520 
6521 }
6522 if (sentadisc == 0) {
6523 spin_lock_irq(shost->host_lock);
6524 vport->fc_flag &= ~FC_NLP_MORE;
6525 spin_unlock_irq(shost->host_lock);
6526 }
6527 return sentadisc;
6528 }
6529 
6530 /**
6531 * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc
6532 * @vport: pointer to a host virtual N_Port data structure.
6533 *
6534 * This routine issues Port Login (PLOGI) ELS commands to all the
6535 * N_Ports of the @vport that are in node port recovery state. Each time
6536 * an ELS PLOGI IOCB is issued by invoking the lpfc_issue_els_plogi()
6537 * routine, the per-@vport discover count (num_disc_nodes) is incremented.
6538 * If num_disc_nodes reaches the pre-configured threshold
6539 * (cfg_discovery_threads), the @vport fc_flag is marked with the
6540 * FC_NLP_MORE bit and issuing of the remaining PLOGI IOCBs is deferred
6541 * for a later pass. On the other hand, if the walk through all the
6542 * ndlps of the @vport issues no PLOGI IOCB, the FC_NLP_MORE bit is
6543 * cleared from the @vport fc_flag, indicating there are no more PLOGIs
6544 * to be sent.
6545 *
6546 * Return code
6547 * The number of N_Ports with plogi issued.
6548 **/ 6549 int 6550 lpfc_els_disc_plogi(struct lpfc_vport *vport) 6551 { 6552 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6553 struct lpfc_nodelist *ndlp, *next_ndlp; 6554 int sentplogi = 0; 6555 6556 /* go thru NPR nodes and issue any remaining ELS PLOGIs */ 6557 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 6558 if (ndlp->nlp_state == NLP_STE_NPR_NODE && 6559 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && 6560 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 && 6561 (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) { 6562 ndlp->nlp_prev_state = ndlp->nlp_state; 6563 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 6564 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 6565 sentplogi++; 6566 vport->num_disc_nodes++; 6567 if (vport->num_disc_nodes >= 6568 vport->cfg_discovery_threads) { 6569 spin_lock_irq(shost->host_lock); 6570 vport->fc_flag |= FC_NLP_MORE; 6571 spin_unlock_irq(shost->host_lock); 6572 break; 6573 } 6574 } 6575 } 6576 6577 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6578 "6452 Discover PLOGI %d flag x%x\n", 6579 sentplogi, vport->fc_flag); 6580 6581 if (sentplogi) { 6582 lpfc_set_disctmo(vport); 6583 } 6584 else { 6585 spin_lock_irq(shost->host_lock); 6586 vport->fc_flag &= ~FC_NLP_MORE; 6587 spin_unlock_irq(shost->host_lock); 6588 } 6589 return sentplogi; 6590 } 6591 6592 static uint32_t 6593 lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc, 6594 uint32_t word0) 6595 { 6596 6597 desc->tag = cpu_to_be32(RDP_LINK_SERVICE_DESC_TAG); 6598 desc->payload.els_req = word0; 6599 desc->length = cpu_to_be32(sizeof(desc->payload)); 6600 6601 return sizeof(struct fc_rdp_link_service_desc); 6602 } 6603 6604 static uint32_t 6605 lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc, 6606 uint8_t *page_a0, uint8_t *page_a2) 6607 { 6608 uint16_t wavelength; 6609 uint16_t temperature; 6610 uint16_t rx_power; 6611 uint16_t tx_bias; 6612 uint16_t tx_power; 6613 uint16_t vcc; 6614 uint16_t flag = 0; 6615 struct sff_trasnceiver_codes_byte4 *trasn_code_byte4; 6616 struct sff_trasnceiver_codes_byte5 *trasn_code_byte5; 6617 6618 desc->tag = cpu_to_be32(RDP_SFP_DESC_TAG); 6619 6620 trasn_code_byte4 = (struct sff_trasnceiver_codes_byte4 *) 6621 &page_a0[SSF_TRANSCEIVER_CODE_B4]; 6622 trasn_code_byte5 = (struct sff_trasnceiver_codes_byte5 *) 6623 &page_a0[SSF_TRANSCEIVER_CODE_B5]; 6624 6625 if ((trasn_code_byte4->fc_sw_laser) || 6626 (trasn_code_byte5->fc_sw_laser_sl) || 6627 (trasn_code_byte5->fc_sw_laser_sn)) { /* check if its short WL */ 6628 flag |= (SFP_FLAG_PT_SWLASER << SFP_FLAG_PT_SHIFT); 6629 } else if (trasn_code_byte4->fc_lw_laser) { 6630 wavelength = (page_a0[SSF_WAVELENGTH_B1] << 8) | 6631 page_a0[SSF_WAVELENGTH_B0]; 6632 if (wavelength == SFP_WAVELENGTH_LC1310) 6633 flag |= SFP_FLAG_PT_LWLASER_LC1310 << SFP_FLAG_PT_SHIFT; 6634 if (wavelength == SFP_WAVELENGTH_LL1550) 6635 flag |= SFP_FLAG_PT_LWLASER_LL1550 << SFP_FLAG_PT_SHIFT; 6636 } 6637 /* check if its SFP+ */ 6638 flag |= ((page_a0[SSF_IDENTIFIER] == SFF_PG0_IDENT_SFP) ? 6639 SFP_FLAG_CT_SFP_PLUS : SFP_FLAG_CT_UNKNOWN) 6640 << SFP_FLAG_CT_SHIFT; 6641 6642 /* check if its OPTICAL */ 6643 flag |= ((page_a0[SSF_CONNECTOR] == SFF_PG0_CONNECTOR_LC) ? 
6644 SFP_FLAG_IS_OPTICAL_PORT : 0) 6645 << SFP_FLAG_IS_OPTICAL_SHIFT; 6646 6647 temperature = (page_a2[SFF_TEMPERATURE_B1] << 8 | 6648 page_a2[SFF_TEMPERATURE_B0]); 6649 vcc = (page_a2[SFF_VCC_B1] << 8 | 6650 page_a2[SFF_VCC_B0]); 6651 tx_power = (page_a2[SFF_TXPOWER_B1] << 8 | 6652 page_a2[SFF_TXPOWER_B0]); 6653 tx_bias = (page_a2[SFF_TX_BIAS_CURRENT_B1] << 8 | 6654 page_a2[SFF_TX_BIAS_CURRENT_B0]); 6655 rx_power = (page_a2[SFF_RXPOWER_B1] << 8 | 6656 page_a2[SFF_RXPOWER_B0]); 6657 desc->sfp_info.temperature = cpu_to_be16(temperature); 6658 desc->sfp_info.rx_power = cpu_to_be16(rx_power); 6659 desc->sfp_info.tx_bias = cpu_to_be16(tx_bias); 6660 desc->sfp_info.tx_power = cpu_to_be16(tx_power); 6661 desc->sfp_info.vcc = cpu_to_be16(vcc); 6662 6663 desc->sfp_info.flags = cpu_to_be16(flag); 6664 desc->length = cpu_to_be32(sizeof(desc->sfp_info)); 6665 6666 return sizeof(struct fc_rdp_sfp_desc); 6667 } 6668 6669 static uint32_t 6670 lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc, 6671 READ_LNK_VAR *stat) 6672 { 6673 uint32_t type; 6674 6675 desc->tag = cpu_to_be32(RDP_LINK_ERROR_STATUS_DESC_TAG); 6676 6677 type = VN_PT_PHY_PF_PORT << VN_PT_PHY_SHIFT; 6678 6679 desc->info.port_type = cpu_to_be32(type); 6680 6681 desc->info.link_status.link_failure_cnt = 6682 cpu_to_be32(stat->linkFailureCnt); 6683 desc->info.link_status.loss_of_synch_cnt = 6684 cpu_to_be32(stat->lossSyncCnt); 6685 desc->info.link_status.loss_of_signal_cnt = 6686 cpu_to_be32(stat->lossSignalCnt); 6687 desc->info.link_status.primitive_seq_proto_err = 6688 cpu_to_be32(stat->primSeqErrCnt); 6689 desc->info.link_status.invalid_trans_word = 6690 cpu_to_be32(stat->invalidXmitWord); 6691 desc->info.link_status.invalid_crc_cnt = cpu_to_be32(stat->crcCnt); 6692 6693 desc->length = cpu_to_be32(sizeof(desc->info)); 6694 6695 return sizeof(struct fc_rdp_link_error_status_desc); 6696 } 6697 6698 static uint32_t 6699 lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat, 6700 struct lpfc_vport *vport) 6701 { 6702 uint32_t bbCredit; 6703 6704 desc->tag = cpu_to_be32(RDP_BBC_DESC_TAG); 6705 6706 bbCredit = vport->fc_sparam.cmn.bbCreditLsb | 6707 (vport->fc_sparam.cmn.bbCreditMsb << 8); 6708 desc->bbc_info.port_bbc = cpu_to_be32(bbCredit); 6709 if (vport->phba->fc_topology != LPFC_TOPOLOGY_LOOP) { 6710 bbCredit = vport->phba->fc_fabparam.cmn.bbCreditLsb | 6711 (vport->phba->fc_fabparam.cmn.bbCreditMsb << 8); 6712 desc->bbc_info.attached_port_bbc = cpu_to_be32(bbCredit); 6713 } else { 6714 desc->bbc_info.attached_port_bbc = 0; 6715 } 6716 6717 desc->bbc_info.rtt = 0; 6718 desc->length = cpu_to_be32(sizeof(desc->bbc_info)); 6719 6720 return sizeof(struct fc_rdp_bbc_desc); 6721 } 6722 6723 static uint32_t 6724 lpfc_rdp_res_oed_temp_desc(struct lpfc_hba *phba, 6725 struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2) 6726 { 6727 uint32_t flags = 0; 6728 6729 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6730 6731 desc->oed_info.hi_alarm = page_a2[SSF_TEMP_HIGH_ALARM]; 6732 desc->oed_info.lo_alarm = page_a2[SSF_TEMP_LOW_ALARM]; 6733 desc->oed_info.hi_warning = page_a2[SSF_TEMP_HIGH_WARNING]; 6734 desc->oed_info.lo_warning = page_a2[SSF_TEMP_LOW_WARNING]; 6735 6736 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TEMPERATURE) 6737 flags |= RDP_OET_HIGH_ALARM; 6738 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TEMPERATURE) 6739 flags |= RDP_OET_LOW_ALARM; 6740 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TEMPERATURE) 6741 flags |= RDP_OET_HIGH_WARNING; 6742 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TEMPERATURE) 6743 
flags |= RDP_OET_LOW_WARNING; 6744 6745 flags |= ((0xf & RDP_OED_TEMPERATURE) << RDP_OED_TYPE_SHIFT); 6746 desc->oed_info.function_flags = cpu_to_be32(flags); 6747 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6748 return sizeof(struct fc_rdp_oed_sfp_desc); 6749 } 6750 6751 static uint32_t 6752 lpfc_rdp_res_oed_voltage_desc(struct lpfc_hba *phba, 6753 struct fc_rdp_oed_sfp_desc *desc, 6754 uint8_t *page_a2) 6755 { 6756 uint32_t flags = 0; 6757 6758 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6759 6760 desc->oed_info.hi_alarm = page_a2[SSF_VOLTAGE_HIGH_ALARM]; 6761 desc->oed_info.lo_alarm = page_a2[SSF_VOLTAGE_LOW_ALARM]; 6762 desc->oed_info.hi_warning = page_a2[SSF_VOLTAGE_HIGH_WARNING]; 6763 desc->oed_info.lo_warning = page_a2[SSF_VOLTAGE_LOW_WARNING]; 6764 6765 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_VOLTAGE) 6766 flags |= RDP_OET_HIGH_ALARM; 6767 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_VOLTAGE) 6768 flags |= RDP_OET_LOW_ALARM; 6769 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_VOLTAGE) 6770 flags |= RDP_OET_HIGH_WARNING; 6771 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_VOLTAGE) 6772 flags |= RDP_OET_LOW_WARNING; 6773 6774 flags |= ((0xf & RDP_OED_VOLTAGE) << RDP_OED_TYPE_SHIFT); 6775 desc->oed_info.function_flags = cpu_to_be32(flags); 6776 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6777 return sizeof(struct fc_rdp_oed_sfp_desc); 6778 } 6779 6780 static uint32_t 6781 lpfc_rdp_res_oed_txbias_desc(struct lpfc_hba *phba, 6782 struct fc_rdp_oed_sfp_desc *desc, 6783 uint8_t *page_a2) 6784 { 6785 uint32_t flags = 0; 6786 6787 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6788 6789 desc->oed_info.hi_alarm = page_a2[SSF_BIAS_HIGH_ALARM]; 6790 desc->oed_info.lo_alarm = page_a2[SSF_BIAS_LOW_ALARM]; 6791 desc->oed_info.hi_warning = page_a2[SSF_BIAS_HIGH_WARNING]; 6792 desc->oed_info.lo_warning = page_a2[SSF_BIAS_LOW_WARNING]; 6793 6794 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXBIAS) 6795 flags |= RDP_OET_HIGH_ALARM; 6796 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXBIAS) 6797 flags |= RDP_OET_LOW_ALARM; 6798 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXBIAS) 6799 flags |= RDP_OET_HIGH_WARNING; 6800 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXBIAS) 6801 flags |= RDP_OET_LOW_WARNING; 6802 6803 flags |= ((0xf & RDP_OED_TXBIAS) << RDP_OED_TYPE_SHIFT); 6804 desc->oed_info.function_flags = cpu_to_be32(flags); 6805 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6806 return sizeof(struct fc_rdp_oed_sfp_desc); 6807 } 6808 6809 static uint32_t 6810 lpfc_rdp_res_oed_txpower_desc(struct lpfc_hba *phba, 6811 struct fc_rdp_oed_sfp_desc *desc, 6812 uint8_t *page_a2) 6813 { 6814 uint32_t flags = 0; 6815 6816 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6817 6818 desc->oed_info.hi_alarm = page_a2[SSF_TXPOWER_HIGH_ALARM]; 6819 desc->oed_info.lo_alarm = page_a2[SSF_TXPOWER_LOW_ALARM]; 6820 desc->oed_info.hi_warning = page_a2[SSF_TXPOWER_HIGH_WARNING]; 6821 desc->oed_info.lo_warning = page_a2[SSF_TXPOWER_LOW_WARNING]; 6822 6823 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXPOWER) 6824 flags |= RDP_OET_HIGH_ALARM; 6825 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXPOWER) 6826 flags |= RDP_OET_LOW_ALARM; 6827 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXPOWER) 6828 flags |= RDP_OET_HIGH_WARNING; 6829 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXPOWER) 6830 flags |= RDP_OET_LOW_WARNING; 6831 6832 flags |= ((0xf & RDP_OED_TXPOWER) << RDP_OED_TYPE_SHIFT); 6833 desc->oed_info.function_flags = cpu_to_be32(flags); 6834 desc->length = 
cpu_to_be32(sizeof(desc->oed_info)); 6835 return sizeof(struct fc_rdp_oed_sfp_desc); 6836 } 6837 6838 6839 static uint32_t 6840 lpfc_rdp_res_oed_rxpower_desc(struct lpfc_hba *phba, 6841 struct fc_rdp_oed_sfp_desc *desc, 6842 uint8_t *page_a2) 6843 { 6844 uint32_t flags = 0; 6845 6846 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6847 6848 desc->oed_info.hi_alarm = page_a2[SSF_RXPOWER_HIGH_ALARM]; 6849 desc->oed_info.lo_alarm = page_a2[SSF_RXPOWER_LOW_ALARM]; 6850 desc->oed_info.hi_warning = page_a2[SSF_RXPOWER_HIGH_WARNING]; 6851 desc->oed_info.lo_warning = page_a2[SSF_RXPOWER_LOW_WARNING]; 6852 6853 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_RXPOWER) 6854 flags |= RDP_OET_HIGH_ALARM; 6855 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_RXPOWER) 6856 flags |= RDP_OET_LOW_ALARM; 6857 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_RXPOWER) 6858 flags |= RDP_OET_HIGH_WARNING; 6859 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_RXPOWER) 6860 flags |= RDP_OET_LOW_WARNING; 6861 6862 flags |= ((0xf & RDP_OED_RXPOWER) << RDP_OED_TYPE_SHIFT); 6863 desc->oed_info.function_flags = cpu_to_be32(flags); 6864 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6865 return sizeof(struct fc_rdp_oed_sfp_desc); 6866 } 6867 6868 static uint32_t 6869 lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc, 6870 uint8_t *page_a0, struct lpfc_vport *vport) 6871 { 6872 desc->tag = cpu_to_be32(RDP_OPD_DESC_TAG); 6873 memcpy(desc->opd_info.vendor_name, &page_a0[SSF_VENDOR_NAME], 16); 6874 memcpy(desc->opd_info.model_number, &page_a0[SSF_VENDOR_PN], 16); 6875 memcpy(desc->opd_info.serial_number, &page_a0[SSF_VENDOR_SN], 16); 6876 memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 4); 6877 memcpy(desc->opd_info.date, &page_a0[SSF_DATE_CODE], 8); 6878 desc->length = cpu_to_be32(sizeof(desc->opd_info)); 6879 return sizeof(struct fc_rdp_opd_sfp_desc); 6880 } 6881 6882 static uint32_t 6883 lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat) 6884 { 6885 if (bf_get(lpfc_read_link_stat_gec2, stat) == 0) 6886 return 0; 6887 desc->tag = cpu_to_be32(RDP_FEC_DESC_TAG); 6888 6889 desc->info.CorrectedBlocks = 6890 cpu_to_be32(stat->fecCorrBlkCount); 6891 desc->info.UncorrectableBlocks = 6892 cpu_to_be32(stat->fecUncorrBlkCount); 6893 6894 desc->length = cpu_to_be32(sizeof(desc->info)); 6895 6896 return sizeof(struct fc_fec_rdp_desc); 6897 } 6898 6899 static uint32_t 6900 lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba) 6901 { 6902 uint16_t rdp_cap = 0; 6903 uint16_t rdp_speed; 6904 6905 desc->tag = cpu_to_be32(RDP_PORT_SPEED_DESC_TAG); 6906 6907 switch (phba->fc_linkspeed) { 6908 case LPFC_LINK_SPEED_1GHZ: 6909 rdp_speed = RDP_PS_1GB; 6910 break; 6911 case LPFC_LINK_SPEED_2GHZ: 6912 rdp_speed = RDP_PS_2GB; 6913 break; 6914 case LPFC_LINK_SPEED_4GHZ: 6915 rdp_speed = RDP_PS_4GB; 6916 break; 6917 case LPFC_LINK_SPEED_8GHZ: 6918 rdp_speed = RDP_PS_8GB; 6919 break; 6920 case LPFC_LINK_SPEED_10GHZ: 6921 rdp_speed = RDP_PS_10GB; 6922 break; 6923 case LPFC_LINK_SPEED_16GHZ: 6924 rdp_speed = RDP_PS_16GB; 6925 break; 6926 case LPFC_LINK_SPEED_32GHZ: 6927 rdp_speed = RDP_PS_32GB; 6928 break; 6929 case LPFC_LINK_SPEED_64GHZ: 6930 rdp_speed = RDP_PS_64GB; 6931 break; 6932 case LPFC_LINK_SPEED_128GHZ: 6933 rdp_speed = RDP_PS_128GB; 6934 break; 6935 case LPFC_LINK_SPEED_256GHZ: 6936 rdp_speed = RDP_PS_256GB; 6937 break; 6938 default: 6939 rdp_speed = RDP_PS_UNKNOWN; 6940 break; 6941 } 6942 6943 desc->info.port_speed.speed = cpu_to_be16(rdp_speed); 6944 6945 if (phba->lmt & LMT_256Gb) 6946 
rdp_cap |= RDP_PS_256GB; 6947 if (phba->lmt & LMT_128Gb) 6948 rdp_cap |= RDP_PS_128GB; 6949 if (phba->lmt & LMT_64Gb) 6950 rdp_cap |= RDP_PS_64GB; 6951 if (phba->lmt & LMT_32Gb) 6952 rdp_cap |= RDP_PS_32GB; 6953 if (phba->lmt & LMT_16Gb) 6954 rdp_cap |= RDP_PS_16GB; 6955 if (phba->lmt & LMT_10Gb) 6956 rdp_cap |= RDP_PS_10GB; 6957 if (phba->lmt & LMT_8Gb) 6958 rdp_cap |= RDP_PS_8GB; 6959 if (phba->lmt & LMT_4Gb) 6960 rdp_cap |= RDP_PS_4GB; 6961 if (phba->lmt & LMT_2Gb) 6962 rdp_cap |= RDP_PS_2GB; 6963 if (phba->lmt & LMT_1Gb) 6964 rdp_cap |= RDP_PS_1GB; 6965 6966 if (rdp_cap == 0) 6967 rdp_cap = RDP_CAP_UNKNOWN; 6968 if (phba->cfg_link_speed != LPFC_USER_LINK_SPEED_AUTO) 6969 rdp_cap |= RDP_CAP_USER_CONFIGURED; 6970 6971 desc->info.port_speed.capabilities = cpu_to_be16(rdp_cap); 6972 desc->length = cpu_to_be32(sizeof(desc->info)); 6973 return sizeof(struct fc_rdp_port_speed_desc); 6974 } 6975 6976 static uint32_t 6977 lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc, 6978 struct lpfc_vport *vport) 6979 { 6980 6981 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG); 6982 6983 memcpy(desc->port_names.wwnn, &vport->fc_nodename, 6984 sizeof(desc->port_names.wwnn)); 6985 6986 memcpy(desc->port_names.wwpn, &vport->fc_portname, 6987 sizeof(desc->port_names.wwpn)); 6988 6989 desc->length = cpu_to_be32(sizeof(desc->port_names)); 6990 return sizeof(struct fc_rdp_port_name_desc); 6991 } 6992 6993 static uint32_t 6994 lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc, 6995 struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 6996 { 6997 6998 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG); 6999 if (vport->fc_flag & FC_FABRIC) { 7000 memcpy(desc->port_names.wwnn, &vport->fabric_nodename, 7001 sizeof(desc->port_names.wwnn)); 7002 7003 memcpy(desc->port_names.wwpn, &vport->fabric_portname, 7004 sizeof(desc->port_names.wwpn)); 7005 } else { /* Point to Point */ 7006 memcpy(desc->port_names.wwnn, &ndlp->nlp_nodename, 7007 sizeof(desc->port_names.wwnn)); 7008 7009 memcpy(desc->port_names.wwpn, &ndlp->nlp_portname, 7010 sizeof(desc->port_names.wwpn)); 7011 } 7012 7013 desc->length = cpu_to_be32(sizeof(desc->port_names)); 7014 return sizeof(struct fc_rdp_port_name_desc); 7015 } 7016 7017 static void 7018 lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context, 7019 int status) 7020 { 7021 struct lpfc_nodelist *ndlp = rdp_context->ndlp; 7022 struct lpfc_vport *vport = ndlp->vport; 7023 struct lpfc_iocbq *elsiocb; 7024 struct ulp_bde64 *bpl; 7025 IOCB_t *icmd; 7026 union lpfc_wqe128 *wqe; 7027 uint8_t *pcmd; 7028 struct ls_rjt *stat; 7029 struct fc_rdp_res_frame *rdp_res; 7030 uint32_t cmdsize, len; 7031 uint16_t *flag_ptr; 7032 int rc; 7033 u32 ulp_context; 7034 7035 if (status != SUCCESS) 7036 goto error; 7037 7038 /* This will change once we know the true size of the RDP payload */ 7039 cmdsize = sizeof(struct fc_rdp_res_frame); 7040 7041 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, 7042 lpfc_max_els_tries, rdp_context->ndlp, 7043 rdp_context->ndlp->nlp_DID, ELS_CMD_ACC); 7044 if (!elsiocb) 7045 goto free_rdp_context; 7046 7047 ulp_context = get_job_ulpcontext(phba, elsiocb); 7048 if (phba->sli_rev == LPFC_SLI_REV4) { 7049 wqe = &elsiocb->wqe; 7050 /* ox-id of the frame */ 7051 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 7052 rdp_context->ox_id); 7053 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 7054 rdp_context->rx_id); 7055 } else { 7056 icmd = &elsiocb->iocb; 7057 icmd->ulpContext = rdp_context->rx_id; 7058 icmd->unsli3.rcvsli3.ox_id = 
rdp_context->ox_id; 7059 } 7060 7061 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7062 "2171 Xmit RDP response tag x%x xri x%x, " 7063 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x", 7064 elsiocb->iotag, ulp_context, 7065 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 7066 ndlp->nlp_rpi); 7067 rdp_res = (struct fc_rdp_res_frame *)elsiocb->cmd_dmabuf->virt; 7068 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 7069 memset(pcmd, 0, sizeof(struct fc_rdp_res_frame)); 7070 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 7071 7072 /* Update Alarm and Warning */ 7073 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_ALARM_FLAGS); 7074 phba->sfp_alarm |= *flag_ptr; 7075 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_WARNING_FLAGS); 7076 phba->sfp_warning |= *flag_ptr; 7077 7078 /* For RDP payload */ 7079 len = 8; 7080 len += lpfc_rdp_res_link_service((struct fc_rdp_link_service_desc *) 7081 (len + pcmd), ELS_CMD_RDP); 7082 7083 len += lpfc_rdp_res_sfp_desc((struct fc_rdp_sfp_desc *)(len + pcmd), 7084 rdp_context->page_a0, rdp_context->page_a2); 7085 len += lpfc_rdp_res_speed((struct fc_rdp_port_speed_desc *)(len + pcmd), 7086 phba); 7087 len += lpfc_rdp_res_link_error((struct fc_rdp_link_error_status_desc *) 7088 (len + pcmd), &rdp_context->link_stat); 7089 len += lpfc_rdp_res_diag_port_names((struct fc_rdp_port_name_desc *) 7090 (len + pcmd), vport); 7091 len += lpfc_rdp_res_attach_port_names((struct fc_rdp_port_name_desc *) 7092 (len + pcmd), vport, ndlp); 7093 len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd), 7094 &rdp_context->link_stat); 7095 len += lpfc_rdp_res_bbc_desc((struct fc_rdp_bbc_desc *)(len + pcmd), 7096 &rdp_context->link_stat, vport); 7097 len += lpfc_rdp_res_oed_temp_desc(phba, 7098 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7099 rdp_context->page_a2); 7100 len += lpfc_rdp_res_oed_voltage_desc(phba, 7101 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7102 rdp_context->page_a2); 7103 len += lpfc_rdp_res_oed_txbias_desc(phba, 7104 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7105 rdp_context->page_a2); 7106 len += lpfc_rdp_res_oed_txpower_desc(phba, 7107 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7108 rdp_context->page_a2); 7109 len += lpfc_rdp_res_oed_rxpower_desc(phba, 7110 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7111 rdp_context->page_a2); 7112 len += lpfc_rdp_res_opd_desc((struct fc_rdp_opd_sfp_desc *)(len + pcmd), 7113 rdp_context->page_a0, vport); 7114 7115 rdp_res->length = cpu_to_be32(len - 8); 7116 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 7117 7118 /* Now that we know the true size of the payload, update the BPL */ 7119 bpl = (struct ulp_bde64 *)elsiocb->bpl_dmabuf->virt; 7120 bpl->tus.f.bdeSize = len; 7121 bpl->tus.f.bdeFlags = 0; 7122 bpl->tus.w = le32_to_cpu(bpl->tus.w); 7123 7124 phba->fc_stat.elsXmitACC++; 7125 elsiocb->ndlp = lpfc_nlp_get(ndlp); 7126 if (!elsiocb->ndlp) { 7127 lpfc_els_free_iocb(phba, elsiocb); 7128 goto free_rdp_context; 7129 } 7130 7131 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7132 if (rc == IOCB_ERROR) { 7133 lpfc_els_free_iocb(phba, elsiocb); 7134 lpfc_nlp_put(ndlp); 7135 } 7136 7137 goto free_rdp_context; 7138 7139 error: 7140 cmdsize = 2 * sizeof(uint32_t); 7141 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, lpfc_max_els_tries, 7142 ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT); 7143 if (!elsiocb) 7144 goto free_rdp_context; 7145 7146 if (phba->sli_rev == LPFC_SLI_REV4) { 7147 wqe = &elsiocb->wqe; 7148 /* ox-id of the frame */ 7149 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 7150 rdp_context->ox_id); 7151 
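/* Echo the RX_ID of the original unsolicited RDP as well so the
 * LS_RJT is returned on the same exchange that carried the request.
 */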
bf_set(wqe_ctxt_tag, 7152 &wqe->xmit_els_rsp.wqe_com, 7153 rdp_context->rx_id); 7154 } else { 7155 icmd = &elsiocb->iocb; 7156 icmd->ulpContext = rdp_context->rx_id; 7157 icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id; 7158 } 7159 7160 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 7161 7162 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; 7163 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t)); 7164 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 7165 7166 phba->fc_stat.elsXmitLSRJT++; 7167 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 7168 elsiocb->ndlp = lpfc_nlp_get(ndlp); 7169 if (!elsiocb->ndlp) { 7170 lpfc_els_free_iocb(phba, elsiocb); 7171 goto free_rdp_context; 7172 } 7173 7174 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7175 if (rc == IOCB_ERROR) { 7176 lpfc_els_free_iocb(phba, elsiocb); 7177 lpfc_nlp_put(ndlp); 7178 } 7179 7180 free_rdp_context: 7181 /* This reference put is for the original unsolicited RDP. If the 7182 * prep failed, there is no reference to remove. 7183 */ 7184 lpfc_nlp_put(ndlp); 7185 kfree(rdp_context); 7186 } 7187 7188 static int 7189 lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context) 7190 { 7191 LPFC_MBOXQ_t *mbox = NULL; 7192 int rc; 7193 7194 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7195 if (!mbox) { 7196 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS, 7197 "7105 failed to allocate mailbox memory"); 7198 return 1; 7199 } 7200 7201 if (lpfc_sli4_dump_page_a0(phba, mbox)) 7202 goto rdp_fail; 7203 mbox->vport = rdp_context->ndlp->vport; 7204 mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0; 7205 mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context; 7206 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 7207 if (rc == MBX_NOT_FINISHED) { 7208 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 7209 return 1; 7210 } 7211 7212 return 0; 7213 7214 rdp_fail: 7215 mempool_free(mbox, phba->mbox_mem_pool); 7216 return 1; 7217 } 7218 7219 int lpfc_get_sfp_info_wait(struct lpfc_hba *phba, 7220 struct lpfc_rdp_context *rdp_context) 7221 { 7222 LPFC_MBOXQ_t *mbox = NULL; 7223 int rc; 7224 struct lpfc_dmabuf *mp; 7225 struct lpfc_dmabuf *mpsave; 7226 void *virt; 7227 MAILBOX_t *mb; 7228 7229 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7230 if (!mbox) { 7231 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS, 7232 "7205 failed to allocate mailbox memory"); 7233 return 1; 7234 } 7235 7236 if (lpfc_sli4_dump_page_a0(phba, mbox)) 7237 goto sfp_fail; 7238 mp = mbox->ctx_buf; 7239 mpsave = mp; 7240 virt = mp->virt; 7241 if (phba->sli_rev < LPFC_SLI_REV4) { 7242 mb = &mbox->u.mb; 7243 mb->un.varDmp.cv = 1; 7244 mb->un.varDmp.co = 1; 7245 mb->un.varWords[2] = 0; 7246 mb->un.varWords[3] = DMP_SFF_PAGE_A0_SIZE / 4; 7247 mb->un.varWords[4] = 0; 7248 mb->un.varWords[5] = 0; 7249 mb->un.varWords[6] = 0; 7250 mb->un.varWords[7] = 0; 7251 mb->un.varWords[8] = 0; 7252 mb->un.varWords[9] = 0; 7253 mb->un.varWords[10] = 0; 7254 mbox->in_ext_byte_len = DMP_SFF_PAGE_A0_SIZE; 7255 mbox->out_ext_byte_len = DMP_SFF_PAGE_A0_SIZE; 7256 mbox->mbox_offset_word = 5; 7257 mbox->ctx_buf = virt; 7258 } else { 7259 bf_set(lpfc_mbx_memory_dump_type3_length, 7260 &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A0_SIZE); 7261 mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys); 7262 mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys); 7263 } 7264 mbox->vport = phba->pport; 7265 mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context; 7266 7267 rc = lpfc_sli_issue_mbox_wait(phba, mbox, 30); 7268 if (rc == MBX_NOT_FINISHED) { 
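/* The synchronous DUMP_MEMORY mailbox for page A0 either could not
 * be issued or did not complete within the 30 second wait; report
 * failure and let the error path below release the mailbox resources.
 */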
7269 rc = 1; 7270 goto error; 7271 } 7272 7273 if (phba->sli_rev == LPFC_SLI_REV4) 7274 mp = (struct lpfc_dmabuf *)(mbox->ctx_buf); 7275 else 7276 mp = mpsave; 7277 7278 if (bf_get(lpfc_mqe_status, &mbox->u.mqe)) { 7279 rc = 1; 7280 goto error; 7281 } 7282 7283 lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a0, 7284 DMP_SFF_PAGE_A0_SIZE); 7285 7286 memset(mbox, 0, sizeof(*mbox)); 7287 memset(mp->virt, 0, DMP_SFF_PAGE_A2_SIZE); 7288 INIT_LIST_HEAD(&mp->list); 7289 7290 /* save address for completion */ 7291 mbox->ctx_buf = mp; 7292 mbox->vport = phba->pport; 7293 7294 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_DUMP_MEMORY); 7295 bf_set(lpfc_mbx_memory_dump_type3_type, 7296 &mbox->u.mqe.un.mem_dump_type3, DMP_LMSD); 7297 bf_set(lpfc_mbx_memory_dump_type3_link, 7298 &mbox->u.mqe.un.mem_dump_type3, phba->sli4_hba.physical_port); 7299 bf_set(lpfc_mbx_memory_dump_type3_page_no, 7300 &mbox->u.mqe.un.mem_dump_type3, DMP_PAGE_A2); 7301 if (phba->sli_rev < LPFC_SLI_REV4) { 7302 mb = &mbox->u.mb; 7303 mb->un.varDmp.cv = 1; 7304 mb->un.varDmp.co = 1; 7305 mb->un.varWords[2] = 0; 7306 mb->un.varWords[3] = DMP_SFF_PAGE_A2_SIZE / 4; 7307 mb->un.varWords[4] = 0; 7308 mb->un.varWords[5] = 0; 7309 mb->un.varWords[6] = 0; 7310 mb->un.varWords[7] = 0; 7311 mb->un.varWords[8] = 0; 7312 mb->un.varWords[9] = 0; 7313 mb->un.varWords[10] = 0; 7314 mbox->in_ext_byte_len = DMP_SFF_PAGE_A2_SIZE; 7315 mbox->out_ext_byte_len = DMP_SFF_PAGE_A2_SIZE; 7316 mbox->mbox_offset_word = 5; 7317 mbox->ctx_buf = virt; 7318 } else { 7319 bf_set(lpfc_mbx_memory_dump_type3_length, 7320 &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A2_SIZE); 7321 mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys); 7322 mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys); 7323 } 7324 7325 mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context; 7326 rc = lpfc_sli_issue_mbox_wait(phba, mbox, 30); 7327 if (bf_get(lpfc_mqe_status, &mbox->u.mqe)) { 7328 rc = 1; 7329 goto error; 7330 } 7331 rc = 0; 7332 7333 lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a2, 7334 DMP_SFF_PAGE_A2_SIZE); 7335 7336 error: 7337 mbox->ctx_buf = mpsave; 7338 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 7339 7340 return rc; 7341 7342 sfp_fail: 7343 mempool_free(mbox, phba->mbox_mem_pool); 7344 return 1; 7345 } 7346 7347 /* 7348 * lpfc_els_rcv_rdp - Process an unsolicited RDP ELS. 7349 * @vport: pointer to a host virtual N_Port data structure. 7350 * @cmdiocb: pointer to lpfc command iocb data structure. 7351 * @ndlp: pointer to a node-list data structure. 7352 * 7353 * This routine processes an unsolicited RDP(Read Diagnostic Parameters) 7354 * IOCB. First, the payload of the unsolicited RDP is checked. 7355 * Then it will (1) send MBX_DUMP_MEMORY, Embedded DMP_LMSD sub command TYPE-3 7356 * for Page A0, (2) send MBX_DUMP_MEMORY, DMP_LMSD for Page A2, 7357 * (3) send MBX_READ_LNK_STAT to get link stat, (4) Call lpfc_els_rdp_cmpl 7358 * gather all data and send RDP response. 7359 * 7360 * Return code 7361 * 0 - Sent the acc response 7362 * 1 - Sent the reject response. 
7363 */ 7364 static int 7365 lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7366 struct lpfc_nodelist *ndlp) 7367 { 7368 struct lpfc_hba *phba = vport->phba; 7369 struct lpfc_dmabuf *pcmd; 7370 uint8_t rjt_err, rjt_expl = LSEXP_NOTHING_MORE; 7371 struct fc_rdp_req_frame *rdp_req; 7372 struct lpfc_rdp_context *rdp_context; 7373 union lpfc_wqe128 *cmd = NULL; 7374 struct ls_rjt stat; 7375 7376 if (phba->sli_rev < LPFC_SLI_REV4 || 7377 bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 7378 LPFC_SLI_INTF_IF_TYPE_2) { 7379 rjt_err = LSRJT_UNABLE_TPC; 7380 rjt_expl = LSEXP_REQ_UNSUPPORTED; 7381 goto error; 7382 } 7383 7384 if (phba->sli_rev < LPFC_SLI_REV4 || (phba->hba_flag & HBA_FCOE_MODE)) { 7385 rjt_err = LSRJT_UNABLE_TPC; 7386 rjt_expl = LSEXP_REQ_UNSUPPORTED; 7387 goto error; 7388 } 7389 7390 pcmd = cmdiocb->cmd_dmabuf; 7391 rdp_req = (struct fc_rdp_req_frame *) pcmd->virt; 7392 7393 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7394 "2422 ELS RDP Request " 7395 "dec len %d tag x%x port_id %d len %d\n", 7396 be32_to_cpu(rdp_req->rdp_des_length), 7397 be32_to_cpu(rdp_req->nport_id_desc.tag), 7398 be32_to_cpu(rdp_req->nport_id_desc.nport_id), 7399 be32_to_cpu(rdp_req->nport_id_desc.length)); 7400 7401 if (sizeof(struct fc_rdp_nport_desc) != 7402 be32_to_cpu(rdp_req->rdp_des_length)) 7403 goto rjt_logerr; 7404 if (RDP_N_PORT_DESC_TAG != be32_to_cpu(rdp_req->nport_id_desc.tag)) 7405 goto rjt_logerr; 7406 if (RDP_NPORT_ID_SIZE != 7407 be32_to_cpu(rdp_req->nport_id_desc.length)) 7408 goto rjt_logerr; 7409 rdp_context = kzalloc(sizeof(struct lpfc_rdp_context), GFP_KERNEL); 7410 if (!rdp_context) { 7411 rjt_err = LSRJT_UNABLE_TPC; 7412 goto error; 7413 } 7414 7415 cmd = &cmdiocb->wqe; 7416 rdp_context->ndlp = lpfc_nlp_get(ndlp); 7417 if (!rdp_context->ndlp) { 7418 kfree(rdp_context); 7419 rjt_err = LSRJT_UNABLE_TPC; 7420 goto error; 7421 } 7422 rdp_context->ox_id = bf_get(wqe_rcvoxid, 7423 &cmd->xmit_els_rsp.wqe_com); 7424 rdp_context->rx_id = bf_get(wqe_ctxt_tag, 7425 &cmd->xmit_els_rsp.wqe_com); 7426 rdp_context->cmpl = lpfc_els_rdp_cmpl; 7427 if (lpfc_get_rdp_info(phba, rdp_context)) { 7428 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_ELS, 7429 "2423 Unable to send mailbox"); 7430 kfree(rdp_context); 7431 rjt_err = LSRJT_UNABLE_TPC; 7432 lpfc_nlp_put(ndlp); 7433 goto error; 7434 } 7435 7436 return 0; 7437 7438 rjt_logerr: 7439 rjt_err = LSRJT_LOGICAL_ERR; 7440 7441 error: 7442 memset(&stat, 0, sizeof(stat)); 7443 stat.un.b.lsRjtRsnCode = rjt_err; 7444 stat.un.b.lsRjtRsnCodeExp = rjt_expl; 7445 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 7446 return 1; 7447 } 7448 7449 7450 static void 7451 lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 7452 { 7453 MAILBOX_t *mb; 7454 IOCB_t *icmd; 7455 union lpfc_wqe128 *wqe; 7456 uint8_t *pcmd; 7457 struct lpfc_iocbq *elsiocb; 7458 struct lpfc_nodelist *ndlp; 7459 struct ls_rjt *stat; 7460 union lpfc_sli4_cfg_shdr *shdr; 7461 struct lpfc_lcb_context *lcb_context; 7462 struct fc_lcb_res_frame *lcb_res; 7463 uint32_t cmdsize, shdr_status, shdr_add_status; 7464 int rc; 7465 7466 mb = &pmb->u.mb; 7467 lcb_context = (struct lpfc_lcb_context *)pmb->ctx_ndlp; 7468 ndlp = lcb_context->ndlp; 7469 pmb->ctx_ndlp = NULL; 7470 pmb->ctx_buf = NULL; 7471 7472 shdr = (union lpfc_sli4_cfg_shdr *) 7473 &pmb->u.mqe.un.beacon_config.header.cfg_shdr; 7474 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 7475 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 7476 7477 
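/* Log the SET_BEACON_CONFIG completion, then use the mailbox,
 * header and additional status to decide whether the original LCB
 * request is answered with an LS_ACC or an LS_RJT.
 */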
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX, 7478 "0194 SET_BEACON_CONFIG mailbox " 7479 "completed with status x%x add_status x%x," 7480 " mbx status x%x\n", 7481 shdr_status, shdr_add_status, mb->mbxStatus); 7482 7483 if ((mb->mbxStatus != MBX_SUCCESS) || shdr_status || 7484 (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) || 7485 (shdr_add_status == ADD_STATUS_INVALID_REQUEST)) { 7486 mempool_free(pmb, phba->mbox_mem_pool); 7487 goto error; 7488 } 7489 7490 mempool_free(pmb, phba->mbox_mem_pool); 7491 cmdsize = sizeof(struct fc_lcb_res_frame); 7492 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 7493 lpfc_max_els_tries, ndlp, 7494 ndlp->nlp_DID, ELS_CMD_ACC); 7495 7496 /* Decrement the ndlp reference count from previous mbox command */ 7497 lpfc_nlp_put(ndlp); 7498 7499 if (!elsiocb) 7500 goto free_lcb_context; 7501 7502 lcb_res = (struct fc_lcb_res_frame *)elsiocb->cmd_dmabuf->virt; 7503 7504 memset(lcb_res, 0, sizeof(struct fc_lcb_res_frame)); 7505 7506 if (phba->sli_rev == LPFC_SLI_REV4) { 7507 wqe = &elsiocb->wqe; 7508 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, lcb_context->rx_id); 7509 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 7510 lcb_context->ox_id); 7511 } else { 7512 icmd = &elsiocb->iocb; 7513 icmd->ulpContext = lcb_context->rx_id; 7514 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; 7515 } 7516 7517 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 7518 *((uint32_t *)(pcmd)) = ELS_CMD_ACC; 7519 lcb_res->lcb_sub_command = lcb_context->sub_command; 7520 lcb_res->lcb_type = lcb_context->type; 7521 lcb_res->capability = lcb_context->capability; 7522 lcb_res->lcb_frequency = lcb_context->frequency; 7523 lcb_res->lcb_duration = lcb_context->duration; 7524 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 7525 phba->fc_stat.elsXmitACC++; 7526 7527 elsiocb->ndlp = lpfc_nlp_get(ndlp); 7528 if (!elsiocb->ndlp) { 7529 lpfc_els_free_iocb(phba, elsiocb); 7530 goto out; 7531 } 7532 7533 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7534 if (rc == IOCB_ERROR) { 7535 lpfc_els_free_iocb(phba, elsiocb); 7536 lpfc_nlp_put(ndlp); 7537 } 7538 out: 7539 kfree(lcb_context); 7540 return; 7541 7542 error: 7543 cmdsize = sizeof(struct fc_lcb_res_frame); 7544 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 7545 lpfc_max_els_tries, ndlp, 7546 ndlp->nlp_DID, ELS_CMD_LS_RJT); 7547 lpfc_nlp_put(ndlp); 7548 if (!elsiocb) 7549 goto free_lcb_context; 7550 7551 if (phba->sli_rev == LPFC_SLI_REV4) { 7552 wqe = &elsiocb->wqe; 7553 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, lcb_context->rx_id); 7554 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 7555 lcb_context->ox_id); 7556 } else { 7557 icmd = &elsiocb->iocb; 7558 icmd->ulpContext = lcb_context->rx_id; 7559 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; 7560 } 7561 7562 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 7563 7564 *((uint32_t *)(pcmd)) = ELS_CMD_LS_RJT; 7565 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t)); 7566 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 7567 7568 if (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) 7569 stat->un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS; 7570 7571 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 7572 phba->fc_stat.elsXmitLSRJT++; 7573 elsiocb->ndlp = lpfc_nlp_get(ndlp); 7574 if (!elsiocb->ndlp) { 7575 lpfc_els_free_iocb(phba, elsiocb); 7576 goto free_lcb_context; 7577 } 7578 7579 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7580 if (rc == IOCB_ERROR) { 7581 lpfc_els_free_iocb(phba, elsiocb); 7582 lpfc_nlp_put(ndlp); 7583 } 7584 free_lcb_context: 7585 kfree(lcb_context); 7586 } 
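/**
 * lpfc_sli4_set_beacon - Issue a SET_BEACON_CONFIG mailbox command
 * @vport: pointer to a host virtual N_Port data structure.
 * @lcb_context: pointer to the LCB context built from the unsolicited LCB.
 * @beacon_state: 1 to turn the beacon on, 0 to turn it off.
 *
 * This routine builds a COMMON_SET_BEACON_CONFIG mailbox command (V1 when
 * the SLI4 parameters advertise bv1s support, otherwise V0, in which case a
 * non-zero beacon duration is refused) and issues it with lpfc_els_lcb_rsp()
 * as the completion handler.
 *
 * Return code
 * 0 - Mailbox command successfully issued
 * 1 - Failed to allocate or issue the mailbox command
 **/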
7587 7588 static int 7589 lpfc_sli4_set_beacon(struct lpfc_vport *vport, 7590 struct lpfc_lcb_context *lcb_context, 7591 uint32_t beacon_state) 7592 { 7593 struct lpfc_hba *phba = vport->phba; 7594 union lpfc_sli4_cfg_shdr *cfg_shdr; 7595 LPFC_MBOXQ_t *mbox = NULL; 7596 uint32_t len; 7597 int rc; 7598 7599 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7600 if (!mbox) 7601 return 1; 7602 7603 cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr; 7604 len = sizeof(struct lpfc_mbx_set_beacon_config) - 7605 sizeof(struct lpfc_sli4_cfg_mhdr); 7606 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 7607 LPFC_MBOX_OPCODE_SET_BEACON_CONFIG, len, 7608 LPFC_SLI4_MBX_EMBED); 7609 mbox->ctx_ndlp = (void *)lcb_context; 7610 mbox->vport = phba->pport; 7611 mbox->mbox_cmpl = lpfc_els_lcb_rsp; 7612 bf_set(lpfc_mbx_set_beacon_port_num, &mbox->u.mqe.un.beacon_config, 7613 phba->sli4_hba.physical_port); 7614 bf_set(lpfc_mbx_set_beacon_state, &mbox->u.mqe.un.beacon_config, 7615 beacon_state); 7616 mbox->u.mqe.un.beacon_config.word5 = 0; /* Reserved */ 7617 7618 /* 7619 * Check bv1s bit before issuing the mailbox 7620 * if bv1s == 1, LCB V1 supported 7621 * else, LCB V0 supported 7622 */ 7623 7624 if (phba->sli4_hba.pc_sli4_params.bv1s) { 7625 /* COMMON_SET_BEACON_CONFIG_V1 */ 7626 cfg_shdr->request.word9 = BEACON_VERSION_V1; 7627 lcb_context->capability |= LCB_CAPABILITY_DURATION; 7628 bf_set(lpfc_mbx_set_beacon_port_type, 7629 &mbox->u.mqe.un.beacon_config, 0); 7630 bf_set(lpfc_mbx_set_beacon_duration_v1, 7631 &mbox->u.mqe.un.beacon_config, 7632 be16_to_cpu(lcb_context->duration)); 7633 } else { 7634 /* COMMON_SET_BEACON_CONFIG_V0 */ 7635 if (be16_to_cpu(lcb_context->duration) != 0) { 7636 mempool_free(mbox, phba->mbox_mem_pool); 7637 return 1; 7638 } 7639 cfg_shdr->request.word9 = BEACON_VERSION_V0; 7640 lcb_context->capability &= ~(LCB_CAPABILITY_DURATION); 7641 bf_set(lpfc_mbx_set_beacon_state, 7642 &mbox->u.mqe.un.beacon_config, beacon_state); 7643 bf_set(lpfc_mbx_set_beacon_port_type, 7644 &mbox->u.mqe.un.beacon_config, 1); 7645 bf_set(lpfc_mbx_set_beacon_duration, 7646 &mbox->u.mqe.un.beacon_config, 7647 be16_to_cpu(lcb_context->duration)); 7648 } 7649 7650 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 7651 if (rc == MBX_NOT_FINISHED) { 7652 mempool_free(mbox, phba->mbox_mem_pool); 7653 return 1; 7654 } 7655 7656 return 0; 7657 } 7658 7659 7660 /** 7661 * lpfc_els_rcv_lcb - Process an unsolicited LCB 7662 * @vport: pointer to a host virtual N_Port data structure. 7663 * @cmdiocb: pointer to lpfc command iocb data structure. 7664 * @ndlp: pointer to a node-list data structure. 7665 * 7666 * This routine processes an unsolicited LCB(LINK CABLE BEACON) IOCB. 7667 * First, the payload of the unsolicited LCB is checked. 7668 * Then based on Subcommand beacon will either turn on or off. 7669 * 7670 * Return code 7671 * 0 - Sent the acc response 7672 * 1 - Sent the reject response. 
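 *
 * Note that the beacon itself is controlled through the SET_BEACON_CONFIG
 * mailbox command built by lpfc_sli4_set_beacon(); when that mailbox is
 * successfully issued, the LS_ACC or LS_RJT for the LCB is sent from its
 * completion handler, lpfc_els_lcb_rsp(). Otherwise this routine rejects
 * the LCB directly.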
7673 **/ 7674 static int 7675 lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7676 struct lpfc_nodelist *ndlp) 7677 { 7678 struct lpfc_hba *phba = vport->phba; 7679 struct lpfc_dmabuf *pcmd; 7680 uint8_t *lp; 7681 struct fc_lcb_request_frame *beacon; 7682 struct lpfc_lcb_context *lcb_context; 7683 u8 state, rjt_err = 0; 7684 struct ls_rjt stat; 7685 7686 pcmd = cmdiocb->cmd_dmabuf; 7687 lp = (uint8_t *)pcmd->virt; 7688 beacon = (struct fc_lcb_request_frame *)pcmd->virt; 7689 7690 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7691 "0192 ELS LCB Data x%x x%x x%x x%x sub x%x " 7692 "type x%x frequency %x duration x%x\n", 7693 lp[0], lp[1], lp[2], 7694 beacon->lcb_command, 7695 beacon->lcb_sub_command, 7696 beacon->lcb_type, 7697 beacon->lcb_frequency, 7698 be16_to_cpu(beacon->lcb_duration)); 7699 7700 if (beacon->lcb_sub_command != LPFC_LCB_ON && 7701 beacon->lcb_sub_command != LPFC_LCB_OFF) { 7702 rjt_err = LSRJT_CMD_UNSUPPORTED; 7703 goto rjt; 7704 } 7705 7706 if (phba->sli_rev < LPFC_SLI_REV4 || 7707 phba->hba_flag & HBA_FCOE_MODE || 7708 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 7709 LPFC_SLI_INTF_IF_TYPE_2)) { 7710 rjt_err = LSRJT_CMD_UNSUPPORTED; 7711 goto rjt; 7712 } 7713 7714 lcb_context = kmalloc(sizeof(*lcb_context), GFP_KERNEL); 7715 if (!lcb_context) { 7716 rjt_err = LSRJT_UNABLE_TPC; 7717 goto rjt; 7718 } 7719 7720 state = (beacon->lcb_sub_command == LPFC_LCB_ON) ? 1 : 0; 7721 lcb_context->sub_command = beacon->lcb_sub_command; 7722 lcb_context->capability = 0; 7723 lcb_context->type = beacon->lcb_type; 7724 lcb_context->frequency = beacon->lcb_frequency; 7725 lcb_context->duration = beacon->lcb_duration; 7726 lcb_context->ox_id = get_job_rcvoxid(phba, cmdiocb); 7727 lcb_context->rx_id = get_job_ulpcontext(phba, cmdiocb); 7728 lcb_context->ndlp = lpfc_nlp_get(ndlp); 7729 if (!lcb_context->ndlp) { 7730 rjt_err = LSRJT_UNABLE_TPC; 7731 goto rjt_free; 7732 } 7733 7734 if (lpfc_sli4_set_beacon(vport, lcb_context, state)) { 7735 lpfc_printf_vlog(ndlp->vport, KERN_ERR, LOG_TRACE_EVENT, 7736 "0193 failed to send mail box"); 7737 lpfc_nlp_put(ndlp); 7738 rjt_err = LSRJT_UNABLE_TPC; 7739 goto rjt_free; 7740 } 7741 return 0; 7742 7743 rjt_free: 7744 kfree(lcb_context); 7745 rjt: 7746 memset(&stat, 0, sizeof(stat)); 7747 stat.un.b.lsRjtRsnCode = rjt_err; 7748 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 7749 return 1; 7750 } 7751 7752 7753 /** 7754 * lpfc_els_flush_rscn - Clean up any rscn activities with a vport 7755 * @vport: pointer to a host virtual N_Port data structure. 7756 * 7757 * This routine cleans up any Registration State Change Notification 7758 * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the 7759 * @vport together with the host_lock is used to prevent multiple thread 7760 * trying to access the RSCN array on a same @vport at the same time. 
7761 **/ 7762 void 7763 lpfc_els_flush_rscn(struct lpfc_vport *vport) 7764 { 7765 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7766 struct lpfc_hba *phba = vport->phba; 7767 int i; 7768 7769 spin_lock_irq(shost->host_lock); 7770 if (vport->fc_rscn_flush) { 7771 /* Another thread is walking fc_rscn_id_list on this vport */ 7772 spin_unlock_irq(shost->host_lock); 7773 return; 7774 } 7775 /* Indicate we are walking lpfc_els_flush_rscn on this vport */ 7776 vport->fc_rscn_flush = 1; 7777 spin_unlock_irq(shost->host_lock); 7778 7779 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 7780 lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]); 7781 vport->fc_rscn_id_list[i] = NULL; 7782 } 7783 spin_lock_irq(shost->host_lock); 7784 vport->fc_rscn_id_cnt = 0; 7785 vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY); 7786 spin_unlock_irq(shost->host_lock); 7787 lpfc_can_disctmo(vport); 7788 /* Indicate we are done walking this fc_rscn_id_list */ 7789 vport->fc_rscn_flush = 0; 7790 } 7791 7792 /** 7793 * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did 7794 * @vport: pointer to a host virtual N_Port data structure. 7795 * @did: remote destination port identifier. 7796 * 7797 * This routine checks whether there is any pending Registration State 7798 * Configuration Notification (RSCN) to a @did on @vport. 7799 * 7800 * Return code 7801 * None zero - The @did matched with a pending rscn 7802 * 0 - not able to match @did with a pending rscn 7803 **/ 7804 int 7805 lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did) 7806 { 7807 D_ID ns_did; 7808 D_ID rscn_did; 7809 uint32_t *lp; 7810 uint32_t payload_len, i; 7811 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7812 7813 ns_did.un.word = did; 7814 7815 /* Never match fabric nodes for RSCNs */ 7816 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 7817 return 0; 7818 7819 /* If we are doing a FULL RSCN rediscovery, match everything */ 7820 if (vport->fc_flag & FC_RSCN_DISCOVERY) 7821 return did; 7822 7823 spin_lock_irq(shost->host_lock); 7824 if (vport->fc_rscn_flush) { 7825 /* Another thread is walking fc_rscn_id_list on this vport */ 7826 spin_unlock_irq(shost->host_lock); 7827 return 0; 7828 } 7829 /* Indicate we are walking fc_rscn_id_list on this vport */ 7830 vport->fc_rscn_flush = 1; 7831 spin_unlock_irq(shost->host_lock); 7832 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 7833 lp = vport->fc_rscn_id_list[i]->virt; 7834 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); 7835 payload_len -= sizeof(uint32_t); /* take off word 0 */ 7836 while (payload_len) { 7837 rscn_did.un.word = be32_to_cpu(*lp++); 7838 payload_len -= sizeof(uint32_t); 7839 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) { 7840 case RSCN_ADDRESS_FORMAT_PORT: 7841 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 7842 && (ns_did.un.b.area == rscn_did.un.b.area) 7843 && (ns_did.un.b.id == rscn_did.un.b.id)) 7844 goto return_did_out; 7845 break; 7846 case RSCN_ADDRESS_FORMAT_AREA: 7847 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 7848 && (ns_did.un.b.area == rscn_did.un.b.area)) 7849 goto return_did_out; 7850 break; 7851 case RSCN_ADDRESS_FORMAT_DOMAIN: 7852 if (ns_did.un.b.domain == rscn_did.un.b.domain) 7853 goto return_did_out; 7854 break; 7855 case RSCN_ADDRESS_FORMAT_FABRIC: 7856 goto return_did_out; 7857 } 7858 } 7859 } 7860 /* Indicate we are done with walking fc_rscn_id_list on this vport */ 7861 vport->fc_rscn_flush = 0; 7862 return 0; 7863 return_did_out: 7864 /* Indicate we are done with walking fc_rscn_id_list on this 
vport */ 7865 vport->fc_rscn_flush = 0; 7866 return did; 7867 } 7868 7869 /** 7870 * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn 7871 * @vport: pointer to a host virtual N_Port data structure. 7872 * 7873 * This routine sends recovery (NLP_EVT_DEVICE_RECOVERY) event to the 7874 * state machine for a @vport's nodes that are with pending RSCN (Registration 7875 * State Change Notification). 7876 * 7877 * Return code 7878 * 0 - Successful (currently alway return 0) 7879 **/ 7880 static int 7881 lpfc_rscn_recovery_check(struct lpfc_vport *vport) 7882 { 7883 struct lpfc_nodelist *ndlp = NULL, *n; 7884 7885 /* Move all affected nodes by pending RSCNs to NPR state. */ 7886 list_for_each_entry_safe(ndlp, n, &vport->fc_nodes, nlp_listp) { 7887 if ((ndlp->nlp_state == NLP_STE_UNUSED_NODE) || 7888 !lpfc_rscn_payload_check(vport, ndlp->nlp_DID)) 7889 continue; 7890 7891 /* NVME Target mode does not do RSCN Recovery. */ 7892 if (vport->phba->nvmet_support) 7893 continue; 7894 7895 /* If we are in the process of doing discovery on this 7896 * NPort, let it continue on its own. 7897 */ 7898 switch (ndlp->nlp_state) { 7899 case NLP_STE_PLOGI_ISSUE: 7900 case NLP_STE_ADISC_ISSUE: 7901 case NLP_STE_REG_LOGIN_ISSUE: 7902 case NLP_STE_PRLI_ISSUE: 7903 case NLP_STE_LOGO_ISSUE: 7904 continue; 7905 } 7906 7907 lpfc_disc_state_machine(vport, ndlp, NULL, 7908 NLP_EVT_DEVICE_RECOVERY); 7909 lpfc_cancel_retry_delay_tmo(vport, ndlp); 7910 } 7911 return 0; 7912 } 7913 7914 /** 7915 * lpfc_send_rscn_event - Send an RSCN event to management application 7916 * @vport: pointer to a host virtual N_Port data structure. 7917 * @cmdiocb: pointer to lpfc command iocb data structure. 7918 * 7919 * lpfc_send_rscn_event sends an RSCN netlink event to management 7920 * applications. 7921 */ 7922 static void 7923 lpfc_send_rscn_event(struct lpfc_vport *vport, 7924 struct lpfc_iocbq *cmdiocb) 7925 { 7926 struct lpfc_dmabuf *pcmd; 7927 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7928 uint32_t *payload_ptr; 7929 uint32_t payload_len; 7930 struct lpfc_rscn_event_header *rscn_event_data; 7931 7932 pcmd = cmdiocb->cmd_dmabuf; 7933 payload_ptr = (uint32_t *) pcmd->virt; 7934 payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK); 7935 7936 rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) + 7937 payload_len, GFP_KERNEL); 7938 if (!rscn_event_data) { 7939 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 7940 "0147 Failed to allocate memory for RSCN event\n"); 7941 return; 7942 } 7943 rscn_event_data->event_type = FC_REG_RSCN_EVENT; 7944 rscn_event_data->payload_length = payload_len; 7945 memcpy(rscn_event_data->rscn_payload, payload_ptr, 7946 payload_len); 7947 7948 fc_host_post_vendor_event(shost, 7949 fc_get_event_number(), 7950 sizeof(struct lpfc_rscn_event_header) + payload_len, 7951 (char *)rscn_event_data, 7952 LPFC_NL_VENDOR_ID); 7953 7954 kfree(rscn_event_data); 7955 } 7956 7957 /** 7958 * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb 7959 * @vport: pointer to a host virtual N_Port data structure. 7960 * @cmdiocb: pointer to lpfc command iocb data structure. 7961 * @ndlp: pointer to a node-list data structure. 7962 * 7963 * This routine processes an unsolicited RSCN (Registration State Change 7964 * Notification) IOCB. First, the payload of the unsolicited RSCN is walked 7965 * to invoke fc_host_post_event() routine to the FC transport layer. 
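 * Each payload word after word 0 (which carries the ELS command and the
 * payload length) holds one affected address entry: the upper byte encodes
 * the address format qualifier and the low 24 bits the affected D_ID, so a
 * single port, an area, a domain or the whole fabric can be indicated.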
If the 7966 * discover state machine is about to begin discovery, it just accepts the 7967 * RSCN and the discovery process will satisfy the RSCN. If this RSCN only 7968 * contains N_Port IDs for other vports on this HBA, it just accepts the 7969 * RSCN and ignore processing it. If the state machine is in the recovery 7970 * state, the fc_rscn_id_list of this @vport is walked and the 7971 * lpfc_rscn_recovery_check() routine is invoked to send recovery event for 7972 * all nodes that match RSCN payload. Otherwise, the lpfc_els_handle_rscn() 7973 * routine is invoked to handle the RSCN event. 7974 * 7975 * Return code 7976 * 0 - Just sent the acc response 7977 * 1 - Sent the acc response and waited for name server completion 7978 **/ 7979 static int 7980 lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7981 struct lpfc_nodelist *ndlp) 7982 { 7983 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7984 struct lpfc_hba *phba = vport->phba; 7985 struct lpfc_dmabuf *pcmd; 7986 uint32_t *lp, *datap; 7987 uint32_t payload_len, length, nportid, *cmd; 7988 int rscn_cnt; 7989 int rscn_id = 0, hba_id = 0; 7990 int i, tmo; 7991 7992 pcmd = cmdiocb->cmd_dmabuf; 7993 lp = (uint32_t *) pcmd->virt; 7994 7995 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); 7996 payload_len -= sizeof(uint32_t); /* take off word 0 */ 7997 /* RSCN received */ 7998 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 7999 "0214 RSCN received Data: x%x x%x x%x x%x\n", 8000 vport->fc_flag, payload_len, *lp, 8001 vport->fc_rscn_id_cnt); 8002 8003 /* Send an RSCN event to the management application */ 8004 lpfc_send_rscn_event(vport, cmdiocb); 8005 8006 for (i = 0; i < payload_len/sizeof(uint32_t); i++) 8007 fc_host_post_event(shost, fc_get_event_number(), 8008 FCH_EVT_RSCN, lp[i]); 8009 8010 /* Check if RSCN is coming from a direct-connected remote NPort */ 8011 if (vport->fc_flag & FC_PT2PT) { 8012 /* If so, just ACC it, no other action needed for now */ 8013 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8014 "2024 pt2pt RSCN %08x Data: x%x x%x\n", 8015 *lp, vport->fc_flag, payload_len); 8016 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8017 8018 /* Check to see if we need to NVME rescan this target 8019 * remoteport. 8020 */ 8021 if (ndlp->nlp_fc4_type & NLP_FC4_NVME && 8022 ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY)) 8023 lpfc_nvme_rescan_port(vport, ndlp); 8024 return 0; 8025 } 8026 8027 /* If we are about to begin discovery, just ACC the RSCN. 8028 * Discovery processing will satisfy it. 8029 */ 8030 if (vport->port_state <= LPFC_NS_QRY) { 8031 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8032 "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x", 8033 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 8034 8035 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8036 return 0; 8037 } 8038 8039 /* If this RSCN just contains NPortIDs for other vports on this HBA, 8040 * just ACC and ignore it. 
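 * Each N_Port ID in the payload is matched against this HBA's own
 * vports with lpfc_find_vport_by_did(); the RSCN is ignored only if
 * every entry in the payload matches one of them.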
8041 */ 8042 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 8043 !(vport->cfg_peer_port_login)) { 8044 i = payload_len; 8045 datap = lp; 8046 while (i > 0) { 8047 nportid = *datap++; 8048 nportid = ((be32_to_cpu(nportid)) & Mask_DID); 8049 i -= sizeof(uint32_t); 8050 rscn_id++; 8051 if (lpfc_find_vport_by_did(phba, nportid)) 8052 hba_id++; 8053 } 8054 if (rscn_id == hba_id) { 8055 /* ALL NPortIDs in RSCN are on HBA */ 8056 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 8057 "0219 Ignore RSCN " 8058 "Data: x%x x%x x%x x%x\n", 8059 vport->fc_flag, payload_len, 8060 *lp, vport->fc_rscn_id_cnt); 8061 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8062 "RCV RSCN vport: did:x%x/ste:x%x flg:x%x", 8063 ndlp->nlp_DID, vport->port_state, 8064 ndlp->nlp_flag); 8065 8066 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, 8067 ndlp, NULL); 8068 /* Restart disctmo if its already running */ 8069 if (vport->fc_flag & FC_DISC_TMO) { 8070 tmo = ((phba->fc_ratov * 3) + 3); 8071 mod_timer(&vport->fc_disctmo, 8072 jiffies + 8073 msecs_to_jiffies(1000 * tmo)); 8074 } 8075 return 0; 8076 } 8077 } 8078 8079 spin_lock_irq(shost->host_lock); 8080 if (vport->fc_rscn_flush) { 8081 /* Another thread is walking fc_rscn_id_list on this vport */ 8082 vport->fc_flag |= FC_RSCN_DISCOVERY; 8083 spin_unlock_irq(shost->host_lock); 8084 /* Send back ACC */ 8085 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8086 return 0; 8087 } 8088 /* Indicate we are walking fc_rscn_id_list on this vport */ 8089 vport->fc_rscn_flush = 1; 8090 spin_unlock_irq(shost->host_lock); 8091 /* Get the array count after successfully have the token */ 8092 rscn_cnt = vport->fc_rscn_id_cnt; 8093 /* If we are already processing an RSCN, save the received 8094 * RSCN payload buffer, cmdiocb->cmd_dmabuf to process later. 8095 */ 8096 if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) { 8097 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8098 "RCV RSCN defer: did:x%x/ste:x%x flg:x%x", 8099 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 8100 8101 spin_lock_irq(shost->host_lock); 8102 vport->fc_flag |= FC_RSCN_DEFERRED; 8103 8104 /* Restart disctmo if its already running */ 8105 if (vport->fc_flag & FC_DISC_TMO) { 8106 tmo = ((phba->fc_ratov * 3) + 3); 8107 mod_timer(&vport->fc_disctmo, 8108 jiffies + msecs_to_jiffies(1000 * tmo)); 8109 } 8110 if ((rscn_cnt < FC_MAX_HOLD_RSCN) && 8111 !(vport->fc_flag & FC_RSCN_DISCOVERY)) { 8112 vport->fc_flag |= FC_RSCN_MODE; 8113 spin_unlock_irq(shost->host_lock); 8114 if (rscn_cnt) { 8115 cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt; 8116 length = be32_to_cpu(*cmd & ~ELS_CMD_MASK); 8117 } 8118 if ((rscn_cnt) && 8119 (payload_len + length <= LPFC_BPL_SIZE)) { 8120 *cmd &= ELS_CMD_MASK; 8121 *cmd |= cpu_to_be32(payload_len + length); 8122 memcpy(((uint8_t *)cmd) + length, lp, 8123 payload_len); 8124 } else { 8125 vport->fc_rscn_id_list[rscn_cnt] = pcmd; 8126 vport->fc_rscn_id_cnt++; 8127 /* If we zero, cmdiocb->cmd_dmabuf, the calling 8128 * routine will not try to free it. 
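 * The buffer now lives on vport->fc_rscn_id_list and is freed
 * later by lpfc_els_flush_rscn().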
8129 */ 8130 cmdiocb->cmd_dmabuf = NULL; 8131 } 8132 /* Deferred RSCN */ 8133 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 8134 "0235 Deferred RSCN " 8135 "Data: x%x x%x x%x\n", 8136 vport->fc_rscn_id_cnt, vport->fc_flag, 8137 vport->port_state); 8138 } else { 8139 vport->fc_flag |= FC_RSCN_DISCOVERY; 8140 spin_unlock_irq(shost->host_lock); 8141 /* ReDiscovery RSCN */ 8142 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 8143 "0234 ReDiscovery RSCN " 8144 "Data: x%x x%x x%x\n", 8145 vport->fc_rscn_id_cnt, vport->fc_flag, 8146 vport->port_state); 8147 } 8148 /* Indicate we are done walking fc_rscn_id_list on this vport */ 8149 vport->fc_rscn_flush = 0; 8150 /* Send back ACC */ 8151 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8152 /* send RECOVERY event for ALL nodes that match RSCN payload */ 8153 lpfc_rscn_recovery_check(vport); 8154 return 0; 8155 } 8156 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8157 "RCV RSCN: did:x%x/ste:x%x flg:x%x", 8158 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 8159 8160 spin_lock_irq(shost->host_lock); 8161 vport->fc_flag |= FC_RSCN_MODE; 8162 spin_unlock_irq(shost->host_lock); 8163 vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd; 8164 /* Indicate we are done walking fc_rscn_id_list on this vport */ 8165 vport->fc_rscn_flush = 0; 8166 /* 8167 * If we zero, cmdiocb->cmd_dmabuf, the calling routine will 8168 * not try to free it. 8169 */ 8170 cmdiocb->cmd_dmabuf = NULL; 8171 lpfc_set_disctmo(vport); 8172 /* Send back ACC */ 8173 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8174 /* send RECOVERY event for ALL nodes that match RSCN payload */ 8175 lpfc_rscn_recovery_check(vport); 8176 return lpfc_els_handle_rscn(vport); 8177 } 8178 8179 /** 8180 * lpfc_els_handle_rscn - Handle rscn for a vport 8181 * @vport: pointer to a host virtual N_Port data structure. 8182 * 8183 * This routine handles the Registration State Configuration Notification 8184 * (RSCN) for a @vport. If login to NameServer does not exist, a new ndlp shall 8185 * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise, 8186 * if the ndlp to NameServer exists, a Common Transport (CT) command to the 8187 * NameServer shall be issued. If CT command to the NameServer fails to be 8188 * issued, the lpfc_els_flush_rscn() routine shall be invoked to clean up any 8189 * RSCN activities with the @vport. 8190 * 8191 * Return code 8192 * 0 - Cleaned up rscn on the @vport 8193 * 1 - Wait for plogi to name server before proceed 8194 **/ 8195 int 8196 lpfc_els_handle_rscn(struct lpfc_vport *vport) 8197 { 8198 struct lpfc_nodelist *ndlp; 8199 struct lpfc_hba *phba = vport->phba; 8200 8201 /* Ignore RSCN if the port is being torn down. */ 8202 if (vport->load_flag & FC_UNLOADING) { 8203 lpfc_els_flush_rscn(vport); 8204 return 0; 8205 } 8206 8207 /* Start timer for RSCN processing */ 8208 lpfc_set_disctmo(vport); 8209 8210 /* RSCN processed */ 8211 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 8212 "0215 RSCN processed Data: x%x x%x x%x x%x x%x x%x\n", 8213 vport->fc_flag, 0, vport->fc_rscn_id_cnt, 8214 vport->port_state, vport->num_disc_nodes, 8215 vport->gidft_inp); 8216 8217 /* To process RSCN, first compare RSCN data with NameServer */ 8218 vport->fc_ns_retry = 0; 8219 vport->num_disc_nodes = 0; 8220 8221 ndlp = lpfc_findnode_did(vport, NameServer_DID); 8222 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { 8223 /* Good ndlp, issue CT Request to NameServer. Need to 8224 * know how many gidfts were issued. 
If none, then just 8225 * flush the RSCN. Otherwise, the outstanding requests 8226 * need to complete. 8227 */ 8228 if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_FT) { 8229 if (lpfc_issue_gidft(vport) > 0) 8230 return 1; 8231 } else if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_PT) { 8232 if (lpfc_issue_gidpt(vport) > 0) 8233 return 1; 8234 } else { 8235 return 1; 8236 } 8237 } else { 8238 /* Nameserver login in question. Revalidate. */ 8239 if (ndlp) { 8240 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE; 8241 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 8242 } else { 8243 ndlp = lpfc_nlp_init(vport, NameServer_DID); 8244 if (!ndlp) { 8245 lpfc_els_flush_rscn(vport); 8246 return 0; 8247 } 8248 ndlp->nlp_prev_state = ndlp->nlp_state; 8249 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 8250 } 8251 ndlp->nlp_type |= NLP_FABRIC; 8252 lpfc_issue_els_plogi(vport, NameServer_DID, 0); 8253 /* Wait for NameServer login cmpl before we can 8254 * continue 8255 */ 8256 return 1; 8257 } 8258 8259 lpfc_els_flush_rscn(vport); 8260 return 0; 8261 } 8262 8263 /** 8264 * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb 8265 * @vport: pointer to a host virtual N_Port data structure. 8266 * @cmdiocb: pointer to lpfc command iocb data structure. 8267 * @ndlp: pointer to a node-list data structure. 8268 * 8269 * This routine processes Fabric Login (FLOGI) IOCB received as an ELS 8270 * unsolicited event. An unsolicited FLOGI can be received in a point-to- 8271 * point topology. As an unsolicited FLOGI should not be received in a loop 8272 * mode, any unsolicited FLOGI received in loop mode shall be ignored. The 8273 * lpfc_check_sparm() routine is invoked to check the parameters in the 8274 * unsolicited FLOGI. If parameters validation failed, the routine 8275 * lpfc_els_rsp_reject() shall be called with reject reason code set to 8276 * LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the 8277 * FLOGI shall be compared with the Port WWN of the @vport to determine who 8278 * will initiate PLOGI. The higher lexicographical value party shall has 8279 * higher priority (as the winning port) and will initiate PLOGI and 8280 * communicate Port_IDs (Addresses) for both nodes in PLOGI. The result 8281 * of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI 8282 * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI. 
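 * If the two Port WWNs compare equal, the FLOGI is this port's own frame
 * arriving back on the link; it is then handled as an external loopback
 * plug (the link is re-initialized on SLI-3 parts, or the outstanding
 * FLOGI is aborted on SLI-4) instead of as a point-to-point peer.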
8283 * 8284 * Return code 8285 * 0 - Successfully processed the unsolicited flogi 8286 * 1 - Failed to process the unsolicited flogi 8287 **/ 8288 static int 8289 lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8290 struct lpfc_nodelist *ndlp) 8291 { 8292 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 8293 struct lpfc_hba *phba = vport->phba; 8294 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf; 8295 uint32_t *lp = (uint32_t *) pcmd->virt; 8296 union lpfc_wqe128 *wqe = &cmdiocb->wqe; 8297 struct serv_parm *sp; 8298 LPFC_MBOXQ_t *mbox; 8299 uint32_t cmd, did; 8300 int rc; 8301 uint32_t fc_flag = 0; 8302 uint32_t port_state = 0; 8303 8304 /* Clear external loopback plug detected flag */ 8305 phba->link_flag &= ~LS_EXTERNAL_LOOPBACK; 8306 8307 cmd = *lp++; 8308 sp = (struct serv_parm *) lp; 8309 8310 /* FLOGI received */ 8311 8312 lpfc_set_disctmo(vport); 8313 8314 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 8315 /* We should never receive a FLOGI in loop mode, ignore it */ 8316 did = bf_get(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest); 8317 8318 /* An FLOGI ELS command <elsCmd> was received from DID <did> in 8319 Loop Mode */ 8320 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 8321 "0113 An FLOGI ELS command x%x was " 8322 "received from DID x%x in Loop Mode\n", 8323 cmd, did); 8324 return 1; 8325 } 8326 8327 (void) lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1); 8328 8329 /* 8330 * If our portname is greater than the remote portname, 8331 * then we initiate Nport login. 8332 */ 8333 8334 rc = memcmp(&vport->fc_portname, &sp->portName, 8335 sizeof(struct lpfc_name)); 8336 8337 if (!rc) { 8338 if (phba->sli_rev < LPFC_SLI_REV4) { 8339 mbox = mempool_alloc(phba->mbox_mem_pool, 8340 GFP_KERNEL); 8341 if (!mbox) 8342 return 1; 8343 lpfc_linkdown(phba); 8344 lpfc_init_link(phba, mbox, 8345 phba->cfg_topology, 8346 phba->cfg_link_speed); 8347 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; 8348 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 8349 mbox->vport = vport; 8350 rc = lpfc_sli_issue_mbox(phba, mbox, 8351 MBX_NOWAIT); 8352 lpfc_set_loopback_flag(phba); 8353 if (rc == MBX_NOT_FINISHED) 8354 mempool_free(mbox, phba->mbox_mem_pool); 8355 return 1; 8356 } 8357 8358 /* External loopback plug insertion detected */ 8359 phba->link_flag |= LS_EXTERNAL_LOOPBACK; 8360 8361 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_LIBDFC, 8362 "1119 External Loopback plug detected\n"); 8363 8364 /* abort the flogi coming back to ourselves 8365 * due to external loopback on the port. 8366 */ 8367 lpfc_els_abort_flogi(phba); 8368 return 0; 8369 8370 } else if (rc > 0) { /* greater than */ 8371 spin_lock_irq(shost->host_lock); 8372 vport->fc_flag |= FC_PT2PT_PLOGI; 8373 spin_unlock_irq(shost->host_lock); 8374 8375 /* If we have the high WWPN we can assign our own 8376 * myDID; otherwise, we have to WAIT for a PLOGI 8377 * from the remote NPort to find out what it 8378 * will be. 8379 */ 8380 vport->fc_myDID = PT2PT_LocalID; 8381 } else { 8382 vport->fc_myDID = PT2PT_RemoteID; 8383 } 8384 8385 /* 8386 * The vport state should go to LPFC_FLOGI only 8387 * AFTER we issue a FLOGI, not receive one. 8388 */ 8389 spin_lock_irq(shost->host_lock); 8390 fc_flag = vport->fc_flag; 8391 port_state = vport->port_state; 8392 vport->fc_flag |= FC_PT2PT; 8393 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 8394 8395 /* Acking an unsol FLOGI. Count 1 for link bounce 8396 * work-around. 
8397 */ 8398 vport->rcv_flogi_cnt++; 8399 spin_unlock_irq(shost->host_lock); 8400 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8401 "3311 Rcv Flogi PS x%x new PS x%x " 8402 "fc_flag x%x new fc_flag x%x\n", 8403 port_state, vport->port_state, 8404 fc_flag, vport->fc_flag); 8405 8406 /* 8407 * We temporarily set fc_myDID to make it look like we are 8408 * a Fabric. This is done just so we end up with the right 8409 * did / sid on the FLOGI ACC rsp. 8410 */ 8411 did = vport->fc_myDID; 8412 vport->fc_myDID = Fabric_DID; 8413 8414 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); 8415 8416 /* Defer ACC response until AFTER we issue a FLOGI */ 8417 if (!(phba->hba_flag & HBA_FLOGI_ISSUED)) { 8418 phba->defer_flogi_acc_rx_id = bf_get(wqe_ctxt_tag, 8419 &wqe->xmit_els_rsp.wqe_com); 8420 phba->defer_flogi_acc_ox_id = bf_get(wqe_rcvoxid, 8421 &wqe->xmit_els_rsp.wqe_com); 8422 8423 vport->fc_myDID = did; 8424 8425 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8426 "3344 Deferring FLOGI ACC: rx_id: x%x," 8427 " ox_id: x%x, hba_flag x%x\n", 8428 phba->defer_flogi_acc_rx_id, 8429 phba->defer_flogi_acc_ox_id, phba->hba_flag); 8430 8431 phba->defer_flogi_acc_flag = true; 8432 8433 return 0; 8434 } 8435 8436 /* Send back ACC */ 8437 lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, cmdiocb, ndlp, NULL); 8438 8439 /* Now lets put fc_myDID back to what its supposed to be */ 8440 vport->fc_myDID = did; 8441 8442 return 0; 8443 } 8444 8445 /** 8446 * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb 8447 * @vport: pointer to a host virtual N_Port data structure. 8448 * @cmdiocb: pointer to lpfc command iocb data structure. 8449 * @ndlp: pointer to a node-list data structure. 8450 * 8451 * This routine processes Request Node Identification Data (RNID) IOCB 8452 * received as an ELS unsolicited event. Only when the RNID specified format 8453 * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data) 8454 * present, this routine will invoke the lpfc_els_rsp_rnid_acc() routine to 8455 * Accept (ACC) the RNID ELS command. All the other RNID formats are 8456 * rejected by invoking the lpfc_els_rsp_reject() routine. 8457 * 8458 * Return code 8459 * 0 - Successfully processed rnid iocb (currently always return 0) 8460 **/ 8461 static int 8462 lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8463 struct lpfc_nodelist *ndlp) 8464 { 8465 struct lpfc_dmabuf *pcmd; 8466 uint32_t *lp; 8467 RNID *rn; 8468 struct ls_rjt stat; 8469 8470 pcmd = cmdiocb->cmd_dmabuf; 8471 lp = (uint32_t *) pcmd->virt; 8472 8473 lp++; 8474 rn = (RNID *) lp; 8475 8476 /* RNID received */ 8477 8478 switch (rn->Format) { 8479 case 0: 8480 case RNID_TOPOLOGY_DISC: 8481 /* Send back ACC */ 8482 lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp); 8483 break; 8484 default: 8485 /* Reject this request because format not supported */ 8486 stat.un.b.lsRjtRsvd0 = 0; 8487 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8488 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8489 stat.un.b.vendorUnique = 0; 8490 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 8491 NULL); 8492 } 8493 return 0; 8494 } 8495 8496 /** 8497 * lpfc_els_rcv_echo - Process an unsolicited echo iocb 8498 * @vport: pointer to a host virtual N_Port data structure. 8499 * @cmdiocb: pointer to lpfc command iocb data structure. 8500 * @ndlp: pointer to a node-list data structure. 
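 *
 * This routine processes an unsolicited ECHO ELS command: the echo data
 * following the ELS command word is returned to the sender unchanged in
 * an ECHO Accept (ACC) response via lpfc_els_rsp_echo_acc().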
8501 * 8502 * Return code 8503 * 0 - Successfully processed echo iocb (currently always return 0) 8504 **/ 8505 static int 8506 lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8507 struct lpfc_nodelist *ndlp) 8508 { 8509 uint8_t *pcmd; 8510 8511 pcmd = (uint8_t *)cmdiocb->cmd_dmabuf->virt; 8512 8513 /* skip over first word of echo command to find echo data */ 8514 pcmd += sizeof(uint32_t); 8515 8516 lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp); 8517 return 0; 8518 } 8519 8520 /** 8521 * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb 8522 * @vport: pointer to a host virtual N_Port data structure. 8523 * @cmdiocb: pointer to lpfc command iocb data structure. 8524 * @ndlp: pointer to a node-list data structure. 8525 * 8526 * This routine processes a Link Incident Report Registration(LIRR) IOCB 8527 * received as an ELS unsolicited event. Currently, this function just invokes 8528 * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally. 8529 * 8530 * Return code 8531 * 0 - Successfully processed lirr iocb (currently always return 0) 8532 **/ 8533 static int 8534 lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8535 struct lpfc_nodelist *ndlp) 8536 { 8537 struct ls_rjt stat; 8538 8539 /* For now, unconditionally reject this command */ 8540 stat.un.b.lsRjtRsvd0 = 0; 8541 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8542 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8543 stat.un.b.vendorUnique = 0; 8544 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 8545 return 0; 8546 } 8547 8548 /** 8549 * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb 8550 * @vport: pointer to a host virtual N_Port data structure. 8551 * @cmdiocb: pointer to lpfc command iocb data structure. 8552 * @ndlp: pointer to a node-list data structure. 8553 * 8554 * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB 8555 * received as an ELS unsolicited event. A request to RRQ shall only 8556 * be accepted if the Originator Nx_Port N_Port_ID or the Responder 8557 * Nx_Port N_Port_ID of the target Exchange is the same as the 8558 * N_Port_ID of the Nx_Port that makes the request. If the RRQ is 8559 * not accepted, an LS_RJT with reason code "Unable to perform 8560 * command request" and reason code explanation "Invalid Originator 8561 * S_ID" shall be returned. For now, we just unconditionally accept 8562 * RRQ from the target. 8563 **/ 8564 static void 8565 lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8566 struct lpfc_nodelist *ndlp) 8567 { 8568 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8569 if (vport->phba->sli_rev == LPFC_SLI_REV4) 8570 lpfc_els_clear_rrq(vport, cmdiocb, ndlp); 8571 } 8572 8573 /** 8574 * lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd 8575 * @phba: pointer to lpfc hba data structure. 8576 * @pmb: pointer to the driver internal queue element for mailbox command. 8577 * 8578 * This routine is the completion callback function for the MBX_READ_LNK_STAT 8579 * mailbox command. This callback function is to actually send the Accept 8580 * (ACC) response to a Read Link Status (RLS) unsolicited IOCB event. It 8581 * collects the link statistics from the completion of the MBX_READ_LNK_STAT 8582 * mailbox command, constructs the RLS response with the link statistics 8583 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC 8584 * response to the RLS. 
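 * The OX_ID and RX_ID of the original RLS exchange are recovered from
 * pmb->ctx_buf, where lpfc_els_rcv_rls() packed them as
 * ((ox_id << 16) | rx_id) before issuing the mailbox command.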
8585 * 8586 * Note that the ndlp reference count will be incremented by 1 for holding the 8587 * ndlp and the reference to ndlp will be stored into the ndlp field of 8588 * the IOCB for the completion callback function to the RLS Accept Response 8589 * ELS IOCB command. 8590 * 8591 **/ 8592 static void 8593 lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 8594 { 8595 int rc = 0; 8596 MAILBOX_t *mb; 8597 IOCB_t *icmd; 8598 union lpfc_wqe128 *wqe; 8599 struct RLS_RSP *rls_rsp; 8600 uint8_t *pcmd; 8601 struct lpfc_iocbq *elsiocb; 8602 struct lpfc_nodelist *ndlp; 8603 uint16_t oxid; 8604 uint16_t rxid; 8605 uint32_t cmdsize; 8606 u32 ulp_context; 8607 8608 mb = &pmb->u.mb; 8609 8610 ndlp = pmb->ctx_ndlp; 8611 rxid = (uint16_t)((unsigned long)(pmb->ctx_buf) & 0xffff); 8612 oxid = (uint16_t)(((unsigned long)(pmb->ctx_buf) >> 16) & 0xffff); 8613 pmb->ctx_buf = NULL; 8614 pmb->ctx_ndlp = NULL; 8615 8616 if (mb->mbxStatus) { 8617 mempool_free(pmb, phba->mbox_mem_pool); 8618 return; 8619 } 8620 8621 cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t); 8622 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 8623 lpfc_max_els_tries, ndlp, 8624 ndlp->nlp_DID, ELS_CMD_ACC); 8625 8626 /* Decrement the ndlp reference count from previous mbox command */ 8627 lpfc_nlp_put(ndlp); 8628 8629 if (!elsiocb) { 8630 mempool_free(pmb, phba->mbox_mem_pool); 8631 return; 8632 } 8633 8634 ulp_context = get_job_ulpcontext(phba, elsiocb); 8635 if (phba->sli_rev == LPFC_SLI_REV4) { 8636 wqe = &elsiocb->wqe; 8637 /* Xri / rx_id */ 8638 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, rxid); 8639 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, oxid); 8640 } else { 8641 icmd = &elsiocb->iocb; 8642 icmd->ulpContext = rxid; 8643 icmd->unsli3.rcvsli3.ox_id = oxid; 8644 } 8645 8646 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 8647 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 8648 pcmd += sizeof(uint32_t); /* Skip past command */ 8649 rls_rsp = (struct RLS_RSP *)pcmd; 8650 8651 rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt); 8652 rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt); 8653 rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt); 8654 rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt); 8655 rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord); 8656 rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt); 8657 mempool_free(pmb, phba->mbox_mem_pool); 8658 /* Xmit ELS RLS ACC response tag <ulpIoTag> */ 8659 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 8660 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, " 8661 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 8662 elsiocb->iotag, ulp_context, 8663 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 8664 ndlp->nlp_rpi); 8665 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 8666 phba->fc_stat.elsXmitACC++; 8667 elsiocb->ndlp = lpfc_nlp_get(ndlp); 8668 if (!elsiocb->ndlp) { 8669 lpfc_els_free_iocb(phba, elsiocb); 8670 return; 8671 } 8672 8673 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 8674 if (rc == IOCB_ERROR) { 8675 lpfc_els_free_iocb(phba, elsiocb); 8676 lpfc_nlp_put(ndlp); 8677 } 8678 return; 8679 } 8680 8681 /** 8682 * lpfc_els_rcv_rls - Process an unsolicited rls iocb 8683 * @vport: pointer to a host virtual N_Port data structure. 8684 * @cmdiocb: pointer to lpfc command iocb data structure. 8685 * @ndlp: pointer to a node-list data structure. 8686 * 8687 * This routine processes Read Link Status (RLS) IOCB received as an 8688 * ELS unsolicited event. 
It first checks the remote port state. If the 8689 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE 8690 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
8691 * response. Otherwise, it issues the MBX_READ_LNK_STAT mailbox command 8692 * for reading the HBA link statistics. The callback function, 8693 * lpfc_els_rsp_rls_acc(), set on the MBX_READ_LNK_STAT mailbox command,
8694 * actually sends out the RLS Accept (ACC) response. 8695 * 8696 * Return codes 8697 * 0 - Successfully processed rls iocb (currently always return 0) 8698 **/ 8699 static int
8700 lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8701 struct lpfc_nodelist *ndlp) 8702 { 8703 struct lpfc_hba *phba = vport->phba; 8704 LPFC_MBOXQ_t *mbox; 8705 struct ls_rjt stat;
8706 u32 ctx = get_job_ulpcontext(phba, cmdiocb); 8707 u32 ox_id = get_job_rcvoxid(phba, cmdiocb); 8708 8709 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 8710 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
8711 /* reject the unsolicited RLS request and done with it */ 8712 goto reject_out; 8713 8714 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC); 8715 if (mbox) { 8716 lpfc_read_lnk_stat(phba, mbox);
8717 mbox->ctx_buf = (void *)((unsigned long) 8718 (ox_id << 16 | ctx)); 8719 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 8720 if (!mbox->ctx_ndlp) 8721 goto node_err; 8722 mbox->vport = vport;
8723 mbox->mbox_cmpl = lpfc_els_rsp_rls_acc; 8724 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 8725 != MBX_NOT_FINISHED) 8726 /* Mbox completion will send ELS Response */ 8727 return 0;
8728 /* Decrement reference count used for the failed mbox 8729 * command. 8730 */ 8731 lpfc_nlp_put(ndlp); 8732 node_err: 8733 mempool_free(mbox, phba->mbox_mem_pool); 8734 } 8735 reject_out:
8736 /* issue rejection response */ 8737 stat.un.b.lsRjtRsvd0 = 0; 8738 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8739 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8740 stat.un.b.vendorUnique = 0;
8741 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 8742 return 0; 8743 } 8744 8745 /** 8746 * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb
8747 * @vport: pointer to a host virtual N_Port data structure. 8748 * @cmdiocb: pointer to lpfc command iocb data structure. 8749 * @ndlp: pointer to a node-list data structure. 8750 *
8751 * This routine processes Read Timeout Value (RTV) IOCB received as an 8752 * ELS unsolicited event. It first checks the remote port state. If the 8753 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
8754 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject 8755 * response. Otherwise, it sends the Accept (ACC) response to a Read Timeout 8756 * Value (RTV) unsolicited IOCB event. 8757 *
8758 * Note that the ndlp reference count will be incremented by 1 for holding the 8759 * ndlp and the reference to ndlp will be stored into the ndlp field of 8760 * the IOCB for the completion callback function to the RTV Accept Response 8761 * ELS IOCB command.
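 *
 * The ACC payload is a struct RTV_RSP carrying this port's timeout values:
 * R_A_TOV is reported in milliseconds (hence the multiplication by 1000),
 * E_D_TOV is taken from phba->fc_edtov, and the qtov word carries the
 * E_D_TOV resolution bit, roughly as
 *
 *	rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000);
 *	rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov);
 *	bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0);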
8762 * 8763 * Return codes 8764 * 0 - Successfully processed rtv iocb (currently always return 0) 8765 **/ 8766 static int 8767 lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8768 struct lpfc_nodelist *ndlp) 8769 {
8770 int rc = 0; 8771 IOCB_t *icmd; 8772 union lpfc_wqe128 *wqe; 8773 struct lpfc_hba *phba = vport->phba; 8774 struct ls_rjt stat; 8775 struct RTV_RSP *rtv_rsp; 8776 uint8_t *pcmd; 8777 struct lpfc_iocbq *elsiocb;
8778 uint32_t cmdsize; 8779 u32 ulp_context; 8780 8781 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 8782 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) 8783 /* reject the unsolicited RTV request and done with it */ 8784 goto reject_out;
8785 8786 cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t); 8787 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 8788 lpfc_max_els_tries, ndlp, 8789 ndlp->nlp_DID, ELS_CMD_ACC); 8790 8791 if (!elsiocb) 8792 return 1;
8793 8794 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 8795 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 8796 pcmd += sizeof(uint32_t); /* Skip past command */ 8797 8798 ulp_context = get_job_ulpcontext(phba, elsiocb);
8799 /* use the command's xri in the response */ 8800 if (phba->sli_rev == LPFC_SLI_REV4) { 8801 wqe = &elsiocb->wqe; 8802 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 8803 get_job_ulpcontext(phba, cmdiocb));
8804 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 8805 get_job_rcvoxid(phba, cmdiocb)); 8806 } else { 8807 icmd = &elsiocb->iocb; 8808 icmd->ulpContext = get_job_ulpcontext(phba, cmdiocb);
8809 icmd->unsli3.rcvsli3.ox_id = get_job_rcvoxid(phba, cmdiocb); 8810 } 8811 8812 rtv_rsp = (struct RTV_RSP *)pcmd; 8813 8814 /* populate RTV payload */ 8815 rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */
8816 rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov); 8817 bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0); 8818 bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */ 8819 rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov); 8820
8821 /* Xmit ELS RTV ACC response tag <ulpIoTag> */ 8822 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 8823 "2875 Xmit ELS RTV ACC response tag x%x xri x%x, " 8824 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, "
8825 "Data: x%x x%x x%x\n", 8826 elsiocb->iotag, ulp_context, 8827 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 8828 ndlp->nlp_rpi, 8829 rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov);
8830 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 8831 phba->fc_stat.elsXmitACC++; 8832 elsiocb->ndlp = lpfc_nlp_get(ndlp); 8833 if (!elsiocb->ndlp) { 8834 lpfc_els_free_iocb(phba, elsiocb); 8835 return 0; 8836 } 8837
8838 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 8839 if (rc == IOCB_ERROR) { 8840 lpfc_els_free_iocb(phba, elsiocb); 8841 lpfc_nlp_put(ndlp); 8842 } 8843 return 0; 8844 8845 reject_out:
8846 /* issue rejection response */ 8847 stat.un.b.lsRjtRsvd0 = 0; 8848 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8849 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8850 stat.un.b.vendorUnique = 0;
8851 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 8852 return 0; 8853 } 8854 8855 /* lpfc_issue_els_rrq - Issue an ELS RRQ command 8856 * @vport: pointer to a host virtual N_Port data structure.
8857 * @ndlp: pointer to a node-list data structure. 8858 * @did: DID of the target. 8859 * @rrq: Pointer to the rrq struct. 8860 * 8861 * Build an ELS RRQ command and send it to the target. If the issue_iocb is 8862 * successful, the completion handler will clear the RRQ.
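 *
 * The RRQ payload identifies the exchange to be reinstated: the OX_ID is
 * looked up from the SLI4 XRI table for rrq->xritag, the RX_ID is taken
 * from rrq->rxid and the originator S_ID from vport->fc_myDID, roughly as
 *
 *	bf_set(rrq_oxid, els_rrq, phba->sli4_hba.xri_ids[rrq->xritag]);
 *	bf_set(rrq_rxid, els_rrq, rrq->rxid);
 *	bf_set(rrq_did, els_rrq, vport->fc_myDID);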
8863 * 8864 * Return codes 8865 * 0 - Successfully sent rrq els iocb. 8866 * 1 - Failed to send rrq els iocb. 8867 **/ 8868 static int 8869 lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 8870 uint32_t did, struct lpfc_node_rrq *rrq) 8871 { 8872 struct lpfc_hba *phba = vport->phba; 8873 struct RRQ *els_rrq; 8874 struct lpfc_iocbq *elsiocb; 8875 uint8_t *pcmd; 8876 uint16_t cmdsize; 8877 int ret; 8878 8879 if (!ndlp) 8880 return 1; 8881 8882 /* If ndlp is not NULL, we will bump the reference count on it */ 8883 cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ)); 8884 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did, 8885 ELS_CMD_RRQ); 8886 if (!elsiocb) 8887 return 1; 8888 8889 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 8890 8891 /* For RRQ request, remainder of payload is Exchange IDs */ 8892 *((uint32_t *) (pcmd)) = ELS_CMD_RRQ; 8893 pcmd += sizeof(uint32_t); 8894 els_rrq = (struct RRQ *) pcmd; 8895 8896 bf_set(rrq_oxid, els_rrq, phba->sli4_hba.xri_ids[rrq->xritag]); 8897 bf_set(rrq_rxid, els_rrq, rrq->rxid); 8898 bf_set(rrq_did, els_rrq, vport->fc_myDID); 8899 els_rrq->rrq = cpu_to_be32(els_rrq->rrq); 8900 els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg); 8901 8902 8903 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 8904 "Issue RRQ: did:x%x", 8905 did, rrq->xritag, rrq->rxid); 8906 elsiocb->context_un.rrq = rrq; 8907 elsiocb->cmd_cmpl = lpfc_cmpl_els_rrq; 8908 8909 elsiocb->ndlp = lpfc_nlp_get(ndlp); 8910 if (!elsiocb->ndlp) 8911 goto io_err; 8912 8913 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 8914 if (ret == IOCB_ERROR) { 8915 lpfc_nlp_put(ndlp); 8916 goto io_err; 8917 } 8918 return 0; 8919 8920 io_err: 8921 lpfc_els_free_iocb(phba, elsiocb); 8922 return 1; 8923 } 8924 8925 /** 8926 * lpfc_send_rrq - Sends ELS RRQ if needed. 8927 * @phba: pointer to lpfc hba data structure. 8928 * @rrq: pointer to the active rrq. 8929 * 8930 * This routine will call the lpfc_issue_els_rrq if the rrq is 8931 * still active for the xri. If this function returns a failure then 8932 * the caller needs to clean up the RRQ by calling lpfc_clr_active_rrq. 8933 * 8934 * Returns 0 Success. 8935 * 1 Failure. 8936 **/ 8937 int 8938 lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq) 8939 { 8940 struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport, 8941 rrq->nlp_DID); 8942 if (!ndlp) 8943 return 1; 8944 8945 if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag)) 8946 return lpfc_issue_els_rrq(rrq->vport, ndlp, 8947 rrq->nlp_DID, rrq); 8948 else 8949 return 1; 8950 } 8951 8952 /** 8953 * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command 8954 * @vport: pointer to a host virtual N_Port data structure. 8955 * @cmdsize: size of the ELS command. 8956 * @oldiocb: pointer to the original lpfc command iocb data structure. 8957 * @ndlp: pointer to a node-list data structure. 8958 * 8959 * This routine issuees an Accept (ACC) Read Port List (RPL) ELS command. 8960 * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL. 8961 * 8962 * Note that the ndlp reference count will be incremented by 1 for holding the 8963 * ndlp and the reference to ndlp will be stored into the ndlp field of 8964 * the IOCB for the completion callback function to the RPL Accept Response 8965 * ELS command. 
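 *
 * The ACC payload begins with the ELS_CMD_ACC word, whose second half-word
 * is then overwritten with the payload length, followed by an RPL_RSP that
 * reports a single port-number block for this port: list length 1, index 0,
 * port number 0, and the vport's N_Port_ID and WWPN.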
8966 * 8967 * Return code 8968 * 0 - Successfully issued ACC RPL ELS command 8969 * 1 - Failed to issue ACC RPL ELS command 8970 **/ 8971 static int 8972 lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize, 8973 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 8974 { 8975 int rc = 0; 8976 struct lpfc_hba *phba = vport->phba; 8977 IOCB_t *icmd; 8978 union lpfc_wqe128 *wqe; 8979 RPL_RSP rpl_rsp; 8980 struct lpfc_iocbq *elsiocb; 8981 uint8_t *pcmd; 8982 u32 ulp_context; 8983 8984 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 8985 ndlp->nlp_DID, ELS_CMD_ACC); 8986 8987 if (!elsiocb) 8988 return 1; 8989 8990 ulp_context = get_job_ulpcontext(phba, elsiocb); 8991 if (phba->sli_rev == LPFC_SLI_REV4) { 8992 wqe = &elsiocb->wqe; 8993 /* Xri / rx_id */ 8994 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 8995 get_job_ulpcontext(phba, oldiocb)); 8996 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 8997 get_job_rcvoxid(phba, oldiocb)); 8998 } else { 8999 icmd = &elsiocb->iocb; 9000 icmd->ulpContext = get_job_ulpcontext(phba, oldiocb); 9001 icmd->unsli3.rcvsli3.ox_id = get_job_rcvoxid(phba, oldiocb); 9002 } 9003 9004 pcmd = elsiocb->cmd_dmabuf->virt; 9005 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 9006 pcmd += sizeof(uint16_t); 9007 *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize); 9008 pcmd += sizeof(uint16_t); 9009 9010 /* Setup the RPL ACC payload */ 9011 rpl_rsp.listLen = be32_to_cpu(1); 9012 rpl_rsp.index = 0; 9013 rpl_rsp.port_num_blk.portNum = 0; 9014 rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID); 9015 memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname, 9016 sizeof(struct lpfc_name)); 9017 memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t)); 9018 /* Xmit ELS RPL ACC response tag <ulpIoTag> */ 9019 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 9020 "0120 Xmit ELS RPL ACC response tag x%x " 9021 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " 9022 "rpi x%x\n", 9023 elsiocb->iotag, ulp_context, 9024 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 9025 ndlp->nlp_rpi); 9026 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 9027 phba->fc_stat.elsXmitACC++; 9028 elsiocb->ndlp = lpfc_nlp_get(ndlp); 9029 if (!elsiocb->ndlp) { 9030 lpfc_els_free_iocb(phba, elsiocb); 9031 return 1; 9032 } 9033 9034 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 9035 if (rc == IOCB_ERROR) { 9036 lpfc_els_free_iocb(phba, elsiocb); 9037 lpfc_nlp_put(ndlp); 9038 return 1; 9039 } 9040 9041 return 0; 9042 } 9043 9044 /** 9045 * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb 9046 * @vport: pointer to a host virtual N_Port data structure. 9047 * @cmdiocb: pointer to lpfc command iocb data structure. 9048 * @ndlp: pointer to a node-list data structure. 9049 * 9050 * This routine processes Read Port List (RPL) IOCB received as an ELS 9051 * unsolicited event. It first checks the remote port state. If the remote 9052 * port is not in NLP_STE_UNMAPPED_NODE and NLP_STE_MAPPED_NODE states, it 9053 * invokes the lpfc_els_rsp_reject() routine to send reject response. 9054 * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine 9055 * to accept the RPL. 
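 *
 * The size of the ACC is bounded by the maxsize field of the request: when
 * the request starts at index 0 and maxsize is either zero (no limit) or
 * large enough to hold a full RPL_RSP, a complete single-entry response is
 * built; otherwise the response is truncated to maxsize words, roughly
 *
 *	cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t);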
9056 * 9057 * Return code 9058 * 0 - Successfully processed rpl iocb (currently always return 0) 9059 **/ 9060 static int 9061 lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 9062 struct lpfc_nodelist *ndlp) 9063 { 9064 struct lpfc_dmabuf *pcmd; 9065 uint32_t *lp; 9066 uint32_t maxsize; 9067 uint16_t cmdsize; 9068 RPL *rpl; 9069 struct ls_rjt stat; 9070 9071 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 9072 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { 9073 /* issue rejection response */ 9074 stat.un.b.lsRjtRsvd0 = 0; 9075 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 9076 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 9077 stat.un.b.vendorUnique = 0; 9078 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 9079 NULL); 9080 /* rejected the unsolicited RPL request and done with it */ 9081 return 0; 9082 } 9083 9084 pcmd = cmdiocb->cmd_dmabuf; 9085 lp = (uint32_t *) pcmd->virt; 9086 rpl = (RPL *) (lp + 1); 9087 maxsize = be32_to_cpu(rpl->maxsize); 9088 9089 /* We support only one port */ 9090 if ((rpl->index == 0) && 9091 ((maxsize == 0) || 9092 ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) { 9093 cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP); 9094 } else { 9095 cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t); 9096 } 9097 lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp); 9098 9099 return 0; 9100 } 9101 9102 /** 9103 * lpfc_els_rcv_farp - Process an unsolicited farp request els command 9104 * @vport: pointer to a virtual N_Port data structure. 9105 * @cmdiocb: pointer to lpfc command iocb data structure. 9106 * @ndlp: pointer to a node-list data structure. 9107 * 9108 * This routine processes Fibre Channel Address Resolution Protocol 9109 * (FARP) Request IOCB received as an ELS unsolicited event. Currently, 9110 * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such, 9111 * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the 9112 * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the 9113 * remote PortName is compared against the FC PortName stored in the @vport 9114 * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is 9115 * compared against the FC NodeName stored in the @vport data structure. 9116 * If any of these matches and the FARP_REQUEST_FARPR flag is set in the 9117 * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is 9118 * invoked to send out FARP Response to the remote node. Before sending the 9119 * FARP Response, however, the FARP_REQUEST_PLOGI flag is check in the FARP 9120 * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi() 9121 * routine is invoked to log into the remote port first. 
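 *
 * The name comparison is a plain memcmp() of the 8-byte WWPN/WWNN, roughly
 *
 *	if ((fp->Mflags & FARP_MATCH_PORT) &&
 *	    !memcmp(&fp->RportName, &vport->fc_portname,
 *		    sizeof(struct lpfc_name)))
 *		cnt = 1;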
9122 * 9123 * Return code 9124 * 0 - Either the FARP Match Mode not supported or successfully processed 9125 **/ 9126 static int 9127 lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 9128 struct lpfc_nodelist *ndlp) 9129 { 9130 struct lpfc_dmabuf *pcmd; 9131 uint32_t *lp; 9132 FARP *fp; 9133 uint32_t cnt, did; 9134 9135 did = get_job_els_rsp64_did(vport->phba, cmdiocb); 9136 pcmd = cmdiocb->cmd_dmabuf; 9137 lp = (uint32_t *) pcmd->virt; 9138 9139 lp++; 9140 fp = (FARP *) lp; 9141 /* FARP-REQ received from DID <did> */ 9142 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 9143 "0601 FARP-REQ received from DID x%x\n", did); 9144 /* We will only support match on WWPN or WWNN */ 9145 if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) { 9146 return 0; 9147 } 9148 9149 cnt = 0; 9150 /* If this FARP command is searching for my portname */ 9151 if (fp->Mflags & FARP_MATCH_PORT) { 9152 if (memcmp(&fp->RportName, &vport->fc_portname, 9153 sizeof(struct lpfc_name)) == 0) 9154 cnt = 1; 9155 } 9156 9157 /* If this FARP command is searching for my nodename */ 9158 if (fp->Mflags & FARP_MATCH_NODE) { 9159 if (memcmp(&fp->RnodeName, &vport->fc_nodename, 9160 sizeof(struct lpfc_name)) == 0) 9161 cnt = 1; 9162 } 9163 9164 if (cnt) { 9165 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || 9166 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) { 9167 /* Log back into the node before sending the FARP. */ 9168 if (fp->Rflags & FARP_REQUEST_PLOGI) { 9169 ndlp->nlp_prev_state = ndlp->nlp_state; 9170 lpfc_nlp_set_state(vport, ndlp, 9171 NLP_STE_PLOGI_ISSUE); 9172 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 9173 } 9174 9175 /* Send a FARP response to that node */ 9176 if (fp->Rflags & FARP_REQUEST_FARPR) 9177 lpfc_issue_els_farpr(vport, did, 0); 9178 } 9179 } 9180 return 0; 9181 } 9182 9183 /** 9184 * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb 9185 * @vport: pointer to a host virtual N_Port data structure. 9186 * @cmdiocb: pointer to lpfc command iocb data structure. 9187 * @ndlp: pointer to a node-list data structure. 9188 * 9189 * This routine processes Fibre Channel Address Resolution Protocol 9190 * Response (FARPR) IOCB received as an ELS unsolicited event. It simply 9191 * invokes the lpfc_els_rsp_acc() routine to the remote node to accept 9192 * the FARP response request. 9193 * 9194 * Return code 9195 * 0 - Successfully processed FARPR IOCB (currently always return 0) 9196 **/ 9197 static int 9198 lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 9199 struct lpfc_nodelist *ndlp) 9200 { 9201 uint32_t did; 9202 9203 did = get_job_els_rsp64_did(vport->phba, cmdiocb); 9204 9205 /* FARP-RSP received from DID <did> */ 9206 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 9207 "0600 FARP-RSP received from DID x%x\n", did); 9208 /* ACCEPT the Farp resp request */ 9209 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 9210 9211 return 0; 9212 } 9213 9214 /** 9215 * lpfc_els_rcv_fan - Process an unsolicited fan iocb command 9216 * @vport: pointer to a host virtual N_Port data structure. 9217 * @cmdiocb: pointer to lpfc command iocb data structure. 9218 * @fan_ndlp: pointer to a node-list data structure. 9219 * 9220 * This routine processes a Fabric Address Notification (FAN) IOCB 9221 * command received as an ELS unsolicited event. The FAN ELS command will 9222 * only be processed on a physical port (i.e., the @vport represents the 9223 * physical port). 
The fabric NodeName and PortName from the FAN IOCB are 9224 * compared against those in the phba data structure. If either of them is 9225 * different, the lpfc_issue_init_vfi() routine is invoked to re-establish
9226 * Fabric Login (FLOGI) to the fabric and start the discovery over. Otherwise, 9227 * if both of them are identical, the lpfc_issue_fabric_reglogin() routine (or 9228 * lpfc_issue_reg_vfi() on SLI4) is invoked to register login to the fabric.
9229 * 9230 * Return code 9231 * 0 - Successfully processed fan iocb (currently always return 0). 9232 **/ 9233 static int 9234 lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 9235 struct lpfc_nodelist *fan_ndlp) 9236 {
9237 struct lpfc_hba *phba = vport->phba; 9238 uint32_t *lp; 9239 FAN *fp; 9240 9241 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n"); 9242 lp = (uint32_t *)cmdiocb->cmd_dmabuf->virt; 9243 fp = (FAN *) ++lp;
9244 /* FAN received; Fan does not have a reply sequence */ 9245 if ((vport == phba->pport) && 9246 (vport->port_state == LPFC_LOCAL_CFG_LINK)) { 9247 if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName, 9248 sizeof(struct lpfc_name))) ||
9249 (memcmp(&phba->fc_fabparam.portName, &fp->FportName, 9250 sizeof(struct lpfc_name)))) { 9251 /* This port has switched fabrics. FLOGI is required */ 9252 lpfc_issue_init_vfi(vport); 9253 } else { 9254 /* FAN verified - skip FLOGI */
9255 vport->fc_myDID = vport->fc_prevDID; 9256 if (phba->sli_rev < LPFC_SLI_REV4) 9257 lpfc_issue_fabric_reglogin(vport); 9258 else { 9259 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 9260 "3138 Need register VFI: (x%x/%x)\n",
9261 vport->fc_prevDID, vport->fc_myDID); 9262 lpfc_issue_reg_vfi(vport); 9263 } 9264 } 9265 } 9266 return 0; 9267 } 9268 9269 /** 9270 * lpfc_els_rcv_edc - Process an unsolicited EDC iocb
9271 * @vport: pointer to a host virtual N_Port data structure. 9272 * @cmdiocb: pointer to lpfc command iocb data structure. 9273 * @ndlp: pointer to a node-list data structure.
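 *
 * This routine processes an Exchange Diagnostic Capabilities (EDC) IOCB
 * received as an ELS unsolicited event. Congestion handling is first reset
 * to its defaults (no congestion signaling, FPIN alarms and warnings), then
 * the diagnostic descriptor TLVs are walked: a Link Fault Capability
 * descriptor is logged, and a Congestion Signaling Capability descriptor
 * restores the initially registered signal/FPIN modes and negotiates the
 * signal frequency via lpfc_least_capable_settings(). An EDC ACC is always
 * sent back and congestion signaling is then (re)configured.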
9274 * 9275 * Return code 9276 * 0 - Successfully processed echo iocb (currently always return 0) 9277 **/ 9278 static int 9279 lpfc_els_rcv_edc(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 9280 struct lpfc_nodelist *ndlp) 9281 { 9282 struct lpfc_hba *phba = vport->phba; 9283 struct fc_els_edc *edc_req; 9284 struct fc_tlv_desc *tlv; 9285 uint8_t *payload; 9286 uint32_t *ptr, dtag; 9287 const char *dtag_nm; 9288 int desc_cnt = 0, bytes_remain; 9289 struct fc_diag_lnkflt_desc *plnkflt; 9290 9291 payload = cmdiocb->cmd_dmabuf->virt; 9292 9293 edc_req = (struct fc_els_edc *)payload; 9294 bytes_remain = be32_to_cpu(edc_req->desc_len); 9295 9296 ptr = (uint32_t *)payload; 9297 lpfc_printf_vlog(vport, KERN_INFO, 9298 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, 9299 "3319 Rcv EDC payload len %d: x%x x%x x%x\n", 9300 bytes_remain, be32_to_cpu(*ptr), 9301 be32_to_cpu(*(ptr + 1)), be32_to_cpu(*(ptr + 2))); 9302 9303 /* No signal support unless there is a congestion descriptor */ 9304 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 9305 phba->cgn_sig_freq = 0; 9306 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; 9307 9308 if (bytes_remain <= 0) 9309 goto out; 9310 9311 tlv = edc_req->desc; 9312 9313 /* 9314 * cycle through EDC diagnostic descriptors to find the 9315 * congestion signaling capability descriptor 9316 */ 9317 while (bytes_remain) { 9318 if (bytes_remain < FC_TLV_DESC_HDR_SZ) { 9319 lpfc_printf_log(phba, KERN_WARNING, 9320 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, 9321 "6464 Truncated TLV hdr on " 9322 "Diagnostic descriptor[%d]\n", 9323 desc_cnt); 9324 goto out; 9325 } 9326 9327 dtag = be32_to_cpu(tlv->desc_tag); 9328 switch (dtag) { 9329 case ELS_DTAG_LNK_FAULT_CAP: 9330 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 9331 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 9332 sizeof(struct fc_diag_lnkflt_desc)) { 9333 lpfc_printf_log(phba, KERN_WARNING, 9334 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, 9335 "6465 Truncated Link Fault Diagnostic " 9336 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 9337 desc_cnt, bytes_remain, 9338 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 9339 sizeof(struct fc_diag_lnkflt_desc)); 9340 goto out; 9341 } 9342 plnkflt = (struct fc_diag_lnkflt_desc *)tlv; 9343 lpfc_printf_log(phba, KERN_INFO, 9344 LOG_ELS | LOG_LDS_EVENT, 9345 "4626 Link Fault Desc Data: x%08x len x%x " 9346 "da x%x dd x%x interval x%x\n", 9347 be32_to_cpu(plnkflt->desc_tag), 9348 be32_to_cpu(plnkflt->desc_len), 9349 be32_to_cpu( 9350 plnkflt->degrade_activate_threshold), 9351 be32_to_cpu( 9352 plnkflt->degrade_deactivate_threshold), 9353 be32_to_cpu(plnkflt->fec_degrade_interval)); 9354 break; 9355 case ELS_DTAG_CG_SIGNAL_CAP: 9356 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 9357 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 9358 sizeof(struct fc_diag_cg_sig_desc)) { 9359 lpfc_printf_log( 9360 phba, KERN_WARNING, LOG_CGN_MGMT, 9361 "6466 Truncated cgn signal Diagnostic " 9362 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 9363 desc_cnt, bytes_remain, 9364 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 9365 sizeof(struct fc_diag_cg_sig_desc)); 9366 goto out; 9367 } 9368 9369 phba->cgn_reg_fpin = phba->cgn_init_reg_fpin; 9370 phba->cgn_reg_signal = phba->cgn_init_reg_signal; 9371 9372 /* We start negotiation with lpfc_fabric_cgn_frequency. 9373 * When we process the EDC, we will settle on the 9374 * higher frequency. 
9375 */ 9376 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency; 9377 9378 lpfc_least_capable_settings( 9379 phba, (struct fc_diag_cg_sig_desc *)tlv); 9380 break; 9381 default: 9382 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 9383 lpfc_printf_log(phba, KERN_WARNING, 9384 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, 9385 "6467 unknown Diagnostic " 9386 "Descriptor[%d]: tag x%x (%s)\n", 9387 desc_cnt, dtag, dtag_nm); 9388 } 9389 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); 9390 tlv = fc_tlv_next_desc(tlv); 9391 desc_cnt++; 9392 } 9393 out: 9394 /* Need to send back an ACC */ 9395 lpfc_issue_els_edc_rsp(vport, cmdiocb, ndlp); 9396 9397 lpfc_config_cgn_signal(phba); 9398 return 0; 9399 } 9400 9401 /** 9402 * lpfc_els_timeout - Handler funciton to the els timer 9403 * @t: timer context used to obtain the vport. 9404 * 9405 * This routine is invoked by the ELS timer after timeout. It posts the ELS 9406 * timer timeout event by setting the WORKER_ELS_TMO bit to the work port 9407 * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake 9408 * up the worker thread. It is for the worker thread to invoke the routine 9409 * lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO. 9410 **/ 9411 void 9412 lpfc_els_timeout(struct timer_list *t) 9413 { 9414 struct lpfc_vport *vport = from_timer(vport, t, els_tmofunc); 9415 struct lpfc_hba *phba = vport->phba; 9416 uint32_t tmo_posted; 9417 unsigned long iflag; 9418 9419 spin_lock_irqsave(&vport->work_port_lock, iflag); 9420 tmo_posted = vport->work_port_events & WORKER_ELS_TMO; 9421 if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING))) 9422 vport->work_port_events |= WORKER_ELS_TMO; 9423 spin_unlock_irqrestore(&vport->work_port_lock, iflag); 9424 9425 if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING))) 9426 lpfc_worker_wake_up(phba); 9427 return; 9428 } 9429 9430 9431 /** 9432 * lpfc_els_timeout_handler - Process an els timeout event 9433 * @vport: pointer to a virtual N_Port data structure. 9434 * 9435 * This routine is the actual handler function that processes an ELS timeout 9436 * event. It walks the ELS ring to get and abort all the IOCBs (except the 9437 * ABORT/CLOSE/FARP/FARPR/FDISC), which are associated with the @vport by 9438 * invoking the lpfc_sli_issue_abort_iotag() routine. 
9439 **/ 9440 void 9441 lpfc_els_timeout_handler(struct lpfc_vport *vport) 9442 { 9443 struct lpfc_hba *phba = vport->phba; 9444 struct lpfc_sli_ring *pring; 9445 struct lpfc_iocbq *tmp_iocb, *piocb; 9446 IOCB_t *cmd = NULL; 9447 struct lpfc_dmabuf *pcmd; 9448 uint32_t els_command = 0; 9449 uint32_t timeout; 9450 uint32_t remote_ID = 0xffffffff; 9451 LIST_HEAD(abort_list); 9452 u32 ulp_command = 0, ulp_context = 0, did = 0, iotag = 0; 9453 9454 9455 timeout = (uint32_t)(phba->fc_ratov << 1); 9456 9457 pring = lpfc_phba_elsring(phba); 9458 if (unlikely(!pring)) 9459 return; 9460 9461 if (phba->pport->load_flag & FC_UNLOADING) 9462 return; 9463 9464 spin_lock_irq(&phba->hbalock); 9465 if (phba->sli_rev == LPFC_SLI_REV4) 9466 spin_lock(&pring->ring_lock); 9467 9468 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 9469 ulp_command = get_job_cmnd(phba, piocb); 9470 ulp_context = get_job_ulpcontext(phba, piocb); 9471 did = get_job_els_rsp64_did(phba, piocb); 9472 9473 if (phba->sli_rev == LPFC_SLI_REV4) { 9474 iotag = get_wqe_reqtag(piocb); 9475 } else { 9476 cmd = &piocb->iocb; 9477 iotag = cmd->ulpIoTag; 9478 } 9479 9480 if ((piocb->cmd_flag & LPFC_IO_LIBDFC) != 0 || 9481 ulp_command == CMD_ABORT_XRI_CX || 9482 ulp_command == CMD_ABORT_XRI_CN || 9483 ulp_command == CMD_CLOSE_XRI_CN) 9484 continue; 9485 9486 if (piocb->vport != vport) 9487 continue; 9488 9489 pcmd = piocb->cmd_dmabuf; 9490 if (pcmd) 9491 els_command = *(uint32_t *) (pcmd->virt); 9492 9493 if (els_command == ELS_CMD_FARP || 9494 els_command == ELS_CMD_FARPR || 9495 els_command == ELS_CMD_FDISC) 9496 continue; 9497 9498 if (piocb->drvrTimeout > 0) { 9499 if (piocb->drvrTimeout >= timeout) 9500 piocb->drvrTimeout -= timeout; 9501 else 9502 piocb->drvrTimeout = 0; 9503 continue; 9504 } 9505 9506 remote_ID = 0xffffffff; 9507 if (ulp_command != CMD_GEN_REQUEST64_CR) { 9508 remote_ID = did; 9509 } else { 9510 struct lpfc_nodelist *ndlp; 9511 ndlp = __lpfc_findnode_rpi(vport, ulp_context); 9512 if (ndlp) 9513 remote_ID = ndlp->nlp_DID; 9514 } 9515 list_add_tail(&piocb->dlist, &abort_list); 9516 } 9517 if (phba->sli_rev == LPFC_SLI_REV4) 9518 spin_unlock(&pring->ring_lock); 9519 spin_unlock_irq(&phba->hbalock); 9520 9521 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { 9522 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9523 "0127 ELS timeout Data: x%x x%x x%x " 9524 "x%x\n", els_command, 9525 remote_ID, ulp_command, iotag); 9526 9527 spin_lock_irq(&phba->hbalock); 9528 list_del_init(&piocb->dlist); 9529 lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL); 9530 spin_unlock_irq(&phba->hbalock); 9531 } 9532 9533 /* Make sure HBA is alive */ 9534 lpfc_issue_hb_tmo(phba); 9535 9536 if (!list_empty(&pring->txcmplq)) 9537 if (!(phba->pport->load_flag & FC_UNLOADING)) 9538 mod_timer(&vport->els_tmofunc, 9539 jiffies + msecs_to_jiffies(1000 * timeout)); 9540 } 9541 9542 /** 9543 * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport 9544 * @vport: pointer to a host virtual N_Port data structure. 9545 * 9546 * This routine is used to clean up all the outstanding ELS commands on a 9547 * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport() 9548 * routine. After that, it walks the ELS transmit queue to remove all the 9549 * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. 
For 9550 * the IOCBs with a non-NULL completion callback function, the callback 9551 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and 9552 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion 9553 * callback function, the IOCB will simply be released. Finally, it walks 9554 * the ELS transmit completion queue to issue an abort IOCB to any transmit 9555 * completion queue IOCB that is associated with the @vport and is not 9556 * an IOCB from libdfc (i.e., the management plane IOCBs that are not 9557 * part of the discovery state machine) out to HBA by invoking the 9558 * lpfc_sli_issue_abort_iotag() routine. Note that this function issues the 9559 * abort IOCB to any transmit completion queueed IOCB, it does not guarantee 9560 * the IOCBs are aborted when this function returns. 9561 **/ 9562 void 9563 lpfc_els_flush_cmd(struct lpfc_vport *vport) 9564 { 9565 LIST_HEAD(abort_list); 9566 struct lpfc_hba *phba = vport->phba; 9567 struct lpfc_sli_ring *pring; 9568 struct lpfc_iocbq *tmp_iocb, *piocb; 9569 u32 ulp_command; 9570 unsigned long iflags = 0; 9571 9572 lpfc_fabric_abort_vport(vport); 9573 9574 /* 9575 * For SLI3, only the hbalock is required. But SLI4 needs to coordinate 9576 * with the ring insert operation. Because lpfc_sli_issue_abort_iotag 9577 * ultimately grabs the ring_lock, the driver must splice the list into 9578 * a working list and release the locks before calling the abort. 9579 */ 9580 spin_lock_irqsave(&phba->hbalock, iflags); 9581 pring = lpfc_phba_elsring(phba); 9582 9583 /* Bail out if we've no ELS wq, like in PCI error recovery case. */ 9584 if (unlikely(!pring)) { 9585 spin_unlock_irqrestore(&phba->hbalock, iflags); 9586 return; 9587 } 9588 9589 if (phba->sli_rev == LPFC_SLI_REV4) 9590 spin_lock(&pring->ring_lock); 9591 9592 /* First we need to issue aborts to outstanding cmds on txcmpl */ 9593 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 9594 if (piocb->cmd_flag & LPFC_IO_LIBDFC) 9595 continue; 9596 9597 if (piocb->vport != vport) 9598 continue; 9599 9600 if (piocb->cmd_flag & LPFC_DRIVER_ABORTED) 9601 continue; 9602 9603 /* On the ELS ring we can have ELS_REQUESTs or 9604 * GEN_REQUESTs waiting for a response. 9605 */ 9606 ulp_command = get_job_cmnd(phba, piocb); 9607 if (ulp_command == CMD_ELS_REQUEST64_CR) { 9608 list_add_tail(&piocb->dlist, &abort_list); 9609 9610 /* If the link is down when flushing ELS commands 9611 * the firmware will not complete them till after 9612 * the link comes back up. This may confuse 9613 * discovery for the new link up, so we need to 9614 * change the compl routine to just clean up the iocb 9615 * and avoid any retry logic. 9616 */ 9617 if (phba->link_state == LPFC_LINK_DOWN) 9618 piocb->cmd_cmpl = lpfc_cmpl_els_link_down; 9619 } 9620 if (ulp_command == CMD_GEN_REQUEST64_CR) 9621 list_add_tail(&piocb->dlist, &abort_list); 9622 } 9623 9624 if (phba->sli_rev == LPFC_SLI_REV4) 9625 spin_unlock(&pring->ring_lock); 9626 spin_unlock_irqrestore(&phba->hbalock, iflags); 9627 9628 /* Abort each txcmpl iocb on aborted list and remove the dlist links. 
*/ 9629 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { 9630 spin_lock_irqsave(&phba->hbalock, iflags); 9631 list_del_init(&piocb->dlist); 9632 lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL); 9633 spin_unlock_irqrestore(&phba->hbalock, iflags); 9634 } 9635 /* Make sure HBA is alive */ 9636 lpfc_issue_hb_tmo(phba); 9637 9638 if (!list_empty(&abort_list)) 9639 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9640 "3387 abort list for txq not empty\n"); 9641 INIT_LIST_HEAD(&abort_list); 9642 9643 spin_lock_irqsave(&phba->hbalock, iflags); 9644 if (phba->sli_rev == LPFC_SLI_REV4) 9645 spin_lock(&pring->ring_lock); 9646 9647 /* No need to abort the txq list, 9648 * just queue them up for lpfc_sli_cancel_iocbs 9649 */ 9650 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) { 9651 ulp_command = get_job_cmnd(phba, piocb); 9652 9653 if (piocb->cmd_flag & LPFC_IO_LIBDFC) 9654 continue; 9655 9656 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */ 9657 if (ulp_command == CMD_QUE_RING_BUF_CN || 9658 ulp_command == CMD_QUE_RING_BUF64_CN || 9659 ulp_command == CMD_CLOSE_XRI_CN || 9660 ulp_command == CMD_ABORT_XRI_CN || 9661 ulp_command == CMD_ABORT_XRI_CX) 9662 continue; 9663 9664 if (piocb->vport != vport) 9665 continue; 9666 9667 list_del_init(&piocb->list); 9668 list_add_tail(&piocb->list, &abort_list); 9669 } 9670 9671 /* The same holds true for any FLOGI/FDISC on the fabric_iocb_list */ 9672 if (vport == phba->pport) { 9673 list_for_each_entry_safe(piocb, tmp_iocb, 9674 &phba->fabric_iocb_list, list) { 9675 list_del_init(&piocb->list); 9676 list_add_tail(&piocb->list, &abort_list); 9677 } 9678 } 9679 9680 if (phba->sli_rev == LPFC_SLI_REV4) 9681 spin_unlock(&pring->ring_lock); 9682 spin_unlock_irqrestore(&phba->hbalock, iflags); 9683 9684 /* Cancel all the IOCBs from the completions list */ 9685 lpfc_sli_cancel_iocbs(phba, &abort_list, 9686 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); 9687 9688 return; 9689 } 9690 9691 /** 9692 * lpfc_els_flush_all_cmd - Clean up all the outstanding els commands to a HBA 9693 * @phba: pointer to lpfc hba data structure. 9694 * 9695 * This routine is used to clean up all the outstanding ELS commands on a 9696 * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba() 9697 * routine. After that, it walks the ELS transmit queue to remove all the 9698 * IOCBs to the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For 9699 * the IOCBs with the completion callback function associated, the callback 9700 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and 9701 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without the completion 9702 * callback function associated, the IOCB will simply be released. Finally, 9703 * it walks the ELS transmit completion queue to issue an abort IOCB to any 9704 * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the 9705 * management plane IOCBs that are not part of the discovery state machine) 9706 * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine. 9707 **/ 9708 void 9709 lpfc_els_flush_all_cmd(struct lpfc_hba *phba) 9710 { 9711 struct lpfc_vport *vport; 9712 9713 spin_lock_irq(&phba->port_list_lock); 9714 list_for_each_entry(vport, &phba->port_list, listentry) 9715 lpfc_els_flush_cmd(vport); 9716 spin_unlock_irq(&phba->port_list_lock); 9717 9718 return; 9719 } 9720 9721 /** 9722 * lpfc_send_els_failure_event - Posts an ELS command failure event 9723 * @phba: Pointer to hba context object. 
9724 * @cmdiocbp: Pointer to command iocb which reported error. 9725 * @rspiocbp: Pointer to response iocb which reported error. 9726 * 9727 * This function sends an event when there is an ELS command 9728 * failure. 9729 **/ 9730 void 9731 lpfc_send_els_failure_event(struct lpfc_hba *phba, 9732 struct lpfc_iocbq *cmdiocbp, 9733 struct lpfc_iocbq *rspiocbp) 9734 { 9735 struct lpfc_vport *vport = cmdiocbp->vport; 9736 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 9737 struct lpfc_lsrjt_event lsrjt_event; 9738 struct lpfc_fabric_event_header fabric_event; 9739 struct ls_rjt stat; 9740 struct lpfc_nodelist *ndlp; 9741 uint32_t *pcmd; 9742 u32 ulp_status, ulp_word4; 9743 9744 ndlp = cmdiocbp->ndlp; 9745 if (!ndlp) 9746 return; 9747 9748 ulp_status = get_job_ulpstatus(phba, rspiocbp); 9749 ulp_word4 = get_job_word4(phba, rspiocbp); 9750 9751 if (ulp_status == IOSTAT_LS_RJT) { 9752 lsrjt_event.header.event_type = FC_REG_ELS_EVENT; 9753 lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV; 9754 memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname, 9755 sizeof(struct lpfc_name)); 9756 memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename, 9757 sizeof(struct lpfc_name)); 9758 pcmd = (uint32_t *)cmdiocbp->cmd_dmabuf->virt; 9759 lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0; 9760 stat.un.ls_rjt_error_be = cpu_to_be32(ulp_word4); 9761 lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode; 9762 lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp; 9763 fc_host_post_vendor_event(shost, 9764 fc_get_event_number(), 9765 sizeof(lsrjt_event), 9766 (char *)&lsrjt_event, 9767 LPFC_NL_VENDOR_ID); 9768 return; 9769 } 9770 if (ulp_status == IOSTAT_NPORT_BSY || 9771 ulp_status == IOSTAT_FABRIC_BSY) { 9772 fabric_event.event_type = FC_REG_FABRIC_EVENT; 9773 if (ulp_status == IOSTAT_NPORT_BSY) 9774 fabric_event.subcategory = LPFC_EVENT_PORT_BUSY; 9775 else 9776 fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY; 9777 memcpy(fabric_event.wwpn, &ndlp->nlp_portname, 9778 sizeof(struct lpfc_name)); 9779 memcpy(fabric_event.wwnn, &ndlp->nlp_nodename, 9780 sizeof(struct lpfc_name)); 9781 fc_host_post_vendor_event(shost, 9782 fc_get_event_number(), 9783 sizeof(fabric_event), 9784 (char *)&fabric_event, 9785 LPFC_NL_VENDOR_ID); 9786 return; 9787 } 9788 9789 } 9790 9791 /** 9792 * lpfc_send_els_event - Posts unsolicited els event 9793 * @vport: Pointer to vport object. 9794 * @ndlp: Pointer FC node object. 9795 * @payload: ELS command code type. 9796 * 9797 * This function posts an event when there is an incoming 9798 * unsolicited ELS command. 
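 *
 * Only PLOGI, PRLO, ADISC and LOGO are reported; any other command frees
 * the allocation and returns. LOGO uses the larger lpfc_logo_event so the
 * WWPN carried in the LOGO payload can be reported alongside the node's
 * WWPN and WWNN.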
9799 **/ 9800 static void 9801 lpfc_send_els_event(struct lpfc_vport *vport, 9802 struct lpfc_nodelist *ndlp, 9803 uint32_t *payload) 9804 { 9805 struct lpfc_els_event_header *els_data = NULL; 9806 struct lpfc_logo_event *logo_data = NULL; 9807 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 9808 9809 if (*payload == ELS_CMD_LOGO) { 9810 logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL); 9811 if (!logo_data) { 9812 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9813 "0148 Failed to allocate memory " 9814 "for LOGO event\n"); 9815 return; 9816 } 9817 els_data = &logo_data->header; 9818 } else { 9819 els_data = kmalloc(sizeof(struct lpfc_els_event_header), 9820 GFP_KERNEL); 9821 if (!els_data) { 9822 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9823 "0149 Failed to allocate memory " 9824 "for ELS event\n"); 9825 return; 9826 } 9827 } 9828 els_data->event_type = FC_REG_ELS_EVENT; 9829 switch (*payload) { 9830 case ELS_CMD_PLOGI: 9831 els_data->subcategory = LPFC_EVENT_PLOGI_RCV; 9832 break; 9833 case ELS_CMD_PRLO: 9834 els_data->subcategory = LPFC_EVENT_PRLO_RCV; 9835 break; 9836 case ELS_CMD_ADISC: 9837 els_data->subcategory = LPFC_EVENT_ADISC_RCV; 9838 break; 9839 case ELS_CMD_LOGO: 9840 els_data->subcategory = LPFC_EVENT_LOGO_RCV; 9841 /* Copy the WWPN in the LOGO payload */ 9842 memcpy(logo_data->logo_wwpn, &payload[2], 9843 sizeof(struct lpfc_name)); 9844 break; 9845 default: 9846 kfree(els_data); 9847 return; 9848 } 9849 memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name)); 9850 memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name)); 9851 if (*payload == ELS_CMD_LOGO) { 9852 fc_host_post_vendor_event(shost, 9853 fc_get_event_number(), 9854 sizeof(struct lpfc_logo_event), 9855 (char *)logo_data, 9856 LPFC_NL_VENDOR_ID); 9857 kfree(logo_data); 9858 } else { 9859 fc_host_post_vendor_event(shost, 9860 fc_get_event_number(), 9861 sizeof(struct lpfc_els_event_header), 9862 (char *)els_data, 9863 LPFC_NL_VENDOR_ID); 9864 kfree(els_data); 9865 } 9866 9867 return; 9868 } 9869 9870 9871 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_li_event_nm, fc_fpin_li_event_types, 9872 FC_FPIN_LI_EVT_TYPES_INIT); 9873 9874 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_deli_event_nm, fc_fpin_deli_event_types, 9875 FC_FPIN_DELI_EVT_TYPES_INIT); 9876 9877 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_event_nm, fc_fpin_congn_event_types, 9878 FC_FPIN_CONGN_EVT_TYPES_INIT); 9879 9880 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_severity_nm, 9881 fc_fpin_congn_severity_types, 9882 FC_FPIN_CONGN_SEVERITY_INIT); 9883 9884 9885 /** 9886 * lpfc_display_fpin_wwpn - Display WWPNs accessible by the attached port 9887 * @phba: Pointer to phba object. 9888 * @wwnlist: Pointer to list of WWPNs in FPIN payload 9889 * @cnt: count of WWPNs in FPIN payload 9890 * 9891 * This routine is called by LI and PC descriptors. 
9892 * Limit the number of WWPNs displayed to 6 log messages, 6 per log message 9893 */ 9894 static void 9895 lpfc_display_fpin_wwpn(struct lpfc_hba *phba, __be64 *wwnlist, u32 cnt) 9896 { 9897 char buf[LPFC_FPIN_WWPN_LINE_SZ]; 9898 __be64 wwn; 9899 u64 wwpn; 9900 int i, len; 9901 int line = 0; 9902 int wcnt = 0; 9903 bool endit = false; 9904 9905 len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ, "Accessible WWPNs:"); 9906 for (i = 0; i < cnt; i++) { 9907 /* Are we on the last WWPN */ 9908 if (i == (cnt - 1)) 9909 endit = true; 9910 9911 /* Extract the next WWPN from the payload */ 9912 wwn = *wwnlist++; 9913 wwpn = be64_to_cpu(wwn); 9914 len += scnprintf(buf + len, LPFC_FPIN_WWPN_LINE_SZ - len, 9915 " %016llx", wwpn); 9916 9917 /* Log a message if we are on the last WWPN 9918 * or if we hit the max allowed per message. 9919 */ 9920 wcnt++; 9921 if (wcnt == LPFC_FPIN_WWPN_LINE_CNT || endit) { 9922 buf[len] = 0; 9923 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9924 "4686 %s\n", buf); 9925 9926 /* Check if we reached the last WWPN */ 9927 if (endit) 9928 return; 9929 9930 /* Limit the number of log message displayed per FPIN */ 9931 line++; 9932 if (line == LPFC_FPIN_WWPN_NUM_LINE) { 9933 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9934 "4687 %d WWPNs Truncated\n", 9935 cnt - i - 1); 9936 return; 9937 } 9938 9939 /* Start over with next log message */ 9940 wcnt = 0; 9941 len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ, 9942 "Additional WWPNs:"); 9943 } 9944 } 9945 } 9946 9947 /** 9948 * lpfc_els_rcv_fpin_li - Process an FPIN Link Integrity Event. 9949 * @phba: Pointer to phba object. 9950 * @tlv: Pointer to the Link Integrity Notification Descriptor. 9951 * 9952 * This function processes a Link Integrity FPIN event by logging a message. 9953 **/ 9954 static void 9955 lpfc_els_rcv_fpin_li(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 9956 { 9957 struct fc_fn_li_desc *li = (struct fc_fn_li_desc *)tlv; 9958 const char *li_evt_str; 9959 u32 li_evt, cnt; 9960 9961 li_evt = be16_to_cpu(li->event_type); 9962 li_evt_str = lpfc_get_fpin_li_event_nm(li_evt); 9963 cnt = be32_to_cpu(li->pname_count); 9964 9965 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9966 "4680 FPIN Link Integrity %s (x%x) " 9967 "Detecting PN x%016llx Attached PN x%016llx " 9968 "Duration %d mSecs Count %d Port Cnt %d\n", 9969 li_evt_str, li_evt, 9970 be64_to_cpu(li->detecting_wwpn), 9971 be64_to_cpu(li->attached_wwpn), 9972 be32_to_cpu(li->event_threshold), 9973 be32_to_cpu(li->event_count), cnt); 9974 9975 lpfc_display_fpin_wwpn(phba, (__be64 *)&li->pname_list, cnt); 9976 } 9977 9978 /** 9979 * lpfc_els_rcv_fpin_del - Process an FPIN Delivery Event. 9980 * @phba: Pointer to hba object. 9981 * @tlv: Pointer to the Delivery Notification Descriptor TLV 9982 * 9983 * This function processes a Delivery FPIN event by logging a message. 
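 *
 * The delivery reason code is decoded to a name, and the six words that
 * follow the descriptor (logged as DiscHdr0-DiscHdr5) are dumped along
 * with the detecting and attached port names.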
9984 **/ 9985 static void 9986 lpfc_els_rcv_fpin_del(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 9987 { 9988 struct fc_fn_deli_desc *del = (struct fc_fn_deli_desc *)tlv; 9989 const char *del_rsn_str; 9990 u32 del_rsn; 9991 __be32 *frame; 9992 9993 del_rsn = be16_to_cpu(del->deli_reason_code); 9994 del_rsn_str = lpfc_get_fpin_deli_event_nm(del_rsn); 9995 9996 /* Skip over desc_tag/desc_len header to payload */ 9997 frame = (__be32 *)(del + 1); 9998 9999 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 10000 "4681 FPIN Delivery %s (x%x) " 10001 "Detecting PN x%016llx Attached PN x%016llx " 10002 "DiscHdr0 x%08x " 10003 "DiscHdr1 x%08x DiscHdr2 x%08x DiscHdr3 x%08x " 10004 "DiscHdr4 x%08x DiscHdr5 x%08x\n", 10005 del_rsn_str, del_rsn, 10006 be64_to_cpu(del->detecting_wwpn), 10007 be64_to_cpu(del->attached_wwpn), 10008 be32_to_cpu(frame[0]), 10009 be32_to_cpu(frame[1]), 10010 be32_to_cpu(frame[2]), 10011 be32_to_cpu(frame[3]), 10012 be32_to_cpu(frame[4]), 10013 be32_to_cpu(frame[5])); 10014 } 10015 10016 /** 10017 * lpfc_els_rcv_fpin_peer_cgn - Process a FPIN Peer Congestion Event. 10018 * @phba: Pointer to hba object. 10019 * @tlv: Pointer to the Peer Congestion Notification Descriptor TLV 10020 * 10021 * This function processes a Peer Congestion FPIN event by logging a message. 10022 **/ 10023 static void 10024 lpfc_els_rcv_fpin_peer_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 10025 { 10026 struct fc_fn_peer_congn_desc *pc = (struct fc_fn_peer_congn_desc *)tlv; 10027 const char *pc_evt_str; 10028 u32 pc_evt, cnt; 10029 10030 pc_evt = be16_to_cpu(pc->event_type); 10031 pc_evt_str = lpfc_get_fpin_congn_event_nm(pc_evt); 10032 cnt = be32_to_cpu(pc->pname_count); 10033 10034 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_ELS, 10035 "4684 FPIN Peer Congestion %s (x%x) " 10036 "Duration %d mSecs " 10037 "Detecting PN x%016llx Attached PN x%016llx " 10038 "Impacted Port Cnt %d\n", 10039 pc_evt_str, pc_evt, 10040 be32_to_cpu(pc->event_period), 10041 be64_to_cpu(pc->detecting_wwpn), 10042 be64_to_cpu(pc->attached_wwpn), 10043 cnt); 10044 10045 lpfc_display_fpin_wwpn(phba, (__be64 *)&pc->pname_list, cnt); 10046 } 10047 10048 /** 10049 * lpfc_els_rcv_fpin_cgn - Process an FPIN Congestion notification 10050 * @phba: Pointer to hba object. 10051 * @tlv: Pointer to the Congestion Notification Descriptor TLV 10052 * 10053 * This function processes an FPIN Congestion Notifiction. The notification 10054 * could be an Alarm or Warning. This routine feeds that data into driver's 10055 * running congestion algorithm. It also processes the FPIN by 10056 * logging a message. It returns 1 to indicate deliver this message 10057 * to the upper layer or 0 to indicate don't deliver it. 10058 **/ 10059 static int 10060 lpfc_els_rcv_fpin_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 10061 { 10062 struct lpfc_cgn_info *cp; 10063 struct fc_fn_congn_desc *cgn = (struct fc_fn_congn_desc *)tlv; 10064 const char *cgn_evt_str; 10065 u32 cgn_evt; 10066 const char *cgn_sev_str; 10067 u32 cgn_sev; 10068 uint16_t value; 10069 u32 crc; 10070 bool nm_log = false; 10071 int rc = 1; 10072 10073 cgn_evt = be16_to_cpu(cgn->event_type); 10074 cgn_evt_str = lpfc_get_fpin_congn_event_nm(cgn_evt); 10075 cgn_sev = cgn->severity; 10076 cgn_sev_str = lpfc_get_fpin_congn_severity_nm(cgn_sev); 10077 10078 /* The driver only takes action on a Credit Stall or Oversubscription 10079 * event type to engage the IO algorithm. The driver prints an 10080 * unmaskable message only for Lost Credit and Credit Stall. 
10081 * TODO: Still need to have definition of host action on clear, 10082 * lost credit and device specific event types. 10083 */ 10084 switch (cgn_evt) { 10085 case FPIN_CONGN_LOST_CREDIT: 10086 nm_log = true; 10087 break; 10088 case FPIN_CONGN_CREDIT_STALL: 10089 nm_log = true; 10090 fallthrough; 10091 case FPIN_CONGN_OVERSUBSCRIPTION: 10092 if (cgn_evt == FPIN_CONGN_OVERSUBSCRIPTION) 10093 nm_log = false; 10094 switch (cgn_sev) { 10095 case FPIN_CONGN_SEVERITY_ERROR: 10096 /* Take action here for an Alarm event */ 10097 if (phba->cmf_active_mode != LPFC_CFG_OFF) { 10098 if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) { 10099 /* Track of alarm cnt for SYNC_WQE */ 10100 atomic_inc(&phba->cgn_sync_alarm_cnt); 10101 } 10102 /* Track alarm cnt for cgn_info regardless 10103 * of whether CMF is configured for Signals 10104 * or FPINs. 10105 */ 10106 atomic_inc(&phba->cgn_fabric_alarm_cnt); 10107 goto cleanup; 10108 } 10109 break; 10110 case FPIN_CONGN_SEVERITY_WARNING: 10111 /* Take action here for a Warning event */ 10112 if (phba->cmf_active_mode != LPFC_CFG_OFF) { 10113 if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) { 10114 /* Track of warning cnt for SYNC_WQE */ 10115 atomic_inc(&phba->cgn_sync_warn_cnt); 10116 } 10117 /* Track warning cnt and freq for cgn_info 10118 * regardless of whether CMF is configured for 10119 * Signals or FPINs. 10120 */ 10121 atomic_inc(&phba->cgn_fabric_warn_cnt); 10122 cleanup: 10123 /* Save frequency in ms */ 10124 phba->cgn_fpin_frequency = 10125 be32_to_cpu(cgn->event_period); 10126 value = phba->cgn_fpin_frequency; 10127 if (phba->cgn_i) { 10128 cp = (struct lpfc_cgn_info *) 10129 phba->cgn_i->virt; 10130 cp->cgn_alarm_freq = 10131 cpu_to_le16(value); 10132 cp->cgn_warn_freq = 10133 cpu_to_le16(value); 10134 crc = lpfc_cgn_calc_crc32 10135 (cp, 10136 LPFC_CGN_INFO_SZ, 10137 LPFC_CGN_CRC32_SEED); 10138 cp->cgn_info_crc = cpu_to_le32(crc); 10139 } 10140 10141 /* Don't deliver to upper layer since 10142 * driver took action on this tlv. 10143 */ 10144 rc = 0; 10145 } 10146 break; 10147 } 10148 break; 10149 } 10150 10151 /* Change the log level to unmaskable for the following event types. */ 10152 lpfc_printf_log(phba, (nm_log ? KERN_WARNING : KERN_INFO), 10153 LOG_CGN_MGMT | LOG_ELS, 10154 "4683 FPIN CONGESTION %s type %s (x%x) Event " 10155 "Duration %d mSecs\n", 10156 cgn_sev_str, cgn_evt_str, cgn_evt, 10157 be32_to_cpu(cgn->event_period)); 10158 return rc; 10159 } 10160 10161 void 10162 lpfc_els_rcv_fpin(struct lpfc_vport *vport, void *p, u32 fpin_length) 10163 { 10164 struct lpfc_hba *phba = vport->phba; 10165 struct fc_els_fpin *fpin = (struct fc_els_fpin *)p; 10166 struct fc_tlv_desc *tlv, *first_tlv, *current_tlv; 10167 const char *dtag_nm; 10168 int desc_cnt = 0, bytes_remain, cnt; 10169 u32 dtag, deliver = 0; 10170 int len; 10171 10172 /* FPINs handled only if we are in the right discovery state */ 10173 if (vport->port_state < LPFC_DISC_AUTH) 10174 return; 10175 10176 /* make sure there is the full fpin header */ 10177 if (fpin_length < sizeof(struct fc_els_fpin)) 10178 return; 10179 10180 /* Sanity check descriptor length. The desc_len value does not 10181 * include space for the ELS command and the desc_len fields. 
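 * In other words, desc_len covers the descriptor list in full (each
 * descriptor's tag/length header plus its payload), so the frame must
 * supply at least desc_len + sizeof(struct fc_els_fpin) bytes, which is
 * what the length check below enforces.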
10182 */ 10183 len = be32_to_cpu(fpin->desc_len); 10184 if (fpin_length < len + sizeof(struct fc_els_fpin)) { 10185 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 10186 "4671 Bad ELS FPIN length %d: %d\n", 10187 len, fpin_length); 10188 return; 10189 } 10190 10191 tlv = (struct fc_tlv_desc *)&fpin->fpin_desc[0]; 10192 first_tlv = tlv; 10193 bytes_remain = fpin_length - offsetof(struct fc_els_fpin, fpin_desc); 10194 bytes_remain = min_t(u32, bytes_remain, be32_to_cpu(fpin->desc_len)); 10195 10196 /* process each descriptor separately */ 10197 while (bytes_remain >= FC_TLV_DESC_HDR_SZ && 10198 bytes_remain >= FC_TLV_DESC_SZ_FROM_LENGTH(tlv)) { 10199 dtag = be32_to_cpu(tlv->desc_tag); 10200 switch (dtag) { 10201 case ELS_DTAG_LNK_INTEGRITY: 10202 lpfc_els_rcv_fpin_li(phba, tlv); 10203 deliver = 1; 10204 break; 10205 case ELS_DTAG_DELIVERY: 10206 lpfc_els_rcv_fpin_del(phba, tlv); 10207 deliver = 1; 10208 break; 10209 case ELS_DTAG_PEER_CONGEST: 10210 lpfc_els_rcv_fpin_peer_cgn(phba, tlv); 10211 deliver = 1; 10212 break; 10213 case ELS_DTAG_CONGESTION: 10214 deliver = lpfc_els_rcv_fpin_cgn(phba, tlv); 10215 break; 10216 default: 10217 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 10218 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 10219 "4678 unknown FPIN descriptor[%d]: " 10220 "tag x%x (%s)\n", 10221 desc_cnt, dtag, dtag_nm); 10222 10223 /* If descriptor is bad, drop the rest of the data */ 10224 return; 10225 } 10226 lpfc_cgn_update_stat(phba, dtag); 10227 cnt = be32_to_cpu(tlv->desc_len); 10228 10229 /* Sanity check descriptor length. The desc_len value does not 10230 * include space for the desc_tag and the desc_len fields. 10231 */ 10232 len -= (cnt + sizeof(struct fc_tlv_desc)); 10233 if (len < 0) { 10234 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 10235 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 10236 "4672 Bad FPIN descriptor TLV length " 10237 "%d: %d %d %s\n", 10238 cnt, len, fpin_length, dtag_nm); 10239 return; 10240 } 10241 10242 current_tlv = tlv; 10243 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); 10244 tlv = fc_tlv_next_desc(tlv); 10245 10246 /* Format payload such that the FPIN delivered to the 10247 * upper layer is a single descriptor FPIN. 10248 */ 10249 if (desc_cnt) 10250 memcpy(first_tlv, current_tlv, 10251 (cnt + sizeof(struct fc_els_fpin))); 10252 10253 /* Adjust the length so that it only reflects a 10254 * single descriptor FPIN. 10255 */ 10256 fpin_length = cnt + sizeof(struct fc_els_fpin); 10257 fpin->desc_len = cpu_to_be32(fpin_length); 10258 fpin_length += sizeof(struct fc_els_fpin); /* the entire FPIN */ 10259 10260 /* Send every descriptor individually to the upper layer */ 10261 if (deliver) 10262 fc_host_fpin_rcv(lpfc_shost_from_vport(vport), 10263 fpin_length, (char *)fpin); 10264 desc_cnt++; 10265 } 10266 } 10267 10268 /** 10269 * lpfc_els_unsol_buffer - Process an unsolicited event data buffer 10270 * @phba: pointer to lpfc hba data structure. 10271 * @pring: pointer to a SLI ring. 10272 * @vport: pointer to a host virtual N_Port data structure. 10273 * @elsiocb: pointer to lpfc els command iocb data structure. 10274 * 10275 * This routine is used for processing the IOCB associated with a unsolicited 10276 * event. It first determines whether there is an existing ndlp that matches 10277 * the DID from the unsolicited IOCB. If not, it will create a new one with 10278 * the DID from the unsolicited IOCB. 
The ELS command from the unsolicited 10279 * IOCB is then used to invoke the proper routine and to set up proper state 10280 * of the discovery state machine. 10281 **/ 10282 static void 10283 lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10284 struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb) 10285 { 10286 struct lpfc_nodelist *ndlp; 10287 struct ls_rjt stat; 10288 u32 *payload, payload_len; 10289 u32 cmd = 0, did = 0, newnode, status = 0; 10290 uint8_t rjt_exp, rjt_err = 0, init_link = 0; 10291 struct lpfc_wcqe_complete *wcqe_cmpl = NULL; 10292 LPFC_MBOXQ_t *mbox; 10293 10294 if (!vport || !elsiocb->cmd_dmabuf) 10295 goto dropit; 10296 10297 newnode = 0; 10298 wcqe_cmpl = &elsiocb->wcqe_cmpl; 10299 payload = elsiocb->cmd_dmabuf->virt; 10300 if (phba->sli_rev == LPFC_SLI_REV4) 10301 payload_len = wcqe_cmpl->total_data_placed; 10302 else 10303 payload_len = elsiocb->iocb.unsli3.rcvsli3.acc_len; 10304 status = get_job_ulpstatus(phba, elsiocb); 10305 cmd = *payload; 10306 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0) 10307 lpfc_sli3_post_buffer(phba, pring, 1); 10308 10309 did = get_job_els_rsp64_did(phba, elsiocb); 10310 if (status) { 10311 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10312 "RCV Unsol ELS: status:x%x/x%x did:x%x", 10313 status, get_job_word4(phba, elsiocb), did); 10314 goto dropit; 10315 } 10316 10317 /* Check to see if link went down during discovery */ 10318 if (lpfc_els_chk_latt(vport)) 10319 goto dropit; 10320 10321 /* Ignore traffic received during vport shutdown. */ 10322 if (vport->load_flag & FC_UNLOADING) 10323 goto dropit; 10324 10325 /* If NPort discovery is delayed drop incoming ELS */ 10326 if ((vport->fc_flag & FC_DISC_DELAYED) && 10327 (cmd != ELS_CMD_PLOGI)) 10328 goto dropit; 10329 10330 ndlp = lpfc_findnode_did(vport, did); 10331 if (!ndlp) { 10332 /* Cannot find existing Fabric ndlp, so allocate a new one */ 10333 ndlp = lpfc_nlp_init(vport, did); 10334 if (!ndlp) 10335 goto dropit; 10336 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 10337 newnode = 1; 10338 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 10339 ndlp->nlp_type |= NLP_FABRIC; 10340 } else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 10341 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 10342 newnode = 1; 10343 } 10344 10345 phba->fc_stat.elsRcvFrame++; 10346 10347 /* 10348 * Do not process any unsolicited ELS commands 10349 * if the ndlp is in DEV_LOSS 10350 */ 10351 spin_lock_irq(&ndlp->lock); 10352 if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) { 10353 spin_unlock_irq(&ndlp->lock); 10354 if (newnode) 10355 lpfc_nlp_put(ndlp); 10356 goto dropit; 10357 } 10358 spin_unlock_irq(&ndlp->lock); 10359 10360 elsiocb->ndlp = lpfc_nlp_get(ndlp); 10361 if (!elsiocb->ndlp) 10362 goto dropit; 10363 elsiocb->vport = vport; 10364 10365 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) { 10366 cmd &= ELS_CMD_MASK; 10367 } 10368 /* ELS command <elsCmd> received from NPORT <did> */ 10369 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 10370 "0112 ELS command x%x received from NPORT x%x " 10371 "refcnt %d Data: x%x x%x x%x x%x\n", 10372 cmd, did, kref_read(&ndlp->kref), vport->port_state, 10373 vport->fc_flag, vport->fc_myDID, vport->fc_prevDID); 10374 10375 /* reject till our FLOGI completes or PLOGI assigned DID via PT2PT */ 10376 if ((vport->port_state < LPFC_FABRIC_CFG_LINK) && 10377 (cmd != ELS_CMD_FLOGI) && 10378 !((cmd == ELS_CMD_PLOGI) && (vport->fc_flag & FC_PT2PT))) { 10379 rjt_err = LSRJT_LOGICAL_BSY; 10380 rjt_exp = LSEXP_NOTHING_MORE; 10381 goto lsrjt; 10382 
} 10383 10384 switch (cmd) { 10385 case ELS_CMD_PLOGI: 10386 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10387 "RCV PLOGI: did:x%x/ste:x%x flg:x%x", 10388 did, vport->port_state, ndlp->nlp_flag); 10389 10390 phba->fc_stat.elsRcvPLOGI++; 10391 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); 10392 if (phba->sli_rev == LPFC_SLI_REV4 && 10393 (phba->pport->fc_flag & FC_PT2PT)) { 10394 vport->fc_prevDID = vport->fc_myDID; 10395 /* Our DID needs to be updated before registering 10396 * the vfi. This is done in lpfc_rcv_plogi but 10397 * that is called after the reg_vfi. 10398 */ 10399 vport->fc_myDID = 10400 bf_get(els_rsp64_sid, 10401 &elsiocb->wqe.xmit_els_rsp); 10402 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 10403 "3312 Remote port assigned DID x%x " 10404 "%x\n", vport->fc_myDID, 10405 vport->fc_prevDID); 10406 } 10407 10408 lpfc_send_els_event(vport, ndlp, payload); 10409 10410 /* If NPort discovery is delayed, reject PLOGIs */ 10411 if (vport->fc_flag & FC_DISC_DELAYED) { 10412 rjt_err = LSRJT_UNABLE_TPC; 10413 rjt_exp = LSEXP_NOTHING_MORE; 10414 break; 10415 } 10416 10417 if (vport->port_state < LPFC_DISC_AUTH) { 10418 if (!(phba->pport->fc_flag & FC_PT2PT) || 10419 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { 10420 rjt_err = LSRJT_UNABLE_TPC; 10421 rjt_exp = LSEXP_NOTHING_MORE; 10422 break; 10423 } 10424 } 10425 10426 spin_lock_irq(&ndlp->lock); 10427 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE; 10428 spin_unlock_irq(&ndlp->lock); 10429 10430 lpfc_disc_state_machine(vport, ndlp, elsiocb, 10431 NLP_EVT_RCV_PLOGI); 10432 10433 break; 10434 case ELS_CMD_FLOGI: 10435 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10436 "RCV FLOGI: did:x%x/ste:x%x flg:x%x", 10437 did, vport->port_state, ndlp->nlp_flag); 10438 10439 phba->fc_stat.elsRcvFLOGI++; 10440 10441 /* If the driver believes fabric discovery is done and is ready, 10442 * bounce the link. There is some discrepancy.
10443 */ 10444 if (vport->port_state >= LPFC_LOCAL_CFG_LINK && 10445 vport->fc_flag & FC_PT2PT && 10446 vport->rcv_flogi_cnt >= 1) { 10447 rjt_err = LSRJT_LOGICAL_BSY; 10448 rjt_exp = LSEXP_NOTHING_MORE; 10449 init_link++; 10450 goto lsrjt; 10451 } 10452 10453 lpfc_els_rcv_flogi(vport, elsiocb, ndlp); 10454 /* retain node if our response is deferred */ 10455 if (phba->defer_flogi_acc_flag) 10456 break; 10457 if (newnode) 10458 lpfc_disc_state_machine(vport, ndlp, NULL, 10459 NLP_EVT_DEVICE_RM); 10460 break; 10461 case ELS_CMD_LOGO: 10462 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10463 "RCV LOGO: did:x%x/ste:x%x flg:x%x", 10464 did, vport->port_state, ndlp->nlp_flag); 10465 10466 phba->fc_stat.elsRcvLOGO++; 10467 lpfc_send_els_event(vport, ndlp, payload); 10468 if (vport->port_state < LPFC_DISC_AUTH) { 10469 rjt_err = LSRJT_UNABLE_TPC; 10470 rjt_exp = LSEXP_NOTHING_MORE; 10471 break; 10472 } 10473 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO); 10474 if (newnode) 10475 lpfc_disc_state_machine(vport, ndlp, NULL, 10476 NLP_EVT_DEVICE_RM); 10477 break; 10478 case ELS_CMD_PRLO: 10479 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10480 "RCV PRLO: did:x%x/ste:x%x flg:x%x", 10481 did, vport->port_state, ndlp->nlp_flag); 10482 10483 phba->fc_stat.elsRcvPRLO++; 10484 lpfc_send_els_event(vport, ndlp, payload); 10485 if (vport->port_state < LPFC_DISC_AUTH) { 10486 rjt_err = LSRJT_UNABLE_TPC; 10487 rjt_exp = LSEXP_NOTHING_MORE; 10488 break; 10489 } 10490 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO); 10491 break; 10492 case ELS_CMD_LCB: 10493 phba->fc_stat.elsRcvLCB++; 10494 lpfc_els_rcv_lcb(vport, elsiocb, ndlp); 10495 break; 10496 case ELS_CMD_RDP: 10497 phba->fc_stat.elsRcvRDP++; 10498 lpfc_els_rcv_rdp(vport, elsiocb, ndlp); 10499 break; 10500 case ELS_CMD_RSCN: 10501 phba->fc_stat.elsRcvRSCN++; 10502 lpfc_els_rcv_rscn(vport, elsiocb, ndlp); 10503 if (newnode) 10504 lpfc_disc_state_machine(vport, ndlp, NULL, 10505 NLP_EVT_DEVICE_RM); 10506 break; 10507 case ELS_CMD_ADISC: 10508 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10509 "RCV ADISC: did:x%x/ste:x%x flg:x%x", 10510 did, vport->port_state, ndlp->nlp_flag); 10511 10512 lpfc_send_els_event(vport, ndlp, payload); 10513 phba->fc_stat.elsRcvADISC++; 10514 if (vport->port_state < LPFC_DISC_AUTH) { 10515 rjt_err = LSRJT_UNABLE_TPC; 10516 rjt_exp = LSEXP_NOTHING_MORE; 10517 break; 10518 } 10519 lpfc_disc_state_machine(vport, ndlp, elsiocb, 10520 NLP_EVT_RCV_ADISC); 10521 break; 10522 case ELS_CMD_PDISC: 10523 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10524 "RCV PDISC: did:x%x/ste:x%x flg:x%x", 10525 did, vport->port_state, ndlp->nlp_flag); 10526 10527 phba->fc_stat.elsRcvPDISC++; 10528 if (vport->port_state < LPFC_DISC_AUTH) { 10529 rjt_err = LSRJT_UNABLE_TPC; 10530 rjt_exp = LSEXP_NOTHING_MORE; 10531 break; 10532 } 10533 lpfc_disc_state_machine(vport, ndlp, elsiocb, 10534 NLP_EVT_RCV_PDISC); 10535 break; 10536 case ELS_CMD_FARPR: 10537 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10538 "RCV FARPR: did:x%x/ste:x%x flg:x%x", 10539 did, vport->port_state, ndlp->nlp_flag); 10540 10541 phba->fc_stat.elsRcvFARPR++; 10542 lpfc_els_rcv_farpr(vport, elsiocb, ndlp); 10543 break; 10544 case ELS_CMD_FARP: 10545 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10546 "RCV FARP: did:x%x/ste:x%x flg:x%x", 10547 did, vport->port_state, ndlp->nlp_flag); 10548 10549 phba->fc_stat.elsRcvFARP++; 10550 lpfc_els_rcv_farp(vport, elsiocb, ndlp); 10551 break; 10552 case ELS_CMD_FAN: 10553 
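/* Fabric Address Notification (FAN, see FC-LS): sent by the fabric after a loop initialization so that already logged-in ports can confirm the fabric identity is unchanged and keep their existing logins rather than redo FLOGI; the frame is processed by lpfc_els_rcv_fan() below. */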
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10554 "RCV FAN: did:x%x/ste:x%x flg:x%x", 10555 did, vport->port_state, ndlp->nlp_flag); 10556 10557 phba->fc_stat.elsRcvFAN++; 10558 lpfc_els_rcv_fan(vport, elsiocb, ndlp); 10559 break; 10560 case ELS_CMD_PRLI: 10561 case ELS_CMD_NVMEPRLI: 10562 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10563 "RCV PRLI: did:x%x/ste:x%x flg:x%x", 10564 did, vport->port_state, ndlp->nlp_flag); 10565 10566 phba->fc_stat.elsRcvPRLI++; 10567 if ((vport->port_state < LPFC_DISC_AUTH) && 10568 (vport->fc_flag & FC_FABRIC)) { 10569 rjt_err = LSRJT_UNABLE_TPC; 10570 rjt_exp = LSEXP_NOTHING_MORE; 10571 break; 10572 } 10573 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI); 10574 break; 10575 case ELS_CMD_LIRR: 10576 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10577 "RCV LIRR: did:x%x/ste:x%x flg:x%x", 10578 did, vport->port_state, ndlp->nlp_flag); 10579 10580 phba->fc_stat.elsRcvLIRR++; 10581 lpfc_els_rcv_lirr(vport, elsiocb, ndlp); 10582 if (newnode) 10583 lpfc_disc_state_machine(vport, ndlp, NULL, 10584 NLP_EVT_DEVICE_RM); 10585 break; 10586 case ELS_CMD_RLS: 10587 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10588 "RCV RLS: did:x%x/ste:x%x flg:x%x", 10589 did, vport->port_state, ndlp->nlp_flag); 10590 10591 phba->fc_stat.elsRcvRLS++; 10592 lpfc_els_rcv_rls(vport, elsiocb, ndlp); 10593 if (newnode) 10594 lpfc_disc_state_machine(vport, ndlp, NULL, 10595 NLP_EVT_DEVICE_RM); 10596 break; 10597 case ELS_CMD_RPL: 10598 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10599 "RCV RPL: did:x%x/ste:x%x flg:x%x", 10600 did, vport->port_state, ndlp->nlp_flag); 10601 10602 phba->fc_stat.elsRcvRPL++; 10603 lpfc_els_rcv_rpl(vport, elsiocb, ndlp); 10604 if (newnode) 10605 lpfc_disc_state_machine(vport, ndlp, NULL, 10606 NLP_EVT_DEVICE_RM); 10607 break; 10608 case ELS_CMD_RNID: 10609 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10610 "RCV RNID: did:x%x/ste:x%x flg:x%x", 10611 did, vport->port_state, ndlp->nlp_flag); 10612 10613 phba->fc_stat.elsRcvRNID++; 10614 lpfc_els_rcv_rnid(vport, elsiocb, ndlp); 10615 if (newnode) 10616 lpfc_disc_state_machine(vport, ndlp, NULL, 10617 NLP_EVT_DEVICE_RM); 10618 break; 10619 case ELS_CMD_RTV: 10620 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10621 "RCV RTV: did:x%x/ste:x%x flg:x%x", 10622 did, vport->port_state, ndlp->nlp_flag); 10623 phba->fc_stat.elsRcvRTV++; 10624 lpfc_els_rcv_rtv(vport, elsiocb, ndlp); 10625 if (newnode) 10626 lpfc_disc_state_machine(vport, ndlp, NULL, 10627 NLP_EVT_DEVICE_RM); 10628 break; 10629 case ELS_CMD_RRQ: 10630 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10631 "RCV RRQ: did:x%x/ste:x%x flg:x%x", 10632 did, vport->port_state, ndlp->nlp_flag); 10633 10634 phba->fc_stat.elsRcvRRQ++; 10635 lpfc_els_rcv_rrq(vport, elsiocb, ndlp); 10636 if (newnode) 10637 lpfc_disc_state_machine(vport, ndlp, NULL, 10638 NLP_EVT_DEVICE_RM); 10639 break; 10640 case ELS_CMD_ECHO: 10641 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10642 "RCV ECHO: did:x%x/ste:x%x flg:x%x", 10643 did, vport->port_state, ndlp->nlp_flag); 10644 10645 phba->fc_stat.elsRcvECHO++; 10646 lpfc_els_rcv_echo(vport, elsiocb, ndlp); 10647 if (newnode) 10648 lpfc_disc_state_machine(vport, ndlp, NULL, 10649 NLP_EVT_DEVICE_RM); 10650 break; 10651 case ELS_CMD_REC: 10652 /* receive this due to exchange closed */ 10653 rjt_err = LSRJT_UNABLE_TPC; 10654 rjt_exp = LSEXP_INVALID_OX_RX; 10655 break; 10656 case ELS_CMD_FPIN: 10657 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 
10658 "RCV FPIN: did:x%x/ste:x%x flg:x%x", 10659 did, vport->port_state, ndlp->nlp_flag); 10660 10661 lpfc_els_rcv_fpin(vport, (struct fc_els_fpin *)payload, 10662 payload_len); 10663 10664 /* There are no replies, so no rjt codes */ 10665 break; 10666 case ELS_CMD_EDC: 10667 lpfc_els_rcv_edc(vport, elsiocb, ndlp); 10668 break; 10669 case ELS_CMD_RDF: 10670 phba->fc_stat.elsRcvRDF++; 10671 /* Accept RDF only from fabric controller */ 10672 if (did != Fabric_Cntl_DID) { 10673 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 10674 "1115 Received RDF from invalid DID " 10675 "x%x\n", did); 10676 rjt_err = LSRJT_PROTOCOL_ERR; 10677 rjt_exp = LSEXP_NOTHING_MORE; 10678 goto lsrjt; 10679 } 10680 10681 lpfc_els_rcv_rdf(vport, elsiocb, ndlp); 10682 break; 10683 default: 10684 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10685 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x", 10686 cmd, did, vport->port_state); 10687 10688 /* Unsupported ELS command, reject */ 10689 rjt_err = LSRJT_CMD_UNSUPPORTED; 10690 rjt_exp = LSEXP_NOTHING_MORE; 10691 10692 /* Unknown ELS command <elsCmd> received from NPORT <did> */ 10693 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10694 "0115 Unknown ELS command x%x " 10695 "received from NPORT x%x\n", cmd, did); 10696 if (newnode) 10697 lpfc_disc_state_machine(vport, ndlp, NULL, 10698 NLP_EVT_DEVICE_RM); 10699 break; 10700 } 10701 10702 lsrjt: 10703 /* check if need to LS_RJT received ELS cmd */ 10704 if (rjt_err) { 10705 memset(&stat, 0, sizeof(stat)); 10706 stat.un.b.lsRjtRsnCode = rjt_err; 10707 stat.un.b.lsRjtRsnCodeExp = rjt_exp; 10708 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp, 10709 NULL); 10710 /* Remove the reference from above for new nodes. */ 10711 if (newnode) 10712 lpfc_disc_state_machine(vport, ndlp, NULL, 10713 NLP_EVT_DEVICE_RM); 10714 } 10715 10716 /* Release the reference on this elsiocb, not the ndlp. */ 10717 lpfc_nlp_put(elsiocb->ndlp); 10718 elsiocb->ndlp = NULL; 10719 10720 /* Special case. Driver received an unsolicited command that 10721 * unsupportable given the driver's current state. Reset the 10722 * link and start over. 10723 */ 10724 if (init_link) { 10725 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10726 if (!mbox) 10727 return; 10728 lpfc_linkdown(phba); 10729 lpfc_init_link(phba, mbox, 10730 phba->cfg_topology, 10731 phba->cfg_link_speed); 10732 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; 10733 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 10734 mbox->vport = vport; 10735 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == 10736 MBX_NOT_FINISHED) 10737 mempool_free(mbox, phba->mbox_mem_pool); 10738 } 10739 10740 return; 10741 10742 dropit: 10743 if (vport && !(vport->load_flag & FC_UNLOADING)) 10744 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10745 "0111 Dropping received ELS cmd " 10746 "Data: x%x x%x x%x x%x\n", 10747 cmd, status, get_job_word4(phba, elsiocb), did); 10748 10749 phba->fc_stat.elsRcvDrop++; 10750 } 10751 10752 /** 10753 * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring 10754 * @phba: pointer to lpfc hba data structure. 10755 * @pring: pointer to a SLI ring. 10756 * @elsiocb: pointer to lpfc els iocb data structure. 10757 * 10758 * This routine is used to process an unsolicited event received from a SLI 10759 * (Service Level Interface) ring. 
The actual processing of the data buffer 10760 * associated with the unsolicited event is done by invoking the routine 10761 * lpfc_els_unsol_buffer() after properly set up the iocb buffer from the 10762 * SLI ring on which the unsolicited event was received. 10763 **/ 10764 void 10765 lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10766 struct lpfc_iocbq *elsiocb) 10767 { 10768 struct lpfc_vport *vport = elsiocb->vport; 10769 u32 ulp_command, status, parameter, bde_count = 0; 10770 IOCB_t *icmd; 10771 struct lpfc_wcqe_complete *wcqe_cmpl = NULL; 10772 struct lpfc_dmabuf *bdeBuf1 = elsiocb->cmd_dmabuf; 10773 struct lpfc_dmabuf *bdeBuf2 = elsiocb->bpl_dmabuf; 10774 dma_addr_t paddr; 10775 10776 elsiocb->cmd_dmabuf = NULL; 10777 elsiocb->rsp_dmabuf = NULL; 10778 elsiocb->bpl_dmabuf = NULL; 10779 10780 wcqe_cmpl = &elsiocb->wcqe_cmpl; 10781 ulp_command = get_job_cmnd(phba, elsiocb); 10782 status = get_job_ulpstatus(phba, elsiocb); 10783 parameter = get_job_word4(phba, elsiocb); 10784 if (phba->sli_rev == LPFC_SLI_REV4) 10785 bde_count = wcqe_cmpl->word3; 10786 else 10787 bde_count = elsiocb->iocb.ulpBdeCount; 10788 10789 if (status == IOSTAT_NEED_BUFFER) { 10790 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); 10791 } else if (status == IOSTAT_LOCAL_REJECT && 10792 (parameter & IOERR_PARAM_MASK) == 10793 IOERR_RCV_BUFFER_WAITING) { 10794 phba->fc_stat.NoRcvBuf++; 10795 /* Not enough posted buffers; Try posting more buffers */ 10796 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) 10797 lpfc_sli3_post_buffer(phba, pring, 0); 10798 return; 10799 } 10800 10801 if (phba->sli_rev == LPFC_SLI_REV3) { 10802 icmd = &elsiocb->iocb; 10803 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 10804 (ulp_command == CMD_IOCB_RCV_ELS64_CX || 10805 ulp_command == CMD_IOCB_RCV_SEQ64_CX)) { 10806 if (icmd->unsli3.rcvsli3.vpi == 0xffff) 10807 vport = phba->pport; 10808 else 10809 vport = lpfc_find_vport_by_vpid(phba, 10810 icmd->unsli3.rcvsli3.vpi); 10811 } 10812 } 10813 10814 /* If there are no BDEs associated 10815 * with this IOCB, there is nothing to do. 10816 */ 10817 if (bde_count == 0) 10818 return; 10819 10820 /* Account for SLI2 or SLI3 and later unsolicited buffering */ 10821 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 10822 elsiocb->cmd_dmabuf = bdeBuf1; 10823 if (bde_count == 2) 10824 elsiocb->bpl_dmabuf = bdeBuf2; 10825 } else { 10826 icmd = &elsiocb->iocb; 10827 paddr = getPaddr(icmd->un.cont64[0].addrHigh, 10828 icmd->un.cont64[0].addrLow); 10829 elsiocb->cmd_dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, 10830 paddr); 10831 if (bde_count == 2) { 10832 paddr = getPaddr(icmd->un.cont64[1].addrHigh, 10833 icmd->un.cont64[1].addrLow); 10834 elsiocb->bpl_dmabuf = lpfc_sli_ringpostbuf_get(phba, 10835 pring, 10836 paddr); 10837 } 10838 } 10839 10840 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb); 10841 /* 10842 * The different unsolicited event handlers would tell us 10843 * if they are done with "mp" by setting cmd_dmabuf to NULL. 10844 */ 10845 if (elsiocb->cmd_dmabuf) { 10846 lpfc_in_buf_free(phba, elsiocb->cmd_dmabuf); 10847 elsiocb->cmd_dmabuf = NULL; 10848 } 10849 10850 if (elsiocb->bpl_dmabuf) { 10851 lpfc_in_buf_free(phba, elsiocb->bpl_dmabuf); 10852 elsiocb->bpl_dmabuf = NULL; 10853 } 10854 10855 } 10856 10857 static void 10858 lpfc_start_fdmi(struct lpfc_vport *vport) 10859 { 10860 struct lpfc_nodelist *ndlp; 10861 10862 /* If this is the first time, allocate an ndlp and initialize 10863 * it. 
Otherwise, make sure the node is enabled and then do the 10864 * login. 10865 */ 10866 ndlp = lpfc_findnode_did(vport, FDMI_DID); 10867 if (!ndlp) { 10868 ndlp = lpfc_nlp_init(vport, FDMI_DID); 10869 if (ndlp) { 10870 ndlp->nlp_type |= NLP_FABRIC; 10871 } else { 10872 return; 10873 } 10874 } 10875 10876 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 10877 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 10878 } 10879 10880 /** 10881 * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr 10882 * @phba: pointer to lpfc hba data structure. 10883 * @vport: pointer to a virtual N_Port data structure. 10884 * 10885 * This routine issues a Port Login (PLOGI) to the Name Server with 10886 * State Change Request (SCR) for a @vport. This routine will create an 10887 * ndlp for the Name Server associated with the @vport if such a node does 10888 * not already exist. The PLOGI to Name Server is issued by invoking the 10889 * lpfc_issue_els_plogi() routine. If Fabric-Device Management Interface 10890 * (FDMI) is configured for the @vport, an FDMI node will be created and 10891 * the PLOGI to FDMI is issued by invoking the lpfc_issue_els_plogi() routine. 10892 **/ 10893 void 10894 lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) 10895 { 10896 struct lpfc_nodelist *ndlp; 10897 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 10898 10899 /* 10900 * If lpfc_delay_discovery parameter is set and the clean address 10901 * bit is cleared and fc fabric parameters changed, delay FC NPort 10902 * discovery. 10903 */ 10904 spin_lock_irq(shost->host_lock); 10905 if (vport->fc_flag & FC_DISC_DELAYED) { 10906 spin_unlock_irq(shost->host_lock); 10907 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10908 "3334 Delay fc port discovery for %d secs\n", 10909 phba->fc_ratov); 10910 mod_timer(&vport->delayed_disc_tmo, 10911 jiffies + msecs_to_jiffies(1000 * phba->fc_ratov)); 10912 return; 10913 } 10914 spin_unlock_irq(shost->host_lock); 10915 10916 ndlp = lpfc_findnode_did(vport, NameServer_DID); 10917 if (!ndlp) { 10918 ndlp = lpfc_nlp_init(vport, NameServer_DID); 10919 if (!ndlp) { 10920 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 10921 lpfc_disc_start(vport); 10922 return; 10923 } 10924 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10925 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10926 "0251 NameServer login: no memory\n"); 10927 return; 10928 } 10929 } 10930 10931 ndlp->nlp_type |= NLP_FABRIC; 10932 10933 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 10934 10935 if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) { 10936 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10937 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10938 "0252 Cannot issue NameServer login\n"); 10939 return; 10940 } 10941 10942 if ((phba->cfg_enable_SmartSAN || 10943 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) && 10944 (vport->load_flag & FC_ALLOW_FDMI)) 10945 lpfc_start_fdmi(vport); 10946 } 10947 10948 /** 10949 * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport 10950 * @phba: pointer to lpfc hba data structure. 10951 * @pmb: pointer to the driver internal queue element for mailbox command. 10952 * 10953 * This routine is the completion callback function for the register new vport 10954 * mailbox command.
If the new vport mailbox command completes successfully, 10955 * the fabric registration login shall be performed on physical port (the 10956 * new vport created is actually a physical port, with VPI 0) or the port 10957 * login to Name Server for State Change Request (SCR) will be performed 10958 * on virtual port (real virtual port, with VPI greater than 0). 10959 **/ 10960 static void 10961 lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 10962 { 10963 struct lpfc_vport *vport = pmb->vport; 10964 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 10965 struct lpfc_nodelist *ndlp = pmb->ctx_ndlp; 10966 MAILBOX_t *mb = &pmb->u.mb; 10967 int rc; 10968 10969 spin_lock_irq(shost->host_lock); 10970 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 10971 spin_unlock_irq(shost->host_lock); 10972 10973 if (mb->mbxStatus) { 10974 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10975 "0915 Register VPI failed : Status: x%x" 10976 " upd bit: x%x \n", mb->mbxStatus, 10977 mb->un.varRegVpi.upd); 10978 if (phba->sli_rev == LPFC_SLI_REV4 && 10979 mb->un.varRegVpi.upd) 10980 goto mbox_err_exit ; 10981 10982 switch (mb->mbxStatus) { 10983 case 0x11: /* unsupported feature */ 10984 case 0x9603: /* max_vpi exceeded */ 10985 case 0x9602: /* Link event since CLEAR_LA */ 10986 /* giving up on vport registration */ 10987 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10988 spin_lock_irq(shost->host_lock); 10989 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 10990 spin_unlock_irq(shost->host_lock); 10991 lpfc_can_disctmo(vport); 10992 break; 10993 /* If reg_vpi fail with invalid VPI status, re-init VPI */ 10994 case 0x20: 10995 spin_lock_irq(shost->host_lock); 10996 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 10997 spin_unlock_irq(shost->host_lock); 10998 lpfc_init_vpi(phba, pmb, vport->vpi); 10999 pmb->vport = vport; 11000 pmb->mbox_cmpl = lpfc_init_vpi_cmpl; 11001 rc = lpfc_sli_issue_mbox(phba, pmb, 11002 MBX_NOWAIT); 11003 if (rc == MBX_NOT_FINISHED) { 11004 lpfc_printf_vlog(vport, KERN_ERR, 11005 LOG_TRACE_EVENT, 11006 "2732 Failed to issue INIT_VPI" 11007 " mailbox command\n"); 11008 } else { 11009 lpfc_nlp_put(ndlp); 11010 return; 11011 } 11012 fallthrough; 11013 default: 11014 /* Try to recover from this error */ 11015 if (phba->sli_rev == LPFC_SLI_REV4) 11016 lpfc_sli4_unreg_all_rpis(vport); 11017 lpfc_mbx_unreg_vpi(vport); 11018 spin_lock_irq(shost->host_lock); 11019 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 11020 spin_unlock_irq(shost->host_lock); 11021 if (mb->mbxStatus == MBX_NOT_FINISHED) 11022 break; 11023 if ((vport->port_type == LPFC_PHYSICAL_PORT) && 11024 !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) { 11025 if (phba->sli_rev == LPFC_SLI_REV4) 11026 lpfc_issue_init_vfi(vport); 11027 else 11028 lpfc_initial_flogi(vport); 11029 } else { 11030 lpfc_initial_fdisc(vport); 11031 } 11032 break; 11033 } 11034 } else { 11035 spin_lock_irq(shost->host_lock); 11036 vport->vpi_state |= LPFC_VPI_REGISTERED; 11037 spin_unlock_irq(shost->host_lock); 11038 if (vport == phba->pport) { 11039 if (phba->sli_rev < LPFC_SLI_REV4) 11040 lpfc_issue_fabric_reglogin(vport); 11041 else { 11042 /* 11043 * If the physical port is instantiated using 11044 * FDISC, do not start vport discovery. 
11045 */ 11046 if (vport->port_state != LPFC_FDISC) 11047 lpfc_start_fdiscs(phba); 11048 lpfc_do_scr_ns_plogi(phba, vport); 11049 } 11050 } else { 11051 lpfc_do_scr_ns_plogi(phba, vport); 11052 } 11053 } 11054 mbox_err_exit: 11055 /* Now, we decrement the ndlp reference count held for this 11056 * callback function 11057 */ 11058 lpfc_nlp_put(ndlp); 11059 11060 mempool_free(pmb, phba->mbox_mem_pool); 11061 return; 11062 } 11063 11064 /** 11065 * lpfc_register_new_vport - Register a new vport with a HBA 11066 * @phba: pointer to lpfc hba data structure. 11067 * @vport: pointer to a host virtual N_Port data structure. 11068 * @ndlp: pointer to a node-list data structure. 11069 * 11070 * This routine registers the @vport as a new virtual port with a HBA. 11071 * It is done through a registering vpi mailbox command. 11072 **/ 11073 void 11074 lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport, 11075 struct lpfc_nodelist *ndlp) 11076 { 11077 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 11078 LPFC_MBOXQ_t *mbox; 11079 11080 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 11081 if (mbox) { 11082 lpfc_reg_vpi(vport, mbox); 11083 mbox->vport = vport; 11084 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 11085 if (!mbox->ctx_ndlp) { 11086 mempool_free(mbox, phba->mbox_mem_pool); 11087 goto mbox_err_exit; 11088 } 11089 11090 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport; 11091 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 11092 == MBX_NOT_FINISHED) { 11093 /* mailbox command not success, decrement ndlp 11094 * reference count for this command 11095 */ 11096 lpfc_nlp_put(ndlp); 11097 mempool_free(mbox, phba->mbox_mem_pool); 11098 11099 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 11100 "0253 Register VPI: Can't send mbox\n"); 11101 goto mbox_err_exit; 11102 } 11103 } else { 11104 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 11105 "0254 Register VPI: no memory\n"); 11106 goto mbox_err_exit; 11107 } 11108 return; 11109 11110 mbox_err_exit: 11111 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 11112 spin_lock_irq(shost->host_lock); 11113 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 11114 spin_unlock_irq(shost->host_lock); 11115 return; 11116 } 11117 11118 /** 11119 * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer 11120 * @phba: pointer to lpfc hba data structure. 11121 * 11122 * This routine cancels the retry delay timers to all the vports. 11123 **/ 11124 void 11125 lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba) 11126 { 11127 struct lpfc_vport **vports; 11128 struct lpfc_nodelist *ndlp; 11129 uint32_t link_state; 11130 int i; 11131 11132 /* Treat this failure as linkdown for all vports */ 11133 link_state = phba->link_state; 11134 lpfc_linkdown(phba); 11135 phba->link_state = link_state; 11136 11137 vports = lpfc_create_vport_work_array(phba); 11138 11139 if (vports) { 11140 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 11141 ndlp = lpfc_findnode_did(vports[i], Fabric_DID); 11142 if (ndlp) 11143 lpfc_cancel_retry_delay_tmo(vports[i], ndlp); 11144 lpfc_els_flush_cmd(vports[i]); 11145 } 11146 lpfc_destroy_vport_work_array(phba, vports); 11147 } 11148 } 11149 11150 /** 11151 * lpfc_retry_pport_discovery - Start timer to retry FLOGI. 11152 * @phba: pointer to lpfc hba data structure. 11153 * 11154 * This routine abort all pending discovery commands and 11155 * start a timer to retry FLOGI for the physical port 11156 * discovery. 
11157 **/ 11158 void 11159 lpfc_retry_pport_discovery(struct lpfc_hba *phba) 11160 { 11161 struct lpfc_nodelist *ndlp; 11162 11163 /* Cancel all vports' retry delay timers */ 11164 lpfc_cancel_all_vport_retry_delay_timer(phba); 11165 11166 /* If the fabric requires FLOGI, then re-instantiate the physical login */ 11167 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); 11168 if (!ndlp) 11169 return; 11170 11171 mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); 11172 spin_lock_irq(&ndlp->lock); 11173 ndlp->nlp_flag |= NLP_DELAY_TMO; 11174 spin_unlock_irq(&ndlp->lock); 11175 ndlp->nlp_last_elscmd = ELS_CMD_FLOGI; 11176 phba->pport->port_state = LPFC_FLOGI; 11177 return; 11178 } 11179 11180 /** 11181 * lpfc_fabric_login_reqd - Check if FLOGI required. 11182 * @phba: pointer to lpfc hba data structure. 11183 * @cmdiocb: pointer to FDISC command iocb. 11184 * @rspiocb: pointer to FDISC response iocb. 11185 * 11186 * This routine checks if a FLOGI is required for FDISC 11187 * to succeed. 11188 **/ 11189 static int 11190 lpfc_fabric_login_reqd(struct lpfc_hba *phba, 11191 struct lpfc_iocbq *cmdiocb, 11192 struct lpfc_iocbq *rspiocb) 11193 { 11194 u32 ulp_status = get_job_ulpstatus(phba, rspiocb); 11195 u32 ulp_word4 = get_job_word4(phba, rspiocb); 11196 11197 if (ulp_status != IOSTAT_FABRIC_RJT || 11198 ulp_word4 != RJT_LOGIN_REQUIRED) 11199 return 0; 11200 else 11201 return 1; 11202 } 11203 11204 /** 11205 * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command 11206 * @phba: pointer to lpfc hba data structure. 11207 * @cmdiocb: pointer to lpfc command iocb data structure. 11208 * @rspiocb: pointer to lpfc response iocb data structure. 11209 * 11210 * This routine is the completion callback function to a Fabric Discover 11211 * (FDISC) ELS command. Since all the FDISC ELS commands are issued 11212 * single threaded, each FDISC completion callback function will reset 11213 * the discovery timer for all vports such that the timers will not 11214 * time out unnecessarily. The function checks the FDISC IOCB status. If an error 11215 * is detected, the vport will be set to the FC_VPORT_FAILED state. Otherwise, the 11216 * vport will be set to the FC_VPORT_ACTIVE state. It then checks whether the DID 11217 * assigned to the vport has been changed with the completion of the FDISC 11218 * command. If so, both RPI (Remote Port Index) and VPI (Virtual Port Index) 11219 * are unregistered from the HBA, and then the lpfc_register_new_vport() 11220 * routine is invoked to register the new vport with the HBA. Otherwise, the 11221 * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name 11222 * Server for State Change Request (SCR). 11223 **/ 11224 static void 11225 lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11226 struct lpfc_iocbq *rspiocb) 11227 { 11228 struct lpfc_vport *vport = cmdiocb->vport; 11229 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 11230 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 11231 struct lpfc_nodelist *np; 11232 struct lpfc_nodelist *next_np; 11233 struct lpfc_iocbq *piocb; 11234 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf, *prsp; 11235 struct serv_parm *sp; 11236 uint8_t fabric_param_changed; 11237 u32 ulp_status, ulp_word4; 11238 11239 ulp_status = get_job_ulpstatus(phba, rspiocb); 11240 ulp_word4 = get_job_word4(phba, rspiocb); 11241 11242 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 11243 "0123 FDISC completes.
x%x/x%x prevDID: x%x\n", 11244 ulp_status, ulp_word4, 11245 vport->fc_prevDID); 11246 /* Since all FDISCs are being single threaded, we 11247 * must reset the discovery timer for ALL vports 11248 * waiting to send FDISC when one completes. 11249 */ 11250 list_for_each_entry(piocb, &phba->fabric_iocb_list, list) { 11251 lpfc_set_disctmo(piocb->vport); 11252 } 11253 11254 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 11255 "FDISC cmpl: status:x%x/x%x prevdid:x%x", 11256 ulp_status, ulp_word4, vport->fc_prevDID); 11257 11258 if (ulp_status) { 11259 11260 if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) { 11261 lpfc_retry_pport_discovery(phba); 11262 goto out; 11263 } 11264 11265 /* Check for retry */ 11266 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) 11267 goto out; 11268 /* FDISC failed */ 11269 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 11270 "0126 FDISC failed. (x%x/x%x)\n", 11271 ulp_status, ulp_word4); 11272 goto fdisc_failed; 11273 } 11274 11275 lpfc_check_nlp_post_devloss(vport, ndlp); 11276 11277 spin_lock_irq(shost->host_lock); 11278 vport->fc_flag &= ~FC_VPORT_CVL_RCVD; 11279 vport->fc_flag &= ~FC_VPORT_LOGO_RCVD; 11280 vport->fc_flag |= FC_FABRIC; 11281 if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP) 11282 vport->fc_flag |= FC_PUBLIC_LOOP; 11283 spin_unlock_irq(shost->host_lock); 11284 11285 vport->fc_myDID = ulp_word4 & Mask_DID; 11286 lpfc_vport_set_state(vport, FC_VPORT_ACTIVE); 11287 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 11288 if (!prsp) 11289 goto out; 11290 sp = prsp->virt + sizeof(uint32_t); 11291 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp); 11292 memcpy(&vport->fabric_portname, &sp->portName, 11293 sizeof(struct lpfc_name)); 11294 memcpy(&vport->fabric_nodename, &sp->nodeName, 11295 sizeof(struct lpfc_name)); 11296 if (fabric_param_changed && 11297 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 11298 /* If our NportID changed, we need to ensure all 11299 * remaining NPORTs get unreg_login'ed so we can 11300 * issue unreg_vpi. 11301 */ 11302 list_for_each_entry_safe(np, next_np, 11303 &vport->fc_nodes, nlp_listp) { 11304 if ((np->nlp_state != NLP_STE_NPR_NODE) || 11305 !(np->nlp_flag & NLP_NPR_ADISC)) 11306 continue; 11307 spin_lock_irq(&ndlp->lock); 11308 np->nlp_flag &= ~NLP_NPR_ADISC; 11309 spin_unlock_irq(&ndlp->lock); 11310 lpfc_unreg_rpi(vport, np); 11311 } 11312 lpfc_cleanup_pending_mbox(vport); 11313 11314 if (phba->sli_rev == LPFC_SLI_REV4) 11315 lpfc_sli4_unreg_all_rpis(vport); 11316 11317 lpfc_mbx_unreg_vpi(vport); 11318 spin_lock_irq(shost->host_lock); 11319 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 11320 if (phba->sli_rev == LPFC_SLI_REV4) 11321 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 11322 else 11323 vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG; 11324 spin_unlock_irq(shost->host_lock); 11325 } else if ((phba->sli_rev == LPFC_SLI_REV4) && 11326 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 11327 /* 11328 * Driver needs to re-reg VPI in order for f/w 11329 * to update the MAC address. 11330 */ 11331 lpfc_register_new_vport(phba, vport, ndlp); 11332 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 11333 goto out; 11334 } 11335 11336 if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI) 11337 lpfc_issue_init_vpi(vport); 11338 else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) 11339 lpfc_register_new_vport(phba, vport, ndlp); 11340 else 11341 lpfc_do_scr_ns_plogi(phba, vport); 11342 11343 /* The FDISC completed successfully. Move the fabric ndlp to 11344 * UNMAPPED state and register with the transport. 
11345 */ 11346 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 11347 goto out; 11348 11349 fdisc_failed: 11350 if (vport->fc_vport && 11351 (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS)) 11352 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 11353 /* Cancel discovery timer */ 11354 lpfc_can_disctmo(vport); 11355 out: 11356 lpfc_els_free_iocb(phba, cmdiocb); 11357 lpfc_nlp_put(ndlp); 11358 } 11359 11360 /** 11361 * lpfc_issue_els_fdisc - Issue a fdisc iocb command 11362 * @vport: pointer to a virtual N_Port data structure. 11363 * @ndlp: pointer to a node-list data structure. 11364 * @retry: number of retries to the command IOCB. 11365 * 11366 * This routine prepares and issues a Fabric Discover (FDISC) IOCB to 11367 * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb() 11368 * routine to issue the IOCB, which makes sure only one outstanding fabric 11369 * IOCB will be sent off HBA at any given time. 11370 * 11371 * Note that the ndlp reference count will be incremented by 1 for holding the 11372 * ndlp and the reference to ndlp will be stored into the ndlp field of 11373 * the IOCB for the completion callback function to the FDISC ELS command. 11374 * 11375 * Return code 11376 * 0 - Successfully issued fdisc iocb command 11377 * 1 - Failed to issue fdisc iocb command 11378 **/ 11379 static int 11380 lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 11381 uint8_t retry) 11382 { 11383 struct lpfc_hba *phba = vport->phba; 11384 IOCB_t *icmd; 11385 union lpfc_wqe128 *wqe = NULL; 11386 struct lpfc_iocbq *elsiocb; 11387 struct serv_parm *sp; 11388 uint8_t *pcmd; 11389 uint16_t cmdsize; 11390 int did = ndlp->nlp_DID; 11391 int rc; 11392 11393 vport->port_state = LPFC_FDISC; 11394 vport->fc_myDID = 0; 11395 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 11396 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 11397 ELS_CMD_FDISC); 11398 if (!elsiocb) { 11399 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 11400 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 11401 "0255 Issue FDISC: no IOCB\n"); 11402 return 1; 11403 } 11404 11405 if (phba->sli_rev == LPFC_SLI_REV4) { 11406 wqe = &elsiocb->wqe; 11407 bf_set(els_req64_sid, &wqe->els_req, 0); 11408 bf_set(els_req64_sp, &wqe->els_req, 1); 11409 } else { 11410 icmd = &elsiocb->iocb; 11411 icmd->un.elsreq64.myID = 0; 11412 icmd->un.elsreq64.fl = 1; 11413 icmd->ulpCt_h = 1; 11414 icmd->ulpCt_l = 0; 11415 } 11416 11417 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 11418 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC; 11419 pcmd += sizeof(uint32_t); /* CSP Word 1 */ 11420 memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm)); 11421 sp = (struct serv_parm *) pcmd; 11422 /* Setup CSPs accordingly for Fabric */ 11423 sp->cmn.e_d_tov = 0; 11424 sp->cmn.w2.r_a_tov = 0; 11425 sp->cmn.virtual_fabric_support = 0; 11426 sp->cls1.classValid = 0; 11427 sp->cls2.seqDelivery = 1; 11428 sp->cls3.seqDelivery = 1; 11429 11430 pcmd += sizeof(uint32_t); /* CSP Word 2 */ 11431 pcmd += sizeof(uint32_t); /* CSP Word 3 */ 11432 pcmd += sizeof(uint32_t); /* CSP Word 4 */ 11433 pcmd += sizeof(uint32_t); /* Port Name */ 11434 memcpy(pcmd, &vport->fc_portname, 8); 11435 pcmd += sizeof(uint32_t); /* Node Name */ 11436 pcmd += sizeof(uint32_t); /* Node Name */ 11437 memcpy(pcmd, &vport->fc_nodename, 8); 11438 sp->cmn.valid_vendor_ver_level = 0; 11439 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); 11440 lpfc_set_disctmo(vport); 11441 11442 phba->fc_stat.elsXmitFDISC++; 
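/* Completion is routed through lpfc_cmpl_els_fdisc(); the request itself goes out via lpfc_issue_fabric_iocb() below, which allows only one fabric ELS (FLOGI/FDISC) to be outstanding on the wire at a time. */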
11443 elsiocb->cmd_cmpl = lpfc_cmpl_els_fdisc; 11444 11445 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 11446 "Issue FDISC: did:x%x", 11447 did, 0, 0); 11448 11449 elsiocb->ndlp = lpfc_nlp_get(ndlp); 11450 if (!elsiocb->ndlp) 11451 goto err_out; 11452 11453 rc = lpfc_issue_fabric_iocb(phba, elsiocb); 11454 if (rc == IOCB_ERROR) { 11455 lpfc_nlp_put(ndlp); 11456 goto err_out; 11457 } 11458 11459 lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING); 11460 return 0; 11461 11462 err_out: 11463 lpfc_els_free_iocb(phba, elsiocb); 11464 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 11465 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 11466 "0256 Issue FDISC: Cannot send IOCB\n"); 11467 return 1; 11468 } 11469 11470 /** 11471 * lpfc_cmpl_els_npiv_logo - Completion function with vport logo 11472 * @phba: pointer to lpfc hba data structure. 11473 * @cmdiocb: pointer to lpfc command iocb data structure. 11474 * @rspiocb: pointer to lpfc response iocb data structure. 11475 * 11476 * This routine is the completion callback function to the issuing of a LOGO 11477 * ELS command off a vport. It frees the command IOCB and then decrement the 11478 * reference count held on ndlp for this completion function, indicating that 11479 * the reference to the ndlp is no long needed. Note that the 11480 * lpfc_els_free_iocb() routine decrements the ndlp reference held for this 11481 * callback function and an additional explicit ndlp reference decrementation 11482 * will trigger the actual release of the ndlp. 11483 **/ 11484 static void 11485 lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11486 struct lpfc_iocbq *rspiocb) 11487 { 11488 struct lpfc_vport *vport = cmdiocb->vport; 11489 IOCB_t *irsp; 11490 struct lpfc_nodelist *ndlp; 11491 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 11492 u32 ulp_status, ulp_word4, did, tmo; 11493 11494 ndlp = cmdiocb->ndlp; 11495 11496 ulp_status = get_job_ulpstatus(phba, rspiocb); 11497 ulp_word4 = get_job_word4(phba, rspiocb); 11498 11499 if (phba->sli_rev == LPFC_SLI_REV4) { 11500 did = get_job_els_rsp64_did(phba, cmdiocb); 11501 tmo = get_wqe_tmo(cmdiocb); 11502 } else { 11503 irsp = &rspiocb->iocb; 11504 did = get_job_els_rsp64_did(phba, rspiocb); 11505 tmo = irsp->ulpTimeout; 11506 } 11507 11508 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 11509 "LOGO npiv cmpl: status:x%x/x%x did:x%x", 11510 ulp_status, ulp_word4, did); 11511 11512 /* NPIV LOGO completes to NPort <nlp_DID> */ 11513 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 11514 "2928 NPIV LOGO completes to NPort x%x " 11515 "Data: x%x x%x x%x x%x x%x x%x x%x\n", 11516 ndlp->nlp_DID, ulp_status, ulp_word4, 11517 tmo, vport->num_disc_nodes, 11518 kref_read(&ndlp->kref), ndlp->nlp_flag, 11519 ndlp->fc4_xpt_flags); 11520 11521 if (ulp_status == IOSTAT_SUCCESS) { 11522 spin_lock_irq(shost->host_lock); 11523 vport->fc_flag &= ~FC_NDISC_ACTIVE; 11524 vport->fc_flag &= ~FC_FABRIC; 11525 spin_unlock_irq(shost->host_lock); 11526 lpfc_can_disctmo(vport); 11527 } 11528 11529 if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) { 11530 /* Wake up lpfc_vport_delete if waiting...*/ 11531 if (ndlp->logo_waitq) 11532 wake_up(ndlp->logo_waitq); 11533 spin_lock_irq(&ndlp->lock); 11534 ndlp->nlp_flag &= ~(NLP_ISSUE_LOGO | NLP_LOGO_SND); 11535 ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO; 11536 spin_unlock_irq(&ndlp->lock); 11537 } 11538 11539 /* Safe to release resources now. 
*/ 11540 lpfc_els_free_iocb(phba, cmdiocb); 11541 lpfc_nlp_put(ndlp); 11542 } 11543 11544 /** 11545 * lpfc_issue_els_npiv_logo - Issue a logo off a vport 11546 * @vport: pointer to a virtual N_Port data structure. 11547 * @ndlp: pointer to a node-list data structure. 11548 * 11549 * This routine issues a LOGO ELS command to an @ndlp off a @vport. 11550 * 11551 * Note that the ndlp reference count will be incremented by 1 for holding the 11552 * ndlp and the reference to ndlp will be stored into the ndlp field of 11553 * the IOCB for the completion callback function to the LOGO ELS command. 11554 * 11555 * Return codes 11556 * 0 - Successfully issued logo off the @vport 11557 * 1 - Failed to issue logo off the @vport 11558 **/ 11559 int 11560 lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 11561 { 11562 int rc = 0; 11563 struct lpfc_hba *phba = vport->phba; 11564 struct lpfc_iocbq *elsiocb; 11565 uint8_t *pcmd; 11566 uint16_t cmdsize; 11567 11568 cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name); 11569 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID, 11570 ELS_CMD_LOGO); 11571 if (!elsiocb) 11572 return 1; 11573 11574 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 11575 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 11576 pcmd += sizeof(uint32_t); 11577 11578 /* Fill in LOGO payload */ 11579 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 11580 pcmd += sizeof(uint32_t); 11581 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 11582 11583 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 11584 "Issue LOGO npiv did:x%x flg:x%x", 11585 ndlp->nlp_DID, ndlp->nlp_flag, 0); 11586 11587 elsiocb->cmd_cmpl = lpfc_cmpl_els_npiv_logo; 11588 spin_lock_irq(&ndlp->lock); 11589 ndlp->nlp_flag |= NLP_LOGO_SND; 11590 spin_unlock_irq(&ndlp->lock); 11591 elsiocb->ndlp = lpfc_nlp_get(ndlp); 11592 if (!elsiocb->ndlp) { 11593 lpfc_els_free_iocb(phba, elsiocb); 11594 goto err; 11595 } 11596 11597 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 11598 if (rc == IOCB_ERROR) { 11599 lpfc_els_free_iocb(phba, elsiocb); 11600 lpfc_nlp_put(ndlp); 11601 goto err; 11602 } 11603 return 0; 11604 11605 err: 11606 spin_lock_irq(&ndlp->lock); 11607 ndlp->nlp_flag &= ~NLP_LOGO_SND; 11608 spin_unlock_irq(&ndlp->lock); 11609 return 1; 11610 } 11611 11612 /** 11613 * lpfc_fabric_block_timeout - Handler function to the fabric block timer 11614 * @t: timer context used to obtain the lpfc hba. 11615 * 11616 * This routine is invoked by the fabric iocb block timer after 11617 * timeout. It posts the fabric iocb block timeout event by setting the 11618 * WORKER_FABRIC_BLOCK_TMO bit to work port event bitmap and then invokes 11619 * lpfc_worker_wake_up() routine to wake up the worker thread. It is for 11620 * the worker thread to invoke the lpfc_unblock_fabric_iocbs() on the 11621 * posted event WORKER_FABRIC_BLOCK_TMO. 
11622 **/ 11623 void 11624 lpfc_fabric_block_timeout(struct timer_list *t) 11625 { 11626 struct lpfc_hba *phba = from_timer(phba, t, fabric_block_timer); 11627 unsigned long iflags; 11628 uint32_t tmo_posted; 11629 11630 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 11631 tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO; 11632 if (!tmo_posted) 11633 phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO; 11634 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); 11635 11636 if (!tmo_posted) 11637 lpfc_worker_wake_up(phba); 11638 return; 11639 } 11640 11641 /** 11642 * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list 11643 * @phba: pointer to lpfc hba data structure. 11644 * 11645 * This routine issues one fabric iocb from the driver internal list to 11646 * the HBA. It first checks whether it's ready to issue one fabric iocb to 11647 * the HBA (whether there is no outstanding fabric iocb). If so, it shall 11648 * remove one pending fabric iocb from the driver internal list and invokes 11649 * lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA. 11650 **/ 11651 static void 11652 lpfc_resume_fabric_iocbs(struct lpfc_hba *phba) 11653 { 11654 struct lpfc_iocbq *iocb; 11655 unsigned long iflags; 11656 int ret; 11657 11658 repeat: 11659 iocb = NULL; 11660 spin_lock_irqsave(&phba->hbalock, iflags); 11661 /* Post any pending iocb to the SLI layer */ 11662 if (atomic_read(&phba->fabric_iocb_count) == 0) { 11663 list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb), 11664 list); 11665 if (iocb) 11666 /* Increment fabric iocb count to hold the position */ 11667 atomic_inc(&phba->fabric_iocb_count); 11668 } 11669 spin_unlock_irqrestore(&phba->hbalock, iflags); 11670 if (iocb) { 11671 iocb->fabric_cmd_cmpl = iocb->cmd_cmpl; 11672 iocb->cmd_cmpl = lpfc_cmpl_fabric_iocb; 11673 iocb->cmd_flag |= LPFC_IO_FABRIC; 11674 11675 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD, 11676 "Fabric sched1: ste:x%x", 11677 iocb->vport->port_state, 0, 0); 11678 11679 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); 11680 11681 if (ret == IOCB_ERROR) { 11682 iocb->cmd_cmpl = iocb->fabric_cmd_cmpl; 11683 iocb->fabric_cmd_cmpl = NULL; 11684 iocb->cmd_flag &= ~LPFC_IO_FABRIC; 11685 set_job_ulpstatus(iocb, IOSTAT_LOCAL_REJECT); 11686 iocb->wcqe_cmpl.parameter = IOERR_SLI_ABORTED; 11687 iocb->cmd_cmpl(phba, iocb, iocb); 11688 11689 atomic_dec(&phba->fabric_iocb_count); 11690 goto repeat; 11691 } 11692 } 11693 } 11694 11695 /** 11696 * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command 11697 * @phba: pointer to lpfc hba data structure. 11698 * 11699 * This routine unblocks the issuing fabric iocb command. The function 11700 * will clear the fabric iocb block bit and then invoke the routine 11701 * lpfc_resume_fabric_iocbs() to issue one of the pending fabric iocb 11702 * from the driver internal fabric iocb list. 11703 **/ 11704 void 11705 lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba) 11706 { 11707 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 11708 11709 lpfc_resume_fabric_iocbs(phba); 11710 return; 11711 } 11712 11713 /** 11714 * lpfc_block_fabric_iocbs - Block issuing fabric iocb command 11715 * @phba: pointer to lpfc hba data structure. 11716 * 11717 * This routine blocks the issuing fabric iocb for a specified amount of 11718 * time (currently 100 ms). This is done by set the fabric iocb block bit 11719 * and set up a timeout timer for 100ms. 
When the block bit is set, no more 11720 * fabric iocb will be issued out of the HBA. 11721 **/ 11722 static void 11723 lpfc_block_fabric_iocbs(struct lpfc_hba *phba) 11724 { 11725 int blocked; 11726 11727 blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 11728 /* Start a timer to unblock fabric iocbs after 100ms */ 11729 if (!blocked) 11730 mod_timer(&phba->fabric_block_timer, 11731 jiffies + msecs_to_jiffies(100)); 11732 11733 return; 11734 } 11735 11736 /** 11737 * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb 11738 * @phba: pointer to lpfc hba data structure. 11739 * @cmdiocb: pointer to lpfc command iocb data structure. 11740 * @rspiocb: pointer to lpfc response iocb data structure. 11741 * 11742 * This routine is the callback function that is put to the fabric iocb's 11743 * callback function pointer (iocb->cmd_cmpl). The original iocb's callback 11744 * function pointer has been stored in iocb->fabric_cmd_cmpl. This callback 11745 * function first restores and invokes the original iocb's callback function 11746 * and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next 11747 * fabric bound iocb from the driver internal fabric iocb list onto the wire. 11748 **/ 11749 static void 11750 lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11751 struct lpfc_iocbq *rspiocb) 11752 { 11753 struct ls_rjt stat; 11754 u32 ulp_status = get_job_ulpstatus(phba, rspiocb); 11755 u32 ulp_word4 = get_job_word4(phba, rspiocb); 11756 11757 WARN_ON((cmdiocb->cmd_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC); 11758 11759 switch (ulp_status) { 11760 case IOSTAT_NPORT_RJT: 11761 case IOSTAT_FABRIC_RJT: 11762 if (ulp_word4 & RJT_UNAVAIL_TEMP) 11763 lpfc_block_fabric_iocbs(phba); 11764 break; 11765 11766 case IOSTAT_NPORT_BSY: 11767 case IOSTAT_FABRIC_BSY: 11768 lpfc_block_fabric_iocbs(phba); 11769 break; 11770 11771 case IOSTAT_LS_RJT: 11772 stat.un.ls_rjt_error_be = 11773 cpu_to_be32(ulp_word4); 11774 if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) || 11775 (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY)) 11776 lpfc_block_fabric_iocbs(phba); 11777 break; 11778 } 11779 11780 BUG_ON(atomic_read(&phba->fabric_iocb_count) == 0); 11781 11782 cmdiocb->cmd_cmpl = cmdiocb->fabric_cmd_cmpl; 11783 cmdiocb->fabric_cmd_cmpl = NULL; 11784 cmdiocb->cmd_flag &= ~LPFC_IO_FABRIC; 11785 cmdiocb->cmd_cmpl(phba, cmdiocb, rspiocb); 11786 11787 atomic_dec(&phba->fabric_iocb_count); 11788 if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) { 11789 /* Post any pending iocbs to HBA */ 11790 lpfc_resume_fabric_iocbs(phba); 11791 } 11792 } 11793 11794 /** 11795 * lpfc_issue_fabric_iocb - Issue a fabric iocb command 11796 * @phba: pointer to lpfc hba data structure. 11797 * @iocb: pointer to lpfc command iocb data structure. 11798 * 11799 * This routine is used as the top-level API for issuing a fabric iocb command 11800 * such as FLOGI and FDISC. To accommodate certain switch fabric, this driver 11801 * function makes sure that only one fabric bound iocb will be outstanding at 11802 * any given time. As such, this function will first check to see whether there 11803 * is already an outstanding fabric iocb on the wire. If so, it will put the 11804 * newly issued iocb onto the driver internal fabric iocb list, waiting to be 11805 * issued later. Otherwise, it will issue the iocb on the wire and update the 11806 * fabric iocb count it indicate that there is one fabric iocb on the wire. 
11807 * 11808 * Note: this implementation can potentially send fabric IOCBs out of 11809 * order. The problem is that the computation of the "ready" boolean does 11810 * not include the condition that the internal fabric IOCB list is empty. As 11811 * such, a fabric IOCB issued by this routine might "jump" 11812 * ahead of the fabric IOCBs already on the internal list. 11813 * 11814 * Return code 11815 * IOCB_SUCCESS - either fabric iocb put on the list or issued successfully 11816 * IOCB_ERROR - failed to issue fabric iocb 11817 **/ 11818 static int 11819 lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb) 11820 { 11821 unsigned long iflags; 11822 int ready; 11823 int ret; 11824 11825 BUG_ON(atomic_read(&phba->fabric_iocb_count) > 1); 11826 11827 spin_lock_irqsave(&phba->hbalock, iflags); 11828 ready = atomic_read(&phba->fabric_iocb_count) == 0 && 11829 !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 11830 11831 if (ready) 11832 /* Increment fabric iocb count to hold the position */ 11833 atomic_inc(&phba->fabric_iocb_count); 11834 spin_unlock_irqrestore(&phba->hbalock, iflags); 11835 if (ready) { 11836 iocb->fabric_cmd_cmpl = iocb->cmd_cmpl; 11837 iocb->cmd_cmpl = lpfc_cmpl_fabric_iocb; 11838 iocb->cmd_flag |= LPFC_IO_FABRIC; 11839 11840 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD, 11841 "Fabric sched2: ste:x%x", 11842 iocb->vport->port_state, 0, 0); 11843 11844 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); 11845 11846 if (ret == IOCB_ERROR) { 11847 iocb->cmd_cmpl = iocb->fabric_cmd_cmpl; 11848 iocb->fabric_cmd_cmpl = NULL; 11849 iocb->cmd_flag &= ~LPFC_IO_FABRIC; 11850 atomic_dec(&phba->fabric_iocb_count); 11851 } 11852 } else { 11853 spin_lock_irqsave(&phba->hbalock, iflags); 11854 list_add_tail(&iocb->list, &phba->fabric_iocb_list); 11855 spin_unlock_irqrestore(&phba->hbalock, iflags); 11856 ret = IOCB_SUCCESS; 11857 } 11858 return ret; 11859 } 11860 11861 /** 11862 * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list 11863 * @vport: pointer to a virtual N_Port data structure. 11864 * 11865 * This routine aborts all the IOCBs associated with a @vport from the 11866 * driver internal fabric IOCB list. The list contains fabric IOCBs to be 11867 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB 11868 * list, removes each IOCB associated with the @vport off the list, sets the 11869 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function 11870 * associated with the IOCB. 11871 **/ 11872 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport) 11873 { 11874 LIST_HEAD(completions); 11875 struct lpfc_hba *phba = vport->phba; 11876 struct lpfc_iocbq *tmp_iocb, *piocb; 11877 11878 spin_lock_irq(&phba->hbalock); 11879 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, 11880 list) { 11881 11882 if (piocb->vport != vport) 11883 continue; 11884 11885 list_move_tail(&piocb->list, &completions); 11886 } 11887 spin_unlock_irq(&phba->hbalock); 11888 11889 /* Cancel all the IOCBs from the completions list */ 11890 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 11891 IOERR_SLI_ABORTED); 11892 } 11893 11894 /** 11895 * lpfc_fabric_abort_nport - Abort an ndlp's iocbs from driver fabric iocb list 11896 * @ndlp: pointer to a node-list data structure. 11897 * 11898 * This routine aborts all the IOCBs associated with an @ndlp from the 11899 * driver internal fabric IOCB list.
The list contains fabric IOCBs to be 11900 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB 11901 * list, removes each IOCB associated with the @ndlp off the list, set the 11902 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function 11903 * associated with the IOCB. 11904 **/ 11905 void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp) 11906 { 11907 LIST_HEAD(completions); 11908 struct lpfc_hba *phba = ndlp->phba; 11909 struct lpfc_iocbq *tmp_iocb, *piocb; 11910 struct lpfc_sli_ring *pring; 11911 11912 pring = lpfc_phba_elsring(phba); 11913 11914 if (unlikely(!pring)) 11915 return; 11916 11917 spin_lock_irq(&phba->hbalock); 11918 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, 11919 list) { 11920 if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) { 11921 11922 list_move_tail(&piocb->list, &completions); 11923 } 11924 } 11925 spin_unlock_irq(&phba->hbalock); 11926 11927 /* Cancel all the IOCBs from the completions list */ 11928 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 11929 IOERR_SLI_ABORTED); 11930 } 11931 11932 /** 11933 * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list 11934 * @phba: pointer to lpfc hba data structure. 11935 * 11936 * This routine aborts all the IOCBs currently on the driver internal 11937 * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS 11938 * IOCB ring. This function takes the entire IOCB list off the fabric IOCB 11939 * list, removes IOCBs off the list, set the status field to 11940 * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with 11941 * the IOCB. 11942 **/ 11943 void lpfc_fabric_abort_hba(struct lpfc_hba *phba) 11944 { 11945 LIST_HEAD(completions); 11946 11947 spin_lock_irq(&phba->hbalock); 11948 list_splice_init(&phba->fabric_iocb_list, &completions); 11949 spin_unlock_irq(&phba->hbalock); 11950 11951 /* Cancel all the IOCBs from the completions list */ 11952 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 11953 IOERR_SLI_ABORTED); 11954 } 11955 11956 /** 11957 * lpfc_sli4_vport_delete_els_xri_aborted -Remove all ndlp references for vport 11958 * @vport: pointer to lpfc vport data structure. 11959 * 11960 * This routine is invoked by the vport cleanup for deletions and the cleanup 11961 * for an ndlp on removal. 11962 **/ 11963 void 11964 lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport) 11965 { 11966 struct lpfc_hba *phba = vport->phba; 11967 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 11968 struct lpfc_nodelist *ndlp = NULL; 11969 unsigned long iflag = 0; 11970 11971 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag); 11972 list_for_each_entry_safe(sglq_entry, sglq_next, 11973 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { 11974 if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport) { 11975 lpfc_nlp_put(sglq_entry->ndlp); 11976 ndlp = sglq_entry->ndlp; 11977 sglq_entry->ndlp = NULL; 11978 11979 /* If the xri on the abts_els_sgl list is for the Fport 11980 * node and the vport is unloading, the xri aborted wcqe 11981 * likely isn't coming back. Just release the sgl. 
			 */
			if ((vport->load_flag & FC_UNLOADING) &&
			    ndlp->nlp_DID == Fabric_DID) {
				list_del(&sglq_entry->list);
				sglq_entry->state = SGL_FREED;
				list_add_tail(&sglq_entry->list,
					      &phba->sli4_hba.lpfc_els_sgl_list);
			}
		}
	}
	spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag);
	return;
}

/**
 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the els xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 slow-path
 * ELS aborted xri.
 **/
void
lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
			  struct sli4_wcqe_xri_aborted *axri)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	uint16_t lxri = 0;

	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
	unsigned long iflag = 0;
	struct lpfc_nodelist *ndlp;
	struct lpfc_sli_ring *pring;

	pring = lpfc_phba_elsring(phba);

	spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag);
	list_for_each_entry_safe(sglq_entry, sglq_next,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
		if (sglq_entry->sli4_xritag == xri) {
			list_del(&sglq_entry->list);
			ndlp = sglq_entry->ndlp;
			sglq_entry->ndlp = NULL;
			list_add_tail(&sglq_entry->list,
				      &phba->sli4_hba.lpfc_els_sgl_list);
			sglq_entry->state = SGL_FREED;
			spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock,
					       iflag);

			if (ndlp) {
				lpfc_set_rrq_active(phba, ndlp,
						    sglq_entry->sli4_lxritag,
						    rxid, 1);
				lpfc_nlp_put(ndlp);
			}

			/* Check if TXQ queue needs to be serviced */
			if (pring && !list_empty(&pring->txq))
				lpfc_worker_wake_up(phba);
			return;
		}
	}
	spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag);
	lxri = lpfc_sli4_xri_inrange(phba, xri);
	if (lxri == NO_XRI)
		return;

	spin_lock_irqsave(&phba->hbalock, iflag);
	sglq_entry = __lpfc_get_active_sglq(phba, lxri);
	if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return;
	}
	sglq_entry->state = SGL_XRI_ABORTED;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return;
}
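/*
 * Note on the flow above (summary, not authoritative): when the port aborts
 * an ELS exchange, its sglq waits on lpfc_abts_els_sgl_list until the
 * XRI-aborted WCQE arrives. The handler above then returns the sglq to
 * lpfc_els_sgl_list (SGL_FREED), drops the ndlp reference held by the sglq,
 * and calls lpfc_set_rrq_active() so the XRI is not immediately reused with
 * that remote port while the recovery qualifier is outstanding. If the sglq
 * is still active rather than on the aborted list, it is only marked
 * SGL_XRI_ABORTED here and is recycled later by the SGL allocator.
 */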
" 12083 "rport in state 0x%x\n", ndlp->nlp_state); 12084 return; 12085 } 12086 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 12087 "3094 Start rport recovery on shost id 0x%x " 12088 "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x " 12089 "flags 0x%x\n", 12090 shost->host_no, ndlp->nlp_DID, 12091 vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state, 12092 ndlp->nlp_flag); 12093 /* 12094 * The rport is not responding. Remove the FCP-2 flag to prevent 12095 * an ADISC in the follow-up recovery code. 12096 */ 12097 spin_lock_irqsave(&ndlp->lock, flags); 12098 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 12099 ndlp->nlp_flag |= NLP_ISSUE_LOGO; 12100 spin_unlock_irqrestore(&ndlp->lock, flags); 12101 lpfc_unreg_rpi(vport, ndlp); 12102 } 12103 12104 static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport) 12105 { 12106 bitmap_zero(vport->vmid_priority_range, LPFC_VMID_MAX_PRIORITY_RANGE); 12107 } 12108 12109 static void 12110 lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max) 12111 { 12112 u32 i; 12113 12114 if ((min > max) || (max > LPFC_VMID_MAX_PRIORITY_RANGE)) 12115 return; 12116 12117 for (i = min; i <= max; i++) 12118 set_bit(i, vport->vmid_priority_range); 12119 } 12120 12121 static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid) 12122 { 12123 set_bit(ctcl_vmid, vport->vmid_priority_range); 12124 } 12125 12126 u32 lpfc_vmid_get_cs_ctl(struct lpfc_vport *vport) 12127 { 12128 u32 i; 12129 12130 i = find_first_bit(vport->vmid_priority_range, 12131 LPFC_VMID_MAX_PRIORITY_RANGE); 12132 12133 if (i == LPFC_VMID_MAX_PRIORITY_RANGE) 12134 return 0; 12135 12136 clear_bit(i, vport->vmid_priority_range); 12137 return i; 12138 } 12139 12140 #define MAX_PRIORITY_DESC 255 12141 12142 static void 12143 lpfc_cmpl_els_qfpa(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 12144 struct lpfc_iocbq *rspiocb) 12145 { 12146 struct lpfc_vport *vport = cmdiocb->vport; 12147 struct priority_range_desc *desc; 12148 struct lpfc_dmabuf *prsp = NULL; 12149 struct lpfc_vmid_priority_range *vmid_range = NULL; 12150 u32 *data; 12151 struct lpfc_dmabuf *dmabuf = cmdiocb->cmd_dmabuf; 12152 u32 ulp_status = get_job_ulpstatus(phba, rspiocb); 12153 u32 ulp_word4 = get_job_word4(phba, rspiocb); 12154 u8 *pcmd, max_desc; 12155 u32 len, i; 12156 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 12157 12158 prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list); 12159 if (!prsp) 12160 goto out; 12161 12162 pcmd = prsp->virt; 12163 data = (u32 *)pcmd; 12164 if (data[0] == ELS_CMD_LS_RJT) { 12165 lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI, 12166 "3277 QFPA LS_RJT x%x x%x\n", 12167 data[0], data[1]); 12168 goto out; 12169 } 12170 if (ulp_status) { 12171 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI, 12172 "6529 QFPA failed with status x%x x%x\n", 12173 ulp_status, ulp_word4); 12174 goto out; 12175 } 12176 12177 if (!vport->qfpa_res) { 12178 max_desc = FCELSSIZE / sizeof(*vport->qfpa_res); 12179 vport->qfpa_res = kcalloc(max_desc, sizeof(*vport->qfpa_res), 12180 GFP_KERNEL); 12181 if (!vport->qfpa_res) 12182 goto out; 12183 } 12184 12185 len = *((u32 *)(pcmd + 4)); 12186 len = be32_to_cpu(len); 12187 memcpy(vport->qfpa_res, pcmd, len + 8); 12188 len = len / LPFC_PRIORITY_RANGE_DESC_SIZE; 12189 12190 desc = (struct priority_range_desc *)(pcmd + 8); 12191 vmid_range = vport->vmid_priority.vmid_range; 12192 if (!vmid_range) { 12193 vmid_range = kcalloc(MAX_PRIORITY_DESC, sizeof(*vmid_range), 12194 GFP_KERNEL); 12195 if (!vmid_range) { 12196 kfree(vport->qfpa_res); 12197 goto out; 12198 } 12199 
static void
lpfc_cmpl_els_qfpa(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		   struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct priority_range_desc *desc;
	struct lpfc_dmabuf *prsp = NULL;
	struct lpfc_vmid_priority_range *vmid_range = NULL;
	u32 *data;
	struct lpfc_dmabuf *dmabuf = cmdiocb->cmd_dmabuf;
	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
	u32 ulp_word4 = get_job_word4(phba, rspiocb);
	u8 *pcmd, max_desc;
	u32 len, i;
	struct lpfc_nodelist *ndlp = cmdiocb->ndlp;

	prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list);
	if (!prsp)
		goto out;

	pcmd = prsp->virt;
	data = (u32 *)pcmd;
	if (data[0] == ELS_CMD_LS_RJT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
				 "3277 QFPA LS_RJT x%x x%x\n",
				 data[0], data[1]);
		goto out;
	}
	if (ulp_status) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
				 "6529 QFPA failed with status x%x x%x\n",
				 ulp_status, ulp_word4);
		goto out;
	}

	if (!vport->qfpa_res) {
		max_desc = FCELSSIZE / sizeof(*vport->qfpa_res);
		vport->qfpa_res = kcalloc(max_desc, sizeof(*vport->qfpa_res),
					  GFP_KERNEL);
		if (!vport->qfpa_res)
			goto out;
	}

	len = *((u32 *)(pcmd + 4));
	len = be32_to_cpu(len);
	memcpy(vport->qfpa_res, pcmd, len + 8);
	len = len / LPFC_PRIORITY_RANGE_DESC_SIZE;

	desc = (struct priority_range_desc *)(pcmd + 8);
	vmid_range = vport->vmid_priority.vmid_range;
	if (!vmid_range) {
		vmid_range = kcalloc(MAX_PRIORITY_DESC, sizeof(*vmid_range),
				     GFP_KERNEL);
		if (!vmid_range) {
			kfree(vport->qfpa_res);
			goto out;
		}
		vport->vmid_priority.vmid_range = vmid_range;
	}
	vport->vmid_priority.num_descriptors = len;

	for (i = 0; i < len; i++, vmid_range++, desc++) {
		lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS,
				 "6539 vmid values low=%d, high=%d, qos=%d, "
				 "local ve id=%d\n", desc->lo_range,
				 desc->hi_range, desc->qos_priority,
				 desc->local_ve_id);

		vmid_range->low = desc->lo_range << 1;
		if (desc->local_ve_id == QFPA_ODD_ONLY)
			vmid_range->low++;
		if (desc->qos_priority)
			vport->vmid_flag |= LPFC_VMID_QOS_ENABLED;
		vmid_range->qos = desc->qos_priority;

		vmid_range->high = desc->hi_range << 1;
		if ((desc->local_ve_id == QFPA_ODD_ONLY) ||
		    (desc->local_ve_id == QFPA_EVEN_ODD))
			vmid_range->high++;
	}
	lpfc_init_cs_ctl_bitmap(vport);
	for (i = 0; i < vport->vmid_priority.num_descriptors; i++) {
		lpfc_vmid_set_cs_ctl_range(vport,
				vport->vmid_priority.vmid_range[i].low,
				vport->vmid_priority.vmid_range[i].high);
	}

	vport->vmid_flag |= LPFC_VMID_QFPA_CMPL;
out:
	lpfc_els_free_iocb(phba, cmdiocb);
	lpfc_nlp_put(ndlp);
}

int lpfc_issue_els_qfpa(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *elsiocb;
	u8 *pcmd;
	int ret;

	ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
	if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
		return -ENXIO;

	elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_QFPA_SIZE, 2, ndlp,
				     ndlp->nlp_DID, ELS_CMD_QFPA);
	if (!elsiocb)
		return -ENOMEM;

	pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;

	*((u32 *)(pcmd)) = ELS_CMD_QFPA;
	pcmd += 4;

	elsiocb->cmd_cmpl = lpfc_cmpl_els_qfpa;

	elsiocb->ndlp = lpfc_nlp_get(ndlp);
	if (!elsiocb->ndlp) {
		lpfc_els_free_iocb(vport->phba, elsiocb);
		return -ENXIO;
	}

	ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 2);
	if (ret != IOCB_SUCCESS) {
		lpfc_els_free_iocb(phba, elsiocb);
		lpfc_nlp_put(ndlp);
		return -EIO;
	}
	vport->vmid_flag &= ~LPFC_VMID_QOS_ENABLED;
	return 0;
}

int
lpfc_vmid_uvem(struct lpfc_vport *vport,
	       struct lpfc_vmid *vmid, bool instantiated)
{
	struct lpfc_vem_id_desc *vem_id_desc;
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *elsiocb;
	struct instantiated_ve_desc *inst_desc;
	struct lpfc_vmid_context *vmid_context;
	u8 *pcmd;
	u32 *len;
	int ret = 0;

	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
		return -ENXIO;

	vmid_context = kmalloc(sizeof(*vmid_context), GFP_KERNEL);
	if (!vmid_context)
		return -ENOMEM;
	elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_UVEM_SIZE, 2,
				     ndlp, Fabric_DID, ELS_CMD_UVEM);
	if (!elsiocb)
		goto out;

	lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS,
			 "3427 Host vmid %s %d\n",
			 vmid->host_vmid, instantiated);
	vmid_context->vmp = vmid;
	vmid_context->nlp = ndlp;
	vmid_context->instantiated = instantiated;
	elsiocb->vmid_tag.vmid_context = vmid_context;
	pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;

	if (uuid_is_null((uuid_t *)vport->lpfc_vmid_host_uuid))
		memcpy(vport->lpfc_vmid_host_uuid, vmid->host_vmid,
		       LPFC_COMPRESS_VMID_SIZE);

	*((u32 *)(pcmd)) = ELS_CMD_UVEM;
	len = (u32 *)(pcmd + 4);
	*len = cpu_to_be32(LPFC_UVEM_SIZE - 8);

	vem_id_desc = (struct lpfc_vem_id_desc *)(pcmd + 8);
	vem_id_desc->tag = be32_to_cpu(VEM_ID_DESC_TAG);
	vem_id_desc->length = be32_to_cpu(LPFC_UVEM_VEM_ID_DESC_SIZE);
	memcpy(vem_id_desc->vem_id, vport->lpfc_vmid_host_uuid,
	       LPFC_COMPRESS_VMID_SIZE);

	inst_desc = (struct instantiated_ve_desc *)(pcmd + 32);
	inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG);
	inst_desc->length = be32_to_cpu(LPFC_UVEM_VE_MAP_DESC_SIZE);
	memcpy(inst_desc->global_vem_id, vmid->host_vmid,
	       LPFC_COMPRESS_VMID_SIZE);

	bf_set(lpfc_instantiated_nport_id, inst_desc, vport->fc_myDID);
	bf_set(lpfc_instantiated_local_id, inst_desc,
	       vmid->un.cs_ctl_vmid);
	if (instantiated) {
		inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG);
	} else {
		inst_desc->tag = be32_to_cpu(DEINSTANTIATED_VE_DESC_TAG);
		lpfc_vmid_put_cs_ctl(vport, vmid->un.cs_ctl_vmid);
	}
	inst_desc->word6 = cpu_to_be32(inst_desc->word6);

	elsiocb->cmd_cmpl = lpfc_cmpl_els_uvem;

	elsiocb->ndlp = lpfc_nlp_get(ndlp);
	if (!elsiocb->ndlp) {
		lpfc_els_free_iocb(vport->phba, elsiocb);
		goto out;
	}

	ret = lpfc_sli_issue_iocb(vport->phba, LPFC_ELS_RING, elsiocb, 0);
	if (ret != IOCB_SUCCESS) {
		lpfc_els_free_iocb(vport->phba, elsiocb);
		lpfc_nlp_put(ndlp);
		goto out;
	}

	return 0;
out:
	kfree(vmid_context);
	return -EIO;
}

static void
lpfc_cmpl_els_uvem(struct lpfc_hba *phba, struct lpfc_iocbq *icmdiocb,
		   struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = icmdiocb->vport;
	struct lpfc_dmabuf *prsp = NULL;
	struct lpfc_vmid_context *vmid_context =
	    icmdiocb->vmid_tag.vmid_context;
	struct lpfc_nodelist *ndlp = icmdiocb->ndlp;
	u8 *pcmd;
	u32 *data;
	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
	u32 ulp_word4 = get_job_word4(phba, rspiocb);
	struct lpfc_dmabuf *dmabuf = icmdiocb->cmd_dmabuf;
	struct lpfc_vmid *vmid;

	vmid = vmid_context->vmp;
	if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
		ndlp = NULL;

	prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list);
	if (!prsp)
		goto out;
	pcmd = prsp->virt;
	data = (u32 *)pcmd;
	if (data[0] == ELS_CMD_LS_RJT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
				 "4532 UVEM LS_RJT %x %x\n", data[0], data[1]);
		goto out;
	}
	if (ulp_status) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
				 "4533 UVEM error status %x: %x\n",
				 ulp_status, ulp_word4);
		goto out;
	}
	spin_lock(&phba->hbalock);
	/* Set IN USE flag */
	vport->vmid_flag |= LPFC_VMID_IN_USE;
	phba->pport->vmid_flag |= LPFC_VMID_IN_USE;
	spin_unlock(&phba->hbalock);

	if (vmid_context->instantiated) {
		write_lock(&vport->vmid_lock);
		vmid->flag |= LPFC_VMID_REGISTERED;
		vmid->flag &= ~LPFC_VMID_REQ_REGISTER;
		write_unlock(&vport->vmid_lock);
	}

out:
	kfree(vmid_context);
	lpfc_els_free_iocb(phba, icmdiocb);
	lpfc_nlp_put(ndlp);
}
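/*
 * VMID ELS flow sketch (illustrative ordering only): with priority tagging
 * enabled, the vport first queries the fabric's priority ranges and then
 * registers each VM entity before tagging its I/O:
 *
 *	lpfc_issue_els_qfpa(vport);          // learn CS_CTL ranges (QFPA)
 *	...
 *	vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport);
 *	lpfc_vmid_uvem(vport, vmid, true);   // instantiate the VE (UVEM)
 *	...
 *	lpfc_vmid_uvem(vport, vmid, false);  // deinstantiate on teardown
 *
 * The actual callers and their exact sequencing in the driver may differ.
 */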