1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 * EMULEX and SLI are trademarks of Emulex. * 8 * www.broadcom.com * 9 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 10 * * 11 * This program is free software; you can redistribute it and/or * 12 * modify it under the terms of version 2 of the GNU General * 13 * Public License as published by the Free Software Foundation. * 14 * This program is distributed in the hope that it will be useful. * 15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 19 * TO BE LEGALLY INVALID. See the GNU General Public License for * 20 * more details, a copy of which can be found in the file COPYING * 21 * included with this package. * 22 *******************************************************************/ 23 /* See Fibre Channel protocol T11 FC-LS for details */ 24 #include <linux/blkdev.h> 25 #include <linux/pci.h> 26 #include <linux/slab.h> 27 #include <linux/interrupt.h> 28 #include <linux/delay.h> 29 30 #include <scsi/scsi.h> 31 #include <scsi/scsi_device.h> 32 #include <scsi/scsi_host.h> 33 #include <scsi/scsi_transport_fc.h> 34 #include <uapi/scsi/fc/fc_fs.h> 35 #include <uapi/scsi/fc/fc_els.h> 36 37 #include "lpfc_hw4.h" 38 #include "lpfc_hw.h" 39 #include "lpfc_sli.h" 40 #include "lpfc_sli4.h" 41 #include "lpfc_nl.h" 42 #include "lpfc_disc.h" 43 #include "lpfc_scsi.h" 44 #include "lpfc.h" 45 #include "lpfc_logmsg.h" 46 #include "lpfc_crtn.h" 47 #include "lpfc_vport.h" 48 #include "lpfc_debugfs.h" 49 50 static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *, 51 struct lpfc_iocbq *); 52 static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *, 53 struct lpfc_iocbq *); 54 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport); 55 static int lpfc_issue_els_fdisc(struct lpfc_vport *vport, 56 struct lpfc_nodelist *ndlp, uint8_t retry); 57 static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba, 58 struct lpfc_iocbq *iocb); 59 static void lpfc_cmpl_els_edc(struct lpfc_hba *phba, 60 struct lpfc_iocbq *cmdiocb, 61 struct lpfc_iocbq *rspiocb); 62 static void lpfc_cmpl_els_uvem(struct lpfc_hba *, struct lpfc_iocbq *, 63 struct lpfc_iocbq *); 64 65 static int lpfc_max_els_tries = 3; 66 67 static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport); 68 static void lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max); 69 static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid); 70 71 /** 72 * lpfc_els_chk_latt - Check host link attention event for a vport 73 * @vport: pointer to a host virtual N_Port data structure. 74 * 75 * This routine checks whether there is an outstanding host link 76 * attention event during the discovery process with the @vport. It is done 77 * by reading the HBA's Host Attention (HA) register. 
If there are any host 78 * link attention events during this @vport's discovery process, the @vport 79 * shall be marked as FC_ABORT_DISCOVERY, a host link attention clear shall 80 * be issued if the link state is not already in host link cleared state, 81 * and a return code shall indicate whether the host link attention event 82 * had happened. 83 * 84 * Note that, if either the host link is in state LPFC_LINK_DOWN or @vport 85 * state is in LPFC_VPORT_READY, the request for checking host link attention 86 * event will be ignored and a return code shall indicate no host link 87 * attention event had happened. 88 * 89 * Return codes 90 * 0 - no host link attention event happened 91 * 1 - host link attention event happened 92 **/ 93 int 94 lpfc_els_chk_latt(struct lpfc_vport *vport) 95 { 96 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 97 struct lpfc_hba *phba = vport->phba; 98 uint32_t ha_copy; 99 100 if (vport->port_state >= LPFC_VPORT_READY || 101 phba->link_state == LPFC_LINK_DOWN || 102 phba->sli_rev > LPFC_SLI_REV3) 103 return 0; 104 105 /* Read the HBA Host Attention Register */ 106 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 107 return 1; 108 109 if (!(ha_copy & HA_LATT)) 110 return 0; 111 112 /* Pending Link Event during Discovery */ 113 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 114 "0237 Pending Link Event during " 115 "Discovery: State x%x\n", 116 phba->pport->port_state); 117 118 /* CLEAR_LA should re-enable link attention events and 119 * we should then immediately take a LATT event. The 120 * LATT processing should call lpfc_linkdown() which 121 * will cleanup any left over in-progress discovery 122 * events. 123 */ 124 spin_lock_irq(shost->host_lock); 125 vport->fc_flag |= FC_ABORT_DISCOVERY; 126 spin_unlock_irq(shost->host_lock); 127 128 if (phba->link_state != LPFC_CLEAR_LA) 129 lpfc_issue_clear_la(phba, vport); 130 131 return 1; 132 } 133 134 /** 135 * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure 136 * @vport: pointer to a host virtual N_Port data structure. 137 * @expect_rsp: flag indicating whether response is expected. 138 * @cmd_size: size of the ELS command. 139 * @retry: number of retries to the command when it fails. 140 * @ndlp: pointer to a node-list data structure. 141 * @did: destination identifier. 142 * @elscmd: the ELS command code. 143 * 144 * This routine allocates an lpfc-IOCB data structure from 145 * the driver lpfc-IOCB free-list and prepares the IOCB with the parameters 146 * passed into the routine for the discovery state machine to issue an Extended 147 * Link Service (ELS) command. It is a generic lpfc-IOCB allocation 148 * and preparation routine that is used by all the discovery state machine 149 * routines; the ELS command-specific fields are set up later by 150 * the individual discovery state machine routines after this routine has 151 * allocated and prepared the generic IOCB data structure. It fills in the 152 * Buffer Descriptor Entries (BDEs) and allocates buffers for both the command 153 * payload and the response payload (if expected). The reference count on the 154 * ndlp is incremented by 1 and the reference to the ndlp is stored in the 155 * ndlp field of the IOCB data structure so that this IOCB holds the ndlp 156 * reference for the command's callback function to access later.
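 *
 * For example, lpfc_issue_els_flogi() below follows roughly this calling
 * pattern (illustrative sketch only; payload setup and error handling are
 * abbreviated):
 *
 *   elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
 *                                ndlp->nlp_DID, ELS_CMD_FLOGI);
 *   if (!elsiocb)
 *           return 1;
 *   elsiocb->cmd_cmpl = lpfc_cmpl_els_flogi;
 *   elsiocb->ndlp = lpfc_nlp_get(ndlp);
 *   rc = lpfc_issue_fabric_iocb(phba, elsiocb);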
157 * 158 * Return code 159 * Pointer to the newly allocated/prepared els iocb data structure 160 * NULL - when els iocb data structure allocation/preparation failed 161 **/ 162 struct lpfc_iocbq * 163 lpfc_prep_els_iocb(struct lpfc_vport *vport, u8 expect_rsp, 164 u16 cmd_size, u8 retry, 165 struct lpfc_nodelist *ndlp, u32 did, 166 u32 elscmd) 167 { 168 struct lpfc_hba *phba = vport->phba; 169 struct lpfc_iocbq *elsiocb; 170 struct lpfc_dmabuf *pcmd, *prsp, *pbuflist, *bmp; 171 struct ulp_bde64_le *bpl; 172 u32 timeout = 0; 173 174 if (!lpfc_is_link_up(phba)) 175 return NULL; 176 177 /* Allocate buffer for command iocb */ 178 elsiocb = lpfc_sli_get_iocbq(phba); 179 if (!elsiocb) 180 return NULL; 181 182 /* 183 * If this command is for fabric controller and HBA running 184 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames. 185 */ 186 if ((did == Fabric_DID) && 187 (phba->hba_flag & HBA_FIP_SUPPORT) && 188 ((elscmd == ELS_CMD_FLOGI) || 189 (elscmd == ELS_CMD_FDISC) || 190 (elscmd == ELS_CMD_LOGO))) 191 switch (elscmd) { 192 case ELS_CMD_FLOGI: 193 elsiocb->cmd_flag |= 194 ((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT) 195 & LPFC_FIP_ELS_ID_MASK); 196 break; 197 case ELS_CMD_FDISC: 198 elsiocb->cmd_flag |= 199 ((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT) 200 & LPFC_FIP_ELS_ID_MASK); 201 break; 202 case ELS_CMD_LOGO: 203 elsiocb->cmd_flag |= 204 ((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT) 205 & LPFC_FIP_ELS_ID_MASK); 206 break; 207 } 208 else 209 elsiocb->cmd_flag &= ~LPFC_FIP_ELS_ID_MASK; 210 211 /* fill in BDEs for command */ 212 /* Allocate buffer for command payload */ 213 pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL); 214 if (pcmd) 215 pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys); 216 if (!pcmd || !pcmd->virt) 217 goto els_iocb_free_pcmb_exit; 218 219 INIT_LIST_HEAD(&pcmd->list); 220 221 /* Allocate buffer for response payload */ 222 if (expect_rsp) { 223 prsp = kmalloc(sizeof(*prsp), GFP_KERNEL); 224 if (prsp) 225 prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 226 &prsp->phys); 227 if (!prsp || !prsp->virt) 228 goto els_iocb_free_prsp_exit; 229 INIT_LIST_HEAD(&prsp->list); 230 } else { 231 prsp = NULL; 232 } 233 234 /* Allocate buffer for Buffer ptr list */ 235 pbuflist = kmalloc(sizeof(*pbuflist), GFP_KERNEL); 236 if (pbuflist) 237 pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 238 &pbuflist->phys); 239 if (!pbuflist || !pbuflist->virt) 240 goto els_iocb_free_pbuf_exit; 241 242 INIT_LIST_HEAD(&pbuflist->list); 243 244 if (expect_rsp) { 245 switch (elscmd) { 246 case ELS_CMD_FLOGI: 247 timeout = FF_DEF_RATOV * 2; 248 break; 249 case ELS_CMD_LOGO: 250 timeout = phba->fc_ratov; 251 break; 252 default: 253 timeout = phba->fc_ratov * 2; 254 } 255 256 /* Fill SGE for the num bde count */ 257 elsiocb->num_bdes = 2; 258 } 259 260 if (phba->sli_rev == LPFC_SLI_REV4) 261 bmp = pcmd; 262 else 263 bmp = pbuflist; 264 265 lpfc_sli_prep_els_req_rsp(phba, elsiocb, vport, bmp, cmd_size, did, 266 elscmd, timeout, expect_rsp); 267 268 bpl = (struct ulp_bde64_le *)pbuflist->virt; 269 bpl->addr_low = cpu_to_le32(putPaddrLow(pcmd->phys)); 270 bpl->addr_high = cpu_to_le32(putPaddrHigh(pcmd->phys)); 271 bpl->type_size = cpu_to_le32(cmd_size); 272 bpl->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64); 273 274 if (expect_rsp) { 275 bpl++; 276 bpl->addr_low = cpu_to_le32(putPaddrLow(prsp->phys)); 277 bpl->addr_high = cpu_to_le32(putPaddrHigh(prsp->phys)); 278 bpl->type_size = cpu_to_le32(FCELSSIZE); 279 bpl->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64); 280 } 281 282 elsiocb->cmd_dmabuf = pcmd; 
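/* The payload and BPL dmabufs are saved on the iocb so that the
 * ELS free path can release them when the command completes.
 */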
283 elsiocb->bpl_dmabuf = pbuflist; 284 elsiocb->retry = retry; 285 elsiocb->vport = vport; 286 elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT; 287 288 if (prsp) 289 list_add(&prsp->list, &pcmd->list); 290 if (expect_rsp) { 291 /* Xmit ELS command <elsCmd> to remote NPORT <did> */ 292 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 293 "0116 Xmit ELS command x%x to remote " 294 "NPORT x%x I/O tag: x%x, port state:x%x " 295 "rpi x%x fc_flag:x%x\n", 296 elscmd, did, elsiocb->iotag, 297 vport->port_state, ndlp->nlp_rpi, 298 vport->fc_flag); 299 } else { 300 /* Xmit ELS response <elsCmd> to remote NPORT <did> */ 301 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 302 "0117 Xmit ELS response x%x to remote " 303 "NPORT x%x I/O tag: x%x, size: x%x " 304 "port_state x%x rpi x%x fc_flag x%x\n", 305 elscmd, ndlp->nlp_DID, elsiocb->iotag, 306 cmd_size, vport->port_state, 307 ndlp->nlp_rpi, vport->fc_flag); 308 } 309 310 return elsiocb; 311 312 els_iocb_free_pbuf_exit: 313 if (expect_rsp) 314 lpfc_mbuf_free(phba, prsp->virt, prsp->phys); 315 kfree(pbuflist); 316 317 els_iocb_free_prsp_exit: 318 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys); 319 kfree(prsp); 320 321 els_iocb_free_pcmb_exit: 322 kfree(pcmd); 323 lpfc_sli_release_iocbq(phba, elsiocb); 324 return NULL; 325 } 326 327 /** 328 * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport 329 * @vport: pointer to a host virtual N_Port data structure. 330 * 331 * This routine issues a fabric registration login for a @vport. An 332 * active ndlp node with Fabric_DID must already exist for this @vport. 333 * The routine invokes two mailbox commands to carry out fabric registration 334 * login through the HBA firmware: the first mailbox command requests the 335 * HBA to perform link configuration for the @vport; and the second mailbox 336 * command requests the HBA to perform the actual fabric registration login 337 * with the @vport. 338 * 339 * Return code 340 * 0 - successfully issued fabric registration login for @vport 341 * -ENXIO -- failed to issue fabric registration login for @vport 342 **/ 343 int 344 lpfc_issue_fabric_reglogin(struct lpfc_vport *vport) 345 { 346 struct lpfc_hba *phba = vport->phba; 347 LPFC_MBOXQ_t *mbox; 348 struct lpfc_nodelist *ndlp; 349 struct serv_parm *sp; 350 int rc; 351 int err = 0; 352 353 sp = &phba->fc_fabparam; 354 ndlp = lpfc_findnode_did(vport, Fabric_DID); 355 if (!ndlp) { 356 err = 1; 357 goto fail; 358 } 359 360 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 361 if (!mbox) { 362 err = 2; 363 goto fail; 364 } 365 366 vport->port_state = LPFC_FABRIC_CFG_LINK; 367 lpfc_config_link(phba, mbox); 368 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 369 mbox->vport = vport; 370 371 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 372 if (rc == MBX_NOT_FINISHED) { 373 err = 3; 374 goto fail_free_mbox; 375 } 376 377 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 378 if (!mbox) { 379 err = 4; 380 goto fail; 381 } 382 rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, 383 ndlp->nlp_rpi); 384 if (rc) { 385 err = 5; 386 goto fail_free_mbox; 387 } 388 389 mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login; 390 mbox->vport = vport; 391 /* increment the reference count on ndlp to hold reference 392 * for the callback routine. 
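 * The reference is released in the fail_issue_reg_login path below
 * if the mailbox cannot be issued.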
393 */ 394 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 395 if (!mbox->ctx_ndlp) { 396 err = 6; 397 goto fail_free_mbox; 398 } 399 400 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 401 if (rc == MBX_NOT_FINISHED) { 402 err = 7; 403 goto fail_issue_reg_login; 404 } 405 406 return 0; 407 408 fail_issue_reg_login: 409 /* decrement the reference count on ndlp just incremented 410 * for the failed mbox command. 411 */ 412 lpfc_nlp_put(ndlp); 413 fail_free_mbox: 414 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 415 fail: 416 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 417 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 418 "0249 Cannot issue Register Fabric login: Err %d\n", 419 err); 420 return -ENXIO; 421 } 422 423 /** 424 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login 425 * @vport: pointer to a host virtual N_Port data structure. 426 * 427 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for 428 * the @vport. This mailbox command is necessary for SLI4 port only. 429 * 430 * Return code 431 * 0 - successfully issued REG_VFI for @vport 432 * A failure code otherwise. 433 **/ 434 int 435 lpfc_issue_reg_vfi(struct lpfc_vport *vport) 436 { 437 struct lpfc_hba *phba = vport->phba; 438 LPFC_MBOXQ_t *mboxq = NULL; 439 struct lpfc_nodelist *ndlp; 440 struct lpfc_dmabuf *dmabuf = NULL; 441 int rc = 0; 442 443 /* move forward in case of SLI4 FC port loopback test and pt2pt mode */ 444 if ((phba->sli_rev == LPFC_SLI_REV4) && 445 !(phba->link_flag & LS_LOOPBACK_MODE) && 446 !(vport->fc_flag & FC_PT2PT)) { 447 ndlp = lpfc_findnode_did(vport, Fabric_DID); 448 if (!ndlp) { 449 rc = -ENODEV; 450 goto fail; 451 } 452 } 453 454 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 455 if (!mboxq) { 456 rc = -ENOMEM; 457 goto fail; 458 } 459 460 /* Supply CSP's only if we are fabric connect or pt-to-pt connect */ 461 if ((vport->fc_flag & FC_FABRIC) || (vport->fc_flag & FC_PT2PT)) { 462 rc = lpfc_mbox_rsrc_prep(phba, mboxq); 463 if (rc) { 464 rc = -ENOMEM; 465 goto fail_mbox; 466 } 467 dmabuf = mboxq->ctx_buf; 468 memcpy(dmabuf->virt, &phba->fc_fabparam, 469 sizeof(struct serv_parm)); 470 } 471 472 vport->port_state = LPFC_FABRIC_CFG_LINK; 473 if (dmabuf) { 474 lpfc_reg_vfi(mboxq, vport, dmabuf->phys); 475 /* lpfc_reg_vfi memsets the mailbox. Restore the ctx_buf. */ 476 mboxq->ctx_buf = dmabuf; 477 } else { 478 lpfc_reg_vfi(mboxq, vport, 0); 479 } 480 481 mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi; 482 mboxq->vport = vport; 483 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 484 if (rc == MBX_NOT_FINISHED) { 485 rc = -ENXIO; 486 goto fail_mbox; 487 } 488 return 0; 489 490 fail_mbox: 491 lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED); 492 fail: 493 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 494 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 495 "0289 Issue Register VFI failed: Err %d\n", rc); 496 return rc; 497 } 498 499 /** 500 * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login 501 * @vport: pointer to a host virtual N_Port data structure. 502 * 503 * This routine issues a UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for 504 * the @vport. This mailbox command is necessary for SLI4 port only. 505 * 506 * Return code 507 * 0 - successfully issued REG_VFI for @vport 508 * A failure code otherwise. 
**/ 510 int 511 lpfc_issue_unreg_vfi(struct lpfc_vport *vport) 512 { 513 struct lpfc_hba *phba = vport->phba; 514 struct Scsi_Host *shost; 515 LPFC_MBOXQ_t *mboxq; 516 int rc; 517 518 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 519 if (!mboxq) { 520 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 521 "2556 UNREG_VFI mbox allocation failed " 522 "HBA state x%x\n", phba->pport->port_state); 523 return -ENOMEM; 524 } 525 526 lpfc_unreg_vfi(mboxq, vport); 527 mboxq->vport = vport; 528 mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl; 529 530 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 531 if (rc == MBX_NOT_FINISHED) { 532 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 533 "2557 UNREG_VFI issue mbox failed rc x%x " 534 "HBA state x%x\n", 535 rc, phba->pport->port_state); 536 mempool_free(mboxq, phba->mbox_mem_pool); 537 return -EIO; 538 } 539 540 shost = lpfc_shost_from_vport(vport); 541 spin_lock_irq(shost->host_lock); 542 vport->fc_flag &= ~FC_VFI_REGISTERED; 543 spin_unlock_irq(shost->host_lock); 544 return 0; 545 } 546 547 /** 548 * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean. 549 * @vport: pointer to a host virtual N_Port data structure. 550 * @sp: pointer to service parameter data structure. 551 * 552 * This routine is called from FLOGI/FDISC completion handler functions. 553 * lpfc_check_clean_addr_bit returns 1 when the FCID, Fabric portname, or Fabric 554 * nodename has changed in the completion service parameters, else it returns 555 * 0. This function also sets a flag in the vport data structure to delay 556 * N_Port discovery after the FLOGI/FDISC completion if the Clean Address bit 557 * in the FLOGI/FDISC response is cleared and the FCID, Fabric portname, or Fabric 558 * nodename has changed in the completion service parameters. 559 * 560 * Return code 561 * 0 - FCID, Fabric Nodename, and Fabric portname are unchanged. 562 * 1 - FCID, Fabric Nodename, or Fabric portname has changed. 563 * 564 **/ 565 static uint8_t 566 lpfc_check_clean_addr_bit(struct lpfc_vport *vport, 567 struct serv_parm *sp) 568 { 569 struct lpfc_hba *phba = vport->phba; 570 uint8_t fabric_param_changed = 0; 571 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 572 573 if ((vport->fc_prevDID != vport->fc_myDID) || 574 memcmp(&vport->fabric_portname, &sp->portName, 575 sizeof(struct lpfc_name)) || 576 memcmp(&vport->fabric_nodename, &sp->nodeName, 577 sizeof(struct lpfc_name)) || 578 (vport->vport_flag & FAWWPN_PARAM_CHG)) { 579 fabric_param_changed = 1; 580 vport->vport_flag &= ~FAWWPN_PARAM_CHG; 581 } 582 /* 583 * Word 1 Bit 31 in common service parameter is overloaded. 584 * Word 1 Bit 31 in FLOGI request is multiple NPort request 585 * Word 1 Bit 31 in FLOGI response is clean address bit 586 * 587 * If fabric parameter is changed and clean address bit is 588 * cleared, delay nport discovery if 589 * - vport->fc_prevDID != 0 (not initial discovery) OR 590 * - lpfc_delay_discovery module parameter is set. 591 */ 592 if (fabric_param_changed && !sp->cmn.clean_address_bit && 593 (vport->fc_prevDID || phba->cfg_delay_discovery)) { 594 spin_lock_irq(shost->host_lock); 595 vport->fc_flag |= FC_DISC_DELAYED; 596 spin_unlock_irq(shost->host_lock); 597 } 598 599 return fabric_param_changed; 600 } 601 602 603 /** 604 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port 605 * @vport: pointer to a host virtual N_Port data structure. 606 * @ndlp: pointer to a node-list data structure. 607 * @sp: pointer to service parameter data structure.
608 * @ulp_word4: command response value 609 * 610 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback 611 * function to handle the completion of a Fabric Login (FLOGI) into a fabric 612 * port in a fabric topology. It properly sets up the parameters to the @ndlp 613 * from the IOCB response. It also check the newly assigned N_Port ID to the 614 * @vport against the previously assigned N_Port ID. If it is different from 615 * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine 616 * is invoked on all the remaining nodes with the @vport to unregister the 617 * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin() 618 * is invoked to register login to the fabric. 619 * 620 * Return code 621 * 0 - Success (currently, always return 0) 622 **/ 623 static int 624 lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 625 struct serv_parm *sp, uint32_t ulp_word4) 626 { 627 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 628 struct lpfc_hba *phba = vport->phba; 629 struct lpfc_nodelist *np; 630 struct lpfc_nodelist *next_np; 631 uint8_t fabric_param_changed; 632 633 spin_lock_irq(shost->host_lock); 634 vport->fc_flag |= FC_FABRIC; 635 spin_unlock_irq(shost->host_lock); 636 637 phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov); 638 if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */ 639 phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000; 640 641 phba->fc_edtovResol = sp->cmn.edtovResolution; 642 phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000; 643 644 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 645 spin_lock_irq(shost->host_lock); 646 vport->fc_flag |= FC_PUBLIC_LOOP; 647 spin_unlock_irq(shost->host_lock); 648 } 649 650 vport->fc_myDID = ulp_word4 & Mask_DID; 651 memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name)); 652 memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name)); 653 ndlp->nlp_class_sup = 0; 654 if (sp->cls1.classValid) 655 ndlp->nlp_class_sup |= FC_COS_CLASS1; 656 if (sp->cls2.classValid) 657 ndlp->nlp_class_sup |= FC_COS_CLASS2; 658 if (sp->cls3.classValid) 659 ndlp->nlp_class_sup |= FC_COS_CLASS3; 660 if (sp->cls4.classValid) 661 ndlp->nlp_class_sup |= FC_COS_CLASS4; 662 ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | 663 sp->cmn.bbRcvSizeLsb; 664 665 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp); 666 if (fabric_param_changed) { 667 /* Reset FDMI attribute masks based on config parameter */ 668 if (phba->cfg_enable_SmartSAN || 669 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) { 670 /* Setup appropriate attribute masks */ 671 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR; 672 if (phba->cfg_enable_SmartSAN) 673 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR; 674 else 675 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; 676 } else { 677 vport->fdmi_hba_mask = 0; 678 vport->fdmi_port_mask = 0; 679 } 680 681 } 682 memcpy(&vport->fabric_portname, &sp->portName, 683 sizeof(struct lpfc_name)); 684 memcpy(&vport->fabric_nodename, &sp->nodeName, 685 sizeof(struct lpfc_name)); 686 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); 687 688 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { 689 if (sp->cmn.response_multiple_NPort) { 690 lpfc_printf_vlog(vport, KERN_WARNING, 691 LOG_ELS | LOG_VPORT, 692 "1816 FLOGI NPIV supported, " 693 "response data 0x%x\n", 694 sp->cmn.response_multiple_NPort); 695 spin_lock_irq(&phba->hbalock); 696 phba->link_flag |= LS_NPIV_FAB_SUPPORTED; 697 spin_unlock_irq(&phba->hbalock); 698 
} else { 699 /* Because we asked f/w for NPIV it still expects us 700 to call reg_vnpid at least for the physical host */ 701 lpfc_printf_vlog(vport, KERN_WARNING, 702 LOG_ELS | LOG_VPORT, 703 "1817 Fabric does not support NPIV " 704 "- configuring single port mode.\n"); 705 spin_lock_irq(&phba->hbalock); 706 phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED; 707 spin_unlock_irq(&phba->hbalock); 708 } 709 } 710 711 /* 712 * For FC we need to do some special processing because of the SLI 713 * Port's default settings of the Common Service Parameters. 714 */ 715 if ((phba->sli_rev == LPFC_SLI_REV4) && 716 (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)) { 717 /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */ 718 if (fabric_param_changed) 719 lpfc_unregister_fcf_prep(phba); 720 721 /* This should just update the VFI CSPs*/ 722 if (vport->fc_flag & FC_VFI_REGISTERED) 723 lpfc_issue_reg_vfi(vport); 724 } 725 726 if (fabric_param_changed && 727 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 728 729 /* If our NportID changed, we need to ensure all 730 * remaining NPORTs get unreg_login'ed. 731 */ 732 list_for_each_entry_safe(np, next_np, 733 &vport->fc_nodes, nlp_listp) { 734 if ((np->nlp_state != NLP_STE_NPR_NODE) || 735 !(np->nlp_flag & NLP_NPR_ADISC)) 736 continue; 737 spin_lock_irq(&np->lock); 738 np->nlp_flag &= ~NLP_NPR_ADISC; 739 spin_unlock_irq(&np->lock); 740 lpfc_unreg_rpi(vport, np); 741 } 742 lpfc_cleanup_pending_mbox(vport); 743 744 if (phba->sli_rev == LPFC_SLI_REV4) { 745 lpfc_sli4_unreg_all_rpis(vport); 746 lpfc_mbx_unreg_vpi(vport); 747 spin_lock_irq(shost->host_lock); 748 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 749 spin_unlock_irq(shost->host_lock); 750 } 751 752 /* 753 * For SLI3 and SLI4, the VPI needs to be reregistered in 754 * response to this fabric parameter change event. 755 */ 756 spin_lock_irq(shost->host_lock); 757 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 758 spin_unlock_irq(shost->host_lock); 759 } else if ((phba->sli_rev == LPFC_SLI_REV4) && 760 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 761 /* 762 * Driver needs to re-reg VPI in order for f/w 763 * to update the MAC address. 764 */ 765 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 766 lpfc_register_new_vport(phba, vport, ndlp); 767 return 0; 768 } 769 770 if (phba->sli_rev < LPFC_SLI_REV4) { 771 lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE); 772 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED && 773 vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) 774 lpfc_register_new_vport(phba, vport, ndlp); 775 else 776 lpfc_issue_fabric_reglogin(vport); 777 } else { 778 ndlp->nlp_type |= NLP_FABRIC; 779 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 780 if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) && 781 (vport->vpi_state & LPFC_VPI_REGISTERED)) { 782 lpfc_start_fdiscs(phba); 783 lpfc_do_scr_ns_plogi(phba, vport); 784 } else if (vport->fc_flag & FC_VFI_REGISTERED) 785 lpfc_issue_init_vpi(vport); 786 else { 787 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 788 "3135 Need register VFI: (x%x/%x)\n", 789 vport->fc_prevDID, vport->fc_myDID); 790 lpfc_issue_reg_vfi(vport); 791 } 792 } 793 return 0; 794 } 795 796 /** 797 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port 798 * @vport: pointer to a host virtual N_Port data structure. 799 * @ndlp: pointer to a node-list data structure. 800 * @sp: pointer to service parameter data structure. 
801 * 802 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback 803 * function to handle the completion of a Fabric Login (FLOGI) into an N_Port 804 * in a point-to-point topology. First, the @vport's N_Port Name is compared 805 * with the received N_Port Name: if the @vport's N_Port Name is greater than 806 * the received N_Port Name lexicographically, this node shall assign local 807 * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and 808 * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise, 809 * this node shall just wait for the remote node to issue PLOGI and assign 810 * N_Port IDs. 811 * 812 * Return code 813 * 0 - Success 814 * -ENXIO - Fail 815 **/ 816 static int 817 lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 818 struct serv_parm *sp) 819 { 820 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 821 struct lpfc_hba *phba = vport->phba; 822 LPFC_MBOXQ_t *mbox; 823 int rc; 824 825 spin_lock_irq(shost->host_lock); 826 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 827 vport->fc_flag |= FC_PT2PT; 828 spin_unlock_irq(shost->host_lock); 829 830 /* If we are pt2pt with another NPort, force NPIV off! */ 831 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED; 832 833 /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */ 834 if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) { 835 lpfc_unregister_fcf_prep(phba); 836 837 spin_lock_irq(shost->host_lock); 838 vport->fc_flag &= ~FC_VFI_REGISTERED; 839 spin_unlock_irq(shost->host_lock); 840 phba->fc_topology_changed = 0; 841 } 842 843 rc = memcmp(&vport->fc_portname, &sp->portName, 844 sizeof(vport->fc_portname)); 845 846 if (rc >= 0) { 847 /* This side will initiate the PLOGI */ 848 spin_lock_irq(shost->host_lock); 849 vport->fc_flag |= FC_PT2PT_PLOGI; 850 spin_unlock_irq(shost->host_lock); 851 852 /* 853 * N_Port ID cannot be 0, set our Id to LocalID 854 * the other side will be RemoteID. 855 */ 856 857 /* not equal */ 858 if (rc) 859 vport->fc_myDID = PT2PT_LocalID; 860 861 /* If not registered with a transport, decrement ndlp reference 862 * count indicating that ndlp can be safely released when other 863 * references are removed. 864 */ 865 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) 866 lpfc_nlp_put(ndlp); 867 868 ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID); 869 if (!ndlp) { 870 /* 871 * Cannot find existing Fabric ndlp, so allocate a 872 * new one 873 */ 874 ndlp = lpfc_nlp_init(vport, PT2PT_RemoteID); 875 if (!ndlp) 876 goto fail; 877 } 878 879 memcpy(&ndlp->nlp_portname, &sp->portName, 880 sizeof(struct lpfc_name)); 881 memcpy(&ndlp->nlp_nodename, &sp->nodeName, 882 sizeof(struct lpfc_name)); 883 /* Set state will put ndlp onto node list if not already done */ 884 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 885 spin_lock_irq(&ndlp->lock); 886 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 887 spin_unlock_irq(&ndlp->lock); 888 889 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 890 if (!mbox) 891 goto fail; 892 893 lpfc_config_link(phba, mbox); 894 895 mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link; 896 mbox->vport = vport; 897 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 898 if (rc == MBX_NOT_FINISHED) { 899 mempool_free(mbox, phba->mbox_mem_pool); 900 goto fail; 901 } 902 } else { 903 /* This side will wait for the PLOGI. If not registered with 904 * a transport, decrement node reference count indicating that 905 * ndlp can be released when other references are removed. 
906 */ 907 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) 908 lpfc_nlp_put(ndlp); 909 910 /* Start discovery - this should just do CLEAR_LA */ 911 lpfc_disc_start(vport); 912 } 913 914 return 0; 915 fail: 916 return -ENXIO; 917 } 918 919 /** 920 * lpfc_cmpl_els_flogi - Completion callback function for flogi 921 * @phba: pointer to lpfc hba data structure. 922 * @cmdiocb: pointer to lpfc command iocb data structure. 923 * @rspiocb: pointer to lpfc response iocb data structure. 924 * 925 * This routine is the top-level completion callback function for issuing 926 * a Fabric Login (FLOGI) command. If the response IOCB reported error, 927 * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If 928 * retry has been made (either immediately or delayed with lpfc_els_retry() 929 * returning 1), the command IOCB will be released and function returned. 930 * If the retry attempt has been given up (possibly reach the maximum 931 * number of retries), one additional decrement of ndlp reference shall be 932 * invoked before going out after releasing the command IOCB. This will 933 * actually release the remote node (Note, lpfc_els_free_iocb() will also 934 * invoke one decrement of ndlp reference count). If no error reported in 935 * the IOCB status, the command Port ID field is used to determine whether 936 * this is a point-to-point topology or a fabric topology: if the Port ID 937 * field is assigned, it is a fabric topology; otherwise, it is a 938 * point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or 939 * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the 940 * specific topology completion conditions. 941 **/ 942 static void 943 lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 944 struct lpfc_iocbq *rspiocb) 945 { 946 struct lpfc_vport *vport = cmdiocb->vport; 947 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 948 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 949 IOCB_t *irsp; 950 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf, *prsp; 951 struct serv_parm *sp; 952 uint16_t fcf_index; 953 int rc; 954 u32 ulp_status, ulp_word4, tmo; 955 bool flogi_in_retry = false; 956 957 /* Check to see if link went down during discovery */ 958 if (lpfc_els_chk_latt(vport)) { 959 /* One additional decrement on node reference count to 960 * trigger the release of the node 961 */ 962 if (!(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) 963 lpfc_nlp_put(ndlp); 964 goto out; 965 } 966 967 ulp_status = get_job_ulpstatus(phba, rspiocb); 968 ulp_word4 = get_job_word4(phba, rspiocb); 969 970 if (phba->sli_rev == LPFC_SLI_REV4) { 971 tmo = get_wqe_tmo(cmdiocb); 972 } else { 973 irsp = &rspiocb->iocb; 974 tmo = irsp->ulpTimeout; 975 } 976 977 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 978 "FLOGI cmpl: status:x%x/x%x state:x%x", 979 ulp_status, ulp_word4, 980 vport->port_state); 981 982 if (ulp_status) { 983 /* 984 * In case of FIP mode, perform roundrobin FCF failover 985 * due to new FCF discovery 986 */ 987 if ((phba->hba_flag & HBA_FIP_SUPPORT) && 988 (phba->fcf.fcf_flag & FCF_DISCOVERY)) { 989 if (phba->link_state < LPFC_LINK_UP) 990 goto stop_rr_fcf_flogi; 991 if ((phba->fcoe_cvl_eventtag_attn == 992 phba->fcoe_cvl_eventtag) && 993 (ulp_status == IOSTAT_LOCAL_REJECT) && 994 ((ulp_word4 & IOERR_PARAM_MASK) == 995 IOERR_SLI_ABORTED)) 996 goto stop_rr_fcf_flogi; 997 else 998 phba->fcoe_cvl_eventtag_attn = 999 phba->fcoe_cvl_eventtag; 1000 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, 1001 "2611 FLOGI failed on FCF 
(x%x), " 1002 "status:x%x/x%x, tmo:x%x, perform " 1003 "roundrobin FCF failover\n", 1004 phba->fcf.current_rec.fcf_indx, 1005 ulp_status, ulp_word4, tmo); 1006 lpfc_sli4_set_fcf_flogi_fail(phba, 1007 phba->fcf.current_rec.fcf_indx); 1008 fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba); 1009 rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index); 1010 if (rc) 1011 goto out; 1012 } 1013 1014 stop_rr_fcf_flogi: 1015 /* FLOGI failure */ 1016 if (!(ulp_status == IOSTAT_LOCAL_REJECT && 1017 ((ulp_word4 & IOERR_PARAM_MASK) == 1018 IOERR_LOOP_OPEN_FAILURE))) 1019 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1020 "2858 FLOGI failure Status:x%x/x%x TMO" 1021 ":x%x Data x%x x%x\n", 1022 ulp_status, ulp_word4, tmo, 1023 phba->hba_flag, phba->fcf.fcf_flag); 1024 1025 /* Check for retry */ 1026 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 1027 /* Address a timing race with dev_loss. If dev_loss 1028 * is active on this FPort node, put the initial ref 1029 * count back to stop premature node release actions. 1030 */ 1031 lpfc_check_nlp_post_devloss(vport, ndlp); 1032 flogi_in_retry = true; 1033 goto out; 1034 } 1035 1036 /* The FLOGI will not be retried. If the FPort node is not 1037 * registered with the SCSI transport, remove the initial 1038 * reference to trigger node release. 1039 */ 1040 if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS) && 1041 !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) 1042 lpfc_nlp_put(ndlp); 1043 1044 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 1045 "0150 FLOGI failure Status:x%x/x%x " 1046 "xri x%x TMO:x%x refcnt %d\n", 1047 ulp_status, ulp_word4, cmdiocb->sli4_xritag, 1048 tmo, kref_read(&ndlp->kref)); 1049 1050 /* If this is not a loop open failure, bail out */ 1051 if (!(ulp_status == IOSTAT_LOCAL_REJECT && 1052 ((ulp_word4 & IOERR_PARAM_MASK) == 1053 IOERR_LOOP_OPEN_FAILURE))) { 1054 /* FLOGI failure */ 1055 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1056 "0100 FLOGI failure Status:x%x/x%x " 1057 "TMO:x%x\n", 1058 ulp_status, ulp_word4, tmo); 1059 goto flogifail; 1060 } 1061 1062 /* FLOGI failed, so there is no fabric */ 1063 spin_lock_irq(shost->host_lock); 1064 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP | 1065 FC_PT2PT_NO_NVME); 1066 spin_unlock_irq(shost->host_lock); 1067 1068 /* If private loop, then allow max outstanding els to be 1069 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no 1070 * alpa map would take too long otherwise. 1071 */ 1072 if (phba->alpa_map[0] == 0) 1073 vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS; 1074 if ((phba->sli_rev == LPFC_SLI_REV4) && 1075 (!(vport->fc_flag & FC_VFI_REGISTERED) || 1076 (vport->fc_prevDID != vport->fc_myDID) || 1077 phba->fc_topology_changed)) { 1078 if (vport->fc_flag & FC_VFI_REGISTERED) { 1079 if (phba->fc_topology_changed) { 1080 lpfc_unregister_fcf_prep(phba); 1081 spin_lock_irq(shost->host_lock); 1082 vport->fc_flag &= ~FC_VFI_REGISTERED; 1083 spin_unlock_irq(shost->host_lock); 1084 phba->fc_topology_changed = 0; 1085 } else { 1086 lpfc_sli4_unreg_all_rpis(vport); 1087 } 1088 } 1089 1090 /* Do not register VFI if the driver aborted FLOGI */ 1091 if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4)) 1092 lpfc_issue_reg_vfi(vport); 1093 1094 goto out; 1095 } 1096 goto flogifail; 1097 } 1098 spin_lock_irq(shost->host_lock); 1099 vport->fc_flag &= ~FC_VPORT_CVL_RCVD; 1100 vport->fc_flag &= ~FC_VPORT_LOGO_RCVD; 1101 spin_unlock_irq(shost->host_lock); 1102 1103 /* 1104 * The FLOGI succeeded. Sync the data for the CPU before 1105 * accessing it. 
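 * The response buffer (prsp) was chained to the command buffer's list
 * by lpfc_prep_els_iocb(), so it is the first entry on pcmd->list.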
1106 */ 1107 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 1108 if (!prsp) 1109 goto out; 1110 sp = prsp->virt + sizeof(uint32_t); 1111 1112 /* FLOGI completes successfully */ 1113 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1114 "0101 FLOGI completes successfully, I/O tag:x%x " 1115 "xri x%x Data: x%x x%x x%x x%x x%x x%x x%x %d\n", 1116 cmdiocb->iotag, cmdiocb->sli4_xritag, 1117 ulp_word4, sp->cmn.e_d_tov, 1118 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution, 1119 vport->port_state, vport->fc_flag, 1120 sp->cmn.priority_tagging, kref_read(&ndlp->kref)); 1121 1122 /* reinitialize the VMID datastructure before returning */ 1123 if (lpfc_is_vmid_enabled(phba)) 1124 lpfc_reinit_vmid(vport); 1125 if (sp->cmn.priority_tagging) 1126 vport->phba->pport->vmid_flag |= (LPFC_VMID_ISSUE_QFPA | 1127 LPFC_VMID_TYPE_PRIO); 1128 1129 /* 1130 * Address a timing race with dev_loss. If dev_loss is active on 1131 * this FPort node, put the initial ref count back to stop premature 1132 * node release actions. 1133 */ 1134 lpfc_check_nlp_post_devloss(vport, ndlp); 1135 if (vport->port_state == LPFC_FLOGI) { 1136 /* 1137 * If Common Service Parameters indicate Nport 1138 * we are point to point, if Fport we are Fabric. 1139 */ 1140 if (sp->cmn.fPort) 1141 rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, 1142 ulp_word4); 1143 else if (!(phba->hba_flag & HBA_FCOE_MODE)) 1144 rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp); 1145 else { 1146 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1147 "2831 FLOGI response with cleared Fabric " 1148 "bit fcf_index 0x%x " 1149 "Switch Name %02x%02x%02x%02x%02x%02x%02x%02x " 1150 "Fabric Name " 1151 "%02x%02x%02x%02x%02x%02x%02x%02x\n", 1152 phba->fcf.current_rec.fcf_indx, 1153 phba->fcf.current_rec.switch_name[0], 1154 phba->fcf.current_rec.switch_name[1], 1155 phba->fcf.current_rec.switch_name[2], 1156 phba->fcf.current_rec.switch_name[3], 1157 phba->fcf.current_rec.switch_name[4], 1158 phba->fcf.current_rec.switch_name[5], 1159 phba->fcf.current_rec.switch_name[6], 1160 phba->fcf.current_rec.switch_name[7], 1161 phba->fcf.current_rec.fabric_name[0], 1162 phba->fcf.current_rec.fabric_name[1], 1163 phba->fcf.current_rec.fabric_name[2], 1164 phba->fcf.current_rec.fabric_name[3], 1165 phba->fcf.current_rec.fabric_name[4], 1166 phba->fcf.current_rec.fabric_name[5], 1167 phba->fcf.current_rec.fabric_name[6], 1168 phba->fcf.current_rec.fabric_name[7]); 1169 1170 lpfc_nlp_put(ndlp); 1171 spin_lock_irq(&phba->hbalock); 1172 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 1173 phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO); 1174 spin_unlock_irq(&phba->hbalock); 1175 phba->fcf.fcf_redisc_attempted = 0; /* reset */ 1176 goto out; 1177 } 1178 if (!rc) { 1179 /* Mark the FCF discovery process done */ 1180 if (phba->hba_flag & HBA_FIP_SUPPORT) 1181 lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP | 1182 LOG_ELS, 1183 "2769 FLOGI to FCF (x%x) " 1184 "completed successfully\n", 1185 phba->fcf.current_rec.fcf_indx); 1186 spin_lock_irq(&phba->hbalock); 1187 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 1188 phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO); 1189 spin_unlock_irq(&phba->hbalock); 1190 phba->fcf.fcf_redisc_attempted = 0; /* reset */ 1191 goto out; 1192 } 1193 } else if (vport->port_state > LPFC_FLOGI && 1194 vport->fc_flag & FC_PT2PT) { 1195 /* 1196 * In a p2p topology, it is possible that discovery has 1197 * already progressed, and this completion can be ignored. 1198 * Recheck the indicated topology. 
1199 */ 1200 if (!sp->cmn.fPort) 1201 goto out; 1202 } 1203 1204 flogifail: 1205 spin_lock_irq(&phba->hbalock); 1206 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 1207 spin_unlock_irq(&phba->hbalock); 1208 1209 if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4)) { 1210 /* FLOGI failed, so just use loop map to make discovery list */ 1211 lpfc_disc_list_loopmap(vport); 1212 1213 /* Start discovery */ 1214 lpfc_disc_start(vport); 1215 } else if (((ulp_status != IOSTAT_LOCAL_REJECT) || 1216 (((ulp_word4 & IOERR_PARAM_MASK) != 1217 IOERR_SLI_ABORTED) && 1218 ((ulp_word4 & IOERR_PARAM_MASK) != 1219 IOERR_SLI_DOWN))) && 1220 (phba->link_state != LPFC_CLEAR_LA)) { 1221 /* If FLOGI failed enable link interrupt. */ 1222 lpfc_issue_clear_la(phba, vport); 1223 } 1224 out: 1225 if (!flogi_in_retry) 1226 phba->hba_flag &= ~HBA_FLOGI_OUTSTANDING; 1227 1228 lpfc_els_free_iocb(phba, cmdiocb); 1229 lpfc_nlp_put(ndlp); 1230 } 1231 1232 /** 1233 * lpfc_cmpl_els_link_down - Completion callback function for ELS command 1234 * aborted during a link down 1235 * @phba: pointer to lpfc hba data structure. 1236 * @cmdiocb: pointer to lpfc command iocb data structure. 1237 * @rspiocb: pointer to lpfc response iocb data structure. 1238 * 1239 */ 1240 static void 1241 lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1242 struct lpfc_iocbq *rspiocb) 1243 { 1244 uint32_t *pcmd; 1245 uint32_t cmd; 1246 u32 ulp_status, ulp_word4; 1247 1248 pcmd = (uint32_t *)cmdiocb->cmd_dmabuf->virt; 1249 cmd = *pcmd; 1250 1251 ulp_status = get_job_ulpstatus(phba, rspiocb); 1252 ulp_word4 = get_job_word4(phba, rspiocb); 1253 1254 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 1255 "6445 ELS completes after LINK_DOWN: " 1256 " Status %x/%x cmd x%x flg x%x\n", 1257 ulp_status, ulp_word4, cmd, 1258 cmdiocb->cmd_flag); 1259 1260 if (cmdiocb->cmd_flag & LPFC_IO_FABRIC) { 1261 cmdiocb->cmd_flag &= ~LPFC_IO_FABRIC; 1262 atomic_dec(&phba->fabric_iocb_count); 1263 } 1264 lpfc_els_free_iocb(phba, cmdiocb); 1265 } 1266 1267 /** 1268 * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport 1269 * @vport: pointer to a host virtual N_Port data structure. 1270 * @ndlp: pointer to a node-list data structure. 1271 * @retry: number of retries to the command IOCB. 1272 * 1273 * This routine issues a Fabric Login (FLOGI) Request ELS command 1274 * for a @vport. The initiator service parameters are put into the payload 1275 * of the FLOGI Request IOCB and the top-level callback function pointer 1276 * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback 1277 * function field. The lpfc_issue_fabric_iocb routine is invoked to send 1278 * out FLOGI ELS command with one outstanding fabric IOCB at a time. 1279 * 1280 * Note that the ndlp reference count will be incremented by 1 for holding the 1281 * ndlp and the reference to ndlp will be stored into the ndlp field of 1282 * the IOCB for the completion callback function to the FLOGI ELS command. 
1283 * 1284 * Return code 1285 * 0 - successfully issued flogi iocb for @vport 1286 * 1 - failed to issue flogi iocb for @vport 1287 **/ 1288 static int 1289 lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1290 uint8_t retry) 1291 { 1292 struct lpfc_hba *phba = vport->phba; 1293 struct serv_parm *sp; 1294 union lpfc_wqe128 *wqe = NULL; 1295 IOCB_t *icmd = NULL; 1296 struct lpfc_iocbq *elsiocb; 1297 struct lpfc_iocbq defer_flogi_acc; 1298 u8 *pcmd, ct; 1299 uint16_t cmdsize; 1300 uint32_t tmo, did; 1301 int rc; 1302 1303 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 1304 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 1305 ndlp->nlp_DID, ELS_CMD_FLOGI); 1306 1307 if (!elsiocb) 1308 return 1; 1309 1310 wqe = &elsiocb->wqe; 1311 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 1312 icmd = &elsiocb->iocb; 1313 1314 /* For FLOGI request, remainder of payload is service parameters */ 1315 *((uint32_t *) (pcmd)) = ELS_CMD_FLOGI; 1316 pcmd += sizeof(uint32_t); 1317 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); 1318 sp = (struct serv_parm *) pcmd; 1319 1320 /* Setup CSPs accordingly for Fabric */ 1321 sp->cmn.e_d_tov = 0; 1322 sp->cmn.w2.r_a_tov = 0; 1323 sp->cmn.virtual_fabric_support = 0; 1324 sp->cls1.classValid = 0; 1325 if (sp->cmn.fcphLow < FC_PH3) 1326 sp->cmn.fcphLow = FC_PH3; 1327 if (sp->cmn.fcphHigh < FC_PH3) 1328 sp->cmn.fcphHigh = FC_PH3; 1329 1330 /* Determine if switch supports priority tagging */ 1331 if (phba->cfg_vmid_priority_tagging) { 1332 sp->cmn.priority_tagging = 1; 1333 /* lpfc_vmid_host_uuid is combination of wwpn and wwnn */ 1334 if (!memchr_inv(vport->lpfc_vmid_host_uuid, 0, 1335 sizeof(vport->lpfc_vmid_host_uuid))) { 1336 memcpy(vport->lpfc_vmid_host_uuid, phba->wwpn, 1337 sizeof(phba->wwpn)); 1338 memcpy(&vport->lpfc_vmid_host_uuid[8], phba->wwnn, 1339 sizeof(phba->wwnn)); 1340 } 1341 } 1342 1343 if (phba->sli_rev == LPFC_SLI_REV4) { 1344 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 1345 LPFC_SLI_INTF_IF_TYPE_0) { 1346 /* FLOGI needs to be 3 for WQE FCFI */ 1347 ct = SLI4_CT_FCFI; 1348 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct); 1349 1350 /* Set the fcfi to the fcfi we registered with */ 1351 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 1352 phba->fcf.fcfi); 1353 } 1354 1355 /* Can't do SLI4 class2 without support sequence coalescing */ 1356 sp->cls2.classValid = 0; 1357 sp->cls2.seqDelivery = 0; 1358 } else { 1359 /* Historical, setting sequential-delivery bit for SLI3 */ 1360 sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0; 1361 sp->cls3.seqDelivery = (sp->cls3.classValid) ? 
1 : 0; 1362 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { 1363 sp->cmn.request_multiple_Nport = 1; 1364 /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */ 1365 icmd->ulpCt_h = 1; 1366 icmd->ulpCt_l = 0; 1367 } else { 1368 sp->cmn.request_multiple_Nport = 0; 1369 } 1370 1371 if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) { 1372 icmd->un.elsreq64.myID = 0; 1373 icmd->un.elsreq64.fl = 1; 1374 } 1375 } 1376 1377 tmo = phba->fc_ratov; 1378 phba->fc_ratov = LPFC_DISC_FLOGI_TMO; 1379 lpfc_set_disctmo(vport); 1380 phba->fc_ratov = tmo; 1381 1382 phba->fc_stat.elsXmitFLOGI++; 1383 elsiocb->cmd_cmpl = lpfc_cmpl_els_flogi; 1384 1385 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1386 "Issue FLOGI: opt:x%x", 1387 phba->sli3_options, 0, 0); 1388 1389 elsiocb->ndlp = lpfc_nlp_get(ndlp); 1390 if (!elsiocb->ndlp) { 1391 lpfc_els_free_iocb(phba, elsiocb); 1392 return 1; 1393 } 1394 1395 /* Avoid race with FLOGI completion and hba_flags. */ 1396 phba->hba_flag |= (HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING); 1397 1398 rc = lpfc_issue_fabric_iocb(phba, elsiocb); 1399 if (rc == IOCB_ERROR) { 1400 phba->hba_flag &= ~(HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING); 1401 lpfc_els_free_iocb(phba, elsiocb); 1402 lpfc_nlp_put(ndlp); 1403 return 1; 1404 } 1405 1406 /* Clear external loopback plug detected flag */ 1407 phba->link_flag &= ~LS_EXTERNAL_LOOPBACK; 1408 1409 /* Check for a deferred FLOGI ACC condition */ 1410 if (phba->defer_flogi_acc_flag) { 1411 /* lookup ndlp for received FLOGI */ 1412 ndlp = lpfc_findnode_did(vport, 0); 1413 if (!ndlp) 1414 return 0; 1415 1416 did = vport->fc_myDID; 1417 vport->fc_myDID = Fabric_DID; 1418 1419 memset(&defer_flogi_acc, 0, sizeof(struct lpfc_iocbq)); 1420 1421 if (phba->sli_rev == LPFC_SLI_REV4) { 1422 bf_set(wqe_ctxt_tag, 1423 &defer_flogi_acc.wqe.xmit_els_rsp.wqe_com, 1424 phba->defer_flogi_acc_rx_id); 1425 bf_set(wqe_rcvoxid, 1426 &defer_flogi_acc.wqe.xmit_els_rsp.wqe_com, 1427 phba->defer_flogi_acc_ox_id); 1428 } else { 1429 icmd = &defer_flogi_acc.iocb; 1430 icmd->ulpContext = phba->defer_flogi_acc_rx_id; 1431 icmd->unsli3.rcvsli3.ox_id = 1432 phba->defer_flogi_acc_ox_id; 1433 } 1434 1435 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1436 "3354 Xmit deferred FLOGI ACC: rx_id: x%x," 1437 " ox_id: x%x, hba_flag x%x\n", 1438 phba->defer_flogi_acc_rx_id, 1439 phba->defer_flogi_acc_ox_id, phba->hba_flag); 1440 1441 /* Send deferred FLOGI ACC */ 1442 lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, &defer_flogi_acc, 1443 ndlp, NULL); 1444 1445 phba->defer_flogi_acc_flag = false; 1446 vport->fc_myDID = did; 1447 1448 /* Decrement ndlp reference count to indicate the node can be 1449 * released when other references are removed. 1450 */ 1451 lpfc_nlp_put(ndlp); 1452 } 1453 1454 return 0; 1455 } 1456 1457 /** 1458 * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs 1459 * @phba: pointer to lpfc hba data structure. 1460 * 1461 * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs 1462 * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq 1463 * list and issues an abort IOCB command on each outstanding IOCB that 1464 * contains an active Fabric_DID ndlp. Note that this function only issues 1465 * the abort IOCB command on the outstanding IOCBs; thus, when this 1466 * function returns, it does not guarantee all the IOCBs are actually aborted.
1467 * 1468 * Return code 1469 * 0 - Successfully issued abort iocb on all outstanding flogis (Always 0) 1470 **/ 1471 int 1472 lpfc_els_abort_flogi(struct lpfc_hba *phba) 1473 { 1474 struct lpfc_sli_ring *pring; 1475 struct lpfc_iocbq *iocb, *next_iocb; 1476 struct lpfc_nodelist *ndlp; 1477 u32 ulp_command; 1478 1479 /* Abort outstanding I/O on NPort <nlp_DID> */ 1480 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 1481 "0201 Abort outstanding I/O on NPort x%x\n", 1482 Fabric_DID); 1483 1484 pring = lpfc_phba_elsring(phba); 1485 if (unlikely(!pring)) 1486 return -EIO; 1487 1488 /* 1489 * Check the txcmplq for an iocb that matches the nport the driver is 1490 * searching for. 1491 */ 1492 spin_lock_irq(&phba->hbalock); 1493 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { 1494 ulp_command = get_job_cmnd(phba, iocb); 1495 if (ulp_command == CMD_ELS_REQUEST64_CR) { 1496 ndlp = iocb->ndlp; 1497 if (ndlp && ndlp->nlp_DID == Fabric_DID) { 1498 if ((phba->pport->fc_flag & FC_PT2PT) && 1499 !(phba->pport->fc_flag & FC_PT2PT_PLOGI)) 1500 iocb->fabric_cmd_cmpl = 1501 lpfc_ignore_els_cmpl; 1502 lpfc_sli_issue_abort_iotag(phba, pring, iocb, 1503 NULL); 1504 } 1505 } 1506 } 1507 /* Make sure HBA is alive */ 1508 lpfc_issue_hb_tmo(phba); 1509 1510 spin_unlock_irq(&phba->hbalock); 1511 1512 return 0; 1513 } 1514 1515 /** 1516 * lpfc_initial_flogi - Issue an initial fabric login for a vport 1517 * @vport: pointer to a host virtual N_Port data structure. 1518 * 1519 * This routine issues an initial Fabric Login (FLOGI) for the @vport 1520 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from 1521 * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and 1522 * put it into the @vport's ndlp list. If an inactive ndlp found on the list, 1523 * it will just be enabled and made active. The lpfc_issue_els_flogi() routine 1524 * is then invoked with the @vport and the ndlp to perform the FLOGI for the 1525 * @vport. 1526 * 1527 * Return code 1528 * 0 - failed to issue initial flogi for @vport 1529 * 1 - successfully issued initial flogi for @vport 1530 **/ 1531 int 1532 lpfc_initial_flogi(struct lpfc_vport *vport) 1533 { 1534 struct lpfc_nodelist *ndlp; 1535 1536 vport->port_state = LPFC_FLOGI; 1537 lpfc_set_disctmo(vport); 1538 1539 /* First look for the Fabric ndlp */ 1540 ndlp = lpfc_findnode_did(vport, Fabric_DID); 1541 if (!ndlp) { 1542 /* Cannot find existing Fabric ndlp, so allocate a new one */ 1543 ndlp = lpfc_nlp_init(vport, Fabric_DID); 1544 if (!ndlp) 1545 return 0; 1546 /* Set the node type */ 1547 ndlp->nlp_type |= NLP_FABRIC; 1548 1549 /* Put ndlp onto node list */ 1550 lpfc_enqueue_node(vport, ndlp); 1551 } 1552 1553 /* Reset the Fabric flag, topology change may have happened */ 1554 vport->fc_flag &= ~FC_FABRIC; 1555 if (lpfc_issue_els_flogi(vport, ndlp, 0)) { 1556 /* A node reference should be retained while registered with a 1557 * transport or dev-loss-evt work is pending. 1558 * Otherwise, decrement node reference to trigger release. 1559 */ 1560 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && 1561 !(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) 1562 lpfc_nlp_put(ndlp); 1563 return 0; 1564 } 1565 return 1; 1566 } 1567 1568 /** 1569 * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport 1570 * @vport: pointer to a host virtual N_Port data structure. 1571 * 1572 * This routine issues an initial Fabric Discover (FDISC) for the @vport 1573 * specified. 
It first searches the ndlp with the Fabric_DID (0xfffffe) from 1574 * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and 1575 * put it into the @vport's ndlp list. If an inactive ndlp found on the list, 1576 * it will just be enabled and made active. The lpfc_issue_els_fdisc() routine 1577 * is then invoked with the @vport and the ndlp to perform the FDISC for the 1578 * @vport. 1579 * 1580 * Return code 1581 * 0 - failed to issue initial fdisc for @vport 1582 * 1 - successfully issued initial fdisc for @vport 1583 **/ 1584 int 1585 lpfc_initial_fdisc(struct lpfc_vport *vport) 1586 { 1587 struct lpfc_nodelist *ndlp; 1588 1589 /* First look for the Fabric ndlp */ 1590 ndlp = lpfc_findnode_did(vport, Fabric_DID); 1591 if (!ndlp) { 1592 /* Cannot find existing Fabric ndlp, so allocate a new one */ 1593 ndlp = lpfc_nlp_init(vport, Fabric_DID); 1594 if (!ndlp) 1595 return 0; 1596 1597 /* NPIV is only supported in Fabrics. */ 1598 ndlp->nlp_type |= NLP_FABRIC; 1599 1600 /* Put ndlp onto node list */ 1601 lpfc_enqueue_node(vport, ndlp); 1602 } 1603 1604 if (lpfc_issue_els_fdisc(vport, ndlp, 0)) { 1605 /* A node reference should be retained while registered with a 1606 * transport or dev-loss-evt work is pending. 1607 * Otherwise, decrement node reference to trigger release. 1608 */ 1609 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && 1610 !(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) 1611 lpfc_nlp_put(ndlp); 1612 return 0; 1613 } 1614 return 1; 1615 } 1616 1617 /** 1618 * lpfc_more_plogi - Check and issue remaining plogis for a vport 1619 * @vport: pointer to a host virtual N_Port data structure. 1620 * 1621 * This routine checks whether there are more remaining Port Logins 1622 * (PLOGI) to be issued for the @vport. If so, it will invoke the routine 1623 * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes 1624 * to issue ELS PLOGIs up to the configured discovery threads for the 1625 * @vport (@vport->cfg_discovery_threads). The function also decrements 1626 * the @vport's num_disc_nodes by 1 if it is not already 0. 1627 **/ 1628 void 1629 lpfc_more_plogi(struct lpfc_vport *vport) 1630 { 1631 if (vport->num_disc_nodes) 1632 vport->num_disc_nodes--; 1633 1634 /* Continue discovery with <num_disc_nodes> PLOGIs to go */ 1635 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 1636 "0232 Continue discovery with %d PLOGIs to go " 1637 "Data: x%x x%x x%x\n", 1638 vport->num_disc_nodes, vport->fc_plogi_cnt, 1639 vport->fc_flag, vport->port_state); 1640 /* Check to see if there are more PLOGIs to be sent */ 1641 if (vport->fc_flag & FC_NLP_MORE) 1642 /* go thru NPR nodes and issue any remaining ELS PLOGIs */ 1643 lpfc_els_disc_plogi(vport); 1644 1645 return; 1646 } 1647 1648 /** 1649 * lpfc_plogi_confirm_nport - Confirm plogi wwpn matches stored ndlp 1650 * @phba: pointer to lpfc hba data structure. 1651 * @prsp: pointer to response IOCB payload. 1652 * @ndlp: pointer to a node-list data structure. 1653 * 1654 * This routine checks and indicates whether the WWPN of an N_Port, retrieved 1655 * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_Port. 1656 * The following cases are considered N_Port confirmed: 1657 * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on vport list and matches 1658 * the WWPN of the N_Port logged into; 3) The @ndlp is not on vport list but 1659 * it does not have WWPN assigned either. If the WWPN is confirmed, the 1660 * pointer to the @ndlp will be returned.
If the WWPN is not confirmed: 1661 * 1) if there is a node on vport list other than the @ndlp with the same 1662 * WWPN of the N_Port PLOGI logged into, the lpfc_unreg_rpi() will be invoked 1663 * on that node to release the RPI associated with the node; 2) if there is 1664 * no node found on vport list with the same WWPN of the N_Port PLOGI logged 1665 * into, a new node shall be allocated (or activated). In either case, the 1666 * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall 1667 * be released and the new_ndlp shall be put on to the vport node list and 1668 * its pointer returned as the confirmed node. 1669 * 1670 * Note that before the @ndlp got "released", the keepDID from not-matching 1671 * or inactive "new_ndlp" on the vport node list is assigned to the nlp_DID 1672 * of the @ndlp. This is because the release of @ndlp is actually to put it 1673 * into an inactive state on the vport node list and the vport node list 1674 * management algorithm does not allow two node with a same DID. 1675 * 1676 * Return code 1677 * pointer to the PLOGI N_Port @ndlp 1678 **/ 1679 static struct lpfc_nodelist * 1680 lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, 1681 struct lpfc_nodelist *ndlp) 1682 { 1683 struct lpfc_vport *vport = ndlp->vport; 1684 struct lpfc_nodelist *new_ndlp; 1685 struct serv_parm *sp; 1686 uint8_t name[sizeof(struct lpfc_name)]; 1687 uint32_t keepDID = 0, keep_nlp_flag = 0; 1688 uint32_t keep_new_nlp_flag = 0; 1689 uint16_t keep_nlp_state; 1690 u32 keep_nlp_fc4_type = 0; 1691 struct lpfc_nvme_rport *keep_nrport = NULL; 1692 unsigned long *active_rrqs_xri_bitmap = NULL; 1693 1694 /* Fabric nodes can have the same WWPN so we don't bother searching 1695 * by WWPN. Just return the ndlp that was given to us. 1696 */ 1697 if (ndlp->nlp_type & NLP_FABRIC) 1698 return ndlp; 1699 1700 sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t)); 1701 memset(name, 0, sizeof(struct lpfc_name)); 1702 1703 /* Now we find out if the NPort we are logging into, matches the WWPN 1704 * we have for that ndlp. If not, we have some work to do. 1705 */ 1706 new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName); 1707 1708 /* return immediately if the WWPN matches ndlp */ 1709 if (!new_ndlp || (new_ndlp == ndlp)) 1710 return ndlp; 1711 1712 /* 1713 * Unregister from backend if not done yet. Could have been skipped 1714 * due to ADISC 1715 */ 1716 lpfc_nlp_unreg_node(vport, new_ndlp); 1717 1718 if (phba->sli_rev == LPFC_SLI_REV4) { 1719 active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool, 1720 GFP_KERNEL); 1721 if (active_rrqs_xri_bitmap) 1722 memset(active_rrqs_xri_bitmap, 0, 1723 phba->cfg_rrq_xri_bitmap_sz); 1724 } 1725 1726 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE, 1727 "3178 PLOGI confirm: ndlp x%x x%x x%x: " 1728 "new_ndlp x%x x%x x%x\n", 1729 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_fc4_type, 1730 (new_ndlp ? new_ndlp->nlp_DID : 0), 1731 (new_ndlp ? new_ndlp->nlp_flag : 0), 1732 (new_ndlp ? new_ndlp->nlp_fc4_type : 0)); 1733 1734 keepDID = new_ndlp->nlp_DID; 1735 1736 if (phba->sli_rev == LPFC_SLI_REV4 && active_rrqs_xri_bitmap) 1737 memcpy(active_rrqs_xri_bitmap, new_ndlp->active_rrqs_xri_bitmap, 1738 phba->cfg_rrq_xri_bitmap_sz); 1739 1740 /* At this point in this routine, we know new_ndlp will be 1741 * returned. however, any previous GID_FTs that were done 1742 * would have updated nlp_fc4_type in ndlp, so we must ensure 1743 * new_ndlp has the right value. 
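 * For example (illustrative values only): if a prior GID_FT reported this
 * DID as both FCP and NVME capable, ndlp->nlp_fc4_type may already hold
 * (NLP_FC4_FCP | NLP_FC4_NVME) while the stale new_ndlp entry still shows
 * only NLP_FC4_FCP; the assignment below carries the fresher mask over so
 * the confirmed node is not re-discovered with the wrong FC4 types.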
1744 */ 1745 if (vport->fc_flag & FC_FABRIC) { 1746 keep_nlp_fc4_type = new_ndlp->nlp_fc4_type; 1747 new_ndlp->nlp_fc4_type = ndlp->nlp_fc4_type; 1748 } 1749 1750 lpfc_unreg_rpi(vport, new_ndlp); 1751 new_ndlp->nlp_DID = ndlp->nlp_DID; 1752 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state; 1753 if (phba->sli_rev == LPFC_SLI_REV4) 1754 memcpy(new_ndlp->active_rrqs_xri_bitmap, 1755 ndlp->active_rrqs_xri_bitmap, 1756 phba->cfg_rrq_xri_bitmap_sz); 1757 1758 /* Lock both ndlps */ 1759 spin_lock_irq(&ndlp->lock); 1760 spin_lock_irq(&new_ndlp->lock); 1761 keep_new_nlp_flag = new_ndlp->nlp_flag; 1762 keep_nlp_flag = ndlp->nlp_flag; 1763 new_ndlp->nlp_flag = ndlp->nlp_flag; 1764 1765 /* if new_ndlp had NLP_UNREG_INP set, keep it */ 1766 if (keep_new_nlp_flag & NLP_UNREG_INP) 1767 new_ndlp->nlp_flag |= NLP_UNREG_INP; 1768 else 1769 new_ndlp->nlp_flag &= ~NLP_UNREG_INP; 1770 1771 /* if new_ndlp had NLP_RPI_REGISTERED set, keep it */ 1772 if (keep_new_nlp_flag & NLP_RPI_REGISTERED) 1773 new_ndlp->nlp_flag |= NLP_RPI_REGISTERED; 1774 else 1775 new_ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; 1776 1777 /* 1778 * Retain the DROPPED flag. This will take care of the init 1779 * refcount when affecting the state change 1780 */ 1781 if (keep_new_nlp_flag & NLP_DROPPED) 1782 new_ndlp->nlp_flag |= NLP_DROPPED; 1783 else 1784 new_ndlp->nlp_flag &= ~NLP_DROPPED; 1785 1786 ndlp->nlp_flag = keep_new_nlp_flag; 1787 1788 /* if ndlp had NLP_UNREG_INP set, keep it */ 1789 if (keep_nlp_flag & NLP_UNREG_INP) 1790 ndlp->nlp_flag |= NLP_UNREG_INP; 1791 else 1792 ndlp->nlp_flag &= ~NLP_UNREG_INP; 1793 1794 /* if ndlp had NLP_RPI_REGISTERED set, keep it */ 1795 if (keep_nlp_flag & NLP_RPI_REGISTERED) 1796 ndlp->nlp_flag |= NLP_RPI_REGISTERED; 1797 else 1798 ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; 1799 1800 /* 1801 * Retain the DROPPED flag. This will take care of the init 1802 * refcount when affecting the state change 1803 */ 1804 if (keep_nlp_flag & NLP_DROPPED) 1805 ndlp->nlp_flag |= NLP_DROPPED; 1806 else 1807 ndlp->nlp_flag &= ~NLP_DROPPED; 1808 1809 spin_unlock_irq(&new_ndlp->lock); 1810 spin_unlock_irq(&ndlp->lock); 1811 1812 /* Set nlp_states accordingly */ 1813 keep_nlp_state = new_ndlp->nlp_state; 1814 lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state); 1815 1816 /* interchange the nvme remoteport structs */ 1817 keep_nrport = new_ndlp->nrport; 1818 new_ndlp->nrport = ndlp->nrport; 1819 1820 /* Move this back to NPR state */ 1821 if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) { 1822 /* The ndlp doesn't have a portname yet, but does have an 1823 * NPort ID. The new_ndlp portname matches the Rport's 1824 * portname. Reinstantiate the new_ndlp and reset the ndlp. 1825 */ 1826 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1827 "3179 PLOGI confirm NEW: %x %x\n", 1828 new_ndlp->nlp_DID, keepDID); 1829 1830 /* Two ndlps cannot have the same did on the nodelist. 1831 * The KeepDID and keep_nlp_fc4_type need to be swapped 1832 * because ndlp is inflight with no WWPN. 
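 * As a purely hypothetical example: if the in-flight ndlp was created for
 * DID 0x010200 with a zeroed WWPN, and new_ndlp (found by WWPN) previously
 * held DID 0x010500 (keepDID), then new_ndlp has already taken over
 * 0x010200 above and the assignment below hands 0x010500 back to ndlp,
 * so no two entries on the vport node list share a DID.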
1833 */ 1834 ndlp->nlp_DID = keepDID; 1835 ndlp->nlp_fc4_type = keep_nlp_fc4_type; 1836 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state); 1837 if (phba->sli_rev == LPFC_SLI_REV4 && 1838 active_rrqs_xri_bitmap) 1839 memcpy(ndlp->active_rrqs_xri_bitmap, 1840 active_rrqs_xri_bitmap, 1841 phba->cfg_rrq_xri_bitmap_sz); 1842 1843 } else { 1844 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1845 "3180 PLOGI confirm SWAP: %x %x\n", 1846 new_ndlp->nlp_DID, keepDID); 1847 1848 lpfc_unreg_rpi(vport, ndlp); 1849 1850 /* The ndlp and new_ndlp both have WWPNs but are swapping 1851 * NPort Ids and attributes. 1852 */ 1853 ndlp->nlp_DID = keepDID; 1854 ndlp->nlp_fc4_type = keep_nlp_fc4_type; 1855 1856 if (phba->sli_rev == LPFC_SLI_REV4 && 1857 active_rrqs_xri_bitmap) 1858 memcpy(ndlp->active_rrqs_xri_bitmap, 1859 active_rrqs_xri_bitmap, 1860 phba->cfg_rrq_xri_bitmap_sz); 1861 1862 /* Since we are switching over to the new_ndlp, 1863 * reset the old ndlp state 1864 */ 1865 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || 1866 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) 1867 keep_nlp_state = NLP_STE_NPR_NODE; 1868 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state); 1869 ndlp->nrport = keep_nrport; 1870 } 1871 1872 /* 1873 * If ndlp is not associated with any rport we can drop it here else 1874 * let dev_loss_tmo_callbk trigger DEVICE_RM event 1875 */ 1876 if (!ndlp->rport && (ndlp->nlp_state == NLP_STE_NPR_NODE)) 1877 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 1878 1879 if (phba->sli_rev == LPFC_SLI_REV4 && 1880 active_rrqs_xri_bitmap) 1881 mempool_free(active_rrqs_xri_bitmap, 1882 phba->active_rrq_pool); 1883 1884 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE, 1885 "3173 PLOGI confirm exit: new_ndlp x%x x%x x%x\n", 1886 new_ndlp->nlp_DID, new_ndlp->nlp_flag, 1887 new_ndlp->nlp_fc4_type); 1888 1889 return new_ndlp; 1890 } 1891 1892 /** 1893 * lpfc_end_rscn - Check and handle more rscn for a vport 1894 * @vport: pointer to a host virtual N_Port data structure. 1895 * 1896 * This routine checks whether more Registration State Change 1897 * Notifications (RSCNs) came in while the discovery state machine was in 1898 * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be 1899 * invoked to handle the additional RSCNs for the @vport. Otherwise, the 1900 * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of 1901 * handling the RSCNs. 1902 **/ 1903 void 1904 lpfc_end_rscn(struct lpfc_vport *vport) 1905 { 1906 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1907 1908 if (vport->fc_flag & FC_RSCN_MODE) { 1909 /* 1910 * Check to see if more RSCNs came in while we were 1911 * processing this one. 1912 */ 1913 if (vport->fc_rscn_id_cnt || 1914 (vport->fc_flag & FC_RSCN_DISCOVERY) != 0) 1915 lpfc_els_handle_rscn(vport); 1916 else { 1917 spin_lock_irq(shost->host_lock); 1918 vport->fc_flag &= ~FC_RSCN_MODE; 1919 spin_unlock_irq(shost->host_lock); 1920 } 1921 } 1922 } 1923 1924 /** 1925 * lpfc_cmpl_els_rrq - Completion handled for els RRQs. 1926 * @phba: pointer to lpfc hba data structure. 1927 * @cmdiocb: pointer to lpfc command iocb data structure. 1928 * @rspiocb: pointer to lpfc response iocb data structure. 1929 * 1930 * This routine will call the clear rrq function to free the rrq and 1931 * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not 1932 * exist then the clear_rrq is still called because the rrq needs to 1933 * be freed. 
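 *
 * As a rough sketch (the RRQ issue path lives elsewhere in the driver, so
 * treat this as an illustration of the expected wiring rather than a copy
 * of it), the request that completes here is set up along these lines:
 *
 *	elsiocb->context_un.rrq = rrq;
 *	elsiocb->cmd_cmpl = lpfc_cmpl_els_rrq;
 *	elsiocb->ndlp = lpfc_nlp_get(ndlp);
 *	lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);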
1934 **/ 1935 1936 static void 1937 lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1938 struct lpfc_iocbq *rspiocb) 1939 { 1940 struct lpfc_vport *vport = cmdiocb->vport; 1941 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 1942 struct lpfc_node_rrq *rrq; 1943 u32 ulp_status = get_job_ulpstatus(phba, rspiocb); 1944 u32 ulp_word4 = get_job_word4(phba, rspiocb); 1945 1946 /* we pass cmdiocb to state machine which needs rspiocb as well */ 1947 rrq = cmdiocb->context_un.rrq; 1948 cmdiocb->rsp_iocb = rspiocb; 1949 1950 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1951 "RRQ cmpl: status:x%x/x%x did:x%x", 1952 ulp_status, ulp_word4, 1953 get_job_els_rsp64_did(phba, cmdiocb)); 1954 1955 1956 /* rrq completes to NPort <nlp_DID> */ 1957 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1958 "2880 RRQ completes to DID x%x " 1959 "Data: x%x x%x x%x x%x x%x\n", 1960 ndlp->nlp_DID, ulp_status, ulp_word4, 1961 get_wqe_tmo(cmdiocb), rrq->xritag, rrq->rxid); 1962 1963 if (ulp_status) { 1964 /* Check for retry */ 1965 /* RRQ failed Don't print the vport to vport rjts */ 1966 if (ulp_status != IOSTAT_LS_RJT || 1967 (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) && 1968 ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) || 1969 (phba)->pport->cfg_log_verbose & LOG_ELS) 1970 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1971 "2881 RRQ failure DID:%06X Status:" 1972 "x%x/x%x\n", 1973 ndlp->nlp_DID, ulp_status, 1974 ulp_word4); 1975 } 1976 1977 lpfc_clr_rrq_active(phba, rrq->xritag, rrq); 1978 lpfc_els_free_iocb(phba, cmdiocb); 1979 lpfc_nlp_put(ndlp); 1980 return; 1981 } 1982 /** 1983 * lpfc_cmpl_els_plogi - Completion callback function for plogi 1984 * @phba: pointer to lpfc hba data structure. 1985 * @cmdiocb: pointer to lpfc command iocb data structure. 1986 * @rspiocb: pointer to lpfc response iocb data structure. 1987 * 1988 * This routine is the completion callback function for issuing the Port 1989 * Login (PLOGI) command. For PLOGI completion, there must be an active 1990 * ndlp on the vport node list that matches the remote node ID from the 1991 * PLOGI response IOCB. If such ndlp does not exist, the PLOGI is simply 1992 * ignored and command IOCB released. The PLOGI response IOCB status is 1993 * checked for error conditions. If there is error status reported, PLOGI 1994 * retry shall be attempted by invoking the lpfc_els_retry() routine. 1995 * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on 1996 * the ndlp and the NLP_EVT_CMPL_PLOGI state to the Discover State Machine 1997 * (DSM) is set for this PLOGI completion. Finally, it checks whether 1998 * there are additional N_Port nodes with the vport that need to perform 1999 * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue addition 2000 * PLOGIs. 
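 *
 * For reference, the command side of this pairing is visible further down
 * in this file: lpfc_issue_els_plogi() builds the request and points its
 * completion at this routine before sending it, roughly:
 *
 *	elsiocb->cmd_cmpl = lpfc_cmpl_els_plogi;
 *	elsiocb->ndlp = lpfc_nlp_get(ndlp);
 *	lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);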
2001 **/ 2002 static void 2003 lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2004 struct lpfc_iocbq *rspiocb) 2005 { 2006 struct lpfc_vport *vport = cmdiocb->vport; 2007 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2008 IOCB_t *irsp; 2009 struct lpfc_nodelist *ndlp, *free_ndlp; 2010 struct lpfc_dmabuf *prsp; 2011 int disc; 2012 struct serv_parm *sp = NULL; 2013 u32 ulp_status, ulp_word4, did, iotag; 2014 bool release_node = false; 2015 2016 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2017 cmdiocb->rsp_iocb = rspiocb; 2018 2019 ulp_status = get_job_ulpstatus(phba, rspiocb); 2020 ulp_word4 = get_job_word4(phba, rspiocb); 2021 did = get_job_els_rsp64_did(phba, cmdiocb); 2022 2023 if (phba->sli_rev == LPFC_SLI_REV4) { 2024 iotag = get_wqe_reqtag(cmdiocb); 2025 } else { 2026 irsp = &rspiocb->iocb; 2027 iotag = irsp->ulpIoTag; 2028 } 2029 2030 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2031 "PLOGI cmpl: status:x%x/x%x did:x%x", 2032 ulp_status, ulp_word4, did); 2033 2034 ndlp = lpfc_findnode_did(vport, did); 2035 if (!ndlp) { 2036 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2037 "0136 PLOGI completes to NPort x%x " 2038 "with no ndlp. Data: x%x x%x x%x\n", 2039 did, ulp_status, ulp_word4, iotag); 2040 goto out_freeiocb; 2041 } 2042 2043 /* Since ndlp can be freed in the disc state machine, note if this node 2044 * is being used during discovery. 2045 */ 2046 spin_lock_irq(&ndlp->lock); 2047 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 2048 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2049 spin_unlock_irq(&ndlp->lock); 2050 2051 /* PLOGI completes to NPort <nlp_DID> */ 2052 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2053 "0102 PLOGI completes to NPort x%06x " 2054 "Data: x%x x%x x%x x%x x%x\n", 2055 ndlp->nlp_DID, ndlp->nlp_fc4_type, 2056 ulp_status, ulp_word4, 2057 disc, vport->num_disc_nodes); 2058 2059 /* Check to see if link went down during discovery */ 2060 if (lpfc_els_chk_latt(vport)) { 2061 spin_lock_irq(&ndlp->lock); 2062 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2063 spin_unlock_irq(&ndlp->lock); 2064 goto out; 2065 } 2066 2067 if (ulp_status) { 2068 /* Check for retry */ 2069 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2070 /* ELS command is being retried */ 2071 if (disc) { 2072 spin_lock_irq(&ndlp->lock); 2073 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2074 spin_unlock_irq(&ndlp->lock); 2075 } 2076 goto out; 2077 } 2078 /* PLOGI failed Don't print the vport to vport rjts */ 2079 if (ulp_status != IOSTAT_LS_RJT || 2080 (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) && 2081 ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) || 2082 (phba)->pport->cfg_log_verbose & LOG_ELS) 2083 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2084 "2753 PLOGI failure DID:%06X " 2085 "Status:x%x/x%x\n", 2086 ndlp->nlp_DID, ulp_status, 2087 ulp_word4); 2088 2089 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2090 if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4)) 2091 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2092 NLP_EVT_CMPL_PLOGI); 2093 2094 /* If a PLOGI collision occurred, the node needs to continue 2095 * with the reglogin process. 2096 */ 2097 spin_lock_irq(&ndlp->lock); 2098 if ((ndlp->nlp_flag & (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI)) && 2099 ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) { 2100 spin_unlock_irq(&ndlp->lock); 2101 goto out; 2102 } 2103 2104 /* No PLOGI collision and the node is not registered with the 2105 * scsi or nvme transport. It is no longer an active node. Just 2106 * start the device remove process. 
2107 */ 2108 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { 2109 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2110 if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) 2111 release_node = true; 2112 } 2113 spin_unlock_irq(&ndlp->lock); 2114 2115 if (release_node) 2116 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2117 NLP_EVT_DEVICE_RM); 2118 } else { 2119 /* Good status, call state machine */ 2120 prsp = list_entry(cmdiocb->cmd_dmabuf->list.next, 2121 struct lpfc_dmabuf, list); 2122 ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp); 2123 2124 sp = (struct serv_parm *)((u8 *)prsp->virt + 2125 sizeof(u32)); 2126 2127 ndlp->vmid_support = 0; 2128 if ((phba->cfg_vmid_app_header && sp->cmn.app_hdr_support) || 2129 (phba->cfg_vmid_priority_tagging && 2130 sp->cmn.priority_tagging)) { 2131 lpfc_printf_log(phba, KERN_DEBUG, LOG_ELS, 2132 "4018 app_hdr_support %d tagging %d DID x%x\n", 2133 sp->cmn.app_hdr_support, 2134 sp->cmn.priority_tagging, 2135 ndlp->nlp_DID); 2136 /* if the dest port supports VMID, mark it in ndlp */ 2137 ndlp->vmid_support = 1; 2138 } 2139 2140 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2141 NLP_EVT_CMPL_PLOGI); 2142 } 2143 2144 if (disc && vport->num_disc_nodes) { 2145 /* Check to see if there are more PLOGIs to be sent */ 2146 lpfc_more_plogi(vport); 2147 2148 if (vport->num_disc_nodes == 0) { 2149 spin_lock_irq(shost->host_lock); 2150 vport->fc_flag &= ~FC_NDISC_ACTIVE; 2151 spin_unlock_irq(shost->host_lock); 2152 2153 lpfc_can_disctmo(vport); 2154 lpfc_end_rscn(vport); 2155 } 2156 } 2157 2158 out: 2159 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, 2160 "PLOGI Cmpl PUT: did:x%x refcnt %d", 2161 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2162 2163 out_freeiocb: 2164 /* Release the reference on the original I/O request. */ 2165 free_ndlp = cmdiocb->ndlp; 2166 2167 lpfc_els_free_iocb(phba, cmdiocb); 2168 lpfc_nlp_put(free_ndlp); 2169 return; 2170 } 2171 2172 /** 2173 * lpfc_issue_els_plogi - Issue an plogi iocb command for a vport 2174 * @vport: pointer to a host virtual N_Port data structure. 2175 * @did: destination port identifier. 2176 * @retry: number of retries to the command IOCB. 2177 * 2178 * This routine issues a Port Login (PLOGI) command to a remote N_Port 2179 * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port, 2180 * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list. 2181 * This routine constructs the proper fields of the PLOGI IOCB and invokes 2182 * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command. 2183 * 2184 * Note that the ndlp reference count will be incremented by 1 for holding 2185 * the ndlp and the reference to ndlp will be stored into the ndlp field 2186 * of the IOCB for the completion callback function to the PLOGI ELS command. 2187 * 2188 * Return code 2189 * 0 - Successfully issued a plogi for @vport 2190 * 1 - failed to issue a plogi for @vport 2191 **/ 2192 int 2193 lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) 2194 { 2195 struct lpfc_hba *phba = vport->phba; 2196 struct serv_parm *sp; 2197 struct lpfc_nodelist *ndlp; 2198 struct lpfc_iocbq *elsiocb; 2199 uint8_t *pcmd; 2200 uint16_t cmdsize; 2201 int ret; 2202 2203 ndlp = lpfc_findnode_did(vport, did); 2204 if (!ndlp) 2205 return 1; 2206 2207 /* Defer the processing of the issue PLOGI until after the 2208 * outstanding UNREG_RPI mbox command completes, unless we 2209 * are going offline. 
This logic does not apply for Fabric DIDs 2210 */ 2211 if ((ndlp->nlp_flag & (NLP_IGNR_REG_CMPL | NLP_UNREG_INP)) && 2212 ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) && 2213 !(vport->fc_flag & FC_OFFLINE_MODE)) { 2214 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2215 "4110 Issue PLOGI x%x deferred " 2216 "on NPort x%x rpi x%x flg x%x Data:" 2217 " x%px\n", 2218 ndlp->nlp_defer_did, ndlp->nlp_DID, 2219 ndlp->nlp_rpi, ndlp->nlp_flag, ndlp); 2220 2221 /* We can only defer 1st PLOGI */ 2222 if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING) 2223 ndlp->nlp_defer_did = did; 2224 return 0; 2225 } 2226 2227 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 2228 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 2229 ELS_CMD_PLOGI); 2230 if (!elsiocb) 2231 return 1; 2232 2233 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 2234 2235 /* For PLOGI request, remainder of payload is service parameters */ 2236 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI; 2237 pcmd += sizeof(uint32_t); 2238 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); 2239 sp = (struct serv_parm *) pcmd; 2240 2241 /* 2242 * If we are a N-port connected to a Fabric, fix-up paramm's so logins 2243 * to device on remote loops work. 2244 */ 2245 if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP)) 2246 sp->cmn.altBbCredit = 1; 2247 2248 if (sp->cmn.fcphLow < FC_PH_4_3) 2249 sp->cmn.fcphLow = FC_PH_4_3; 2250 2251 if (sp->cmn.fcphHigh < FC_PH3) 2252 sp->cmn.fcphHigh = FC_PH3; 2253 2254 sp->cmn.valid_vendor_ver_level = 0; 2255 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); 2256 sp->cmn.bbRcvSizeMsb &= 0xF; 2257 2258 /* Check if the destination port supports VMID */ 2259 ndlp->vmid_support = 0; 2260 if (vport->vmid_priority_tagging) 2261 sp->cmn.priority_tagging = 1; 2262 else if (phba->cfg_vmid_app_header && 2263 bf_get(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags)) 2264 sp->cmn.app_hdr_support = 1; 2265 2266 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2267 "Issue PLOGI: did:x%x", 2268 did, 0, 0); 2269 2270 /* If our firmware supports this feature, convey that 2271 * information to the target using the vendor specific field. 2272 */ 2273 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) { 2274 sp->cmn.valid_vendor_ver_level = 1; 2275 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID); 2276 sp->un.vv.flags = cpu_to_be32(LPFC_VV_SUPPRESS_RSP); 2277 } 2278 2279 phba->fc_stat.elsXmitPLOGI++; 2280 elsiocb->cmd_cmpl = lpfc_cmpl_els_plogi; 2281 2282 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2283 "Issue PLOGI: did:x%x refcnt %d", 2284 did, kref_read(&ndlp->kref), 0); 2285 elsiocb->ndlp = lpfc_nlp_get(ndlp); 2286 if (!elsiocb->ndlp) { 2287 lpfc_els_free_iocb(phba, elsiocb); 2288 return 1; 2289 } 2290 2291 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2292 if (ret) { 2293 lpfc_els_free_iocb(phba, elsiocb); 2294 lpfc_nlp_put(ndlp); 2295 return 1; 2296 } 2297 2298 return 0; 2299 } 2300 2301 /** 2302 * lpfc_cmpl_els_prli - Completion callback function for prli 2303 * @phba: pointer to lpfc hba data structure. 2304 * @cmdiocb: pointer to lpfc command iocb data structure. 2305 * @rspiocb: pointer to lpfc response iocb data structure. 2306 * 2307 * This routine is the completion callback function for a Process Login 2308 * (PRLI) ELS command. The PRLI response IOCB status is checked for error 2309 * status. If there is error status reported, PRLI retry shall be attempted 2310 * by invoking the lpfc_els_retry() routine. 
Otherwise, the state 2311 * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this 2312 * ndlp to mark the PRLI completion. 2313 **/ 2314 static void 2315 lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2316 struct lpfc_iocbq *rspiocb) 2317 { 2318 struct lpfc_vport *vport = cmdiocb->vport; 2319 struct lpfc_nodelist *ndlp; 2320 char *mode; 2321 u32 loglevel; 2322 u32 ulp_status; 2323 u32 ulp_word4; 2324 bool release_node = false; 2325 2326 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2327 cmdiocb->rsp_iocb = rspiocb; 2328 2329 ndlp = cmdiocb->ndlp; 2330 2331 ulp_status = get_job_ulpstatus(phba, rspiocb); 2332 ulp_word4 = get_job_word4(phba, rspiocb); 2333 2334 spin_lock_irq(&ndlp->lock); 2335 ndlp->nlp_flag &= ~NLP_PRLI_SND; 2336 2337 /* Driver supports multiple FC4 types. Counters matter. */ 2338 vport->fc_prli_sent--; 2339 ndlp->fc4_prli_sent--; 2340 spin_unlock_irq(&ndlp->lock); 2341 2342 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2343 "PRLI cmpl: status:x%x/x%x did:x%x", 2344 ulp_status, ulp_word4, 2345 ndlp->nlp_DID); 2346 2347 /* PRLI completes to NPort <nlp_DID> */ 2348 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2349 "0103 PRLI completes to NPort x%06x " 2350 "Data: x%x x%x x%x x%x\n", 2351 ndlp->nlp_DID, ulp_status, ulp_word4, 2352 vport->num_disc_nodes, ndlp->fc4_prli_sent); 2353 2354 /* Check to see if link went down during discovery */ 2355 if (lpfc_els_chk_latt(vport)) 2356 goto out; 2357 2358 if (ulp_status) { 2359 /* Check for retry */ 2360 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2361 /* ELS command is being retried */ 2362 goto out; 2363 } 2364 2365 /* If we don't send GFT_ID to Fabric, a PRLI error 2366 * could be expected. 2367 */ 2368 if ((vport->fc_flag & FC_FABRIC) || 2369 (vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH)) { 2370 mode = KERN_ERR; 2371 loglevel = LOG_TRACE_EVENT; 2372 } else { 2373 mode = KERN_INFO; 2374 loglevel = LOG_ELS; 2375 } 2376 2377 /* PRLI failed */ 2378 lpfc_printf_vlog(vport, mode, loglevel, 2379 "2754 PRLI failure DID:%06X Status:x%x/x%x, " 2380 "data: x%x x%x x%x\n", 2381 ndlp->nlp_DID, ulp_status, 2382 ulp_word4, ndlp->nlp_state, 2383 ndlp->fc4_prli_sent, ndlp->nlp_flag); 2384 2385 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2386 if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4)) 2387 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2388 NLP_EVT_CMPL_PRLI); 2389 2390 /* The following condition catches an inflight transition 2391 * mismatch typically caused by an RSCN. Skip any 2392 * processing to allow recovery. 2393 */ 2394 if ((ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE && 2395 ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE) || 2396 (ndlp->nlp_state == NLP_STE_NPR_NODE && 2397 ndlp->nlp_flag & NLP_DELAY_TMO)) { 2398 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, 2399 "2784 PRLI cmpl: Allow Node recovery " 2400 "DID x%06x nstate x%x nflag x%x\n", 2401 ndlp->nlp_DID, ndlp->nlp_state, 2402 ndlp->nlp_flag); 2403 goto out; 2404 } 2405 2406 /* 2407 * For P2P topology, retain the node so that PLOGI can be 2408 * attempted on it again. 2409 */ 2410 if (vport->fc_flag & FC_PT2PT) 2411 goto out; 2412 2413 /* As long as this node is not registered with the SCSI 2414 * or NVMe transport and no other PRLIs are outstanding, 2415 * it is no longer an active node. Otherwise devloss 2416 * handles the final cleanup. 
2417 */ 2418 spin_lock_irq(&ndlp->lock); 2419 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && 2420 !ndlp->fc4_prli_sent) { 2421 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2422 if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) 2423 release_node = true; 2424 } 2425 spin_unlock_irq(&ndlp->lock); 2426 2427 if (release_node) 2428 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2429 NLP_EVT_DEVICE_RM); 2430 } else { 2431 /* Good status, call state machine. However, if another 2432 * PRLI is outstanding, don't call the state machine 2433 * because final disposition to Mapped or Unmapped is 2434 * completed there. 2435 */ 2436 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2437 NLP_EVT_CMPL_PRLI); 2438 } 2439 2440 out: 2441 lpfc_els_free_iocb(phba, cmdiocb); 2442 lpfc_nlp_put(ndlp); 2443 return; 2444 } 2445 2446 /** 2447 * lpfc_issue_els_prli - Issue a prli iocb command for a vport 2448 * @vport: pointer to a host virtual N_Port data structure. 2449 * @ndlp: pointer to a node-list data structure. 2450 * @retry: number of retries to the command IOCB. 2451 * 2452 * This routine issues a Process Login (PRLI) ELS command for the 2453 * @vport. The PRLI service parameters are set up in the payload of the 2454 * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine 2455 * is put to the IOCB completion callback func field before invoking the 2456 * routine lpfc_sli_issue_iocb() to send out PRLI command. 2457 * 2458 * Note that the ndlp reference count will be incremented by 1 for holding the 2459 * ndlp and the reference to ndlp will be stored into the ndlp field of 2460 * the IOCB for the completion callback function to the PRLI ELS command. 2461 * 2462 * Return code 2463 * 0 - successfully issued prli iocb command for @vport 2464 * 1 - failed to issue prli iocb command for @vport 2465 **/ 2466 int 2467 lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2468 uint8_t retry) 2469 { 2470 int rc = 0; 2471 struct lpfc_hba *phba = vport->phba; 2472 PRLI *npr; 2473 struct lpfc_nvme_prli *npr_nvme; 2474 struct lpfc_iocbq *elsiocb; 2475 uint8_t *pcmd; 2476 uint16_t cmdsize; 2477 u32 local_nlp_type, elscmd; 2478 2479 /* 2480 * If we are in RSCN mode, the FC4 types supported from a 2481 * previous GFT_ID command may not be accurate. So, if we 2482 * are a NVME Initiator, always look for the possibility of 2483 * the remote NPort beng a NVME Target. 2484 */ 2485 if (phba->sli_rev == LPFC_SLI_REV4 && 2486 vport->fc_flag & FC_RSCN_MODE && 2487 vport->nvmei_support) 2488 ndlp->nlp_fc4_type |= NLP_FC4_NVME; 2489 local_nlp_type = ndlp->nlp_fc4_type; 2490 2491 /* This routine will issue 1 or 2 PRLIs, so zero all the ndlp 2492 * fields here before any of them can complete. 2493 */ 2494 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); 2495 ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR); 2496 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 2497 ndlp->nlp_flag &= ~(NLP_FIRSTBURST | NLP_NPR_2B_DISC); 2498 ndlp->nvme_fb_size = 0; 2499 2500 send_next_prli: 2501 if (local_nlp_type & NLP_FC4_FCP) { 2502 /* Payload is 4 + 16 = 20 x14 bytes. */ 2503 cmdsize = (sizeof(uint32_t) + sizeof(PRLI)); 2504 elscmd = ELS_CMD_PRLI; 2505 } else if (local_nlp_type & NLP_FC4_NVME) { 2506 /* Payload is 4 + 20 = 24 x18 bytes. 
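		 * That is the 4-byte ELS command word followed by the NVME
		 * PRLI parameter page, sizeof(struct lpfc_nvme_prli) == 20,
		 * for 24 (0x18) bytes in total.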
*/ 2507 cmdsize = (sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli)); 2508 elscmd = ELS_CMD_NVMEPRLI; 2509 } else { 2510 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2511 "3083 Unknown FC_TYPE x%x ndlp x%06x\n", 2512 ndlp->nlp_fc4_type, ndlp->nlp_DID); 2513 return 1; 2514 } 2515 2516 /* SLI3 ports don't support NVME. If this rport is a strict NVME 2517 * FC4 type, implicitly LOGO. 2518 */ 2519 if (phba->sli_rev == LPFC_SLI_REV3 && 2520 ndlp->nlp_fc4_type == NLP_FC4_NVME) { 2521 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2522 "3088 Rport fc4 type 0x%x not supported by SLI3 adapter\n", 2523 ndlp->nlp_type); 2524 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 2525 return 1; 2526 } 2527 2528 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2529 ndlp->nlp_DID, elscmd); 2530 if (!elsiocb) 2531 return 1; 2532 2533 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 2534 2535 /* For PRLI request, remainder of payload is service parameters */ 2536 memset(pcmd, 0, cmdsize); 2537 2538 if (local_nlp_type & NLP_FC4_FCP) { 2539 /* Remainder of payload is FCP PRLI parameter page. 2540 * Note: this data structure is defined as 2541 * BE/LE in the structure definition so no 2542 * byte swap call is made. 2543 */ 2544 *((uint32_t *)(pcmd)) = ELS_CMD_PRLI; 2545 pcmd += sizeof(uint32_t); 2546 npr = (PRLI *)pcmd; 2547 2548 /* 2549 * If our firmware version is 3.20 or later, 2550 * set the following bits for FC-TAPE support. 2551 */ 2552 if (phba->vpd.rev.feaLevelHigh >= 0x02) { 2553 npr->ConfmComplAllowed = 1; 2554 npr->Retry = 1; 2555 npr->TaskRetryIdReq = 1; 2556 } 2557 npr->estabImagePair = 1; 2558 npr->readXferRdyDis = 1; 2559 if (vport->cfg_first_burst_size) 2560 npr->writeXferRdyDis = 1; 2561 2562 /* For FCP support */ 2563 npr->prliType = PRLI_FCP_TYPE; 2564 npr->initiatorFunc = 1; 2565 elsiocb->cmd_flag |= LPFC_PRLI_FCP_REQ; 2566 2567 /* Remove FCP type - processed. */ 2568 local_nlp_type &= ~NLP_FC4_FCP; 2569 } else if (local_nlp_type & NLP_FC4_NVME) { 2570 /* Remainder of payload is NVME PRLI parameter page. 2571 * This data structure is the newer definition that 2572 * uses bf macros so a byte swap is required. 2573 */ 2574 *((uint32_t *)(pcmd)) = ELS_CMD_NVMEPRLI; 2575 pcmd += sizeof(uint32_t); 2576 npr_nvme = (struct lpfc_nvme_prli *)pcmd; 2577 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); 2578 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ 2579 if (phba->nsler) { 2580 bf_set(prli_nsler, npr_nvme, 1); 2581 bf_set(prli_conf, npr_nvme, 1); 2582 } 2583 2584 /* Only initiators request first burst. */ 2585 if ((phba->cfg_nvme_enable_fb) && 2586 !phba->nvmet_support) 2587 bf_set(prli_fba, npr_nvme, 1); 2588 2589 if (phba->nvmet_support) { 2590 bf_set(prli_tgt, npr_nvme, 1); 2591 bf_set(prli_disc, npr_nvme, 1); 2592 } else { 2593 bf_set(prli_init, npr_nvme, 1); 2594 bf_set(prli_conf, npr_nvme, 1); 2595 } 2596 2597 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); 2598 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); 2599 elsiocb->cmd_flag |= LPFC_PRLI_NVME_REQ; 2600 2601 /* Remove NVME type - processed. 
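		 * Clearing the bit here is what lets the send_next_prli
		 * check at the bottom of this routine decide whether a
		 * second PRLI (for the other FC4 type) still needs to go out.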
*/ 2602 local_nlp_type &= ~NLP_FC4_NVME; 2603 } 2604 2605 phba->fc_stat.elsXmitPRLI++; 2606 elsiocb->cmd_cmpl = lpfc_cmpl_els_prli; 2607 2608 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2609 "Issue PRLI: did:x%x refcnt %d", 2610 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2611 elsiocb->ndlp = lpfc_nlp_get(ndlp); 2612 if (!elsiocb->ndlp) { 2613 lpfc_els_free_iocb(phba, elsiocb); 2614 return 1; 2615 } 2616 2617 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2618 if (rc == IOCB_ERROR) { 2619 lpfc_els_free_iocb(phba, elsiocb); 2620 lpfc_nlp_put(ndlp); 2621 return 1; 2622 } 2623 2624 /* The vport counters are used for lpfc_scan_finished, but 2625 * the ndlp is used to track outstanding PRLIs for different 2626 * FC4 types. 2627 */ 2628 spin_lock_irq(&ndlp->lock); 2629 ndlp->nlp_flag |= NLP_PRLI_SND; 2630 vport->fc_prli_sent++; 2631 ndlp->fc4_prli_sent++; 2632 spin_unlock_irq(&ndlp->lock); 2633 2634 /* The driver supports 2 FC4 types. Make sure 2635 * a PRLI is issued for all types before exiting. 2636 */ 2637 if (phba->sli_rev == LPFC_SLI_REV4 && 2638 local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME)) 2639 goto send_next_prli; 2640 else 2641 return 0; 2642 } 2643 2644 /** 2645 * lpfc_rscn_disc - Perform rscn discovery for a vport 2646 * @vport: pointer to a host virtual N_Port data structure. 2647 * 2648 * This routine performs Registration State Change Notification (RSCN) 2649 * discovery for a @vport. If the @vport's node port recovery count is not 2650 * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all 2651 * the nodes that need recovery. If none of the PLOGI were needed through 2652 * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be 2653 * invoked to check and handle possible more RSCN came in during the period 2654 * of processing the current ones. 2655 **/ 2656 static void 2657 lpfc_rscn_disc(struct lpfc_vport *vport) 2658 { 2659 lpfc_can_disctmo(vport); 2660 2661 /* RSCN discovery */ 2662 /* go thru NPR nodes and issue ELS PLOGIs */ 2663 if (vport->fc_npr_cnt) 2664 if (lpfc_els_disc_plogi(vport)) 2665 return; 2666 2667 lpfc_end_rscn(vport); 2668 } 2669 2670 /** 2671 * lpfc_adisc_done - Complete the adisc phase of discovery 2672 * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs. 2673 * 2674 * This function is called when the final ADISC is completed during discovery. 2675 * This function handles clearing link attention or issuing reg_vpi depending 2676 * on whether npiv is enabled. This function also kicks off the PLOGI phase of 2677 * discovery. 2678 * This function is called with no locks held. 2679 **/ 2680 static void 2681 lpfc_adisc_done(struct lpfc_vport *vport) 2682 { 2683 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2684 struct lpfc_hba *phba = vport->phba; 2685 2686 /* 2687 * For NPIV, cmpl_reg_vpi will set port_state to READY, 2688 * and continue discovery. 2689 */ 2690 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 2691 !(vport->fc_flag & FC_RSCN_MODE) && 2692 (phba->sli_rev < LPFC_SLI_REV4)) { 2693 2694 /* 2695 * If link is down, clear_la and reg_vpi will be done after 2696 * flogi following a link up event 2697 */ 2698 if (!lpfc_is_link_up(phba)) 2699 return; 2700 2701 /* The ADISCs are complete. Doesn't matter if they 2702 * succeeded or failed because the ADISC completion 2703 * routine guarantees to call the state machine and 2704 * the RPI is either unregistered (failed ADISC response) 2705 * or the RPI is still valid and the node is marked 2706 * mapped for a target. 
The exchanges should be in the 2707 * correct state. This code is specific to SLI3. 2708 */ 2709 lpfc_issue_clear_la(phba, vport); 2710 lpfc_issue_reg_vpi(phba, vport); 2711 return; 2712 } 2713 /* 2714 * For SLI2, we need to set port_state to READY 2715 * and continue discovery. 2716 */ 2717 if (vport->port_state < LPFC_VPORT_READY) { 2718 /* If we get here, there is nothing to ADISC */ 2719 lpfc_issue_clear_la(phba, vport); 2720 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) { 2721 vport->num_disc_nodes = 0; 2722 /* go thru NPR list, issue ELS PLOGIs */ 2723 if (vport->fc_npr_cnt) 2724 lpfc_els_disc_plogi(vport); 2725 if (!vport->num_disc_nodes) { 2726 spin_lock_irq(shost->host_lock); 2727 vport->fc_flag &= ~FC_NDISC_ACTIVE; 2728 spin_unlock_irq(shost->host_lock); 2729 lpfc_can_disctmo(vport); 2730 lpfc_end_rscn(vport); 2731 } 2732 } 2733 vport->port_state = LPFC_VPORT_READY; 2734 } else 2735 lpfc_rscn_disc(vport); 2736 } 2737 2738 /** 2739 * lpfc_more_adisc - Issue more adisc as needed 2740 * @vport: pointer to a host virtual N_Port data structure. 2741 * 2742 * This routine determines whether there are more ndlps on a @vport 2743 * node list need to have Address Discover (ADISC) issued. If so, it will 2744 * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's 2745 * remaining nodes which need to have ADISC sent. 2746 **/ 2747 void 2748 lpfc_more_adisc(struct lpfc_vport *vport) 2749 { 2750 if (vport->num_disc_nodes) 2751 vport->num_disc_nodes--; 2752 /* Continue discovery with <num_disc_nodes> ADISCs to go */ 2753 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2754 "0210 Continue discovery with %d ADISCs to go " 2755 "Data: x%x x%x x%x\n", 2756 vport->num_disc_nodes, vport->fc_adisc_cnt, 2757 vport->fc_flag, vport->port_state); 2758 /* Check to see if there are more ADISCs to be sent */ 2759 if (vport->fc_flag & FC_NLP_MORE) { 2760 lpfc_set_disctmo(vport); 2761 /* go thru NPR nodes and issue any remaining ELS ADISCs */ 2762 lpfc_els_disc_adisc(vport); 2763 } 2764 if (!vport->num_disc_nodes) 2765 lpfc_adisc_done(vport); 2766 return; 2767 } 2768 2769 /** 2770 * lpfc_cmpl_els_adisc - Completion callback function for adisc 2771 * @phba: pointer to lpfc hba data structure. 2772 * @cmdiocb: pointer to lpfc command iocb data structure. 2773 * @rspiocb: pointer to lpfc response iocb data structure. 2774 * 2775 * This routine is the completion function for issuing the Address Discover 2776 * (ADISC) command. It first checks to see whether link went down during 2777 * the discovery process. If so, the node will be marked as node port 2778 * recovery for issuing discover IOCB by the link attention handler and 2779 * exit. Otherwise, the response status is checked. If error was reported 2780 * in the response status, the ADISC command shall be retried by invoking 2781 * the lpfc_els_retry() routine. Otherwise, if no error was reported in 2782 * the response status, the state machine is invoked to set transition 2783 * with respect to NLP_EVT_CMPL_ADISC event. 
2784 **/ 2785 static void 2786 lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2787 struct lpfc_iocbq *rspiocb) 2788 { 2789 struct lpfc_vport *vport = cmdiocb->vport; 2790 IOCB_t *irsp; 2791 struct lpfc_nodelist *ndlp; 2792 int disc; 2793 u32 ulp_status, ulp_word4, tmo; 2794 bool release_node = false; 2795 2796 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2797 cmdiocb->rsp_iocb = rspiocb; 2798 2799 ndlp = cmdiocb->ndlp; 2800 2801 ulp_status = get_job_ulpstatus(phba, rspiocb); 2802 ulp_word4 = get_job_word4(phba, rspiocb); 2803 2804 if (phba->sli_rev == LPFC_SLI_REV4) { 2805 tmo = get_wqe_tmo(cmdiocb); 2806 } else { 2807 irsp = &rspiocb->iocb; 2808 tmo = irsp->ulpTimeout; 2809 } 2810 2811 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2812 "ADISC cmpl: status:x%x/x%x did:x%x", 2813 ulp_status, ulp_word4, 2814 ndlp->nlp_DID); 2815 2816 /* Since ndlp can be freed in the disc state machine, note if this node 2817 * is being used during discovery. 2818 */ 2819 spin_lock_irq(&ndlp->lock); 2820 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 2821 ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC); 2822 spin_unlock_irq(&ndlp->lock); 2823 /* ADISC completes to NPort <nlp_DID> */ 2824 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2825 "0104 ADISC completes to NPort x%x " 2826 "Data: x%x x%x x%x x%x x%x\n", 2827 ndlp->nlp_DID, ulp_status, ulp_word4, 2828 tmo, disc, vport->num_disc_nodes); 2829 /* Check to see if link went down during discovery */ 2830 if (lpfc_els_chk_latt(vport)) { 2831 spin_lock_irq(&ndlp->lock); 2832 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2833 spin_unlock_irq(&ndlp->lock); 2834 goto out; 2835 } 2836 2837 if (ulp_status) { 2838 /* Check for retry */ 2839 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2840 /* ELS command is being retried */ 2841 if (disc) { 2842 spin_lock_irq(&ndlp->lock); 2843 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2844 spin_unlock_irq(&ndlp->lock); 2845 lpfc_set_disctmo(vport); 2846 } 2847 goto out; 2848 } 2849 /* ADISC failed */ 2850 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2851 "2755 ADISC failure DID:%06X Status:x%x/x%x\n", 2852 ndlp->nlp_DID, ulp_status, 2853 ulp_word4); 2854 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2855 NLP_EVT_CMPL_ADISC); 2856 2857 /* As long as this node is not registered with the SCSI or NVMe 2858 * transport, it is no longer an active node. Otherwise 2859 * devloss handles the final cleanup. 2860 */ 2861 spin_lock_irq(&ndlp->lock); 2862 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { 2863 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2864 if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) 2865 release_node = true; 2866 } 2867 spin_unlock_irq(&ndlp->lock); 2868 2869 if (release_node) 2870 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2871 NLP_EVT_DEVICE_RM); 2872 } else 2873 /* Good status, call state machine */ 2874 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2875 NLP_EVT_CMPL_ADISC); 2876 2877 /* Check to see if there are more ADISCs to be sent */ 2878 if (disc && vport->num_disc_nodes) 2879 lpfc_more_adisc(vport); 2880 out: 2881 lpfc_els_free_iocb(phba, cmdiocb); 2882 lpfc_nlp_put(ndlp); 2883 return; 2884 } 2885 2886 /** 2887 * lpfc_issue_els_adisc - Issue an address discover iocb to an node on a vport 2888 * @vport: pointer to a virtual N_Port data structure. 2889 * @ndlp: pointer to a node-list data structure. 2890 * @retry: number of retries to the command IOCB. 2891 * 2892 * This routine issues an Address Discover (ADISC) for an @ndlp on a 2893 * @vport. 
It prepares the payload of the ADISC ELS command, updates the 2894 * state flags of the ndlp, and invokes the lpfc_sli_issue_iocb() routine 2895 * to issue the ADISC ELS command. 2896 * 2897 * Note that the ndlp reference count will be incremented by 1 for holding the 2898 * ndlp and the reference to ndlp will be stored into the ndlp field of 2899 * the IOCB for the completion callback function to the ADISC ELS command. 2900 * 2901 * Return code 2902 * 0 - successfully issued adisc 2903 * 1 - failed to issue adisc 2904 **/ 2905 int 2906 lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2907 uint8_t retry) 2908 { 2909 int rc = 0; 2910 struct lpfc_hba *phba = vport->phba; 2911 ADISC *ap; 2912 struct lpfc_iocbq *elsiocb; 2913 uint8_t *pcmd; 2914 uint16_t cmdsize; 2915 2916 cmdsize = (sizeof(uint32_t) + sizeof(ADISC)); 2917 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2918 ndlp->nlp_DID, ELS_CMD_ADISC); 2919 if (!elsiocb) 2920 return 1; 2921 2922 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 2923 2924 /* For ADISC request, remainder of payload is service parameters */ 2925 *((uint32_t *) (pcmd)) = ELS_CMD_ADISC; 2926 pcmd += sizeof(uint32_t); 2927 2928 /* Fill in ADISC payload */ 2929 ap = (ADISC *) pcmd; 2930 ap->hardAL_PA = phba->fc_pref_ALPA; 2931 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 2932 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 2933 ap->DID = be32_to_cpu(vport->fc_myDID); 2934 2935 phba->fc_stat.elsXmitADISC++; 2936 elsiocb->cmd_cmpl = lpfc_cmpl_els_adisc; 2937 spin_lock_irq(&ndlp->lock); 2938 ndlp->nlp_flag |= NLP_ADISC_SND; 2939 spin_unlock_irq(&ndlp->lock); 2940 elsiocb->ndlp = lpfc_nlp_get(ndlp); 2941 if (!elsiocb->ndlp) { 2942 lpfc_els_free_iocb(phba, elsiocb); 2943 goto err; 2944 } 2945 2946 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2947 "Issue ADISC: did:x%x refcnt %d", 2948 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2949 2950 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2951 if (rc == IOCB_ERROR) { 2952 lpfc_els_free_iocb(phba, elsiocb); 2953 lpfc_nlp_put(ndlp); 2954 goto err; 2955 } 2956 2957 return 0; 2958 2959 err: 2960 spin_lock_irq(&ndlp->lock); 2961 ndlp->nlp_flag &= ~NLP_ADISC_SND; 2962 spin_unlock_irq(&ndlp->lock); 2963 return 1; 2964 } 2965 2966 /** 2967 * lpfc_cmpl_els_logo - Completion callback function for logo 2968 * @phba: pointer to lpfc hba data structure. 2969 * @cmdiocb: pointer to lpfc command iocb data structure. 2970 * @rspiocb: pointer to lpfc response iocb data structure. 2971 * 2972 * This routine is the completion function for issuing the ELS Logout (LOGO) 2973 * command. If no error status was reported from the LOGO response, the 2974 * state machine of the associated ndlp shall be invoked for transition with 2975 * respect to the NLP_EVT_CMPL_LOGO event.
2976 **/ 2977 static void 2978 lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2979 struct lpfc_iocbq *rspiocb) 2980 { 2981 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 2982 struct lpfc_vport *vport = ndlp->vport; 2983 IOCB_t *irsp; 2984 unsigned long flags; 2985 uint32_t skip_recovery = 0; 2986 int wake_up_waiter = 0; 2987 u32 ulp_status; 2988 u32 ulp_word4; 2989 u32 tmo; 2990 2991 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2992 cmdiocb->rsp_iocb = rspiocb; 2993 2994 ulp_status = get_job_ulpstatus(phba, rspiocb); 2995 ulp_word4 = get_job_word4(phba, rspiocb); 2996 2997 if (phba->sli_rev == LPFC_SLI_REV4) { 2998 tmo = get_wqe_tmo(cmdiocb); 2999 } else { 3000 irsp = &rspiocb->iocb; 3001 tmo = irsp->ulpTimeout; 3002 } 3003 3004 spin_lock_irq(&ndlp->lock); 3005 ndlp->nlp_flag &= ~NLP_LOGO_SND; 3006 if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) { 3007 wake_up_waiter = 1; 3008 ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO; 3009 } 3010 spin_unlock_irq(&ndlp->lock); 3011 3012 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3013 "LOGO cmpl: status:x%x/x%x did:x%x", 3014 ulp_status, ulp_word4, 3015 ndlp->nlp_DID); 3016 3017 /* LOGO completes to NPort <nlp_DID> */ 3018 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3019 "0105 LOGO completes to NPort x%x " 3020 "refcnt %d nflags x%x Data: x%x x%x x%x x%x\n", 3021 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag, 3022 ulp_status, ulp_word4, 3023 tmo, vport->num_disc_nodes); 3024 3025 if (lpfc_els_chk_latt(vport)) { 3026 skip_recovery = 1; 3027 goto out; 3028 } 3029 3030 /* The LOGO will not be retried on failure. A LOGO was 3031 * issued to the remote rport and a ACC or RJT or no Answer are 3032 * all acceptable. Note the failure and move forward with 3033 * discovery. The PLOGI will retry. 3034 */ 3035 if (ulp_status) { 3036 /* LOGO failed */ 3037 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 3038 "2756 LOGO failure, No Retry DID:%06X " 3039 "Status:x%x/x%x\n", 3040 ndlp->nlp_DID, ulp_status, 3041 ulp_word4); 3042 3043 if (lpfc_error_lost_link(vport, ulp_status, ulp_word4)) 3044 skip_recovery = 1; 3045 } 3046 3047 /* Call state machine. This will unregister the rpi if needed. */ 3048 lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO); 3049 3050 if (skip_recovery) 3051 goto out; 3052 3053 /* The driver sets this flag for an NPIV instance that doesn't want to 3054 * log into the remote port. 3055 */ 3056 if (ndlp->nlp_flag & NLP_TARGET_REMOVE) { 3057 spin_lock_irq(&ndlp->lock); 3058 if (phba->sli_rev == LPFC_SLI_REV4) 3059 ndlp->nlp_flag |= NLP_RELEASE_RPI; 3060 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 3061 spin_unlock_irq(&ndlp->lock); 3062 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 3063 NLP_EVT_DEVICE_RM); 3064 goto out_rsrc_free; 3065 } 3066 3067 out: 3068 /* At this point, the LOGO processing is complete. NOTE: For a 3069 * pt2pt topology, we are assuming the NPortID will only change 3070 * on link up processing. For a LOGO / PLOGI initiated by the 3071 * Initiator, we are assuming the NPortID is not going to change. 3072 */ 3073 3074 if (wake_up_waiter && ndlp->logo_waitq) 3075 wake_up(ndlp->logo_waitq); 3076 /* 3077 * If the node is a target, the handling attempts to recover the port. 3078 * For any other port type, the rpi is unregistered as an implicit 3079 * LOGO. 
3080 */ 3081 if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) && 3082 skip_recovery == 0) { 3083 lpfc_cancel_retry_delay_tmo(vport, ndlp); 3084 spin_lock_irqsave(&ndlp->lock, flags); 3085 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 3086 spin_unlock_irqrestore(&ndlp->lock, flags); 3087 3088 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3089 "3187 LOGO completes to NPort x%x: Start " 3090 "Recovery Data: x%x x%x x%x x%x\n", 3091 ndlp->nlp_DID, ulp_status, 3092 ulp_word4, tmo, 3093 vport->num_disc_nodes); 3094 3095 lpfc_els_free_iocb(phba, cmdiocb); 3096 lpfc_nlp_put(ndlp); 3097 3098 lpfc_disc_start(vport); 3099 return; 3100 } 3101 3102 /* Cleanup path for failed REG_RPI handling. If REG_RPI fails, the 3103 * driver sends a LOGO to the rport to clean up. For fabric and 3104 * initiator ports, clean up the node as long as the node is not 3105 * registered with the transport. 3106 */ 3107 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { 3108 spin_lock_irq(&ndlp->lock); 3109 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 3110 spin_unlock_irq(&ndlp->lock); 3111 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 3112 NLP_EVT_DEVICE_RM); 3113 } 3114 out_rsrc_free: 3115 /* Driver is done with the I/O. */ 3116 lpfc_els_free_iocb(phba, cmdiocb); 3117 lpfc_nlp_put(ndlp); 3118 } 3119 3120 /** 3121 * lpfc_issue_els_logo - Issue a logo to a node on a vport 3122 * @vport: pointer to a virtual N_Port data structure. 3123 * @ndlp: pointer to a node-list data structure. 3124 * @retry: number of retries to the command IOCB. 3125 * 3126 * This routine constructs and issues an ELS Logout (LOGO) iocb command 3127 * to a remote node, referred to by an @ndlp on a @vport. It constructs the 3128 * payload of the IOCB, properly sets up the @ndlp state, and invokes the 3129 * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command. 3130 * 3131 * Note that the ndlp reference count will be incremented by 1 for holding the 3132 * ndlp and the reference to ndlp will be stored into the ndlp field of 3133 * the IOCB for the completion callback function to the LOGO ELS command.
3134 * 3135 * Callers of this routine are expected to unregister the RPI first 3136 * 3137 * Return code 3138 * 0 - successfully issued logo 3139 * 1 - failed to issue logo 3140 **/ 3141 int 3142 lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 3143 uint8_t retry) 3144 { 3145 struct lpfc_hba *phba = vport->phba; 3146 struct lpfc_iocbq *elsiocb; 3147 uint8_t *pcmd; 3148 uint16_t cmdsize; 3149 int rc; 3150 3151 spin_lock_irq(&ndlp->lock); 3152 if (ndlp->nlp_flag & NLP_LOGO_SND) { 3153 spin_unlock_irq(&ndlp->lock); 3154 return 0; 3155 } 3156 spin_unlock_irq(&ndlp->lock); 3157 3158 cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name); 3159 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3160 ndlp->nlp_DID, ELS_CMD_LOGO); 3161 if (!elsiocb) 3162 return 1; 3163 3164 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 3165 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 3166 pcmd += sizeof(uint32_t); 3167 3168 /* Fill in LOGO payload */ 3169 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 3170 pcmd += sizeof(uint32_t); 3171 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 3172 3173 phba->fc_stat.elsXmitLOGO++; 3174 elsiocb->cmd_cmpl = lpfc_cmpl_els_logo; 3175 spin_lock_irq(&ndlp->lock); 3176 ndlp->nlp_flag |= NLP_LOGO_SND; 3177 ndlp->nlp_flag &= ~NLP_ISSUE_LOGO; 3178 spin_unlock_irq(&ndlp->lock); 3179 elsiocb->ndlp = lpfc_nlp_get(ndlp); 3180 if (!elsiocb->ndlp) { 3181 lpfc_els_free_iocb(phba, elsiocb); 3182 goto err; 3183 } 3184 3185 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3186 "Issue LOGO: did:x%x refcnt %d", 3187 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3188 3189 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3190 if (rc == IOCB_ERROR) { 3191 lpfc_els_free_iocb(phba, elsiocb); 3192 lpfc_nlp_put(ndlp); 3193 goto err; 3194 } 3195 3196 spin_lock_irq(&ndlp->lock); 3197 ndlp->nlp_prev_state = ndlp->nlp_state; 3198 spin_unlock_irq(&ndlp->lock); 3199 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 3200 return 0; 3201 3202 err: 3203 spin_lock_irq(&ndlp->lock); 3204 ndlp->nlp_flag &= ~NLP_LOGO_SND; 3205 spin_unlock_irq(&ndlp->lock); 3206 return 1; 3207 } 3208 3209 /** 3210 * lpfc_cmpl_els_cmd - Completion callback function for generic els command 3211 * @phba: pointer to lpfc hba data structure. 3212 * @cmdiocb: pointer to lpfc command iocb data structure. 3213 * @rspiocb: pointer to lpfc response iocb data structure. 3214 * 3215 * This routine is a generic completion callback function for ELS commands. 3216 * Specifically, it is the callback function which does not need to perform 3217 * any command specific operations. It is currently used by the ELS command 3218 * issuing routines for RSCN, lpfc_issue_els_rscn, and the ELS Fibre Channel 3219 * Address Resolution Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). 3220 * Other than certain debug loggings, this callback function simply invokes the 3221 * lpfc_els_chk_latt() routine to check whether link went down during the 3222 * discovery process. 
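 *
 * A minimal sketch of how an issuing routine is expected to use this
 * generic completion (the wiring mirrors the other ELS issue paths in
 * this file and is shown here only as an illustration):
 *
 *	elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd;
 *	elsiocb->ndlp = lpfc_nlp_get(ndlp);
 *	if (!elsiocb->ndlp) {
 *		lpfc_els_free_iocb(phba, elsiocb);
 *		return 1;
 *	}
 *	lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);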
3223 **/ 3224 static void 3225 lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3226 struct lpfc_iocbq *rspiocb) 3227 { 3228 struct lpfc_vport *vport = cmdiocb->vport; 3229 struct lpfc_nodelist *free_ndlp; 3230 IOCB_t *irsp; 3231 u32 ulp_status, ulp_word4, tmo, did, iotag; 3232 3233 ulp_status = get_job_ulpstatus(phba, rspiocb); 3234 ulp_word4 = get_job_word4(phba, rspiocb); 3235 did = get_job_els_rsp64_did(phba, cmdiocb); 3236 3237 if (phba->sli_rev == LPFC_SLI_REV4) { 3238 tmo = get_wqe_tmo(cmdiocb); 3239 iotag = get_wqe_reqtag(cmdiocb); 3240 } else { 3241 irsp = &rspiocb->iocb; 3242 tmo = irsp->ulpTimeout; 3243 iotag = irsp->ulpIoTag; 3244 } 3245 3246 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3247 "ELS cmd cmpl: status:x%x/x%x did:x%x", 3248 ulp_status, ulp_word4, did); 3249 3250 /* ELS cmd tag <ulpIoTag> completes */ 3251 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3252 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n", 3253 iotag, ulp_status, ulp_word4, tmo); 3254 3255 /* Check to see if link went down during discovery */ 3256 lpfc_els_chk_latt(vport); 3257 3258 free_ndlp = cmdiocb->ndlp; 3259 3260 lpfc_els_free_iocb(phba, cmdiocb); 3261 lpfc_nlp_put(free_ndlp); 3262 } 3263 3264 /** 3265 * lpfc_reg_fab_ctrl_node - RPI register the fabric controller node. 3266 * @vport: pointer to lpfc_vport data structure. 3267 * @fc_ndlp: pointer to the fabric controller (0xfffffd) node. 3268 * 3269 * This routine registers the rpi assigned to the fabric controller 3270 * NPort_ID (0xfffffd) with the port and moves the node to UNMAPPED 3271 * state triggering a registration with the SCSI transport. 3272 * 3273 * This routine is single out because the fabric controller node 3274 * does not receive a PLOGI. This routine is consumed by the 3275 * SCR and RDF ELS commands. Callers are expected to qualify 3276 * with SLI4 first. 3277 **/ 3278 static int 3279 lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp) 3280 { 3281 int rc = 0; 3282 struct lpfc_hba *phba = vport->phba; 3283 struct lpfc_nodelist *ns_ndlp; 3284 LPFC_MBOXQ_t *mbox; 3285 3286 if (fc_ndlp->nlp_flag & NLP_RPI_REGISTERED) 3287 return rc; 3288 3289 ns_ndlp = lpfc_findnode_did(vport, NameServer_DID); 3290 if (!ns_ndlp) 3291 return -ENODEV; 3292 3293 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 3294 "0935 %s: Reg FC RPI x%x on FC DID x%x NSSte: x%x\n", 3295 __func__, fc_ndlp->nlp_rpi, fc_ndlp->nlp_DID, 3296 ns_ndlp->nlp_state); 3297 if (ns_ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 3298 return -ENODEV; 3299 3300 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3301 if (!mbox) { 3302 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3303 "0936 %s: no memory for reg_login " 3304 "Data: x%x x%x x%x x%x\n", __func__, 3305 fc_ndlp->nlp_DID, fc_ndlp->nlp_state, 3306 fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi); 3307 return -ENOMEM; 3308 } 3309 rc = lpfc_reg_rpi(phba, vport->vpi, fc_ndlp->nlp_DID, 3310 (u8 *)&vport->fc_sparam, mbox, fc_ndlp->nlp_rpi); 3311 if (rc) { 3312 rc = -EACCES; 3313 goto out; 3314 } 3315 3316 fc_ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; 3317 mbox->mbox_cmpl = lpfc_mbx_cmpl_fc_reg_login; 3318 mbox->ctx_ndlp = lpfc_nlp_get(fc_ndlp); 3319 if (!mbox->ctx_ndlp) { 3320 rc = -ENOMEM; 3321 goto out; 3322 } 3323 3324 mbox->vport = vport; 3325 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 3326 if (rc == MBX_NOT_FINISHED) { 3327 rc = -ENODEV; 3328 lpfc_nlp_put(fc_ndlp); 3329 goto out; 3330 } 3331 /* Success path. Exit. 
*/ 3332 lpfc_nlp_set_state(vport, fc_ndlp, 3333 NLP_STE_REG_LOGIN_ISSUE); 3334 return 0; 3335 3336 out: 3337 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 3338 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3339 "0938 %s: failed to format reg_login " 3340 "Data: x%x x%x x%x x%x\n", __func__, 3341 fc_ndlp->nlp_DID, fc_ndlp->nlp_state, 3342 fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi); 3343 return rc; 3344 } 3345 3346 /** 3347 * lpfc_cmpl_els_disc_cmd - Completion callback function for Discovery ELS cmd 3348 * @phba: pointer to lpfc hba data structure. 3349 * @cmdiocb: pointer to lpfc command iocb data structure. 3350 * @rspiocb: pointer to lpfc response iocb data structure. 3351 * 3352 * This routine is a generic completion callback function for Discovery ELS cmd. 3353 * Currently used by the ELS command issuing routines for the ELS State Change 3354 * Request (SCR), lpfc_issue_els_scr() and the ELS RDF, lpfc_issue_els_rdf(). 3355 * These commands will be retried once only for ELS timeout errors. 3356 **/ 3357 static void 3358 lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3359 struct lpfc_iocbq *rspiocb) 3360 { 3361 struct lpfc_vport *vport = cmdiocb->vport; 3362 IOCB_t *irsp; 3363 struct lpfc_els_rdf_rsp *prdf; 3364 struct lpfc_dmabuf *pcmd, *prsp; 3365 u32 *pdata; 3366 u32 cmd; 3367 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 3368 u32 ulp_status, ulp_word4, tmo, did, iotag; 3369 3370 ulp_status = get_job_ulpstatus(phba, rspiocb); 3371 ulp_word4 = get_job_word4(phba, rspiocb); 3372 did = get_job_els_rsp64_did(phba, cmdiocb); 3373 3374 if (phba->sli_rev == LPFC_SLI_REV4) { 3375 tmo = get_wqe_tmo(cmdiocb); 3376 iotag = get_wqe_reqtag(cmdiocb); 3377 } else { 3378 irsp = &rspiocb->iocb; 3379 tmo = irsp->ulpTimeout; 3380 iotag = irsp->ulpIoTag; 3381 } 3382 3383 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3384 "ELS cmd cmpl: status:x%x/x%x did:x%x", 3385 ulp_status, ulp_word4, did); 3386 3387 /* ELS cmd tag <ulpIoTag> completes */ 3388 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3389 "0217 ELS cmd tag x%x completes Data: x%x x%x x%x x%x\n", 3390 iotag, ulp_status, ulp_word4, tmo, cmdiocb->retry); 3391 3392 pcmd = cmdiocb->cmd_dmabuf; 3393 if (!pcmd) 3394 goto out; 3395 3396 pdata = (u32 *)pcmd->virt; 3397 if (!pdata) 3398 goto out; 3399 cmd = *pdata; 3400 3401 /* Only 1 retry for ELS Timeout only */ 3402 if (ulp_status == IOSTAT_LOCAL_REJECT && 3403 ((ulp_word4 & IOERR_PARAM_MASK) == 3404 IOERR_SEQUENCE_TIMEOUT)) { 3405 cmdiocb->retry++; 3406 if (cmdiocb->retry <= 1) { 3407 switch (cmd) { 3408 case ELS_CMD_SCR: 3409 lpfc_issue_els_scr(vport, cmdiocb->retry); 3410 break; 3411 case ELS_CMD_EDC: 3412 lpfc_issue_els_edc(vport, cmdiocb->retry); 3413 break; 3414 case ELS_CMD_RDF: 3415 lpfc_issue_els_rdf(vport, cmdiocb->retry); 3416 break; 3417 } 3418 goto out; 3419 } 3420 phba->fc_stat.elsRetryExceeded++; 3421 } 3422 if (cmd == ELS_CMD_EDC) { 3423 /* must be called before checking uplStatus and returning */ 3424 lpfc_cmpl_els_edc(phba, cmdiocb, rspiocb); 3425 return; 3426 } 3427 if (ulp_status) { 3428 /* ELS discovery cmd completes with error */ 3429 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT, 3430 "4203 ELS cmd x%x error: x%x x%X\n", cmd, 3431 ulp_status, ulp_word4); 3432 goto out; 3433 } 3434 3435 /* The RDF response doesn't have any impact on the running driver 3436 * but the notification descriptors are dumped here for support. 
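 * Each granted descriptor tag is logged along with the currently registered
 * congestion signal and FPIN settings.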
3437 */ 3438 if (cmd == ELS_CMD_RDF) { 3439 int i; 3440 3441 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 3442 if (!prsp) 3443 goto out; 3444 3445 prdf = (struct lpfc_els_rdf_rsp *)prsp->virt; 3446 if (!prdf) 3447 goto out; 3448 3449 for (i = 0; i < ELS_RDF_REG_TAG_CNT && 3450 i < be32_to_cpu(prdf->reg_d1.reg_desc.count); i++) 3451 lpfc_printf_vlog(vport, KERN_INFO, 3452 LOG_ELS | LOG_CGN_MGMT, 3453 "4677 Fabric RDF Notification Grant " 3454 "Data: 0x%08x Reg: %x %x\n", 3455 be32_to_cpu( 3456 prdf->reg_d1.desc_tags[i]), 3457 phba->cgn_reg_signal, 3458 phba->cgn_reg_fpin); 3459 } 3460 3461 out: 3462 /* Check to see if link went down during discovery */ 3463 lpfc_els_chk_latt(vport); 3464 lpfc_els_free_iocb(phba, cmdiocb); 3465 lpfc_nlp_put(ndlp); 3466 return; 3467 } 3468 3469 /** 3470 * lpfc_issue_els_scr - Issue a scr to an node on a vport 3471 * @vport: pointer to a host virtual N_Port data structure. 3472 * @retry: retry counter for the command IOCB. 3473 * 3474 * This routine issues a State Change Request (SCR) to a fabric node 3475 * on a @vport. The remote node is Fabric Controller (0xfffffd). It 3476 * first search the @vport node list to find the matching ndlp. If no such 3477 * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An 3478 * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb() 3479 * routine is invoked to send the SCR IOCB. 3480 * 3481 * Note that the ndlp reference count will be incremented by 1 for holding the 3482 * ndlp and the reference to ndlp will be stored into the ndlp field of 3483 * the IOCB for the completion callback function to the SCR ELS command. 3484 * 3485 * Return code 3486 * 0 - Successfully issued scr command 3487 * 1 - Failed to issue scr command 3488 **/ 3489 int 3490 lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry) 3491 { 3492 int rc = 0; 3493 struct lpfc_hba *phba = vport->phba; 3494 struct lpfc_iocbq *elsiocb; 3495 uint8_t *pcmd; 3496 uint16_t cmdsize; 3497 struct lpfc_nodelist *ndlp; 3498 3499 cmdsize = (sizeof(uint32_t) + sizeof(SCR)); 3500 3501 ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID); 3502 if (!ndlp) { 3503 ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID); 3504 if (!ndlp) 3505 return 1; 3506 lpfc_enqueue_node(vport, ndlp); 3507 } 3508 3509 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3510 ndlp->nlp_DID, ELS_CMD_SCR); 3511 if (!elsiocb) 3512 return 1; 3513 3514 if (phba->sli_rev == LPFC_SLI_REV4) { 3515 rc = lpfc_reg_fab_ctrl_node(vport, ndlp); 3516 if (rc) { 3517 lpfc_els_free_iocb(phba, elsiocb); 3518 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3519 "0937 %s: Failed to reg fc node, rc %d\n", 3520 __func__, rc); 3521 return 1; 3522 } 3523 } 3524 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 3525 3526 *((uint32_t *) (pcmd)) = ELS_CMD_SCR; 3527 pcmd += sizeof(uint32_t); 3528 3529 /* For SCR, remainder of payload is SCR parameter page */ 3530 memset(pcmd, 0, sizeof(SCR)); 3531 ((SCR *) pcmd)->Function = SCR_FUNC_FULL; 3532 3533 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3534 "Issue SCR: did:x%x", 3535 ndlp->nlp_DID, 0, 0); 3536 3537 phba->fc_stat.elsXmitSCR++; 3538 elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd; 3539 elsiocb->ndlp = lpfc_nlp_get(ndlp); 3540 if (!elsiocb->ndlp) { 3541 lpfc_els_free_iocb(phba, elsiocb); 3542 return 1; 3543 } 3544 3545 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3546 "Issue SCR: did:x%x refcnt %d", 3547 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3548 3549 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3550 if 
(rc == IOCB_ERROR) { 3551 lpfc_els_free_iocb(phba, elsiocb); 3552 lpfc_nlp_put(ndlp); 3553 return 1; 3554 } 3555 3556 return 0; 3557 } 3558 3559 /** 3560 * lpfc_issue_els_rscn - Issue an RSCN to the Fabric Controller (Fabric) 3561 * or the other nport (pt2pt). 3562 * @vport: pointer to a host virtual N_Port data structure. 3563 * @retry: number of retries to the command IOCB. 3564 * 3565 * This routine issues a RSCN to the Fabric Controller (DID 0xFFFFFD) 3566 * when connected to a fabric, or to the remote port when connected 3567 * in point-to-point mode. When sent to the Fabric Controller, it will 3568 * replay the RSCN to registered recipients. 3569 * 3570 * Note that the ndlp reference count will be incremented by 1 for holding the 3571 * ndlp and the reference to ndlp will be stored into the ndlp field of 3572 * the IOCB for the completion callback function to the RSCN ELS command. 3573 * 3574 * Return code 3575 * 0 - Successfully issued RSCN command 3576 * 1 - Failed to issue RSCN command 3577 **/ 3578 int 3579 lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry) 3580 { 3581 int rc = 0; 3582 struct lpfc_hba *phba = vport->phba; 3583 struct lpfc_iocbq *elsiocb; 3584 struct lpfc_nodelist *ndlp; 3585 struct { 3586 struct fc_els_rscn rscn; 3587 struct fc_els_rscn_page portid; 3588 } *event; 3589 uint32_t nportid; 3590 uint16_t cmdsize = sizeof(*event); 3591 3592 /* Not supported for private loop */ 3593 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP && 3594 !(vport->fc_flag & FC_PUBLIC_LOOP)) 3595 return 1; 3596 3597 if (vport->fc_flag & FC_PT2PT) { 3598 /* find any mapped nport - that would be the other nport */ 3599 ndlp = lpfc_findnode_mapped(vport); 3600 if (!ndlp) 3601 return 1; 3602 } else { 3603 nportid = FC_FID_FCTRL; 3604 /* find the fabric controller node */ 3605 ndlp = lpfc_findnode_did(vport, nportid); 3606 if (!ndlp) { 3607 /* if one didn't exist, make one */ 3608 ndlp = lpfc_nlp_init(vport, nportid); 3609 if (!ndlp) 3610 return 1; 3611 lpfc_enqueue_node(vport, ndlp); 3612 } 3613 } 3614 3615 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3616 ndlp->nlp_DID, ELS_CMD_RSCN_XMT); 3617 3618 if (!elsiocb) 3619 return 1; 3620 3621 event = elsiocb->cmd_dmabuf->virt; 3622 3623 event->rscn.rscn_cmd = ELS_RSCN; 3624 event->rscn.rscn_page_len = sizeof(struct fc_els_rscn_page); 3625 event->rscn.rscn_plen = cpu_to_be16(cmdsize); 3626 3627 nportid = vport->fc_myDID; 3628 /* appears that page flags must be 0 for fabric to broadcast RSCN */ 3629 event->portid.rscn_page_flags = 0; 3630 event->portid.rscn_fid[0] = (nportid & 0x00FF0000) >> 16; 3631 event->portid.rscn_fid[1] = (nportid & 0x0000FF00) >> 8; 3632 event->portid.rscn_fid[2] = nportid & 0x000000FF; 3633 3634 phba->fc_stat.elsXmitRSCN++; 3635 elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd; 3636 elsiocb->ndlp = lpfc_nlp_get(ndlp); 3637 if (!elsiocb->ndlp) { 3638 lpfc_els_free_iocb(phba, elsiocb); 3639 return 1; 3640 } 3641 3642 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3643 "Issue RSCN: did:x%x", 3644 ndlp->nlp_DID, 0, 0); 3645 3646 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3647 if (rc == IOCB_ERROR) { 3648 lpfc_els_free_iocb(phba, elsiocb); 3649 lpfc_nlp_put(ndlp); 3650 return 1; 3651 } 3652 3653 return 0; 3654 } 3655 3656 /** 3657 * lpfc_issue_els_farpr - Issue a farp to an node on a vport 3658 * @vport: pointer to a host virtual N_Port data structure. 3659 * @nportid: N_Port identifier to the remote node. 3660 * @retry: number of retries to the command IOCB. 
3661 * 3662 * This routine issues a Fibre Channel Address Resolution Response 3663 * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid) 3664 * is passed into the function. It first search the @vport node list to find 3665 * the matching ndlp. If no such ndlp is found, a new ndlp shall be created 3666 * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the 3667 * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command. 3668 * 3669 * Note that the ndlp reference count will be incremented by 1 for holding the 3670 * ndlp and the reference to ndlp will be stored into the ndlp field of 3671 * the IOCB for the completion callback function to the FARPR ELS command. 3672 * 3673 * Return code 3674 * 0 - Successfully issued farpr command 3675 * 1 - Failed to issue farpr command 3676 **/ 3677 static int 3678 lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) 3679 { 3680 int rc = 0; 3681 struct lpfc_hba *phba = vport->phba; 3682 struct lpfc_iocbq *elsiocb; 3683 FARP *fp; 3684 uint8_t *pcmd; 3685 uint32_t *lp; 3686 uint16_t cmdsize; 3687 struct lpfc_nodelist *ondlp; 3688 struct lpfc_nodelist *ndlp; 3689 3690 cmdsize = (sizeof(uint32_t) + sizeof(FARP)); 3691 3692 ndlp = lpfc_findnode_did(vport, nportid); 3693 if (!ndlp) { 3694 ndlp = lpfc_nlp_init(vport, nportid); 3695 if (!ndlp) 3696 return 1; 3697 lpfc_enqueue_node(vport, ndlp); 3698 } 3699 3700 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3701 ndlp->nlp_DID, ELS_CMD_FARPR); 3702 if (!elsiocb) 3703 return 1; 3704 3705 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 3706 3707 *((uint32_t *) (pcmd)) = ELS_CMD_FARPR; 3708 pcmd += sizeof(uint32_t); 3709 3710 /* Fill in FARPR payload */ 3711 fp = (FARP *) (pcmd); 3712 memset(fp, 0, sizeof(FARP)); 3713 lp = (uint32_t *) pcmd; 3714 *lp++ = be32_to_cpu(nportid); 3715 *lp++ = be32_to_cpu(vport->fc_myDID); 3716 fp->Rflags = 0; 3717 fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE); 3718 3719 memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name)); 3720 memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 3721 ondlp = lpfc_findnode_did(vport, nportid); 3722 if (ondlp) { 3723 memcpy(&fp->OportName, &ondlp->nlp_portname, 3724 sizeof(struct lpfc_name)); 3725 memcpy(&fp->OnodeName, &ondlp->nlp_nodename, 3726 sizeof(struct lpfc_name)); 3727 } 3728 3729 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3730 "Issue FARPR: did:x%x", 3731 ndlp->nlp_DID, 0, 0); 3732 3733 phba->fc_stat.elsXmitFARPR++; 3734 elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd; 3735 elsiocb->ndlp = lpfc_nlp_get(ndlp); 3736 if (!elsiocb->ndlp) { 3737 lpfc_els_free_iocb(phba, elsiocb); 3738 return 1; 3739 } 3740 3741 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3742 if (rc == IOCB_ERROR) { 3743 /* The additional lpfc_nlp_put will cause the following 3744 * lpfc_els_free_iocb routine to trigger the release of 3745 * the node. 3746 */ 3747 lpfc_els_free_iocb(phba, elsiocb); 3748 lpfc_nlp_put(ndlp); 3749 return 1; 3750 } 3751 /* This will cause the callback-function lpfc_cmpl_els_cmd to 3752 * trigger the release of the node. 3753 */ 3754 /* Don't release reference count as RDF is likely outstanding */ 3755 return 0; 3756 } 3757 3758 /** 3759 * lpfc_issue_els_rdf - Register for diagnostic functions from the fabric. 3760 * @vport: pointer to a host virtual N_Port data structure. 3761 * @retry: retry counter for the command IOCB. 
3762 * 3763 * This routine issues an ELS RDF to the Fabric Controller to register 3764 * for diagnostic functions. 3765 * 3766 * Note that the ndlp reference count will be incremented by 1 for holding the 3767 * ndlp and the reference to ndlp will be stored into the ndlp field of 3768 * the IOCB for the completion callback function to the RDF ELS command. 3769 * 3770 * Return code 3771 * 0 - Successfully issued rdf command 3772 * 1 - Failed to issue rdf command 3773 **/ 3774 int 3775 lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry) 3776 { 3777 struct lpfc_hba *phba = vport->phba; 3778 struct lpfc_iocbq *elsiocb; 3779 struct lpfc_els_rdf_req *prdf; 3780 struct lpfc_nodelist *ndlp; 3781 uint16_t cmdsize; 3782 int rc; 3783 3784 cmdsize = sizeof(*prdf); 3785 3786 ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID); 3787 if (!ndlp) { 3788 ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID); 3789 if (!ndlp) 3790 return -ENODEV; 3791 lpfc_enqueue_node(vport, ndlp); 3792 } 3793 3794 /* RDF ELS is not required on an NPIV VN_Port. */ 3795 if (vport->port_type == LPFC_NPIV_PORT) 3796 return -EACCES; 3797 3798 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3799 ndlp->nlp_DID, ELS_CMD_RDF); 3800 if (!elsiocb) 3801 return -ENOMEM; 3802 3803 /* Configure the payload for the supported FPIN events. */ 3804 prdf = (struct lpfc_els_rdf_req *)elsiocb->cmd_dmabuf->virt; 3805 memset(prdf, 0, cmdsize); 3806 prdf->rdf.fpin_cmd = ELS_RDF; 3807 prdf->rdf.desc_len = cpu_to_be32(sizeof(struct lpfc_els_rdf_req) - 3808 sizeof(struct fc_els_rdf)); 3809 prdf->reg_d1.reg_desc.desc_tag = cpu_to_be32(ELS_DTAG_FPIN_REGISTER); 3810 prdf->reg_d1.reg_desc.desc_len = cpu_to_be32( 3811 FC_TLV_DESC_LENGTH_FROM_SZ(prdf->reg_d1)); 3812 prdf->reg_d1.reg_desc.count = cpu_to_be32(ELS_RDF_REG_TAG_CNT); 3813 prdf->reg_d1.desc_tags[0] = cpu_to_be32(ELS_DTAG_LNK_INTEGRITY); 3814 prdf->reg_d1.desc_tags[1] = cpu_to_be32(ELS_DTAG_DELIVERY); 3815 prdf->reg_d1.desc_tags[2] = cpu_to_be32(ELS_DTAG_PEER_CONGEST); 3816 prdf->reg_d1.desc_tags[3] = cpu_to_be32(ELS_DTAG_CONGESTION); 3817 3818 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3819 "6444 Xmit RDF to remote NPORT x%x Reg: %x %x\n", 3820 ndlp->nlp_DID, phba->cgn_reg_signal, 3821 phba->cgn_reg_fpin); 3822 3823 phba->cgn_fpin_frequency = LPFC_FPIN_INIT_FREQ; 3824 elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd; 3825 elsiocb->ndlp = lpfc_nlp_get(ndlp); 3826 if (!elsiocb->ndlp) { 3827 lpfc_els_free_iocb(phba, elsiocb); 3828 return -EIO; 3829 } 3830 3831 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3832 "Issue RDF: did:x%x refcnt %d", 3833 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3834 3835 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3836 if (rc == IOCB_ERROR) { 3837 lpfc_els_free_iocb(phba, elsiocb); 3838 lpfc_nlp_put(ndlp); 3839 return -EIO; 3840 } 3841 return 0; 3842 } 3843 3844 /** 3845 * lpfc_els_rcv_rdf - Receive RDF ELS request from the fabric. 3846 * @vport: pointer to a host virtual N_Port data structure. 3847 * @cmdiocb: pointer to lpfc command iocb data structure. 3848 * @ndlp: pointer to a node-list data structure. 3849 * 3850 * A received RDF implies a possible change to fabric supported diagnostic 3851 * functions. This routine sends LS_ACC and then has the Nx_Port issue a new 3852 * RDF request to reregister for supported diagnostic functions. 
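 * The new RDF is issued with a retry count of zero, so the single retry
 * allowed by lpfc_cmpl_els_disc_cmd() still applies to it.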
3853 *
3854 * Return code
3855 * 0 - Success
3856 * -EIO - Failed to process received RDF
3857 **/
3858 static int
3859 lpfc_els_rcv_rdf(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3860 struct lpfc_nodelist *ndlp)
3861 {
3862 /* Send LS_ACC */
3863 if (lpfc_els_rsp_acc(vport, ELS_CMD_RDF, cmdiocb, ndlp, NULL)) {
3864 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
3865 "1623 Failed to RDF_ACC from x%x for x%x\n",
3866 ndlp->nlp_DID, vport->fc_myDID);
3867 return -EIO;
3868 }
3869
3870 /* Issue new RDF for reregistering */
3871 if (lpfc_issue_els_rdf(vport, 0)) {
3872 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
3873 "2623 Failed to re register RDF for x%x\n",
3874 vport->fc_myDID);
3875 return -EIO;
3876 }
3877
3878 return 0;
3879 }
3880
3881 /**
3882 * lpfc_least_capable_settings - helper function for EDC rsp processing
3883 * @phba: pointer to lpfc hba data structure.
3884 * @pcgd: pointer to congestion detection descriptor in EDC rsp.
3885 *
3886 * This helper routine determines the least capable setting for
3887 * congestion signals and signal frequency, including scale, from the
3888 * congestion detection descriptor in the EDC rsp. The routine
3889 * sets @phba values in preparation for a set_features mailbox.
3890 **/
3891 static void
3892 lpfc_least_capable_settings(struct lpfc_hba *phba,
3893 struct fc_diag_cg_sig_desc *pcgd)
3894 {
3895 u32 rsp_sig_cap = 0, drv_sig_cap = 0;
3896 u32 rsp_sig_freq_cyc = 0, rsp_sig_freq_scale = 0;
3897
3898 /* Get rsp signal and frequency capabilities. */
3899 rsp_sig_cap = be32_to_cpu(pcgd->xmt_signal_capability);
3900 rsp_sig_freq_cyc = be16_to_cpu(pcgd->xmt_signal_frequency.count);
3901 rsp_sig_freq_scale = be16_to_cpu(pcgd->xmt_signal_frequency.units);
3902
3903 /* If the Fport does not support signals, set FPIN only */
3904 if (rsp_sig_cap == EDC_CG_SIG_NOTSUPPORTED)
3905 goto out_no_support;
3906
3907 /* Apply the xmt scale to the xmt cycle to get the correct frequency.
3908 * Adapter default is 100 milliseconds. Convert all xmt cycle values
3909 * to milliseconds.
3910 */
3911 switch (rsp_sig_freq_scale) {
3912 case EDC_CG_SIGFREQ_SEC:
3913 rsp_sig_freq_cyc *= MSEC_PER_SEC;
3914 break;
3915 case EDC_CG_SIGFREQ_MSEC:
3916 rsp_sig_freq_cyc = 1;
3917 break;
3918 default:
3919 goto out_no_support;
3920 }
3921
3922 /* Convenient shorthand. */
3923 drv_sig_cap = phba->cgn_reg_signal;
3924
3925 /* Choose the least capable frequency. */
3926 if (rsp_sig_freq_cyc > phba->cgn_sig_freq)
3927 phba->cgn_sig_freq = rsp_sig_freq_cyc;
3928
3929 /* There should be some common signal support. Settle on the least
3930 * capable signal and adjust FPIN values. Initialize defaults to ease
3931 * the decision.
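 * For example, if the FPort grants WARN-only signals while the driver
 * registered for WARN and ALARM, the driver settles on WARN-only signals
 * and keeps only the ALARM FPIN registration.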
3932 */ 3933 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM; 3934 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 3935 if (rsp_sig_cap == EDC_CG_SIG_WARN_ONLY && 3936 (drv_sig_cap == EDC_CG_SIG_WARN_ONLY || 3937 drv_sig_cap == EDC_CG_SIG_WARN_ALARM)) { 3938 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY; 3939 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN; 3940 } 3941 if (rsp_sig_cap == EDC_CG_SIG_WARN_ALARM) { 3942 if (drv_sig_cap == EDC_CG_SIG_WARN_ALARM) { 3943 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ALARM; 3944 phba->cgn_reg_fpin = LPFC_CGN_FPIN_NONE; 3945 } 3946 if (drv_sig_cap == EDC_CG_SIG_WARN_ONLY) { 3947 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY; 3948 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN; 3949 } 3950 } 3951 3952 /* We are NOT recording signal frequency in congestion info buffer */ 3953 return; 3954 3955 out_no_support: 3956 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 3957 phba->cgn_sig_freq = 0; 3958 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; 3959 } 3960 3961 DECLARE_ENUM2STR_LOOKUP(lpfc_get_tlv_dtag_nm, fc_ls_tlv_dtag, 3962 FC_LS_TLV_DTAG_INIT); 3963 3964 /** 3965 * lpfc_cmpl_els_edc - Completion callback function for EDC 3966 * @phba: pointer to lpfc hba data structure. 3967 * @cmdiocb: pointer to lpfc command iocb data structure. 3968 * @rspiocb: pointer to lpfc response iocb data structure. 3969 * 3970 * This routine is the completion callback function for issuing the Exchange 3971 * Diagnostic Capabilities (EDC) command. The driver issues an EDC to 3972 * notify the FPort of its Congestion and Link Fault capabilities. This 3973 * routine parses the FPort's response and decides on the least common 3974 * values applicable to both FPort and NPort for Warnings and Alarms that 3975 * are communicated via hardware signals. 3976 **/ 3977 static void 3978 lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3979 struct lpfc_iocbq *rspiocb) 3980 { 3981 IOCB_t *irsp_iocb; 3982 struct fc_els_edc_resp *edc_rsp; 3983 struct fc_tlv_desc *tlv; 3984 struct fc_diag_cg_sig_desc *pcgd; 3985 struct fc_diag_lnkflt_desc *plnkflt; 3986 struct lpfc_dmabuf *pcmd, *prsp; 3987 const char *dtag_nm; 3988 u32 *pdata, dtag; 3989 int desc_cnt = 0, bytes_remain; 3990 bool rcv_cap_desc = false; 3991 struct lpfc_nodelist *ndlp; 3992 u32 ulp_status, ulp_word4, tmo, did, iotag; 3993 3994 ndlp = cmdiocb->ndlp; 3995 3996 ulp_status = get_job_ulpstatus(phba, rspiocb); 3997 ulp_word4 = get_job_word4(phba, rspiocb); 3998 did = get_job_els_rsp64_did(phba, rspiocb); 3999 4000 if (phba->sli_rev == LPFC_SLI_REV4) { 4001 tmo = get_wqe_tmo(rspiocb); 4002 iotag = get_wqe_reqtag(rspiocb); 4003 } else { 4004 irsp_iocb = &rspiocb->iocb; 4005 tmo = irsp_iocb->ulpTimeout; 4006 iotag = irsp_iocb->ulpIoTag; 4007 } 4008 4009 lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD, 4010 "EDC cmpl: status:x%x/x%x did:x%x", 4011 ulp_status, ulp_word4, did); 4012 4013 /* ELS cmd tag <ulpIoTag> completes */ 4014 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 4015 "4201 EDC cmd tag x%x completes Data: x%x x%x x%x\n", 4016 iotag, ulp_status, ulp_word4, tmo); 4017 4018 pcmd = cmdiocb->cmd_dmabuf; 4019 if (!pcmd) 4020 goto out; 4021 4022 pdata = (u32 *)pcmd->virt; 4023 if (!pdata) 4024 goto out; 4025 4026 /* Need to clear signal values, send features MB and RDF with FPIN. 
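 * Any error below branches to the out: label, which reverts to FPIN-only
 * registration when no congestion capability descriptor was received.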
*/ 4027 if (ulp_status) 4028 goto out; 4029 4030 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 4031 if (!prsp) 4032 goto out; 4033 4034 edc_rsp = prsp->virt; 4035 if (!edc_rsp) 4036 goto out; 4037 4038 /* ELS cmd tag <ulpIoTag> completes */ 4039 lpfc_printf_log(phba, KERN_INFO, 4040 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, 4041 "4676 Fabric EDC Rsp: " 4042 "0x%02x, 0x%08x\n", 4043 edc_rsp->acc_hdr.la_cmd, 4044 be32_to_cpu(edc_rsp->desc_list_len)); 4045 4046 /* 4047 * Payload length in bytes is the response descriptor list 4048 * length minus the 12 bytes of Link Service Request 4049 * Information descriptor in the reply. 4050 */ 4051 bytes_remain = be32_to_cpu(edc_rsp->desc_list_len) - 4052 sizeof(struct fc_els_lsri_desc); 4053 if (bytes_remain <= 0) 4054 goto out; 4055 4056 tlv = edc_rsp->desc; 4057 4058 /* 4059 * cycle through EDC diagnostic descriptors to find the 4060 * congestion signaling capability descriptor 4061 */ 4062 while (bytes_remain) { 4063 if (bytes_remain < FC_TLV_DESC_HDR_SZ) { 4064 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 4065 "6461 Truncated TLV hdr on " 4066 "Diagnostic descriptor[%d]\n", 4067 desc_cnt); 4068 goto out; 4069 } 4070 4071 dtag = be32_to_cpu(tlv->desc_tag); 4072 switch (dtag) { 4073 case ELS_DTAG_LNK_FAULT_CAP: 4074 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 4075 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 4076 sizeof(struct fc_diag_lnkflt_desc)) { 4077 lpfc_printf_log(phba, KERN_WARNING, 4078 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, 4079 "6462 Truncated Link Fault Diagnostic " 4080 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 4081 desc_cnt, bytes_remain, 4082 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 4083 sizeof(struct fc_diag_lnkflt_desc)); 4084 goto out; 4085 } 4086 plnkflt = (struct fc_diag_lnkflt_desc *)tlv; 4087 lpfc_printf_log(phba, KERN_INFO, 4088 LOG_ELS | LOG_LDS_EVENT, 4089 "4617 Link Fault Desc Data: 0x%08x 0x%08x " 4090 "0x%08x 0x%08x 0x%08x\n", 4091 be32_to_cpu(plnkflt->desc_tag), 4092 be32_to_cpu(plnkflt->desc_len), 4093 be32_to_cpu( 4094 plnkflt->degrade_activate_threshold), 4095 be32_to_cpu( 4096 plnkflt->degrade_deactivate_threshold), 4097 be32_to_cpu(plnkflt->fec_degrade_interval)); 4098 break; 4099 case ELS_DTAG_CG_SIGNAL_CAP: 4100 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 4101 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 4102 sizeof(struct fc_diag_cg_sig_desc)) { 4103 lpfc_printf_log( 4104 phba, KERN_WARNING, LOG_CGN_MGMT, 4105 "6463 Truncated Cgn Signal Diagnostic " 4106 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 4107 desc_cnt, bytes_remain, 4108 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 4109 sizeof(struct fc_diag_cg_sig_desc)); 4110 goto out; 4111 } 4112 4113 pcgd = (struct fc_diag_cg_sig_desc *)tlv; 4114 lpfc_printf_log( 4115 phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 4116 "4616 CGN Desc Data: 0x%08x 0x%08x " 4117 "0x%08x 0x%04x 0x%04x 0x%08x 0x%04x 0x%04x\n", 4118 be32_to_cpu(pcgd->desc_tag), 4119 be32_to_cpu(pcgd->desc_len), 4120 be32_to_cpu(pcgd->xmt_signal_capability), 4121 be16_to_cpu(pcgd->xmt_signal_frequency.count), 4122 be16_to_cpu(pcgd->xmt_signal_frequency.units), 4123 be32_to_cpu(pcgd->rcv_signal_capability), 4124 be16_to_cpu(pcgd->rcv_signal_frequency.count), 4125 be16_to_cpu(pcgd->rcv_signal_frequency.units)); 4126 4127 /* Compare driver and Fport capabilities and choose 4128 * least common. 
4129 */ 4130 lpfc_least_capable_settings(phba, pcgd); 4131 rcv_cap_desc = true; 4132 break; 4133 default: 4134 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 4135 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 4136 "4919 unknown Diagnostic " 4137 "Descriptor[%d]: tag x%x (%s)\n", 4138 desc_cnt, dtag, dtag_nm); 4139 } 4140 4141 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); 4142 tlv = fc_tlv_next_desc(tlv); 4143 desc_cnt++; 4144 } 4145 4146 out: 4147 if (!rcv_cap_desc) { 4148 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; 4149 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 4150 phba->cgn_sig_freq = 0; 4151 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT, 4152 "4202 EDC rsp error - sending RDF " 4153 "for FPIN only.\n"); 4154 } 4155 4156 lpfc_config_cgn_signal(phba); 4157 4158 /* Check to see if link went down during discovery */ 4159 lpfc_els_chk_latt(phba->pport); 4160 lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD, 4161 "EDC Cmpl: did:x%x refcnt %d", 4162 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 4163 lpfc_els_free_iocb(phba, cmdiocb); 4164 lpfc_nlp_put(ndlp); 4165 } 4166 4167 static void 4168 lpfc_format_edc_lft_desc(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 4169 { 4170 struct fc_diag_lnkflt_desc *lft = (struct fc_diag_lnkflt_desc *)tlv; 4171 4172 lft->desc_tag = cpu_to_be32(ELS_DTAG_LNK_FAULT_CAP); 4173 lft->desc_len = cpu_to_be32( 4174 FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_diag_lnkflt_desc)); 4175 4176 lft->degrade_activate_threshold = 4177 cpu_to_be32(phba->degrade_activate_threshold); 4178 lft->degrade_deactivate_threshold = 4179 cpu_to_be32(phba->degrade_deactivate_threshold); 4180 lft->fec_degrade_interval = cpu_to_be32(phba->fec_degrade_interval); 4181 } 4182 4183 static void 4184 lpfc_format_edc_cgn_desc(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 4185 { 4186 struct fc_diag_cg_sig_desc *cgd = (struct fc_diag_cg_sig_desc *)tlv; 4187 4188 /* We are assuming cgd was zero'ed before calling this routine */ 4189 4190 /* Configure the congestion detection capability */ 4191 cgd->desc_tag = cpu_to_be32(ELS_DTAG_CG_SIGNAL_CAP); 4192 4193 /* Descriptor len doesn't include the tag or len fields. */ 4194 cgd->desc_len = cpu_to_be32( 4195 FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_diag_cg_sig_desc)); 4196 4197 /* xmt_signal_capability already set to EDC_CG_SIG_NOTSUPPORTED. 4198 * xmt_signal_frequency.count already set to 0. 4199 * xmt_signal_frequency.units already set to 0. 4200 */ 4201 4202 if (phba->cmf_active_mode == LPFC_CFG_OFF) { 4203 /* rcv_signal_capability already set to EDC_CG_SIG_NOTSUPPORTED. 4204 * rcv_signal_frequency.count already set to 0. 4205 * rcv_signal_frequency.units already set to 0. 4206 */ 4207 phba->cgn_sig_freq = 0; 4208 return; 4209 } 4210 switch (phba->cgn_reg_signal) { 4211 case EDC_CG_SIG_WARN_ONLY: 4212 cgd->rcv_signal_capability = cpu_to_be32(EDC_CG_SIG_WARN_ONLY); 4213 break; 4214 case EDC_CG_SIG_WARN_ALARM: 4215 cgd->rcv_signal_capability = cpu_to_be32(EDC_CG_SIG_WARN_ALARM); 4216 break; 4217 default: 4218 /* rcv_signal_capability left 0 thus no support */ 4219 break; 4220 } 4221 4222 /* We start negotiation with lpfc_fabric_cgn_frequency, after 4223 * the completion we settle on the higher frequency. 
4224 */ 4225 cgd->rcv_signal_frequency.count = 4226 cpu_to_be16(lpfc_fabric_cgn_frequency); 4227 cgd->rcv_signal_frequency.units = 4228 cpu_to_be16(EDC_CG_SIGFREQ_MSEC); 4229 } 4230 4231 static bool 4232 lpfc_link_is_lds_capable(struct lpfc_hba *phba) 4233 { 4234 if (!(phba->lmt & LMT_64Gb)) 4235 return false; 4236 if (phba->sli_rev != LPFC_SLI_REV4) 4237 return false; 4238 4239 if (phba->sli4_hba.conf_trunk) { 4240 if (phba->trunk_link.phy_lnk_speed == LPFC_USER_LINK_SPEED_64G) 4241 return true; 4242 } else if (phba->fc_linkspeed == LPFC_LINK_SPEED_64GHZ) { 4243 return true; 4244 } 4245 return false; 4246 } 4247 4248 /** 4249 * lpfc_issue_els_edc - Exchange Diagnostic Capabilities with the fabric. 4250 * @vport: pointer to a host virtual N_Port data structure. 4251 * @retry: retry counter for the command iocb. 4252 * 4253 * This routine issues an ELS EDC to the F-Port Controller to communicate 4254 * this N_Port's support of hardware signals in its Congestion 4255 * Capabilities Descriptor. 4256 * 4257 * Note: This routine does not check if one or more signals are 4258 * set in the cgn_reg_signal parameter. The caller makes the 4259 * decision to enforce cgn_reg_signal as nonzero or zero depending 4260 * on the conditions. During Fabric requests, the driver 4261 * requires cgn_reg_signals to be nonzero. But a dynamic request 4262 * to set the congestion mode to OFF from Monitor or Manage 4263 * would correctly issue an EDC with no signals enabled to 4264 * turn off switch functionality and then update the FW. 4265 * 4266 * Return code 4267 * 0 - Successfully issued edc command 4268 * 1 - Failed to issue edc command 4269 **/ 4270 int 4271 lpfc_issue_els_edc(struct lpfc_vport *vport, uint8_t retry) 4272 { 4273 struct lpfc_hba *phba = vport->phba; 4274 struct lpfc_iocbq *elsiocb; 4275 struct fc_els_edc *edc_req; 4276 struct fc_tlv_desc *tlv; 4277 u16 cmdsize; 4278 struct lpfc_nodelist *ndlp; 4279 u8 *pcmd = NULL; 4280 u32 cgn_desc_size, lft_desc_size; 4281 int rc; 4282 4283 if (vport->port_type == LPFC_NPIV_PORT) 4284 return -EACCES; 4285 4286 ndlp = lpfc_findnode_did(vport, Fabric_DID); 4287 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 4288 return -ENODEV; 4289 4290 cgn_desc_size = (phba->cgn_init_reg_signal) ? 4291 sizeof(struct fc_diag_cg_sig_desc) : 0; 4292 lft_desc_size = (lpfc_link_is_lds_capable(phba)) ? 4293 sizeof(struct fc_diag_lnkflt_desc) : 0; 4294 cmdsize = cgn_desc_size + lft_desc_size; 4295 4296 /* Skip EDC if no applicable descriptors */ 4297 if (!cmdsize) 4298 goto try_rdf; 4299 4300 cmdsize += sizeof(struct fc_els_edc); 4301 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 4302 ndlp->nlp_DID, ELS_CMD_EDC); 4303 if (!elsiocb) 4304 goto try_rdf; 4305 4306 /* Configure the payload for the supported Diagnostics capabilities. 
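 * The request is a fc_els_edc header followed by whichever of the congestion
 * signaling and link fault descriptors were sized above.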
*/ 4307 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt; 4308 memset(pcmd, 0, cmdsize); 4309 edc_req = (struct fc_els_edc *)pcmd; 4310 edc_req->desc_len = cpu_to_be32(cgn_desc_size + lft_desc_size); 4311 edc_req->edc_cmd = ELS_EDC; 4312 tlv = edc_req->desc; 4313 4314 if (cgn_desc_size) { 4315 lpfc_format_edc_cgn_desc(phba, tlv); 4316 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency; 4317 tlv = fc_tlv_next_desc(tlv); 4318 } 4319 4320 if (lft_desc_size) 4321 lpfc_format_edc_lft_desc(phba, tlv); 4322 4323 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 4324 "4623 Xmit EDC to remote " 4325 "NPORT x%x reg_sig x%x reg_fpin:x%x\n", 4326 ndlp->nlp_DID, phba->cgn_reg_signal, 4327 phba->cgn_reg_fpin); 4328 4329 elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd; 4330 elsiocb->ndlp = lpfc_nlp_get(ndlp); 4331 if (!elsiocb->ndlp) { 4332 lpfc_els_free_iocb(phba, elsiocb); 4333 return -EIO; 4334 } 4335 4336 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 4337 "Issue EDC: did:x%x refcnt %d", 4338 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 4339 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 4340 if (rc == IOCB_ERROR) { 4341 /* The additional lpfc_nlp_put will cause the following 4342 * lpfc_els_free_iocb routine to trigger the rlease of 4343 * the node. 4344 */ 4345 lpfc_els_free_iocb(phba, elsiocb); 4346 lpfc_nlp_put(ndlp); 4347 goto try_rdf; 4348 } 4349 return 0; 4350 try_rdf: 4351 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM; 4352 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 4353 rc = lpfc_issue_els_rdf(vport, 0); 4354 return rc; 4355 } 4356 4357 /** 4358 * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry 4359 * @vport: pointer to a host virtual N_Port data structure. 4360 * @nlp: pointer to a node-list data structure. 4361 * 4362 * This routine cancels the timer with a delayed IOCB-command retry for 4363 * a @vport's @ndlp. It stops the timer for the delayed function retrial and 4364 * removes the ELS retry event if it presents. In addition, if the 4365 * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB 4366 * commands are sent for the @vport's nodes that require issuing discovery 4367 * ADISC. 
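 * If the port state has already reached LPFC_VPORT_READY, any remaining
 * PLOGIs are issued instead, and RSCN processing is completed once no
 * discovery nodes remain.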
4368 **/ 4369 void 4370 lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) 4371 { 4372 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4373 struct lpfc_work_evt *evtp; 4374 4375 if (!(nlp->nlp_flag & NLP_DELAY_TMO)) 4376 return; 4377 spin_lock_irq(&nlp->lock); 4378 nlp->nlp_flag &= ~NLP_DELAY_TMO; 4379 spin_unlock_irq(&nlp->lock); 4380 del_timer_sync(&nlp->nlp_delayfunc); 4381 nlp->nlp_last_elscmd = 0; 4382 if (!list_empty(&nlp->els_retry_evt.evt_listp)) { 4383 list_del_init(&nlp->els_retry_evt.evt_listp); 4384 /* Decrement nlp reference count held for the delayed retry */ 4385 evtp = &nlp->els_retry_evt; 4386 lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1); 4387 } 4388 if (nlp->nlp_flag & NLP_NPR_2B_DISC) { 4389 spin_lock_irq(&nlp->lock); 4390 nlp->nlp_flag &= ~NLP_NPR_2B_DISC; 4391 spin_unlock_irq(&nlp->lock); 4392 if (vport->num_disc_nodes) { 4393 if (vport->port_state < LPFC_VPORT_READY) { 4394 /* Check if there are more ADISCs to be sent */ 4395 lpfc_more_adisc(vport); 4396 } else { 4397 /* Check if there are more PLOGIs to be sent */ 4398 lpfc_more_plogi(vport); 4399 if (vport->num_disc_nodes == 0) { 4400 spin_lock_irq(shost->host_lock); 4401 vport->fc_flag &= ~FC_NDISC_ACTIVE; 4402 spin_unlock_irq(shost->host_lock); 4403 lpfc_can_disctmo(vport); 4404 lpfc_end_rscn(vport); 4405 } 4406 } 4407 } 4408 } 4409 return; 4410 } 4411 4412 /** 4413 * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer 4414 * @t: pointer to the timer function associated data (ndlp). 4415 * 4416 * This routine is invoked by the ndlp delayed-function timer to check 4417 * whether there is any pending ELS retry event(s) with the node. If not, it 4418 * simply returns. Otherwise, if there is at least one ELS delayed event, it 4419 * adds the delayed events to the HBA work list and invokes the 4420 * lpfc_worker_wake_up() routine to wake up worker thread to process the 4421 * event. Note that lpfc_nlp_get() is called before posting the event to 4422 * the work list to hold reference count of ndlp so that it guarantees the 4423 * reference to ndlp will still be available when the worker thread gets 4424 * to the event associated with the ndlp. 4425 **/ 4426 void 4427 lpfc_els_retry_delay(struct timer_list *t) 4428 { 4429 struct lpfc_nodelist *ndlp = from_timer(ndlp, t, nlp_delayfunc); 4430 struct lpfc_vport *vport = ndlp->vport; 4431 struct lpfc_hba *phba = vport->phba; 4432 unsigned long flags; 4433 struct lpfc_work_evt *evtp = &ndlp->els_retry_evt; 4434 4435 /* Hold a node reference for outstanding queued work */ 4436 if (!lpfc_nlp_get(ndlp)) 4437 return; 4438 4439 spin_lock_irqsave(&phba->hbalock, flags); 4440 if (!list_empty(&evtp->evt_listp)) { 4441 spin_unlock_irqrestore(&phba->hbalock, flags); 4442 lpfc_nlp_put(ndlp); 4443 return; 4444 } 4445 4446 evtp->evt_arg1 = ndlp; 4447 evtp->evt = LPFC_EVT_ELS_RETRY; 4448 list_add_tail(&evtp->evt_listp, &phba->work_list); 4449 spin_unlock_irqrestore(&phba->hbalock, flags); 4450 4451 lpfc_worker_wake_up(phba); 4452 } 4453 4454 /** 4455 * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function 4456 * @ndlp: pointer to a node-list data structure. 4457 * 4458 * This routine is the worker-thread handler for processing the @ndlp delayed 4459 * event(s), posted by the lpfc_els_retry_delay() routine. It simply retrieves 4460 * the last ELS command from the associated ndlp and invokes the proper ELS 4461 * function according to the delayed ELS command to retry the command. 
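 * The NLP_DELAY_TMO flag and the delay timer are cleared before the retry is
 * issued.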
4462 **/ 4463 void 4464 lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) 4465 { 4466 struct lpfc_vport *vport = ndlp->vport; 4467 uint32_t cmd, retry; 4468 4469 spin_lock_irq(&ndlp->lock); 4470 cmd = ndlp->nlp_last_elscmd; 4471 ndlp->nlp_last_elscmd = 0; 4472 4473 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { 4474 spin_unlock_irq(&ndlp->lock); 4475 return; 4476 } 4477 4478 ndlp->nlp_flag &= ~NLP_DELAY_TMO; 4479 spin_unlock_irq(&ndlp->lock); 4480 /* 4481 * If a discovery event readded nlp_delayfunc after timer 4482 * firing and before processing the timer, cancel the 4483 * nlp_delayfunc. 4484 */ 4485 del_timer_sync(&ndlp->nlp_delayfunc); 4486 retry = ndlp->nlp_retry; 4487 ndlp->nlp_retry = 0; 4488 4489 switch (cmd) { 4490 case ELS_CMD_FLOGI: 4491 lpfc_issue_els_flogi(vport, ndlp, retry); 4492 break; 4493 case ELS_CMD_PLOGI: 4494 if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) { 4495 ndlp->nlp_prev_state = ndlp->nlp_state; 4496 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 4497 } 4498 break; 4499 case ELS_CMD_ADISC: 4500 if (!lpfc_issue_els_adisc(vport, ndlp, retry)) { 4501 ndlp->nlp_prev_state = ndlp->nlp_state; 4502 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 4503 } 4504 break; 4505 case ELS_CMD_PRLI: 4506 case ELS_CMD_NVMEPRLI: 4507 if (!lpfc_issue_els_prli(vport, ndlp, retry)) { 4508 ndlp->nlp_prev_state = ndlp->nlp_state; 4509 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 4510 } 4511 break; 4512 case ELS_CMD_LOGO: 4513 if (!lpfc_issue_els_logo(vport, ndlp, retry)) { 4514 ndlp->nlp_prev_state = ndlp->nlp_state; 4515 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 4516 } 4517 break; 4518 case ELS_CMD_FDISC: 4519 if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)) 4520 lpfc_issue_els_fdisc(vport, ndlp, retry); 4521 break; 4522 } 4523 return; 4524 } 4525 4526 /** 4527 * lpfc_link_reset - Issue link reset 4528 * @vport: pointer to a virtual N_Port data structure. 4529 * 4530 * This routine performs link reset by sending INIT_LINK mailbox command. 4531 * For SLI-3 adapter, link attention interrupt is enabled before issuing 4532 * INIT_LINK mailbox command. 
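 * For SLI-4 adapters only the INIT_LINK mailbox command is issued.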
4533 *
4534 * Return code
4535 * 0 - Link reset initiated successfully
4536 * 1 - Failed to initiate link reset
4537 **/
4538 int
4539 lpfc_link_reset(struct lpfc_vport *vport)
4540 {
4541 struct lpfc_hba *phba = vport->phba;
4542 LPFC_MBOXQ_t *mbox;
4543 uint32_t control;
4544 int rc;
4545
4546 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4547 "2851 Attempt link reset\n");
4548 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4549 if (!mbox) {
4550 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4551 "2852 Failed to allocate mbox memory");
4552 return 1;
4553 }
4554
4555 /* Enable Link attention interrupts */
4556 if (phba->sli_rev <= LPFC_SLI_REV3) {
4557 spin_lock_irq(&phba->hbalock);
4558 phba->sli.sli_flag |= LPFC_PROCESS_LA;
4559 control = readl(phba->HCregaddr);
4560 control |= HC_LAINT_ENA;
4561 writel(control, phba->HCregaddr);
4562 readl(phba->HCregaddr); /* flush */
4563 spin_unlock_irq(&phba->hbalock);
4564 }
4565
4566 lpfc_init_link(phba, mbox, phba->cfg_topology,
4567 phba->cfg_link_speed);
4568 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4569 mbox->vport = vport;
4570 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
4571 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
4572 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4573 "2853 Failed to issue INIT_LINK "
4574 "mbox command, rc:x%x\n", rc);
4575 mempool_free(mbox, phba->mbox_mem_pool);
4576 return 1;
4577 }
4578
4579 return 0;
4580 }
4581
4582 /**
4583 * lpfc_els_retry - Make retry decision on an els command iocb
4584 * @phba: pointer to lpfc hba data structure.
4585 * @cmdiocb: pointer to lpfc command iocb data structure.
4586 * @rspiocb: pointer to lpfc response iocb data structure.
4587 *
4588 * This routine makes a retry decision on an ELS command IOCB, which has
4589 * failed. The following ELS IOCBs use this function for retrying the command
4590 * when a previously issued command responded with error status: FLOGI, PLOGI,
4591 * PRLI, ADISC and FDISC. Based on the ELS command type and the
4592 * returned error status, it makes the decision whether a retry shall be
4593 * issued for the command, and whether a retry shall be made immediately or
4594 * delayed. In the former case, the corresponding ELS command issuing-function
4595 * is called to retry the command. In the latter case, the ELS command shall
4596 * be posted to the ndlp delayed event and the delayed function timer set on
4597 * the ndlp for the delayed command issuing.
4598 *
4599 * Return code
4600 * 0 - No retry of els command is made
4601 * 1 - Immediate or delayed retry of els command is made
4602 **/
4603 static int
4604 lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4605 struct lpfc_iocbq *rspiocb)
4606 {
4607 struct lpfc_vport *vport = cmdiocb->vport;
4608 union lpfc_wqe128 *irsp = &rspiocb->wqe;
4609 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
4610 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;
4611 uint32_t *elscmd;
4612 struct ls_rjt stat;
4613 int retry = 0, maxretry = lpfc_max_els_tries, delay = 0;
4614 int logerr = 0;
4615 uint32_t cmd = 0;
4616 uint32_t did;
4617 int link_reset = 0, rc;
4618 u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
4619 u32 ulp_word4 = get_job_word4(phba, rspiocb);
4620
4621
4622 /* Note: cmd_dmabuf may be 0 for internal driver abort
4623 * of a delayed ELS command.
4624 */ 4625 4626 if (pcmd && pcmd->virt) { 4627 elscmd = (uint32_t *) (pcmd->virt); 4628 cmd = *elscmd++; 4629 } 4630 4631 if (ndlp) 4632 did = ndlp->nlp_DID; 4633 else { 4634 /* We should only hit this case for retrying PLOGI */ 4635 did = get_job_els_rsp64_did(phba, rspiocb); 4636 ndlp = lpfc_findnode_did(vport, did); 4637 if (!ndlp && (cmd != ELS_CMD_PLOGI)) 4638 return 0; 4639 } 4640 4641 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 4642 "Retry ELS: wd7:x%x wd4:x%x did:x%x", 4643 *(((uint32_t *)irsp) + 7), ulp_word4, did); 4644 4645 switch (ulp_status) { 4646 case IOSTAT_FCP_RSP_ERROR: 4647 break; 4648 case IOSTAT_REMOTE_STOP: 4649 if (phba->sli_rev == LPFC_SLI_REV4) { 4650 /* This IO was aborted by the target, we don't 4651 * know the rxid and because we did not send the 4652 * ABTS we cannot generate and RRQ. 4653 */ 4654 lpfc_set_rrq_active(phba, ndlp, 4655 cmdiocb->sli4_lxritag, 0, 0); 4656 } 4657 break; 4658 case IOSTAT_LOCAL_REJECT: 4659 switch ((ulp_word4 & IOERR_PARAM_MASK)) { 4660 case IOERR_LOOP_OPEN_FAILURE: 4661 if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0) 4662 delay = 1000; 4663 retry = 1; 4664 break; 4665 4666 case IOERR_ILLEGAL_COMMAND: 4667 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4668 "0124 Retry illegal cmd x%x " 4669 "retry:x%x delay:x%x\n", 4670 cmd, cmdiocb->retry, delay); 4671 retry = 1; 4672 /* All command's retry policy */ 4673 maxretry = 8; 4674 if (cmdiocb->retry > 2) 4675 delay = 1000; 4676 break; 4677 4678 case IOERR_NO_RESOURCES: 4679 logerr = 1; /* HBA out of resources */ 4680 retry = 1; 4681 if (cmdiocb->retry > 100) 4682 delay = 100; 4683 maxretry = 250; 4684 break; 4685 4686 case IOERR_ILLEGAL_FRAME: 4687 delay = 100; 4688 retry = 1; 4689 break; 4690 4691 case IOERR_INVALID_RPI: 4692 if (cmd == ELS_CMD_PLOGI && 4693 did == NameServer_DID) { 4694 /* Continue forever if plogi to */ 4695 /* the nameserver fails */ 4696 maxretry = 0; 4697 delay = 100; 4698 } else if (cmd == ELS_CMD_PRLI && 4699 ndlp->nlp_state != NLP_STE_PRLI_ISSUE) { 4700 /* State-command disagreement. The PRLI was 4701 * failed with an invalid rpi meaning there 4702 * some unexpected state change. Don't retry. 4703 */ 4704 maxretry = 0; 4705 retry = 0; 4706 break; 4707 } 4708 retry = 1; 4709 break; 4710 4711 case IOERR_SEQUENCE_TIMEOUT: 4712 if (cmd == ELS_CMD_PLOGI && 4713 did == NameServer_DID && 4714 (cmdiocb->retry + 1) == maxretry) { 4715 /* Reset the Link */ 4716 link_reset = 1; 4717 break; 4718 } 4719 retry = 1; 4720 delay = 100; 4721 break; 4722 case IOERR_SLI_ABORTED: 4723 /* Retry ELS PLOGI command? 4724 * Possibly the rport just wasn't ready. 
4725 */ 4726 if (cmd == ELS_CMD_PLOGI) { 4727 /* No retry if state change */ 4728 if (ndlp && 4729 ndlp->nlp_state != NLP_STE_PLOGI_ISSUE) 4730 goto out_retry; 4731 retry = 1; 4732 maxretry = 2; 4733 } 4734 break; 4735 } 4736 break; 4737 4738 case IOSTAT_NPORT_RJT: 4739 case IOSTAT_FABRIC_RJT: 4740 if (ulp_word4 & RJT_UNAVAIL_TEMP) { 4741 retry = 1; 4742 break; 4743 } 4744 break; 4745 4746 case IOSTAT_NPORT_BSY: 4747 case IOSTAT_FABRIC_BSY: 4748 logerr = 1; /* Fabric / Remote NPort out of resources */ 4749 retry = 1; 4750 break; 4751 4752 case IOSTAT_LS_RJT: 4753 stat.un.ls_rjt_error_be = cpu_to_be32(ulp_word4); 4754 /* Added for Vendor specifc support 4755 * Just keep retrying for these Rsn / Exp codes 4756 */ 4757 if ((vport->fc_flag & FC_PT2PT) && 4758 cmd == ELS_CMD_NVMEPRLI) { 4759 switch (stat.un.b.lsRjtRsnCode) { 4760 case LSRJT_UNABLE_TPC: 4761 case LSRJT_INVALID_CMD: 4762 case LSRJT_LOGICAL_ERR: 4763 case LSRJT_CMD_UNSUPPORTED: 4764 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 4765 "0168 NVME PRLI LS_RJT " 4766 "reason %x port doesn't " 4767 "support NVME, disabling NVME\n", 4768 stat.un.b.lsRjtRsnCode); 4769 retry = 0; 4770 vport->fc_flag |= FC_PT2PT_NO_NVME; 4771 goto out_retry; 4772 } 4773 } 4774 switch (stat.un.b.lsRjtRsnCode) { 4775 case LSRJT_UNABLE_TPC: 4776 /* Special case for PRLI LS_RJTs. Recall that lpfc 4777 * uses a single routine to issue both PRLI FC4 types. 4778 * If the PRLI is rejected because that FC4 type 4779 * isn't really supported, don't retry and cause 4780 * multiple transport registrations. Otherwise, parse 4781 * the reason code/reason code explanation and take the 4782 * appropriate action. 4783 */ 4784 lpfc_printf_vlog(vport, KERN_INFO, 4785 LOG_DISCOVERY | LOG_ELS | LOG_NODE, 4786 "0153 ELS cmd x%x LS_RJT by x%x. " 4787 "RsnCode x%x RsnCodeExp x%x\n", 4788 cmd, did, stat.un.b.lsRjtRsnCode, 4789 stat.un.b.lsRjtRsnCodeExp); 4790 4791 switch (stat.un.b.lsRjtRsnCodeExp) { 4792 case LSEXP_CANT_GIVE_DATA: 4793 case LSEXP_CMD_IN_PROGRESS: 4794 if (cmd == ELS_CMD_PLOGI) { 4795 delay = 1000; 4796 maxretry = 48; 4797 } 4798 retry = 1; 4799 break; 4800 case LSEXP_REQ_UNSUPPORTED: 4801 case LSEXP_NO_RSRC_ASSIGN: 4802 /* These explanation codes get no retry. */ 4803 if (cmd == ELS_CMD_PRLI || 4804 cmd == ELS_CMD_NVMEPRLI) 4805 break; 4806 fallthrough; 4807 default: 4808 /* Limit the delay and retry action to a limited 4809 * cmd set. There are other ELS commands where 4810 * a retry is not expected. 4811 */ 4812 if (cmd == ELS_CMD_PLOGI || 4813 cmd == ELS_CMD_PRLI || 4814 cmd == ELS_CMD_NVMEPRLI) { 4815 delay = 1000; 4816 maxretry = lpfc_max_els_tries + 1; 4817 retry = 1; 4818 } 4819 break; 4820 } 4821 4822 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 4823 (cmd == ELS_CMD_FDISC) && 4824 (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){ 4825 lpfc_printf_vlog(vport, KERN_ERR, 4826 LOG_TRACE_EVENT, 4827 "0125 FDISC Failed (x%x). 
" 4828 "Fabric out of resources\n", 4829 stat.un.lsRjtError); 4830 lpfc_vport_set_state(vport, 4831 FC_VPORT_NO_FABRIC_RSCS); 4832 } 4833 break; 4834 4835 case LSRJT_LOGICAL_BSY: 4836 if ((cmd == ELS_CMD_PLOGI) || 4837 (cmd == ELS_CMD_PRLI) || 4838 (cmd == ELS_CMD_NVMEPRLI)) { 4839 delay = 1000; 4840 maxretry = 48; 4841 } else if (cmd == ELS_CMD_FDISC) { 4842 /* FDISC retry policy */ 4843 maxretry = 48; 4844 if (cmdiocb->retry >= 32) 4845 delay = 1000; 4846 } 4847 retry = 1; 4848 break; 4849 4850 case LSRJT_LOGICAL_ERR: 4851 /* There are some cases where switches return this 4852 * error when they are not ready and should be returning 4853 * Logical Busy. We should delay every time. 4854 */ 4855 if (cmd == ELS_CMD_FDISC && 4856 stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) { 4857 maxretry = 3; 4858 delay = 1000; 4859 retry = 1; 4860 } else if (cmd == ELS_CMD_FLOGI && 4861 stat.un.b.lsRjtRsnCodeExp == 4862 LSEXP_NOTHING_MORE) { 4863 vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf; 4864 retry = 1; 4865 lpfc_printf_vlog(vport, KERN_ERR, 4866 LOG_TRACE_EVENT, 4867 "0820 FLOGI Failed (x%x). " 4868 "BBCredit Not Supported\n", 4869 stat.un.lsRjtError); 4870 } 4871 break; 4872 4873 case LSRJT_PROTOCOL_ERR: 4874 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 4875 (cmd == ELS_CMD_FDISC) && 4876 ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) || 4877 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID)) 4878 ) { 4879 lpfc_printf_vlog(vport, KERN_ERR, 4880 LOG_TRACE_EVENT, 4881 "0122 FDISC Failed (x%x). " 4882 "Fabric Detected Bad WWN\n", 4883 stat.un.lsRjtError); 4884 lpfc_vport_set_state(vport, 4885 FC_VPORT_FABRIC_REJ_WWN); 4886 } 4887 break; 4888 case LSRJT_VENDOR_UNIQUE: 4889 if ((stat.un.b.vendorUnique == 0x45) && 4890 (cmd == ELS_CMD_FLOGI)) { 4891 goto out_retry; 4892 } 4893 break; 4894 case LSRJT_CMD_UNSUPPORTED: 4895 /* lpfc nvmet returns this type of LS_RJT when it 4896 * receives an FCP PRLI because lpfc nvmet only 4897 * support NVME. ELS request is terminated for FCP4 4898 * on this rport. 4899 */ 4900 if (stat.un.b.lsRjtRsnCodeExp == 4901 LSEXP_REQ_UNSUPPORTED) { 4902 if (cmd == ELS_CMD_PRLI) 4903 goto out_retry; 4904 } 4905 break; 4906 } 4907 break; 4908 4909 case IOSTAT_INTERMED_RSP: 4910 case IOSTAT_BA_RJT: 4911 break; 4912 4913 default: 4914 break; 4915 } 4916 4917 if (link_reset) { 4918 rc = lpfc_link_reset(vport); 4919 if (rc) { 4920 /* Do not give up. Retry PLOGI one more time and attempt 4921 * link reset if PLOGI fails again. 
4922 */ 4923 retry = 1; 4924 delay = 100; 4925 goto out_retry; 4926 } 4927 return 1; 4928 } 4929 4930 if (did == FDMI_DID) 4931 retry = 1; 4932 4933 if ((cmd == ELS_CMD_FLOGI) && 4934 (phba->fc_topology != LPFC_TOPOLOGY_LOOP) && 4935 !lpfc_error_lost_link(vport, ulp_status, ulp_word4)) { 4936 /* FLOGI retry policy */ 4937 retry = 1; 4938 /* retry FLOGI forever */ 4939 if (phba->link_flag != LS_LOOPBACK_MODE) 4940 maxretry = 0; 4941 else 4942 maxretry = 2; 4943 4944 if (cmdiocb->retry >= 100) 4945 delay = 5000; 4946 else if (cmdiocb->retry >= 32) 4947 delay = 1000; 4948 } else if ((cmd == ELS_CMD_FDISC) && 4949 !lpfc_error_lost_link(vport, ulp_status, ulp_word4)) { 4950 /* retry FDISCs every second up to devloss */ 4951 retry = 1; 4952 maxretry = vport->cfg_devloss_tmo; 4953 delay = 1000; 4954 } 4955 4956 cmdiocb->retry++; 4957 if (maxretry && (cmdiocb->retry >= maxretry)) { 4958 phba->fc_stat.elsRetryExceeded++; 4959 retry = 0; 4960 } 4961 4962 if ((vport->load_flag & FC_UNLOADING) != 0) 4963 retry = 0; 4964 4965 out_retry: 4966 if (retry) { 4967 if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) { 4968 /* Stop retrying PLOGI and FDISC if in FCF discovery */ 4969 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 4970 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4971 "2849 Stop retry ELS command " 4972 "x%x to remote NPORT x%x, " 4973 "Data: x%x x%x\n", cmd, did, 4974 cmdiocb->retry, delay); 4975 return 0; 4976 } 4977 } 4978 4979 /* Retry ELS command <elsCmd> to remote NPORT <did> */ 4980 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4981 "0107 Retry ELS command x%x to remote " 4982 "NPORT x%x Data: x%x x%x\n", 4983 cmd, did, cmdiocb->retry, delay); 4984 4985 if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) && 4986 ((ulp_status != IOSTAT_LOCAL_REJECT) || 4987 ((ulp_word4 & IOERR_PARAM_MASK) != 4988 IOERR_NO_RESOURCES))) { 4989 /* Don't reset timer for no resources */ 4990 4991 /* If discovery / RSCN timer is running, reset it */ 4992 if (timer_pending(&vport->fc_disctmo) || 4993 (vport->fc_flag & FC_RSCN_MODE)) 4994 lpfc_set_disctmo(vport); 4995 } 4996 4997 phba->fc_stat.elsXmitRetry++; 4998 if (ndlp && delay) { 4999 phba->fc_stat.elsDelayRetry++; 5000 ndlp->nlp_retry = cmdiocb->retry; 5001 5002 /* delay is specified in milliseconds */ 5003 mod_timer(&ndlp->nlp_delayfunc, 5004 jiffies + msecs_to_jiffies(delay)); 5005 spin_lock_irq(&ndlp->lock); 5006 ndlp->nlp_flag |= NLP_DELAY_TMO; 5007 spin_unlock_irq(&ndlp->lock); 5008 5009 ndlp->nlp_prev_state = ndlp->nlp_state; 5010 if ((cmd == ELS_CMD_PRLI) || 5011 (cmd == ELS_CMD_NVMEPRLI)) 5012 lpfc_nlp_set_state(vport, ndlp, 5013 NLP_STE_PRLI_ISSUE); 5014 else if (cmd != ELS_CMD_ADISC) 5015 lpfc_nlp_set_state(vport, ndlp, 5016 NLP_STE_NPR_NODE); 5017 ndlp->nlp_last_elscmd = cmd; 5018 5019 return 1; 5020 } 5021 switch (cmd) { 5022 case ELS_CMD_FLOGI: 5023 lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry); 5024 return 1; 5025 case ELS_CMD_FDISC: 5026 lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry); 5027 return 1; 5028 case ELS_CMD_PLOGI: 5029 if (ndlp) { 5030 ndlp->nlp_prev_state = ndlp->nlp_state; 5031 lpfc_nlp_set_state(vport, ndlp, 5032 NLP_STE_PLOGI_ISSUE); 5033 } 5034 lpfc_issue_els_plogi(vport, did, cmdiocb->retry); 5035 return 1; 5036 case ELS_CMD_ADISC: 5037 ndlp->nlp_prev_state = ndlp->nlp_state; 5038 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 5039 lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry); 5040 return 1; 5041 case ELS_CMD_PRLI: 5042 case ELS_CMD_NVMEPRLI: 5043 ndlp->nlp_prev_state = ndlp->nlp_state; 5044 
lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 5045 lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry); 5046 return 1; 5047 case ELS_CMD_LOGO: 5048 ndlp->nlp_prev_state = ndlp->nlp_state; 5049 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 5050 lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry); 5051 return 1; 5052 } 5053 } 5054 /* No retry ELS command <elsCmd> to remote NPORT <did> */ 5055 if (logerr) { 5056 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5057 "0137 No retry ELS command x%x to remote " 5058 "NPORT x%x: Out of Resources: Error:x%x/%x\n", 5059 cmd, did, ulp_status, 5060 ulp_word4); 5061 } 5062 else { 5063 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5064 "0108 No retry ELS command x%x to remote " 5065 "NPORT x%x Retried:%d Error:x%x/%x\n", 5066 cmd, did, cmdiocb->retry, ulp_status, 5067 ulp_word4); 5068 } 5069 return 0; 5070 } 5071 5072 /** 5073 * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb 5074 * @phba: pointer to lpfc hba data structure. 5075 * @buf_ptr1: pointer to the lpfc DMA buffer data structure. 5076 * 5077 * This routine releases the lpfc DMA (Direct Memory Access) buffer(s) 5078 * associated with a command IOCB back to the lpfc DMA buffer pool. It first 5079 * checks to see whether there is a lpfc DMA buffer associated with the 5080 * response of the command IOCB. If so, it will be released before releasing 5081 * the lpfc DMA buffer associated with the IOCB itself. 5082 * 5083 * Return code 5084 * 0 - Successfully released lpfc DMA buffer (currently, always return 0) 5085 **/ 5086 static int 5087 lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1) 5088 { 5089 struct lpfc_dmabuf *buf_ptr; 5090 5091 /* Free the response before processing the command. */ 5092 if (!list_empty(&buf_ptr1->list)) { 5093 list_remove_head(&buf_ptr1->list, buf_ptr, 5094 struct lpfc_dmabuf, 5095 list); 5096 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 5097 kfree(buf_ptr); 5098 } 5099 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys); 5100 kfree(buf_ptr1); 5101 return 0; 5102 } 5103 5104 /** 5105 * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl 5106 * @phba: pointer to lpfc hba data structure. 5107 * @buf_ptr: pointer to the lpfc dma buffer data structure. 5108 * 5109 * This routine releases the lpfc Direct Memory Access (DMA) buffer 5110 * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer 5111 * pool. 5112 * 5113 * Return code 5114 * 0 - Successfully released lpfc DMA buffer (currently, always return 0) 5115 **/ 5116 static int 5117 lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr) 5118 { 5119 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 5120 kfree(buf_ptr); 5121 return 0; 5122 } 5123 5124 /** 5125 * lpfc_els_free_iocb - Free a command iocb and its associated resources 5126 * @phba: pointer to lpfc hba data structure. 5127 * @elsiocb: pointer to lpfc els command iocb data structure. 5128 * 5129 * This routine frees a command IOCB and its associated resources. The 5130 * command IOCB data structure contains the reference to various associated 5131 * resources, these fields must be set to NULL if the associated reference 5132 * not present: 5133 * cmd_dmabuf - reference to cmd. 5134 * cmd_dmabuf->next - reference to rsp 5135 * rsp_dmabuf - unused 5136 * bpl_dmabuf - reference to bpl 5137 * 5138 * It first properly decrements the reference count held on ndlp for the 5139 * IOCB completion callback function. 
If LPFC_DELAY_MEM_FREE flag is not 5140 * set, it invokes the lpfc_els_free_data() routine to release the Direct 5141 * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it 5142 * adds the DMA buffer the @phba data structure for the delayed release. 5143 * If reference to the Buffer Pointer List (BPL) is present, the 5144 * lpfc_els_free_bpl() routine is invoked to release the DMA memory 5145 * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is 5146 * invoked to release the IOCB data structure back to @phba IOCBQ list. 5147 * 5148 * Return code 5149 * 0 - Success (currently, always return 0) 5150 **/ 5151 int 5152 lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb) 5153 { 5154 struct lpfc_dmabuf *buf_ptr, *buf_ptr1; 5155 5156 /* The I/O iocb is complete. Clear the node and first dmbuf */ 5157 elsiocb->ndlp = NULL; 5158 5159 /* cmd_dmabuf = cmd, cmd_dmabuf->next = rsp, bpl_dmabuf = bpl */ 5160 if (elsiocb->cmd_dmabuf) { 5161 if (elsiocb->cmd_flag & LPFC_DELAY_MEM_FREE) { 5162 /* Firmware could still be in progress of DMAing 5163 * payload, so don't free data buffer till after 5164 * a hbeat. 5165 */ 5166 elsiocb->cmd_flag &= ~LPFC_DELAY_MEM_FREE; 5167 buf_ptr = elsiocb->cmd_dmabuf; 5168 elsiocb->cmd_dmabuf = NULL; 5169 if (buf_ptr) { 5170 buf_ptr1 = NULL; 5171 spin_lock_irq(&phba->hbalock); 5172 if (!list_empty(&buf_ptr->list)) { 5173 list_remove_head(&buf_ptr->list, 5174 buf_ptr1, struct lpfc_dmabuf, 5175 list); 5176 INIT_LIST_HEAD(&buf_ptr1->list); 5177 list_add_tail(&buf_ptr1->list, 5178 &phba->elsbuf); 5179 phba->elsbuf_cnt++; 5180 } 5181 INIT_LIST_HEAD(&buf_ptr->list); 5182 list_add_tail(&buf_ptr->list, &phba->elsbuf); 5183 phba->elsbuf_cnt++; 5184 spin_unlock_irq(&phba->hbalock); 5185 } 5186 } else { 5187 buf_ptr1 = elsiocb->cmd_dmabuf; 5188 lpfc_els_free_data(phba, buf_ptr1); 5189 elsiocb->cmd_dmabuf = NULL; 5190 } 5191 } 5192 5193 if (elsiocb->bpl_dmabuf) { 5194 buf_ptr = elsiocb->bpl_dmabuf; 5195 lpfc_els_free_bpl(phba, buf_ptr); 5196 elsiocb->bpl_dmabuf = NULL; 5197 } 5198 lpfc_sli_release_iocbq(phba, elsiocb); 5199 return 0; 5200 } 5201 5202 /** 5203 * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response 5204 * @phba: pointer to lpfc hba data structure. 5205 * @cmdiocb: pointer to lpfc command iocb data structure. 5206 * @rspiocb: pointer to lpfc response iocb data structure. 5207 * 5208 * This routine is the completion callback function to the Logout (LOGO) 5209 * Accept (ACC) Response ELS command. This routine is invoked to indicate 5210 * the completion of the LOGO process. If the node has transitioned to NPR, 5211 * this routine unregisters the RPI if it is still registered. The 5212 * lpfc_els_free_iocb() is invoked to release the IOCB data structure. 
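 *
 * As an illustrative sketch only (not a verbatim caller), the response
 * path that builds the LOGO ACC wires this routine up as the completion
 * handler before issuing the IOCB:
 *
 *     elsiocb->cmd_cmpl = lpfc_cmpl_els_logo_acc;
 *     elsiocb->ndlp = lpfc_nlp_get(ndlp);
 *     rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);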
5213 **/ 5214 static void 5215 lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 5216 struct lpfc_iocbq *rspiocb) 5217 { 5218 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 5219 struct lpfc_vport *vport = cmdiocb->vport; 5220 u32 ulp_status, ulp_word4; 5221 5222 ulp_status = get_job_ulpstatus(phba, rspiocb); 5223 ulp_word4 = get_job_word4(phba, rspiocb); 5224 5225 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5226 "ACC LOGO cmpl: status:x%x/x%x did:x%x", 5227 ulp_status, ulp_word4, ndlp->nlp_DID); 5228 /* ACC to LOGO completes to NPort <nlp_DID> */ 5229 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5230 "0109 ACC to LOGO completes to NPort x%x refcnt %d " 5231 "last els x%x Data: x%x x%x x%x\n", 5232 ndlp->nlp_DID, kref_read(&ndlp->kref), 5233 ndlp->nlp_last_elscmd, ndlp->nlp_flag, ndlp->nlp_state, 5234 ndlp->nlp_rpi); 5235 5236 /* This clause allows the LOGO ACC to complete and free resources 5237 * for the Fabric Domain Controller. It does deliberately skip 5238 * the unreg_rpi and release rpi because some fabrics send RDP 5239 * requests after logging out from the initiator. 5240 */ 5241 if (ndlp->nlp_type & NLP_FABRIC && 5242 ((ndlp->nlp_DID & WELL_KNOWN_DID_MASK) != WELL_KNOWN_DID_MASK)) 5243 goto out; 5244 5245 if (ndlp->nlp_state == NLP_STE_NPR_NODE) { 5246 if (ndlp->nlp_flag & NLP_RPI_REGISTERED) 5247 lpfc_unreg_rpi(vport, ndlp); 5248 5249 /* If came from PRLO, then PRLO_ACC is done. 5250 * Start rediscovery now. 5251 */ 5252 if (ndlp->nlp_last_elscmd == ELS_CMD_PRLO) { 5253 spin_lock_irq(&ndlp->lock); 5254 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 5255 spin_unlock_irq(&ndlp->lock); 5256 ndlp->nlp_prev_state = ndlp->nlp_state; 5257 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 5258 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 5259 } 5260 } 5261 5262 out: 5263 /* 5264 * The driver received a LOGO from the rport and has ACK'd it. 5265 * At this point, the driver is done so release the IOCB 5266 */ 5267 lpfc_els_free_iocb(phba, cmdiocb); 5268 lpfc_nlp_put(ndlp); 5269 } 5270 5271 /** 5272 * lpfc_mbx_cmpl_dflt_rpi - Completion callbk func for unreg dflt rpi mbox cmd 5273 * @phba: pointer to lpfc hba data structure. 5274 * @pmb: pointer to the driver internal queue element for mailbox command. 5275 * 5276 * This routine is the completion callback function for unregister default 5277 * RPI (Remote Port Index) mailbox command to the @phba. It simply releases 5278 * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and 5279 * decrements the ndlp reference count held for this completion callback 5280 * function. After that, it invokes the lpfc_drop_node to check 5281 * whether it is appropriate to release the node. 5282 **/ 5283 void 5284 lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 5285 { 5286 struct lpfc_nodelist *ndlp = pmb->ctx_ndlp; 5287 u32 mbx_flag = pmb->mbox_flag; 5288 u32 mbx_cmd = pmb->u.mb.mbxCommand; 5289 5290 if (ndlp) { 5291 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, 5292 "0006 rpi x%x DID:%x flg:%x %d x%px " 5293 "mbx_cmd x%x mbx_flag x%x x%px\n", 5294 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, 5295 kref_read(&ndlp->kref), ndlp, mbx_cmd, 5296 mbx_flag, pmb); 5297 5298 /* This ends the default/temporary RPI cleanup logic for this 5299 * ndlp and the node and rpi needs to be released. Free the rpi 5300 * first on an UNREG_LOGIN and then release the final 5301 * references. 
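	 * The lpfc_nlp_put() below drops the reference held for this
	 * mailbox completion, and lpfc_drop_node() then releases the node
	 * itself once no other references remain.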
5302 */ 5303 spin_lock_irq(&ndlp->lock); 5304 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 5305 if (mbx_cmd == MBX_UNREG_LOGIN) 5306 ndlp->nlp_flag &= ~NLP_UNREG_INP; 5307 spin_unlock_irq(&ndlp->lock); 5308 lpfc_nlp_put(ndlp); 5309 lpfc_drop_node(ndlp->vport, ndlp); 5310 } 5311 5312 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); 5313 } 5314 5315 /** 5316 * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd 5317 * @phba: pointer to lpfc hba data structure. 5318 * @cmdiocb: pointer to lpfc command iocb data structure. 5319 * @rspiocb: pointer to lpfc response iocb data structure. 5320 * 5321 * This routine is the completion callback function for ELS Response IOCB 5322 * command. In normal case, this callback function just properly sets the 5323 * nlp_flag bitmap in the ndlp data structure, if the mbox command reference 5324 * field in the command IOCB is not NULL, the referred mailbox command will 5325 * be send out, and then invokes the lpfc_els_free_iocb() routine to release 5326 * the IOCB. 5327 **/ 5328 static void 5329 lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 5330 struct lpfc_iocbq *rspiocb) 5331 { 5332 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 5333 struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL; 5334 struct Scsi_Host *shost = vport ? lpfc_shost_from_vport(vport) : NULL; 5335 IOCB_t *irsp; 5336 LPFC_MBOXQ_t *mbox = NULL; 5337 u32 ulp_status, ulp_word4, tmo, did, iotag; 5338 5339 if (!vport) { 5340 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5341 "3177 ELS response failed\n"); 5342 goto out; 5343 } 5344 if (cmdiocb->context_un.mbox) 5345 mbox = cmdiocb->context_un.mbox; 5346 5347 ulp_status = get_job_ulpstatus(phba, rspiocb); 5348 ulp_word4 = get_job_word4(phba, rspiocb); 5349 did = get_job_els_rsp64_did(phba, cmdiocb); 5350 5351 if (phba->sli_rev == LPFC_SLI_REV4) { 5352 tmo = get_wqe_tmo(cmdiocb); 5353 iotag = get_wqe_reqtag(cmdiocb); 5354 } else { 5355 irsp = &rspiocb->iocb; 5356 tmo = irsp->ulpTimeout; 5357 iotag = irsp->ulpIoTag; 5358 } 5359 5360 /* Check to see if link went down during discovery */ 5361 if (!ndlp || lpfc_els_chk_latt(vport)) { 5362 if (mbox) 5363 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 5364 goto out; 5365 } 5366 5367 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5368 "ELS rsp cmpl: status:x%x/x%x did:x%x", 5369 ulp_status, ulp_word4, did); 5370 /* ELS response tag <ulpIoTag> completes */ 5371 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5372 "0110 ELS response tag x%x completes " 5373 "Data: x%x x%x x%x x%x x%x x%x x%x x%x %p %p\n", 5374 iotag, ulp_status, ulp_word4, tmo, 5375 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5376 ndlp->nlp_rpi, kref_read(&ndlp->kref), mbox, ndlp); 5377 if (mbox) { 5378 if (ulp_status == 0 5379 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { 5380 if (!lpfc_unreg_rpi(vport, ndlp) && 5381 (!(vport->fc_flag & FC_PT2PT))) { 5382 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || 5383 ndlp->nlp_state == 5384 NLP_STE_REG_LOGIN_ISSUE) { 5385 lpfc_printf_vlog(vport, KERN_INFO, 5386 LOG_DISCOVERY, 5387 "0314 PLOGI recov " 5388 "DID x%x " 5389 "Data: x%x x%x x%x\n", 5390 ndlp->nlp_DID, 5391 ndlp->nlp_state, 5392 ndlp->nlp_rpi, 5393 ndlp->nlp_flag); 5394 goto out_free_mbox; 5395 } 5396 } 5397 5398 /* Increment reference count to ndlp to hold the 5399 * reference to ndlp for the callback function. 
5400 */ 5401 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 5402 if (!mbox->ctx_ndlp) 5403 goto out_free_mbox; 5404 5405 mbox->vport = vport; 5406 if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) { 5407 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG; 5408 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 5409 } 5410 else { 5411 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; 5412 ndlp->nlp_prev_state = ndlp->nlp_state; 5413 lpfc_nlp_set_state(vport, ndlp, 5414 NLP_STE_REG_LOGIN_ISSUE); 5415 } 5416 5417 ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; 5418 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 5419 != MBX_NOT_FINISHED) 5420 goto out; 5421 5422 /* Decrement the ndlp reference count we 5423 * set for this failed mailbox command. 5424 */ 5425 lpfc_nlp_put(ndlp); 5426 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 5427 5428 /* ELS rsp: Cannot issue reg_login for <NPortid> */ 5429 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5430 "0138 ELS rsp: Cannot issue reg_login for x%x " 5431 "Data: x%x x%x x%x\n", 5432 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5433 ndlp->nlp_rpi); 5434 } 5435 out_free_mbox: 5436 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 5437 } 5438 out: 5439 if (ndlp && shost) { 5440 spin_lock_irq(&ndlp->lock); 5441 if (mbox) 5442 ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN; 5443 ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI; 5444 spin_unlock_irq(&ndlp->lock); 5445 } 5446 5447 /* An SLI4 NPIV instance wants to drop the node at this point under 5448 * these conditions and release the RPI. 5449 */ 5450 if (phba->sli_rev == LPFC_SLI_REV4 && 5451 vport && vport->port_type == LPFC_NPIV_PORT && 5452 !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) { 5453 if (ndlp->nlp_flag & NLP_RELEASE_RPI) { 5454 if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE && 5455 ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) { 5456 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); 5457 spin_lock_irq(&ndlp->lock); 5458 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; 5459 ndlp->nlp_flag &= ~NLP_RELEASE_RPI; 5460 spin_unlock_irq(&ndlp->lock); 5461 } 5462 lpfc_drop_node(vport, ndlp); 5463 } else if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE && 5464 ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE && 5465 ndlp->nlp_state != NLP_STE_PRLI_ISSUE) { 5466 /* Drop ndlp if there is no planned or outstanding 5467 * issued PRLI. 5468 * 5469 * In cases when the ndlp is acting as both an initiator 5470 * and target function, let our issued PRLI determine 5471 * the final ndlp kref drop. 5472 */ 5473 lpfc_drop_node(vport, ndlp); 5474 } 5475 } 5476 5477 /* Release the originating I/O reference. */ 5478 lpfc_els_free_iocb(phba, cmdiocb); 5479 lpfc_nlp_put(ndlp); 5480 return; 5481 } 5482 5483 /** 5484 * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command 5485 * @vport: pointer to a host virtual N_Port data structure. 5486 * @flag: the els command code to be accepted. 5487 * @oldiocb: pointer to the original lpfc command iocb data structure. 5488 * @ndlp: pointer to a node-list data structure. 5489 * @mbox: pointer to the driver internal queue element for mailbox command. 5490 * 5491 * This routine prepares and issues an Accept (ACC) response IOCB 5492 * command. It uses the @flag to properly set up the IOCB field for the 5493 * specific ACC response command to be issued and invokes the 5494 * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a 5495 * @mbox pointer is passed in, it will be put into the context_un.mbox 5496 * field of the IOCB for the completion callback function to issue the 5497 * mailbox command to the HBA later when callback is invoked. 
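 *
 * As an illustrative sketch (not a verbatim caller), a discovery state
 * machine routine accepting a received ELS with no mailbox follow-up
 * might simply issue:
 *
 *     rc = lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);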
5498 * 5499 * Note that the ndlp reference count will be incremented by 1 for holding the 5500 * ndlp and the reference to ndlp will be stored into the ndlp field of 5501 * the IOCB for the completion callback function to the corresponding 5502 * response ELS IOCB command. 5503 * 5504 * Return code 5505 * 0 - Successfully issued acc response 5506 * 1 - Failed to issue acc response 5507 **/ 5508 int 5509 lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, 5510 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 5511 LPFC_MBOXQ_t *mbox) 5512 { 5513 struct lpfc_hba *phba = vport->phba; 5514 IOCB_t *icmd; 5515 IOCB_t *oldcmd; 5516 union lpfc_wqe128 *wqe; 5517 union lpfc_wqe128 *oldwqe = &oldiocb->wqe; 5518 struct lpfc_iocbq *elsiocb; 5519 uint8_t *pcmd; 5520 struct serv_parm *sp; 5521 uint16_t cmdsize; 5522 int rc; 5523 ELS_PKT *els_pkt_ptr; 5524 struct fc_els_rdf_resp *rdf_resp; 5525 5526 switch (flag) { 5527 case ELS_CMD_ACC: 5528 cmdsize = sizeof(uint32_t); 5529 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5530 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5531 if (!elsiocb) { 5532 spin_lock_irq(&ndlp->lock); 5533 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 5534 spin_unlock_irq(&ndlp->lock); 5535 return 1; 5536 } 5537 5538 if (phba->sli_rev == LPFC_SLI_REV4) { 5539 wqe = &elsiocb->wqe; 5540 /* XRI / rx_id */ 5541 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 5542 bf_get(wqe_ctxt_tag, 5543 &oldwqe->xmit_els_rsp.wqe_com)); 5544 5545 /* oxid */ 5546 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5547 bf_get(wqe_rcvoxid, 5548 &oldwqe->xmit_els_rsp.wqe_com)); 5549 } else { 5550 icmd = &elsiocb->iocb; 5551 oldcmd = &oldiocb->iocb; 5552 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5553 icmd->unsli3.rcvsli3.ox_id = 5554 oldcmd->unsli3.rcvsli3.ox_id; 5555 } 5556 5557 pcmd = elsiocb->cmd_dmabuf->virt; 5558 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5559 pcmd += sizeof(uint32_t); 5560 5561 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5562 "Issue ACC: did:x%x flg:x%x", 5563 ndlp->nlp_DID, ndlp->nlp_flag, 0); 5564 break; 5565 case ELS_CMD_FLOGI: 5566 case ELS_CMD_PLOGI: 5567 cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t)); 5568 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5569 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5570 if (!elsiocb) 5571 return 1; 5572 5573 if (phba->sli_rev == LPFC_SLI_REV4) { 5574 wqe = &elsiocb->wqe; 5575 /* XRI / rx_id */ 5576 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 5577 bf_get(wqe_ctxt_tag, 5578 &oldwqe->xmit_els_rsp.wqe_com)); 5579 5580 /* oxid */ 5581 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5582 bf_get(wqe_rcvoxid, 5583 &oldwqe->xmit_els_rsp.wqe_com)); 5584 } else { 5585 icmd = &elsiocb->iocb; 5586 oldcmd = &oldiocb->iocb; 5587 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5588 icmd->unsli3.rcvsli3.ox_id = 5589 oldcmd->unsli3.rcvsli3.ox_id; 5590 } 5591 5592 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt; 5593 5594 if (mbox) 5595 elsiocb->context_un.mbox = mbox; 5596 5597 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5598 pcmd += sizeof(uint32_t); 5599 sp = (struct serv_parm *)pcmd; 5600 5601 if (flag == ELS_CMD_FLOGI) { 5602 /* Copy the received service parameters back */ 5603 memcpy(sp, &phba->fc_fabparam, 5604 sizeof(struct serv_parm)); 5605 5606 /* Clear the F_Port bit */ 5607 sp->cmn.fPort = 0; 5608 5609 /* Mark all class service parameters as invalid */ 5610 sp->cls1.classValid = 0; 5611 sp->cls2.classValid = 0; 5612 sp->cls3.classValid = 0; 5613 sp->cls4.classValid = 0; 5614 5615 /* Copy our worldwide names */ 
5616 memcpy(&sp->portName, &vport->fc_sparam.portName, 5617 sizeof(struct lpfc_name)); 5618 memcpy(&sp->nodeName, &vport->fc_sparam.nodeName, 5619 sizeof(struct lpfc_name)); 5620 } else { 5621 memcpy(pcmd, &vport->fc_sparam, 5622 sizeof(struct serv_parm)); 5623 5624 sp->cmn.valid_vendor_ver_level = 0; 5625 memset(sp->un.vendorVersion, 0, 5626 sizeof(sp->un.vendorVersion)); 5627 sp->cmn.bbRcvSizeMsb &= 0xF; 5628 5629 /* If our firmware supports this feature, convey that 5630 * info to the target using the vendor specific field. 5631 */ 5632 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) { 5633 sp->cmn.valid_vendor_ver_level = 1; 5634 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID); 5635 sp->un.vv.flags = 5636 cpu_to_be32(LPFC_VV_SUPPRESS_RSP); 5637 } 5638 } 5639 5640 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5641 "Issue ACC FLOGI/PLOGI: did:x%x flg:x%x", 5642 ndlp->nlp_DID, ndlp->nlp_flag, 0); 5643 break; 5644 case ELS_CMD_PRLO: 5645 cmdsize = sizeof(uint32_t) + sizeof(PRLO); 5646 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5647 ndlp, ndlp->nlp_DID, ELS_CMD_PRLO); 5648 if (!elsiocb) 5649 return 1; 5650 5651 if (phba->sli_rev == LPFC_SLI_REV4) { 5652 wqe = &elsiocb->wqe; 5653 /* XRI / rx_id */ 5654 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 5655 bf_get(wqe_ctxt_tag, 5656 &oldwqe->xmit_els_rsp.wqe_com)); 5657 5658 /* oxid */ 5659 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5660 bf_get(wqe_rcvoxid, 5661 &oldwqe->xmit_els_rsp.wqe_com)); 5662 } else { 5663 icmd = &elsiocb->iocb; 5664 oldcmd = &oldiocb->iocb; 5665 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5666 icmd->unsli3.rcvsli3.ox_id = 5667 oldcmd->unsli3.rcvsli3.ox_id; 5668 } 5669 5670 pcmd = (u8 *) elsiocb->cmd_dmabuf->virt; 5671 5672 memcpy(pcmd, oldiocb->cmd_dmabuf->virt, 5673 sizeof(uint32_t) + sizeof(PRLO)); 5674 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC; 5675 els_pkt_ptr = (ELS_PKT *) pcmd; 5676 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED; 5677 5678 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5679 "Issue ACC PRLO: did:x%x flg:x%x", 5680 ndlp->nlp_DID, ndlp->nlp_flag, 0); 5681 break; 5682 case ELS_CMD_RDF: 5683 cmdsize = sizeof(*rdf_resp); 5684 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5685 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5686 if (!elsiocb) 5687 return 1; 5688 5689 if (phba->sli_rev == LPFC_SLI_REV4) { 5690 wqe = &elsiocb->wqe; 5691 /* XRI / rx_id */ 5692 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 5693 bf_get(wqe_ctxt_tag, 5694 &oldwqe->xmit_els_rsp.wqe_com)); 5695 5696 /* oxid */ 5697 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5698 bf_get(wqe_rcvoxid, 5699 &oldwqe->xmit_els_rsp.wqe_com)); 5700 } else { 5701 icmd = &elsiocb->iocb; 5702 oldcmd = &oldiocb->iocb; 5703 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5704 icmd->unsli3.rcvsli3.ox_id = 5705 oldcmd->unsli3.rcvsli3.ox_id; 5706 } 5707 5708 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt; 5709 rdf_resp = (struct fc_els_rdf_resp *)pcmd; 5710 memset(rdf_resp, 0, sizeof(*rdf_resp)); 5711 rdf_resp->acc_hdr.la_cmd = ELS_LS_ACC; 5712 5713 /* FC-LS-5 specifies desc_list_len shall be set to 12 */ 5714 rdf_resp->desc_list_len = cpu_to_be32(12); 5715 5716 /* FC-LS-5 specifies LS REQ Information descriptor */ 5717 rdf_resp->lsri.desc_tag = cpu_to_be32(1); 5718 rdf_resp->lsri.desc_len = cpu_to_be32(sizeof(u32)); 5719 rdf_resp->lsri.rqst_w0.cmd = ELS_RDF; 5720 break; 5721 default: 5722 return 1; 5723 } 5724 if (ndlp->nlp_flag & NLP_LOGO_ACC) { 5725 spin_lock_irq(&ndlp->lock); 5726 if 
(!(ndlp->nlp_flag & NLP_RPI_REGISTERED || 5727 ndlp->nlp_flag & NLP_REG_LOGIN_SEND)) 5728 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 5729 spin_unlock_irq(&ndlp->lock); 5730 elsiocb->cmd_cmpl = lpfc_cmpl_els_logo_acc; 5731 } else { 5732 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 5733 } 5734 5735 phba->fc_stat.elsXmitACC++; 5736 elsiocb->ndlp = lpfc_nlp_get(ndlp); 5737 if (!elsiocb->ndlp) { 5738 lpfc_els_free_iocb(phba, elsiocb); 5739 return 1; 5740 } 5741 5742 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5743 if (rc == IOCB_ERROR) { 5744 lpfc_els_free_iocb(phba, elsiocb); 5745 lpfc_nlp_put(ndlp); 5746 return 1; 5747 } 5748 5749 /* Xmit ELS ACC response tag <ulpIoTag> */ 5750 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5751 "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, " 5752 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " 5753 "RPI: x%x, fc_flag x%x refcnt %d\n", 5754 rc, elsiocb->iotag, elsiocb->sli4_xritag, 5755 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5756 ndlp->nlp_rpi, vport->fc_flag, kref_read(&ndlp->kref)); 5757 return 0; 5758 } 5759 5760 /** 5761 * lpfc_els_rsp_reject - Prepare and issue a rjt response iocb command 5762 * @vport: pointer to a virtual N_Port data structure. 5763 * @rejectError: reject response to issue 5764 * @oldiocb: pointer to the original lpfc command iocb data structure. 5765 * @ndlp: pointer to a node-list data structure. 5766 * @mbox: pointer to the driver internal queue element for mailbox command. 5767 * 5768 * This routine prepares and issue an Reject (RJT) response IOCB 5769 * command. If a @mbox pointer is passed in, it will be put into the 5770 * context_un.mbox field of the IOCB for the completion callback function 5771 * to issue to the HBA later. 5772 * 5773 * Note that the ndlp reference count will be incremented by 1 for holding the 5774 * ndlp and the reference to ndlp will be stored into the ndlp field of 5775 * the IOCB for the completion callback function to the reject response 5776 * ELS IOCB command. 
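 *
 * A minimal sketch of a caller rejecting an unsolicited ELS, assuming the
 * ls_rjt reason-code layout from lpfc_hw.h (the codes shown are only an
 * example of how @rejectError is composed):
 *
 *     struct ls_rjt stat;
 *
 *     memset(&stat, 0, sizeof(stat));
 *     stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
 *     lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);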
5777 * 5778 * Return code 5779 * 0 - Successfully issued reject response 5780 * 1 - Failed to issue reject response 5781 **/ 5782 int 5783 lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, 5784 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 5785 LPFC_MBOXQ_t *mbox) 5786 { 5787 int rc; 5788 struct lpfc_hba *phba = vport->phba; 5789 IOCB_t *icmd; 5790 IOCB_t *oldcmd; 5791 union lpfc_wqe128 *wqe; 5792 struct lpfc_iocbq *elsiocb; 5793 uint8_t *pcmd; 5794 uint16_t cmdsize; 5795 5796 cmdsize = 2 * sizeof(uint32_t); 5797 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 5798 ndlp->nlp_DID, ELS_CMD_LS_RJT); 5799 if (!elsiocb) 5800 return 1; 5801 5802 if (phba->sli_rev == LPFC_SLI_REV4) { 5803 wqe = &elsiocb->wqe; 5804 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 5805 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */ 5806 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5807 get_job_rcvoxid(phba, oldiocb)); 5808 } else { 5809 icmd = &elsiocb->iocb; 5810 oldcmd = &oldiocb->iocb; 5811 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5812 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 5813 } 5814 5815 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 5816 5817 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; 5818 pcmd += sizeof(uint32_t); 5819 *((uint32_t *) (pcmd)) = rejectError; 5820 5821 if (mbox) 5822 elsiocb->context_un.mbox = mbox; 5823 5824 /* Xmit ELS RJT <err> response tag <ulpIoTag> */ 5825 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5826 "0129 Xmit ELS RJT x%x response tag x%x " 5827 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " 5828 "rpi x%x\n", 5829 rejectError, elsiocb->iotag, 5830 get_job_ulpcontext(phba, elsiocb), ndlp->nlp_DID, 5831 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); 5832 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5833 "Issue LS_RJT: did:x%x flg:x%x err:x%x", 5834 ndlp->nlp_DID, ndlp->nlp_flag, rejectError); 5835 5836 phba->fc_stat.elsXmitLSRJT++; 5837 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 5838 elsiocb->ndlp = lpfc_nlp_get(ndlp); 5839 if (!elsiocb->ndlp) { 5840 lpfc_els_free_iocb(phba, elsiocb); 5841 return 1; 5842 } 5843 5844 /* The NPIV instance is rejecting this unsolicited ELS. Make sure the 5845 * node's assigned RPI gets released provided this node is not already 5846 * registered with the transport. 5847 */ 5848 if (phba->sli_rev == LPFC_SLI_REV4 && 5849 vport->port_type == LPFC_NPIV_PORT && 5850 !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) { 5851 spin_lock_irq(&ndlp->lock); 5852 ndlp->nlp_flag |= NLP_RELEASE_RPI; 5853 spin_unlock_irq(&ndlp->lock); 5854 } 5855 5856 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5857 if (rc == IOCB_ERROR) { 5858 lpfc_els_free_iocb(phba, elsiocb); 5859 lpfc_nlp_put(ndlp); 5860 return 1; 5861 } 5862 5863 return 0; 5864 } 5865 5866 /** 5867 * lpfc_issue_els_edc_rsp - Exchange Diagnostic Capabilities with the fabric. 5868 * @vport: pointer to a host virtual N_Port data structure. 5869 * @cmdiocb: pointer to the original lpfc command iocb data structure. 5870 * @ndlp: NPort to where rsp is directed 5871 * 5872 * This routine issues an EDC ACC RSP to the F-Port Controller to communicate 5873 * this N_Port's support of hardware signals in its Congestion 5874 * Capabilities Descriptor. 
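 *
 * The accept payload is sized as the fixed EDC LS_ACC header plus a
 * Congestion Signaling capability descriptor and, when the link is LDS
 * capable, an additional Link Fault capability descriptor.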
5875 * 5876 * Return code 5877 * 0 - Successfully issued edc rsp command 5878 * 1 - Failed to issue edc rsp command 5879 **/ 5880 static int 5881 lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5882 struct lpfc_nodelist *ndlp) 5883 { 5884 struct lpfc_hba *phba = vport->phba; 5885 struct fc_els_edc_resp *edc_rsp; 5886 struct fc_tlv_desc *tlv; 5887 struct lpfc_iocbq *elsiocb; 5888 IOCB_t *icmd, *cmd; 5889 union lpfc_wqe128 *wqe; 5890 u32 cgn_desc_size, lft_desc_size; 5891 u16 cmdsize; 5892 uint8_t *pcmd; 5893 int rc; 5894 5895 cmdsize = sizeof(struct fc_els_edc_resp); 5896 cgn_desc_size = sizeof(struct fc_diag_cg_sig_desc); 5897 lft_desc_size = (lpfc_link_is_lds_capable(phba)) ? 5898 sizeof(struct fc_diag_lnkflt_desc) : 0; 5899 cmdsize += cgn_desc_size + lft_desc_size; 5900 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, cmdiocb->retry, 5901 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5902 if (!elsiocb) 5903 return 1; 5904 5905 if (phba->sli_rev == LPFC_SLI_REV4) { 5906 wqe = &elsiocb->wqe; 5907 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 5908 get_job_ulpcontext(phba, cmdiocb)); /* Xri / rx_id */ 5909 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5910 get_job_rcvoxid(phba, cmdiocb)); 5911 } else { 5912 icmd = &elsiocb->iocb; 5913 cmd = &cmdiocb->iocb; 5914 icmd->ulpContext = cmd->ulpContext; /* Xri / rx_id */ 5915 icmd->unsli3.rcvsli3.ox_id = cmd->unsli3.rcvsli3.ox_id; 5916 } 5917 5918 pcmd = elsiocb->cmd_dmabuf->virt; 5919 memset(pcmd, 0, cmdsize); 5920 5921 edc_rsp = (struct fc_els_edc_resp *)pcmd; 5922 edc_rsp->acc_hdr.la_cmd = ELS_LS_ACC; 5923 edc_rsp->desc_list_len = cpu_to_be32(sizeof(struct fc_els_lsri_desc) + 5924 cgn_desc_size + lft_desc_size); 5925 edc_rsp->lsri.desc_tag = cpu_to_be32(ELS_DTAG_LS_REQ_INFO); 5926 edc_rsp->lsri.desc_len = cpu_to_be32( 5927 FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_els_lsri_desc)); 5928 edc_rsp->lsri.rqst_w0.cmd = ELS_EDC; 5929 tlv = edc_rsp->desc; 5930 lpfc_format_edc_cgn_desc(phba, tlv); 5931 tlv = fc_tlv_next_desc(tlv); 5932 if (lft_desc_size) 5933 lpfc_format_edc_lft_desc(phba, tlv); 5934 5935 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5936 "Issue EDC ACC: did:x%x flg:x%x refcnt %d", 5937 ndlp->nlp_DID, ndlp->nlp_flag, 5938 kref_read(&ndlp->kref)); 5939 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 5940 5941 phba->fc_stat.elsXmitACC++; 5942 elsiocb->ndlp = lpfc_nlp_get(ndlp); 5943 if (!elsiocb->ndlp) { 5944 lpfc_els_free_iocb(phba, elsiocb); 5945 return 1; 5946 } 5947 5948 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5949 if (rc == IOCB_ERROR) { 5950 lpfc_els_free_iocb(phba, elsiocb); 5951 lpfc_nlp_put(ndlp); 5952 return 1; 5953 } 5954 5955 /* Xmit ELS ACC response tag <ulpIoTag> */ 5956 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5957 "0152 Xmit EDC ACC response Status: x%x, IoTag: x%x, " 5958 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " 5959 "RPI: x%x, fc_flag x%x\n", 5960 rc, elsiocb->iotag, elsiocb->sli4_xritag, 5961 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5962 ndlp->nlp_rpi, vport->fc_flag); 5963 5964 return 0; 5965 } 5966 5967 /** 5968 * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd 5969 * @vport: pointer to a virtual N_Port data structure. 5970 * @oldiocb: pointer to the original lpfc command iocb data structure. 5971 * @ndlp: pointer to a node-list data structure. 5972 * 5973 * This routine prepares and issues an Accept (ACC) response to Address 5974 * Discover (ADISC) ELS command. 
It simply prepares the payload of the IOCB 5975 * and invokes the lpfc_sli_issue_iocb() routine to send out the command. 5976 * 5977 * Note that the ndlp reference count will be incremented by 1 for holding the 5978 * ndlp and the reference to ndlp will be stored into the ndlp field of 5979 * the IOCB for the completion callback function to the ADISC Accept response 5980 * ELS IOCB command. 5981 * 5982 * Return code 5983 * 0 - Successfully issued acc adisc response 5984 * 1 - Failed to issue adisc acc response 5985 **/ 5986 int 5987 lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 5988 struct lpfc_nodelist *ndlp) 5989 { 5990 struct lpfc_hba *phba = vport->phba; 5991 ADISC *ap; 5992 IOCB_t *icmd, *oldcmd; 5993 union lpfc_wqe128 *wqe; 5994 struct lpfc_iocbq *elsiocb; 5995 uint8_t *pcmd; 5996 uint16_t cmdsize; 5997 int rc; 5998 u32 ulp_context; 5999 6000 cmdsize = sizeof(uint32_t) + sizeof(ADISC); 6001 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 6002 ndlp->nlp_DID, ELS_CMD_ACC); 6003 if (!elsiocb) 6004 return 1; 6005 6006 if (phba->sli_rev == LPFC_SLI_REV4) { 6007 wqe = &elsiocb->wqe; 6008 /* XRI / rx_id */ 6009 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 6010 get_job_ulpcontext(phba, oldiocb)); 6011 ulp_context = get_job_ulpcontext(phba, elsiocb); 6012 /* oxid */ 6013 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 6014 get_job_rcvoxid(phba, oldiocb)); 6015 } else { 6016 icmd = &elsiocb->iocb; 6017 oldcmd = &oldiocb->iocb; 6018 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 6019 ulp_context = elsiocb->iocb.ulpContext; 6020 icmd->unsli3.rcvsli3.ox_id = 6021 oldcmd->unsli3.rcvsli3.ox_id; 6022 } 6023 6024 /* Xmit ADISC ACC response tag <ulpIoTag> */ 6025 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6026 "0130 Xmit ADISC ACC response iotag x%x xri: " 6027 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n", 6028 elsiocb->iotag, ulp_context, 6029 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 6030 ndlp->nlp_rpi); 6031 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 6032 6033 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 6034 pcmd += sizeof(uint32_t); 6035 6036 ap = (ADISC *) (pcmd); 6037 ap->hardAL_PA = phba->fc_pref_ALPA; 6038 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 6039 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 6040 ap->DID = be32_to_cpu(vport->fc_myDID); 6041 6042 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6043 "Issue ACC ADISC: did:x%x flg:x%x refcnt %d", 6044 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 6045 6046 phba->fc_stat.elsXmitACC++; 6047 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 6048 elsiocb->ndlp = lpfc_nlp_get(ndlp); 6049 if (!elsiocb->ndlp) { 6050 lpfc_els_free_iocb(phba, elsiocb); 6051 return 1; 6052 } 6053 6054 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6055 if (rc == IOCB_ERROR) { 6056 lpfc_els_free_iocb(phba, elsiocb); 6057 lpfc_nlp_put(ndlp); 6058 return 1; 6059 } 6060 6061 return 0; 6062 } 6063 6064 /** 6065 * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd 6066 * @vport: pointer to a virtual N_Port data structure. 6067 * @oldiocb: pointer to the original lpfc command iocb data structure. 6068 * @ndlp: pointer to a node-list data structure. 6069 * 6070 * This routine prepares and issues an Accept (ACC) response to Process 6071 * Login (PRLI) ELS command. It simply prepares the payload of the IOCB 6072 * and invokes the lpfc_sli_issue_iocb() routine to send out the command. 
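 *
 * Word 1 of the received PRLI payload is examined to decide whether the
 * accept is built as an FCP PRLI ACC or as an NVMe PRLI ACC; any other
 * PRLI type is not accepted and the routine returns failure.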
6073 * 6074 * Note that the ndlp reference count will be incremented by 1 for holding the 6075 * ndlp and the reference to ndlp will be stored into the ndlp field of 6076 * the IOCB for the completion callback function to the PRLI Accept response 6077 * ELS IOCB command. 6078 * 6079 * Return code 6080 * 0 - Successfully issued acc prli response 6081 * 1 - Failed to issue acc prli response 6082 **/ 6083 int 6084 lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 6085 struct lpfc_nodelist *ndlp) 6086 { 6087 struct lpfc_hba *phba = vport->phba; 6088 PRLI *npr; 6089 struct lpfc_nvme_prli *npr_nvme; 6090 lpfc_vpd_t *vpd; 6091 IOCB_t *icmd; 6092 IOCB_t *oldcmd; 6093 union lpfc_wqe128 *wqe; 6094 struct lpfc_iocbq *elsiocb; 6095 uint8_t *pcmd; 6096 uint16_t cmdsize; 6097 uint32_t prli_fc4_req, *req_payload; 6098 struct lpfc_dmabuf *req_buf; 6099 int rc; 6100 u32 elsrspcmd, ulp_context; 6101 6102 /* Need the incoming PRLI payload to determine if the ACC is for an 6103 * FC4 or NVME PRLI type. The PRLI type is at word 1. 6104 */ 6105 req_buf = oldiocb->cmd_dmabuf; 6106 req_payload = (((uint32_t *)req_buf->virt) + 1); 6107 6108 /* PRLI type payload is at byte 3 for FCP or NVME. */ 6109 prli_fc4_req = be32_to_cpu(*req_payload); 6110 prli_fc4_req = (prli_fc4_req >> 24) & 0xff; 6111 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6112 "6127 PRLI_ACC: Req Type x%x, Word1 x%08x\n", 6113 prli_fc4_req, *((uint32_t *)req_payload)); 6114 6115 if (prli_fc4_req == PRLI_FCP_TYPE) { 6116 cmdsize = sizeof(uint32_t) + sizeof(PRLI); 6117 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)); 6118 } else if (prli_fc4_req == PRLI_NVME_TYPE) { 6119 cmdsize = sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli); 6120 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_NVMEPRLI & ~ELS_RSP_MASK)); 6121 } else { 6122 return 1; 6123 } 6124 6125 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 6126 ndlp->nlp_DID, elsrspcmd); 6127 if (!elsiocb) 6128 return 1; 6129 6130 if (phba->sli_rev == LPFC_SLI_REV4) { 6131 wqe = &elsiocb->wqe; 6132 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 6133 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */ 6134 ulp_context = get_job_ulpcontext(phba, elsiocb); 6135 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 6136 get_job_rcvoxid(phba, oldiocb)); 6137 } else { 6138 icmd = &elsiocb->iocb; 6139 oldcmd = &oldiocb->iocb; 6140 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 6141 ulp_context = elsiocb->iocb.ulpContext; 6142 icmd->unsli3.rcvsli3.ox_id = 6143 oldcmd->unsli3.rcvsli3.ox_id; 6144 } 6145 6146 /* Xmit PRLI ACC response tag <ulpIoTag> */ 6147 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6148 "0131 Xmit PRLI ACC response tag x%x xri x%x, " 6149 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 6150 elsiocb->iotag, ulp_context, 6151 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 6152 ndlp->nlp_rpi); 6153 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 6154 memset(pcmd, 0, cmdsize); 6155 6156 *((uint32_t *)(pcmd)) = elsrspcmd; 6157 pcmd += sizeof(uint32_t); 6158 6159 /* For PRLI, remainder of payload is PRLI parameter page */ 6160 vpd = &phba->vpd; 6161 6162 if (prli_fc4_req == PRLI_FCP_TYPE) { 6163 /* 6164 * If the remote port is a target and our firmware version 6165 * is 3.20 or later, set the following bits for FC-TAPE 6166 * support. 
6167 */ 6168 npr = (PRLI *) pcmd; 6169 if ((ndlp->nlp_type & NLP_FCP_TARGET) && 6170 (vpd->rev.feaLevelHigh >= 0x02)) { 6171 npr->ConfmComplAllowed = 1; 6172 npr->Retry = 1; 6173 npr->TaskRetryIdReq = 1; 6174 } 6175 npr->acceptRspCode = PRLI_REQ_EXECUTED; 6176 6177 /* Set image pair for complementary pairs only. */ 6178 if (ndlp->nlp_type & NLP_FCP_TARGET) 6179 npr->estabImagePair = 1; 6180 else 6181 npr->estabImagePair = 0; 6182 npr->readXferRdyDis = 1; 6183 npr->ConfmComplAllowed = 1; 6184 npr->prliType = PRLI_FCP_TYPE; 6185 npr->initiatorFunc = 1; 6186 6187 /* Xmit PRLI ACC response tag <ulpIoTag> */ 6188 lpfc_printf_vlog(vport, KERN_INFO, 6189 LOG_ELS | LOG_NODE | LOG_DISCOVERY, 6190 "6014 FCP issue PRLI ACC imgpair %d " 6191 "retry %d task %d\n", 6192 npr->estabImagePair, 6193 npr->Retry, npr->TaskRetryIdReq); 6194 6195 } else if (prli_fc4_req == PRLI_NVME_TYPE) { 6196 /* Respond with an NVME PRLI Type */ 6197 npr_nvme = (struct lpfc_nvme_prli *) pcmd; 6198 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); 6199 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ 6200 bf_set(prli_acc_rsp_code, npr_nvme, PRLI_REQ_EXECUTED); 6201 if (phba->nvmet_support) { 6202 bf_set(prli_tgt, npr_nvme, 1); 6203 bf_set(prli_disc, npr_nvme, 1); 6204 if (phba->cfg_nvme_enable_fb) { 6205 bf_set(prli_fba, npr_nvme, 1); 6206 6207 /* TBD. Target mode needs to post buffers 6208 * that support the configured first burst 6209 * byte size. 6210 */ 6211 bf_set(prli_fb_sz, npr_nvme, 6212 phba->cfg_nvmet_fb_size); 6213 } 6214 } else { 6215 bf_set(prli_init, npr_nvme, 1); 6216 } 6217 6218 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 6219 "6015 NVME issue PRLI ACC word1 x%08x " 6220 "word4 x%08x word5 x%08x flag x%x, " 6221 "fcp_info x%x nlp_type x%x\n", 6222 npr_nvme->word1, npr_nvme->word4, 6223 npr_nvme->word5, ndlp->nlp_flag, 6224 ndlp->nlp_fcp_info, ndlp->nlp_type); 6225 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); 6226 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); 6227 npr_nvme->word5 = cpu_to_be32(npr_nvme->word5); 6228 } else 6229 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6230 "6128 Unknown FC_TYPE x%x x%x ndlp x%06x\n", 6231 prli_fc4_req, ndlp->nlp_fc4_type, 6232 ndlp->nlp_DID); 6233 6234 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6235 "Issue ACC PRLI: did:x%x flg:x%x", 6236 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 6237 6238 phba->fc_stat.elsXmitACC++; 6239 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 6240 elsiocb->ndlp = lpfc_nlp_get(ndlp); 6241 if (!elsiocb->ndlp) { 6242 lpfc_els_free_iocb(phba, elsiocb); 6243 return 1; 6244 } 6245 6246 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6247 if (rc == IOCB_ERROR) { 6248 lpfc_els_free_iocb(phba, elsiocb); 6249 lpfc_nlp_put(ndlp); 6250 return 1; 6251 } 6252 6253 return 0; 6254 } 6255 6256 /** 6257 * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command 6258 * @vport: pointer to a virtual N_Port data structure. 6259 * @format: rnid command format. 6260 * @oldiocb: pointer to the original lpfc command iocb data structure. 6261 * @ndlp: pointer to a node-list data structure. 6262 * 6263 * This routine issues a Request Node Identification Data (RNID) Accept 6264 * (ACC) response. It constructs the RNID ACC response command according to 6265 * the proper @format and then calls the lpfc_sli_issue_iocb() routine to 6266 * issue the response. 
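 *
 * A @format of zero returns only the common identification data, while
 * RNID_TOPOLOGY_DISC additionally returns the topology discovery data;
 * any other format is answered with zero-length identification data.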
6267 * 6268 * Note that the ndlp reference count will be incremented by 1 for holding the 6269 * ndlp and the reference to ndlp will be stored into the ndlp field of 6270 * the IOCB for the completion callback function. 6271 * 6272 * Return code 6273 * 0 - Successfully issued acc rnid response 6274 * 1 - Failed to issue acc rnid response 6275 **/ 6276 static int 6277 lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, 6278 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 6279 { 6280 struct lpfc_hba *phba = vport->phba; 6281 RNID *rn; 6282 IOCB_t *icmd, *oldcmd; 6283 union lpfc_wqe128 *wqe; 6284 struct lpfc_iocbq *elsiocb; 6285 uint8_t *pcmd; 6286 uint16_t cmdsize; 6287 int rc; 6288 u32 ulp_context; 6289 6290 cmdsize = sizeof(uint32_t) + sizeof(uint32_t) 6291 + (2 * sizeof(struct lpfc_name)); 6292 if (format) 6293 cmdsize += sizeof(RNID_TOP_DISC); 6294 6295 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 6296 ndlp->nlp_DID, ELS_CMD_ACC); 6297 if (!elsiocb) 6298 return 1; 6299 6300 if (phba->sli_rev == LPFC_SLI_REV4) { 6301 wqe = &elsiocb->wqe; 6302 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 6303 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */ 6304 ulp_context = get_job_ulpcontext(phba, elsiocb); 6305 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 6306 get_job_rcvoxid(phba, oldiocb)); 6307 } else { 6308 icmd = &elsiocb->iocb; 6309 oldcmd = &oldiocb->iocb; 6310 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 6311 ulp_context = elsiocb->iocb.ulpContext; 6312 icmd->unsli3.rcvsli3.ox_id = 6313 oldcmd->unsli3.rcvsli3.ox_id; 6314 } 6315 6316 /* Xmit RNID ACC response tag <ulpIoTag> */ 6317 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6318 "0132 Xmit RNID ACC response tag x%x xri x%x\n", 6319 elsiocb->iotag, ulp_context); 6320 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 6321 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 6322 pcmd += sizeof(uint32_t); 6323 6324 memset(pcmd, 0, sizeof(RNID)); 6325 rn = (RNID *) (pcmd); 6326 rn->Format = format; 6327 rn->CommonLen = (2 * sizeof(struct lpfc_name)); 6328 memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 6329 memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 6330 switch (format) { 6331 case 0: 6332 rn->SpecificLen = 0; 6333 break; 6334 case RNID_TOPOLOGY_DISC: 6335 rn->SpecificLen = sizeof(RNID_TOP_DISC); 6336 memcpy(&rn->un.topologyDisc.portName, 6337 &vport->fc_portname, sizeof(struct lpfc_name)); 6338 rn->un.topologyDisc.unitType = RNID_HBA; 6339 rn->un.topologyDisc.physPort = 0; 6340 rn->un.topologyDisc.attachedNodes = 0; 6341 break; 6342 default: 6343 rn->CommonLen = 0; 6344 rn->SpecificLen = 0; 6345 break; 6346 } 6347 6348 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6349 "Issue ACC RNID: did:x%x flg:x%x refcnt %d", 6350 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 6351 6352 phba->fc_stat.elsXmitACC++; 6353 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 6354 elsiocb->ndlp = lpfc_nlp_get(ndlp); 6355 if (!elsiocb->ndlp) { 6356 lpfc_els_free_iocb(phba, elsiocb); 6357 return 1; 6358 } 6359 6360 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6361 if (rc == IOCB_ERROR) { 6362 lpfc_els_free_iocb(phba, elsiocb); 6363 lpfc_nlp_put(ndlp); 6364 return 1; 6365 } 6366 6367 return 0; 6368 } 6369 6370 /** 6371 * lpfc_els_clear_rrq - Clear the rq that this rrq describes. 6372 * @vport: pointer to a virtual N_Port data structure. 6373 * @iocb: pointer to the lpfc command iocb data structure. 6374 * @ndlp: pointer to a node-list data structure. 
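 *
 * This routine parses the RRQ payload carried by @iocb, selects the local
 * exchange identifier (the OX_ID when this port originated the exchange,
 * otherwise the RX_ID), looks up the matching active RRQ for @ndlp and
 * clears it via lpfc_clr_rrq_active().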
6375 * 6376 * Return 6377 **/ 6378 static void 6379 lpfc_els_clear_rrq(struct lpfc_vport *vport, 6380 struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp) 6381 { 6382 struct lpfc_hba *phba = vport->phba; 6383 uint8_t *pcmd; 6384 struct RRQ *rrq; 6385 uint16_t rxid; 6386 uint16_t xri; 6387 struct lpfc_node_rrq *prrq; 6388 6389 6390 pcmd = (uint8_t *)iocb->cmd_dmabuf->virt; 6391 pcmd += sizeof(uint32_t); 6392 rrq = (struct RRQ *)pcmd; 6393 rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg); 6394 rxid = bf_get(rrq_rxid, rrq); 6395 6396 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6397 "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x" 6398 " x%x x%x\n", 6399 be32_to_cpu(bf_get(rrq_did, rrq)), 6400 bf_get(rrq_oxid, rrq), 6401 rxid, 6402 get_wqe_reqtag(iocb), 6403 get_job_ulpcontext(phba, iocb)); 6404 6405 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6406 "Clear RRQ: did:x%x flg:x%x exchg:x%.08x", 6407 ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg); 6408 if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq))) 6409 xri = bf_get(rrq_oxid, rrq); 6410 else 6411 xri = rxid; 6412 prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID); 6413 if (prrq) 6414 lpfc_clr_rrq_active(phba, xri, prrq); 6415 return; 6416 } 6417 6418 /** 6419 * lpfc_els_rsp_echo_acc - Issue echo acc response 6420 * @vport: pointer to a virtual N_Port data structure. 6421 * @data: pointer to echo data to return in the accept. 6422 * @oldiocb: pointer to the original lpfc command iocb data structure. 6423 * @ndlp: pointer to a node-list data structure. 6424 * 6425 * Return code 6426 * 0 - Successfully issued acc echo response 6427 * 1 - Failed to issue acc echo response 6428 **/ 6429 static int 6430 lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data, 6431 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 6432 { 6433 struct lpfc_hba *phba = vport->phba; 6434 IOCB_t *icmd, *oldcmd; 6435 union lpfc_wqe128 *wqe; 6436 struct lpfc_iocbq *elsiocb; 6437 uint8_t *pcmd; 6438 uint16_t cmdsize; 6439 int rc; 6440 u32 ulp_context; 6441 6442 if (phba->sli_rev == LPFC_SLI_REV4) 6443 cmdsize = oldiocb->wcqe_cmpl.total_data_placed; 6444 else 6445 cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len; 6446 6447 /* The accumulated length can exceed the BPL_SIZE. 
For 6448 * now, use this as the limit 6449 */ 6450 if (cmdsize > LPFC_BPL_SIZE) 6451 cmdsize = LPFC_BPL_SIZE; 6452 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 6453 ndlp->nlp_DID, ELS_CMD_ACC); 6454 if (!elsiocb) 6455 return 1; 6456 6457 if (phba->sli_rev == LPFC_SLI_REV4) { 6458 wqe = &elsiocb->wqe; 6459 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 6460 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */ 6461 ulp_context = get_job_ulpcontext(phba, elsiocb); 6462 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 6463 get_job_rcvoxid(phba, oldiocb)); 6464 } else { 6465 icmd = &elsiocb->iocb; 6466 oldcmd = &oldiocb->iocb; 6467 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 6468 ulp_context = elsiocb->iocb.ulpContext; 6469 icmd->unsli3.rcvsli3.ox_id = 6470 oldcmd->unsli3.rcvsli3.ox_id; 6471 } 6472 6473 /* Xmit ECHO ACC response tag <ulpIoTag> */ 6474 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6475 "2876 Xmit ECHO ACC response tag x%x xri x%x\n", 6476 elsiocb->iotag, ulp_context); 6477 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 6478 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 6479 pcmd += sizeof(uint32_t); 6480 memcpy(pcmd, data, cmdsize - sizeof(uint32_t)); 6481 6482 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6483 "Issue ACC ECHO: did:x%x flg:x%x refcnt %d", 6484 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 6485 6486 phba->fc_stat.elsXmitACC++; 6487 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 6488 elsiocb->ndlp = lpfc_nlp_get(ndlp); 6489 if (!elsiocb->ndlp) { 6490 lpfc_els_free_iocb(phba, elsiocb); 6491 return 1; 6492 } 6493 6494 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6495 if (rc == IOCB_ERROR) { 6496 lpfc_els_free_iocb(phba, elsiocb); 6497 lpfc_nlp_put(ndlp); 6498 return 1; 6499 } 6500 6501 return 0; 6502 } 6503 6504 /** 6505 * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport 6506 * @vport: pointer to a host virtual N_Port data structure. 6507 * 6508 * This routine issues Address Discover (ADISC) ELS commands to those 6509 * N_Ports which are in node port recovery state and ADISC has not been issued 6510 * for the @vport. Each time an ELS ADISC IOCB is issued by invoking the 6511 * lpfc_issue_els_adisc() routine, the per @vport number of discover count 6512 * (num_disc_nodes) shall be incremented. If the num_disc_nodes reaches a 6513 * pre-configured threshold (cfg_discovery_threads), the @vport fc_flag will 6514 * be marked with FC_NLP_MORE bit and the process of issuing remaining ADISC 6515 * IOCBs quit for later pick up. On the other hand, after walking through 6516 * all the ndlps with the @vport and there is none ADISC IOCB issued, the 6517 * @vport fc_flag shall be cleared with FC_NLP_MORE bit indicating there is 6518 * no more ADISC need to be sent. 6519 * 6520 * Return code 6521 * The number of N_Ports with adisc issued. 
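 *
 * When no ADISC at all is issued, FC_NLP_MORE is cleared so the caller
 * can tell that this vport has no more NPR nodes waiting on ADISC.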
6522 **/ 6523 int 6524 lpfc_els_disc_adisc(struct lpfc_vport *vport) 6525 { 6526 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6527 struct lpfc_nodelist *ndlp, *next_ndlp; 6528 int sentadisc = 0; 6529 6530 /* go thru NPR nodes and issue any remaining ELS ADISCs */ 6531 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 6532 6533 if (ndlp->nlp_state != NLP_STE_NPR_NODE || 6534 !(ndlp->nlp_flag & NLP_NPR_ADISC)) 6535 continue; 6536 6537 spin_lock_irq(&ndlp->lock); 6538 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 6539 spin_unlock_irq(&ndlp->lock); 6540 6541 if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) { 6542 /* This node was marked for ADISC but was not picked 6543 * for discovery. This is possible if the node was 6544 * missing in gidft response. 6545 * 6546 * At time of marking node for ADISC, we skipped unreg 6547 * from backend 6548 */ 6549 lpfc_nlp_unreg_node(vport, ndlp); 6550 lpfc_unreg_rpi(vport, ndlp); 6551 continue; 6552 } 6553 6554 ndlp->nlp_prev_state = ndlp->nlp_state; 6555 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 6556 lpfc_issue_els_adisc(vport, ndlp, 0); 6557 sentadisc++; 6558 vport->num_disc_nodes++; 6559 if (vport->num_disc_nodes >= 6560 vport->cfg_discovery_threads) { 6561 spin_lock_irq(shost->host_lock); 6562 vport->fc_flag |= FC_NLP_MORE; 6563 spin_unlock_irq(shost->host_lock); 6564 break; 6565 } 6566 6567 } 6568 if (sentadisc == 0) { 6569 spin_lock_irq(shost->host_lock); 6570 vport->fc_flag &= ~FC_NLP_MORE; 6571 spin_unlock_irq(shost->host_lock); 6572 } 6573 return sentadisc; 6574 } 6575 6576 /** 6577 * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc 6578 * @vport: pointer to a host virtual N_Port data structure. 6579 * 6580 * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports 6581 * which are in node port recovery state, with a @vport. Each time an ELS 6582 * ADISC PLOGI IOCB is issued by invoking the lpfc_issue_els_plogi() routine, 6583 * the per @vport number of discover count (num_disc_nodes) shall be 6584 * incremented. If the num_disc_nodes reaches a pre-configured threshold 6585 * (cfg_discovery_threads), the @vport fc_flag will be marked with FC_NLP_MORE 6586 * bit set and quit the process of issuing remaining ADISC PLOGIN IOCBs for 6587 * later pick up. On the other hand, after walking through all the ndlps with 6588 * the @vport and there is none ADISC PLOGI IOCB issued, the @vport fc_flag 6589 * shall be cleared with the FC_NLP_MORE bit indicating there is no more ADISC 6590 * PLOGI need to be sent. 6591 * 6592 * Return code 6593 * The number of N_Ports with plogi issued. 
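 *
 * Only NPR nodes marked NLP_NPR_2B_DISC that are neither waiting on the
 * delay timer nor marked for ADISC are sent a PLOGI here; when at least
 * one PLOGI is issued the discovery timer is restarted via
 * lpfc_set_disctmo().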
6594 **/ 6595 int 6596 lpfc_els_disc_plogi(struct lpfc_vport *vport) 6597 { 6598 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6599 struct lpfc_nodelist *ndlp, *next_ndlp; 6600 int sentplogi = 0; 6601 6602 /* go thru NPR nodes and issue any remaining ELS PLOGIs */ 6603 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 6604 if (ndlp->nlp_state == NLP_STE_NPR_NODE && 6605 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && 6606 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 && 6607 (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) { 6608 ndlp->nlp_prev_state = ndlp->nlp_state; 6609 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 6610 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 6611 sentplogi++; 6612 vport->num_disc_nodes++; 6613 if (vport->num_disc_nodes >= 6614 vport->cfg_discovery_threads) { 6615 spin_lock_irq(shost->host_lock); 6616 vport->fc_flag |= FC_NLP_MORE; 6617 spin_unlock_irq(shost->host_lock); 6618 break; 6619 } 6620 } 6621 } 6622 6623 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6624 "6452 Discover PLOGI %d flag x%x\n", 6625 sentplogi, vport->fc_flag); 6626 6627 if (sentplogi) { 6628 lpfc_set_disctmo(vport); 6629 } 6630 else { 6631 spin_lock_irq(shost->host_lock); 6632 vport->fc_flag &= ~FC_NLP_MORE; 6633 spin_unlock_irq(shost->host_lock); 6634 } 6635 return sentplogi; 6636 } 6637 6638 static uint32_t 6639 lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc, 6640 uint32_t word0) 6641 { 6642 6643 desc->tag = cpu_to_be32(RDP_LINK_SERVICE_DESC_TAG); 6644 desc->payload.els_req = word0; 6645 desc->length = cpu_to_be32(sizeof(desc->payload)); 6646 6647 return sizeof(struct fc_rdp_link_service_desc); 6648 } 6649 6650 static uint32_t 6651 lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc, 6652 uint8_t *page_a0, uint8_t *page_a2) 6653 { 6654 uint16_t wavelength; 6655 uint16_t temperature; 6656 uint16_t rx_power; 6657 uint16_t tx_bias; 6658 uint16_t tx_power; 6659 uint16_t vcc; 6660 uint16_t flag = 0; 6661 struct sff_trasnceiver_codes_byte4 *trasn_code_byte4; 6662 struct sff_trasnceiver_codes_byte5 *trasn_code_byte5; 6663 6664 desc->tag = cpu_to_be32(RDP_SFP_DESC_TAG); 6665 6666 trasn_code_byte4 = (struct sff_trasnceiver_codes_byte4 *) 6667 &page_a0[SSF_TRANSCEIVER_CODE_B4]; 6668 trasn_code_byte5 = (struct sff_trasnceiver_codes_byte5 *) 6669 &page_a0[SSF_TRANSCEIVER_CODE_B5]; 6670 6671 if ((trasn_code_byte4->fc_sw_laser) || 6672 (trasn_code_byte5->fc_sw_laser_sl) || 6673 (trasn_code_byte5->fc_sw_laser_sn)) { /* check if its short WL */ 6674 flag |= (SFP_FLAG_PT_SWLASER << SFP_FLAG_PT_SHIFT); 6675 } else if (trasn_code_byte4->fc_lw_laser) { 6676 wavelength = (page_a0[SSF_WAVELENGTH_B1] << 8) | 6677 page_a0[SSF_WAVELENGTH_B0]; 6678 if (wavelength == SFP_WAVELENGTH_LC1310) 6679 flag |= SFP_FLAG_PT_LWLASER_LC1310 << SFP_FLAG_PT_SHIFT; 6680 if (wavelength == SFP_WAVELENGTH_LL1550) 6681 flag |= SFP_FLAG_PT_LWLASER_LL1550 << SFP_FLAG_PT_SHIFT; 6682 } 6683 /* check if its SFP+ */ 6684 flag |= ((page_a0[SSF_IDENTIFIER] == SFF_PG0_IDENT_SFP) ? 6685 SFP_FLAG_CT_SFP_PLUS : SFP_FLAG_CT_UNKNOWN) 6686 << SFP_FLAG_CT_SHIFT; 6687 6688 /* check if its OPTICAL */ 6689 flag |= ((page_a0[SSF_CONNECTOR] == SFF_PG0_CONNECTOR_LC) ? 
6690 SFP_FLAG_IS_OPTICAL_PORT : 0) 6691 << SFP_FLAG_IS_OPTICAL_SHIFT; 6692 6693 temperature = (page_a2[SFF_TEMPERATURE_B1] << 8 | 6694 page_a2[SFF_TEMPERATURE_B0]); 6695 vcc = (page_a2[SFF_VCC_B1] << 8 | 6696 page_a2[SFF_VCC_B0]); 6697 tx_power = (page_a2[SFF_TXPOWER_B1] << 8 | 6698 page_a2[SFF_TXPOWER_B0]); 6699 tx_bias = (page_a2[SFF_TX_BIAS_CURRENT_B1] << 8 | 6700 page_a2[SFF_TX_BIAS_CURRENT_B0]); 6701 rx_power = (page_a2[SFF_RXPOWER_B1] << 8 | 6702 page_a2[SFF_RXPOWER_B0]); 6703 desc->sfp_info.temperature = cpu_to_be16(temperature); 6704 desc->sfp_info.rx_power = cpu_to_be16(rx_power); 6705 desc->sfp_info.tx_bias = cpu_to_be16(tx_bias); 6706 desc->sfp_info.tx_power = cpu_to_be16(tx_power); 6707 desc->sfp_info.vcc = cpu_to_be16(vcc); 6708 6709 desc->sfp_info.flags = cpu_to_be16(flag); 6710 desc->length = cpu_to_be32(sizeof(desc->sfp_info)); 6711 6712 return sizeof(struct fc_rdp_sfp_desc); 6713 } 6714 6715 static uint32_t 6716 lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc, 6717 READ_LNK_VAR *stat) 6718 { 6719 uint32_t type; 6720 6721 desc->tag = cpu_to_be32(RDP_LINK_ERROR_STATUS_DESC_TAG); 6722 6723 type = VN_PT_PHY_PF_PORT << VN_PT_PHY_SHIFT; 6724 6725 desc->info.port_type = cpu_to_be32(type); 6726 6727 desc->info.link_status.link_failure_cnt = 6728 cpu_to_be32(stat->linkFailureCnt); 6729 desc->info.link_status.loss_of_synch_cnt = 6730 cpu_to_be32(stat->lossSyncCnt); 6731 desc->info.link_status.loss_of_signal_cnt = 6732 cpu_to_be32(stat->lossSignalCnt); 6733 desc->info.link_status.primitive_seq_proto_err = 6734 cpu_to_be32(stat->primSeqErrCnt); 6735 desc->info.link_status.invalid_trans_word = 6736 cpu_to_be32(stat->invalidXmitWord); 6737 desc->info.link_status.invalid_crc_cnt = cpu_to_be32(stat->crcCnt); 6738 6739 desc->length = cpu_to_be32(sizeof(desc->info)); 6740 6741 return sizeof(struct fc_rdp_link_error_status_desc); 6742 } 6743 6744 static uint32_t 6745 lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat, 6746 struct lpfc_vport *vport) 6747 { 6748 uint32_t bbCredit; 6749 6750 desc->tag = cpu_to_be32(RDP_BBC_DESC_TAG); 6751 6752 bbCredit = vport->fc_sparam.cmn.bbCreditLsb | 6753 (vport->fc_sparam.cmn.bbCreditMsb << 8); 6754 desc->bbc_info.port_bbc = cpu_to_be32(bbCredit); 6755 if (vport->phba->fc_topology != LPFC_TOPOLOGY_LOOP) { 6756 bbCredit = vport->phba->fc_fabparam.cmn.bbCreditLsb | 6757 (vport->phba->fc_fabparam.cmn.bbCreditMsb << 8); 6758 desc->bbc_info.attached_port_bbc = cpu_to_be32(bbCredit); 6759 } else { 6760 desc->bbc_info.attached_port_bbc = 0; 6761 } 6762 6763 desc->bbc_info.rtt = 0; 6764 desc->length = cpu_to_be32(sizeof(desc->bbc_info)); 6765 6766 return sizeof(struct fc_rdp_bbc_desc); 6767 } 6768 6769 static uint32_t 6770 lpfc_rdp_res_oed_temp_desc(struct lpfc_hba *phba, 6771 struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2) 6772 { 6773 uint32_t flags = 0; 6774 6775 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6776 6777 desc->oed_info.hi_alarm = page_a2[SSF_TEMP_HIGH_ALARM]; 6778 desc->oed_info.lo_alarm = page_a2[SSF_TEMP_LOW_ALARM]; 6779 desc->oed_info.hi_warning = page_a2[SSF_TEMP_HIGH_WARNING]; 6780 desc->oed_info.lo_warning = page_a2[SSF_TEMP_LOW_WARNING]; 6781 6782 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TEMPERATURE) 6783 flags |= RDP_OET_HIGH_ALARM; 6784 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TEMPERATURE) 6785 flags |= RDP_OET_LOW_ALARM; 6786 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TEMPERATURE) 6787 flags |= RDP_OET_HIGH_WARNING; 6788 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TEMPERATURE) 6789 
flags |= RDP_OET_LOW_WARNING; 6790 6791 flags |= ((0xf & RDP_OED_TEMPERATURE) << RDP_OED_TYPE_SHIFT); 6792 desc->oed_info.function_flags = cpu_to_be32(flags); 6793 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6794 return sizeof(struct fc_rdp_oed_sfp_desc); 6795 } 6796 6797 static uint32_t 6798 lpfc_rdp_res_oed_voltage_desc(struct lpfc_hba *phba, 6799 struct fc_rdp_oed_sfp_desc *desc, 6800 uint8_t *page_a2) 6801 { 6802 uint32_t flags = 0; 6803 6804 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6805 6806 desc->oed_info.hi_alarm = page_a2[SSF_VOLTAGE_HIGH_ALARM]; 6807 desc->oed_info.lo_alarm = page_a2[SSF_VOLTAGE_LOW_ALARM]; 6808 desc->oed_info.hi_warning = page_a2[SSF_VOLTAGE_HIGH_WARNING]; 6809 desc->oed_info.lo_warning = page_a2[SSF_VOLTAGE_LOW_WARNING]; 6810 6811 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_VOLTAGE) 6812 flags |= RDP_OET_HIGH_ALARM; 6813 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_VOLTAGE) 6814 flags |= RDP_OET_LOW_ALARM; 6815 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_VOLTAGE) 6816 flags |= RDP_OET_HIGH_WARNING; 6817 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_VOLTAGE) 6818 flags |= RDP_OET_LOW_WARNING; 6819 6820 flags |= ((0xf & RDP_OED_VOLTAGE) << RDP_OED_TYPE_SHIFT); 6821 desc->oed_info.function_flags = cpu_to_be32(flags); 6822 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6823 return sizeof(struct fc_rdp_oed_sfp_desc); 6824 } 6825 6826 static uint32_t 6827 lpfc_rdp_res_oed_txbias_desc(struct lpfc_hba *phba, 6828 struct fc_rdp_oed_sfp_desc *desc, 6829 uint8_t *page_a2) 6830 { 6831 uint32_t flags = 0; 6832 6833 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6834 6835 desc->oed_info.hi_alarm = page_a2[SSF_BIAS_HIGH_ALARM]; 6836 desc->oed_info.lo_alarm = page_a2[SSF_BIAS_LOW_ALARM]; 6837 desc->oed_info.hi_warning = page_a2[SSF_BIAS_HIGH_WARNING]; 6838 desc->oed_info.lo_warning = page_a2[SSF_BIAS_LOW_WARNING]; 6839 6840 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXBIAS) 6841 flags |= RDP_OET_HIGH_ALARM; 6842 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXBIAS) 6843 flags |= RDP_OET_LOW_ALARM; 6844 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXBIAS) 6845 flags |= RDP_OET_HIGH_WARNING; 6846 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXBIAS) 6847 flags |= RDP_OET_LOW_WARNING; 6848 6849 flags |= ((0xf & RDP_OED_TXBIAS) << RDP_OED_TYPE_SHIFT); 6850 desc->oed_info.function_flags = cpu_to_be32(flags); 6851 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6852 return sizeof(struct fc_rdp_oed_sfp_desc); 6853 } 6854 6855 static uint32_t 6856 lpfc_rdp_res_oed_txpower_desc(struct lpfc_hba *phba, 6857 struct fc_rdp_oed_sfp_desc *desc, 6858 uint8_t *page_a2) 6859 { 6860 uint32_t flags = 0; 6861 6862 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6863 6864 desc->oed_info.hi_alarm = page_a2[SSF_TXPOWER_HIGH_ALARM]; 6865 desc->oed_info.lo_alarm = page_a2[SSF_TXPOWER_LOW_ALARM]; 6866 desc->oed_info.hi_warning = page_a2[SSF_TXPOWER_HIGH_WARNING]; 6867 desc->oed_info.lo_warning = page_a2[SSF_TXPOWER_LOW_WARNING]; 6868 6869 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXPOWER) 6870 flags |= RDP_OET_HIGH_ALARM; 6871 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXPOWER) 6872 flags |= RDP_OET_LOW_ALARM; 6873 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXPOWER) 6874 flags |= RDP_OET_HIGH_WARNING; 6875 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXPOWER) 6876 flags |= RDP_OET_LOW_WARNING; 6877 6878 flags |= ((0xf & RDP_OED_TXPOWER) << RDP_OED_TYPE_SHIFT); 6879 desc->oed_info.function_flags = cpu_to_be32(flags); 6880 desc->length = 
cpu_to_be32(sizeof(desc->oed_info)); 6881 return sizeof(struct fc_rdp_oed_sfp_desc); 6882 } 6883 6884 6885 static uint32_t 6886 lpfc_rdp_res_oed_rxpower_desc(struct lpfc_hba *phba, 6887 struct fc_rdp_oed_sfp_desc *desc, 6888 uint8_t *page_a2) 6889 { 6890 uint32_t flags = 0; 6891 6892 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6893 6894 desc->oed_info.hi_alarm = page_a2[SSF_RXPOWER_HIGH_ALARM]; 6895 desc->oed_info.lo_alarm = page_a2[SSF_RXPOWER_LOW_ALARM]; 6896 desc->oed_info.hi_warning = page_a2[SSF_RXPOWER_HIGH_WARNING]; 6897 desc->oed_info.lo_warning = page_a2[SSF_RXPOWER_LOW_WARNING]; 6898 6899 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_RXPOWER) 6900 flags |= RDP_OET_HIGH_ALARM; 6901 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_RXPOWER) 6902 flags |= RDP_OET_LOW_ALARM; 6903 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_RXPOWER) 6904 flags |= RDP_OET_HIGH_WARNING; 6905 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_RXPOWER) 6906 flags |= RDP_OET_LOW_WARNING; 6907 6908 flags |= ((0xf & RDP_OED_RXPOWER) << RDP_OED_TYPE_SHIFT); 6909 desc->oed_info.function_flags = cpu_to_be32(flags); 6910 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6911 return sizeof(struct fc_rdp_oed_sfp_desc); 6912 } 6913 6914 static uint32_t 6915 lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc, 6916 uint8_t *page_a0, struct lpfc_vport *vport) 6917 { 6918 desc->tag = cpu_to_be32(RDP_OPD_DESC_TAG); 6919 memcpy(desc->opd_info.vendor_name, &page_a0[SSF_VENDOR_NAME], 16); 6920 memcpy(desc->opd_info.model_number, &page_a0[SSF_VENDOR_PN], 16); 6921 memcpy(desc->opd_info.serial_number, &page_a0[SSF_VENDOR_SN], 16); 6922 memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 4); 6923 memcpy(desc->opd_info.date, &page_a0[SSF_DATE_CODE], 8); 6924 desc->length = cpu_to_be32(sizeof(desc->opd_info)); 6925 return sizeof(struct fc_rdp_opd_sfp_desc); 6926 } 6927 6928 static uint32_t 6929 lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat) 6930 { 6931 if (bf_get(lpfc_read_link_stat_gec2, stat) == 0) 6932 return 0; 6933 desc->tag = cpu_to_be32(RDP_FEC_DESC_TAG); 6934 6935 desc->info.CorrectedBlocks = 6936 cpu_to_be32(stat->fecCorrBlkCount); 6937 desc->info.UncorrectableBlocks = 6938 cpu_to_be32(stat->fecUncorrBlkCount); 6939 6940 desc->length = cpu_to_be32(sizeof(desc->info)); 6941 6942 return sizeof(struct fc_fec_rdp_desc); 6943 } 6944 6945 static uint32_t 6946 lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba) 6947 { 6948 uint16_t rdp_cap = 0; 6949 uint16_t rdp_speed; 6950 6951 desc->tag = cpu_to_be32(RDP_PORT_SPEED_DESC_TAG); 6952 6953 switch (phba->fc_linkspeed) { 6954 case LPFC_LINK_SPEED_1GHZ: 6955 rdp_speed = RDP_PS_1GB; 6956 break; 6957 case LPFC_LINK_SPEED_2GHZ: 6958 rdp_speed = RDP_PS_2GB; 6959 break; 6960 case LPFC_LINK_SPEED_4GHZ: 6961 rdp_speed = RDP_PS_4GB; 6962 break; 6963 case LPFC_LINK_SPEED_8GHZ: 6964 rdp_speed = RDP_PS_8GB; 6965 break; 6966 case LPFC_LINK_SPEED_10GHZ: 6967 rdp_speed = RDP_PS_10GB; 6968 break; 6969 case LPFC_LINK_SPEED_16GHZ: 6970 rdp_speed = RDP_PS_16GB; 6971 break; 6972 case LPFC_LINK_SPEED_32GHZ: 6973 rdp_speed = RDP_PS_32GB; 6974 break; 6975 case LPFC_LINK_SPEED_64GHZ: 6976 rdp_speed = RDP_PS_64GB; 6977 break; 6978 case LPFC_LINK_SPEED_128GHZ: 6979 rdp_speed = RDP_PS_128GB; 6980 break; 6981 case LPFC_LINK_SPEED_256GHZ: 6982 rdp_speed = RDP_PS_256GB; 6983 break; 6984 default: 6985 rdp_speed = RDP_PS_UNKNOWN; 6986 break; 6987 } 6988 6989 desc->info.port_speed.speed = cpu_to_be16(rdp_speed); 6990 6991 if (phba->lmt & LMT_256Gb) 6992 
rdp_cap |= RDP_PS_256GB; 6993 if (phba->lmt & LMT_128Gb) 6994 rdp_cap |= RDP_PS_128GB; 6995 if (phba->lmt & LMT_64Gb) 6996 rdp_cap |= RDP_PS_64GB; 6997 if (phba->lmt & LMT_32Gb) 6998 rdp_cap |= RDP_PS_32GB; 6999 if (phba->lmt & LMT_16Gb) 7000 rdp_cap |= RDP_PS_16GB; 7001 if (phba->lmt & LMT_10Gb) 7002 rdp_cap |= RDP_PS_10GB; 7003 if (phba->lmt & LMT_8Gb) 7004 rdp_cap |= RDP_PS_8GB; 7005 if (phba->lmt & LMT_4Gb) 7006 rdp_cap |= RDP_PS_4GB; 7007 if (phba->lmt & LMT_2Gb) 7008 rdp_cap |= RDP_PS_2GB; 7009 if (phba->lmt & LMT_1Gb) 7010 rdp_cap |= RDP_PS_1GB; 7011 7012 if (rdp_cap == 0) 7013 rdp_cap = RDP_CAP_UNKNOWN; 7014 if (phba->cfg_link_speed != LPFC_USER_LINK_SPEED_AUTO) 7015 rdp_cap |= RDP_CAP_USER_CONFIGURED; 7016 7017 desc->info.port_speed.capabilities = cpu_to_be16(rdp_cap); 7018 desc->length = cpu_to_be32(sizeof(desc->info)); 7019 return sizeof(struct fc_rdp_port_speed_desc); 7020 } 7021 7022 static uint32_t 7023 lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc, 7024 struct lpfc_vport *vport) 7025 { 7026 7027 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG); 7028 7029 memcpy(desc->port_names.wwnn, &vport->fc_nodename, 7030 sizeof(desc->port_names.wwnn)); 7031 7032 memcpy(desc->port_names.wwpn, &vport->fc_portname, 7033 sizeof(desc->port_names.wwpn)); 7034 7035 desc->length = cpu_to_be32(sizeof(desc->port_names)); 7036 return sizeof(struct fc_rdp_port_name_desc); 7037 } 7038 7039 static uint32_t 7040 lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc, 7041 struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 7042 { 7043 7044 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG); 7045 if (vport->fc_flag & FC_FABRIC) { 7046 memcpy(desc->port_names.wwnn, &vport->fabric_nodename, 7047 sizeof(desc->port_names.wwnn)); 7048 7049 memcpy(desc->port_names.wwpn, &vport->fabric_portname, 7050 sizeof(desc->port_names.wwpn)); 7051 } else { /* Point to Point */ 7052 memcpy(desc->port_names.wwnn, &ndlp->nlp_nodename, 7053 sizeof(desc->port_names.wwnn)); 7054 7055 memcpy(desc->port_names.wwpn, &ndlp->nlp_portname, 7056 sizeof(desc->port_names.wwpn)); 7057 } 7058 7059 desc->length = cpu_to_be32(sizeof(desc->port_names)); 7060 return sizeof(struct fc_rdp_port_name_desc); 7061 } 7062 7063 static void 7064 lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context, 7065 int status) 7066 { 7067 struct lpfc_nodelist *ndlp = rdp_context->ndlp; 7068 struct lpfc_vport *vport = ndlp->vport; 7069 struct lpfc_iocbq *elsiocb; 7070 struct ulp_bde64 *bpl; 7071 IOCB_t *icmd; 7072 union lpfc_wqe128 *wqe; 7073 uint8_t *pcmd; 7074 struct ls_rjt *stat; 7075 struct fc_rdp_res_frame *rdp_res; 7076 uint32_t cmdsize, len; 7077 uint16_t *flag_ptr; 7078 int rc; 7079 u32 ulp_context; 7080 7081 if (status != SUCCESS) 7082 goto error; 7083 7084 /* This will change once we know the true size of the RDP payload */ 7085 cmdsize = sizeof(struct fc_rdp_res_frame); 7086 7087 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, 7088 lpfc_max_els_tries, rdp_context->ndlp, 7089 rdp_context->ndlp->nlp_DID, ELS_CMD_ACC); 7090 if (!elsiocb) 7091 goto free_rdp_context; 7092 7093 ulp_context = get_job_ulpcontext(phba, elsiocb); 7094 if (phba->sli_rev == LPFC_SLI_REV4) { 7095 wqe = &elsiocb->wqe; 7096 /* ox-id of the frame */ 7097 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 7098 rdp_context->ox_id); 7099 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 7100 rdp_context->rx_id); 7101 } else { 7102 icmd = &elsiocb->iocb; 7103 icmd->ulpContext = rdp_context->rx_id; 7104 icmd->unsli3.rcvsli3.ox_id = 
rdp_context->ox_id; 7105 } 7106 7107 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7108 "2171 Xmit RDP response tag x%x xri x%x, " 7109 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x", 7110 elsiocb->iotag, ulp_context, 7111 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 7112 ndlp->nlp_rpi); 7113 rdp_res = (struct fc_rdp_res_frame *)elsiocb->cmd_dmabuf->virt; 7114 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 7115 memset(pcmd, 0, sizeof(struct fc_rdp_res_frame)); 7116 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 7117 7118 /* Update Alarm and Warning */ 7119 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_ALARM_FLAGS); 7120 phba->sfp_alarm |= *flag_ptr; 7121 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_WARNING_FLAGS); 7122 phba->sfp_warning |= *flag_ptr; 7123 7124 /* For RDP payload */ 7125 len = 8; 7126 len += lpfc_rdp_res_link_service((struct fc_rdp_link_service_desc *) 7127 (len + pcmd), ELS_CMD_RDP); 7128 7129 len += lpfc_rdp_res_sfp_desc((struct fc_rdp_sfp_desc *)(len + pcmd), 7130 rdp_context->page_a0, rdp_context->page_a2); 7131 len += lpfc_rdp_res_speed((struct fc_rdp_port_speed_desc *)(len + pcmd), 7132 phba); 7133 len += lpfc_rdp_res_link_error((struct fc_rdp_link_error_status_desc *) 7134 (len + pcmd), &rdp_context->link_stat); 7135 len += lpfc_rdp_res_diag_port_names((struct fc_rdp_port_name_desc *) 7136 (len + pcmd), vport); 7137 len += lpfc_rdp_res_attach_port_names((struct fc_rdp_port_name_desc *) 7138 (len + pcmd), vport, ndlp); 7139 len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd), 7140 &rdp_context->link_stat); 7141 len += lpfc_rdp_res_bbc_desc((struct fc_rdp_bbc_desc *)(len + pcmd), 7142 &rdp_context->link_stat, vport); 7143 len += lpfc_rdp_res_oed_temp_desc(phba, 7144 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7145 rdp_context->page_a2); 7146 len += lpfc_rdp_res_oed_voltage_desc(phba, 7147 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7148 rdp_context->page_a2); 7149 len += lpfc_rdp_res_oed_txbias_desc(phba, 7150 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7151 rdp_context->page_a2); 7152 len += lpfc_rdp_res_oed_txpower_desc(phba, 7153 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7154 rdp_context->page_a2); 7155 len += lpfc_rdp_res_oed_rxpower_desc(phba, 7156 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7157 rdp_context->page_a2); 7158 len += lpfc_rdp_res_opd_desc((struct fc_rdp_opd_sfp_desc *)(len + pcmd), 7159 rdp_context->page_a0, vport); 7160 7161 rdp_res->length = cpu_to_be32(len - 8); 7162 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 7163 7164 /* Now that we know the true size of the payload, update the BPL */ 7165 bpl = (struct ulp_bde64 *)elsiocb->bpl_dmabuf->virt; 7166 bpl->tus.f.bdeSize = len; 7167 bpl->tus.f.bdeFlags = 0; 7168 bpl->tus.w = le32_to_cpu(bpl->tus.w); 7169 7170 phba->fc_stat.elsXmitACC++; 7171 elsiocb->ndlp = lpfc_nlp_get(ndlp); 7172 if (!elsiocb->ndlp) { 7173 lpfc_els_free_iocb(phba, elsiocb); 7174 goto free_rdp_context; 7175 } 7176 7177 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7178 if (rc == IOCB_ERROR) { 7179 lpfc_els_free_iocb(phba, elsiocb); 7180 lpfc_nlp_put(ndlp); 7181 } 7182 7183 goto free_rdp_context; 7184 7185 error: 7186 cmdsize = 2 * sizeof(uint32_t); 7187 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, lpfc_max_els_tries, 7188 ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT); 7189 if (!elsiocb) 7190 goto free_rdp_context; 7191 7192 if (phba->sli_rev == LPFC_SLI_REV4) { 7193 wqe = &elsiocb->wqe; 7194 /* ox-id of the frame */ 7195 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 7196 rdp_context->ox_id); 7197 
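		/* rx_id of the frame */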
bf_set(wqe_ctxt_tag, 7198 &wqe->xmit_els_rsp.wqe_com, 7199 rdp_context->rx_id); 7200 } else { 7201 icmd = &elsiocb->iocb; 7202 icmd->ulpContext = rdp_context->rx_id; 7203 icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id; 7204 } 7205 7206 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 7207 7208 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; 7209 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t)); 7210 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 7211 7212 phba->fc_stat.elsXmitLSRJT++; 7213 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 7214 elsiocb->ndlp = lpfc_nlp_get(ndlp); 7215 if (!elsiocb->ndlp) { 7216 lpfc_els_free_iocb(phba, elsiocb); 7217 goto free_rdp_context; 7218 } 7219 7220 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7221 if (rc == IOCB_ERROR) { 7222 lpfc_els_free_iocb(phba, elsiocb); 7223 lpfc_nlp_put(ndlp); 7224 } 7225 7226 free_rdp_context: 7227 /* This reference put is for the original unsolicited RDP. If the 7228 * prep failed, there is no reference to remove. 7229 */ 7230 lpfc_nlp_put(ndlp); 7231 kfree(rdp_context); 7232 } 7233 7234 static int 7235 lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context) 7236 { 7237 LPFC_MBOXQ_t *mbox = NULL; 7238 int rc; 7239 7240 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7241 if (!mbox) { 7242 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS, 7243 "7105 failed to allocate mailbox memory"); 7244 return 1; 7245 } 7246 7247 if (lpfc_sli4_dump_page_a0(phba, mbox)) 7248 goto rdp_fail; 7249 mbox->vport = rdp_context->ndlp->vport; 7250 mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0; 7251 mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context; 7252 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 7253 if (rc == MBX_NOT_FINISHED) { 7254 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 7255 return 1; 7256 } 7257 7258 return 0; 7259 7260 rdp_fail: 7261 mempool_free(mbox, phba->mbox_mem_pool); 7262 return 1; 7263 } 7264 7265 int lpfc_get_sfp_info_wait(struct lpfc_hba *phba, 7266 struct lpfc_rdp_context *rdp_context) 7267 { 7268 LPFC_MBOXQ_t *mbox = NULL; 7269 int rc; 7270 struct lpfc_dmabuf *mp; 7271 struct lpfc_dmabuf *mpsave; 7272 void *virt; 7273 MAILBOX_t *mb; 7274 7275 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7276 if (!mbox) { 7277 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS, 7278 "7205 failed to allocate mailbox memory"); 7279 return 1; 7280 } 7281 7282 if (lpfc_sli4_dump_page_a0(phba, mbox)) 7283 goto sfp_fail; 7284 mp = mbox->ctx_buf; 7285 mpsave = mp; 7286 virt = mp->virt; 7287 if (phba->sli_rev < LPFC_SLI_REV4) { 7288 mb = &mbox->u.mb; 7289 mb->un.varDmp.cv = 1; 7290 mb->un.varDmp.co = 1; 7291 mb->un.varWords[2] = 0; 7292 mb->un.varWords[3] = DMP_SFF_PAGE_A0_SIZE / 4; 7293 mb->un.varWords[4] = 0; 7294 mb->un.varWords[5] = 0; 7295 mb->un.varWords[6] = 0; 7296 mb->un.varWords[7] = 0; 7297 mb->un.varWords[8] = 0; 7298 mb->un.varWords[9] = 0; 7299 mb->un.varWords[10] = 0; 7300 mbox->in_ext_byte_len = DMP_SFF_PAGE_A0_SIZE; 7301 mbox->out_ext_byte_len = DMP_SFF_PAGE_A0_SIZE; 7302 mbox->mbox_offset_word = 5; 7303 mbox->ctx_buf = virt; 7304 } else { 7305 bf_set(lpfc_mbx_memory_dump_type3_length, 7306 &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A0_SIZE); 7307 mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys); 7308 mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys); 7309 } 7310 mbox->vport = phba->pport; 7311 mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context; 7312 7313 rc = lpfc_sli_issue_mbox_wait(phba, mbox, 30); 7314 if (rc == MBX_NOT_FINISHED) { 
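		/* Page A0 dump mailbox did not complete within the 30 second
		 * wait; bail out and report the failure to the caller.
		 */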
7315 rc = 1; 7316 goto error; 7317 } 7318 7319 if (phba->sli_rev == LPFC_SLI_REV4) 7320 mp = (struct lpfc_dmabuf *)(mbox->ctx_buf); 7321 else 7322 mp = mpsave; 7323 7324 if (bf_get(lpfc_mqe_status, &mbox->u.mqe)) { 7325 rc = 1; 7326 goto error; 7327 } 7328 7329 lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a0, 7330 DMP_SFF_PAGE_A0_SIZE); 7331 7332 memset(mbox, 0, sizeof(*mbox)); 7333 memset(mp->virt, 0, DMP_SFF_PAGE_A2_SIZE); 7334 INIT_LIST_HEAD(&mp->list); 7335 7336 /* save address for completion */ 7337 mbox->ctx_buf = mp; 7338 mbox->vport = phba->pport; 7339 7340 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_DUMP_MEMORY); 7341 bf_set(lpfc_mbx_memory_dump_type3_type, 7342 &mbox->u.mqe.un.mem_dump_type3, DMP_LMSD); 7343 bf_set(lpfc_mbx_memory_dump_type3_link, 7344 &mbox->u.mqe.un.mem_dump_type3, phba->sli4_hba.physical_port); 7345 bf_set(lpfc_mbx_memory_dump_type3_page_no, 7346 &mbox->u.mqe.un.mem_dump_type3, DMP_PAGE_A2); 7347 if (phba->sli_rev < LPFC_SLI_REV4) { 7348 mb = &mbox->u.mb; 7349 mb->un.varDmp.cv = 1; 7350 mb->un.varDmp.co = 1; 7351 mb->un.varWords[2] = 0; 7352 mb->un.varWords[3] = DMP_SFF_PAGE_A2_SIZE / 4; 7353 mb->un.varWords[4] = 0; 7354 mb->un.varWords[5] = 0; 7355 mb->un.varWords[6] = 0; 7356 mb->un.varWords[7] = 0; 7357 mb->un.varWords[8] = 0; 7358 mb->un.varWords[9] = 0; 7359 mb->un.varWords[10] = 0; 7360 mbox->in_ext_byte_len = DMP_SFF_PAGE_A2_SIZE; 7361 mbox->out_ext_byte_len = DMP_SFF_PAGE_A2_SIZE; 7362 mbox->mbox_offset_word = 5; 7363 mbox->ctx_buf = virt; 7364 } else { 7365 bf_set(lpfc_mbx_memory_dump_type3_length, 7366 &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A2_SIZE); 7367 mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys); 7368 mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys); 7369 } 7370 7371 mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context; 7372 rc = lpfc_sli_issue_mbox_wait(phba, mbox, 30); 7373 if (bf_get(lpfc_mqe_status, &mbox->u.mqe)) { 7374 rc = 1; 7375 goto error; 7376 } 7377 rc = 0; 7378 7379 lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a2, 7380 DMP_SFF_PAGE_A2_SIZE); 7381 7382 error: 7383 mbox->ctx_buf = mpsave; 7384 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 7385 7386 return rc; 7387 7388 sfp_fail: 7389 mempool_free(mbox, phba->mbox_mem_pool); 7390 return 1; 7391 } 7392 7393 /* 7394 * lpfc_els_rcv_rdp - Process an unsolicited RDP ELS. 7395 * @vport: pointer to a host virtual N_Port data structure. 7396 * @cmdiocb: pointer to lpfc command iocb data structure. 7397 * @ndlp: pointer to a node-list data structure. 7398 * 7399 * This routine processes an unsolicited RDP(Read Diagnostic Parameters) 7400 * IOCB. First, the payload of the unsolicited RDP is checked. 7401 * Then it will (1) send MBX_DUMP_MEMORY, Embedded DMP_LMSD sub command TYPE-3 7402 * for Page A0, (2) send MBX_DUMP_MEMORY, DMP_LMSD for Page A2, 7403 * (3) send MBX_READ_LNK_STAT to get link stat, (4) Call lpfc_els_rdp_cmpl 7404 * gather all data and send RDP response. 7405 * 7406 * Return code 7407 * 0 - Sent the acc response 7408 * 1 - Sent the reject response. 
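 *
 * Note: RDP is only supported on SLI-4 FC ports with interface type 2 or
 * later; otherwise the request is rejected with reason code
 * LSRJT_UNABLE_TPC and explanation LSEXP_REQ_UNSUPPORTED.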
7409 */ 7410 static int 7411 lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7412 struct lpfc_nodelist *ndlp) 7413 { 7414 struct lpfc_hba *phba = vport->phba; 7415 struct lpfc_dmabuf *pcmd; 7416 uint8_t rjt_err, rjt_expl = LSEXP_NOTHING_MORE; 7417 struct fc_rdp_req_frame *rdp_req; 7418 struct lpfc_rdp_context *rdp_context; 7419 union lpfc_wqe128 *cmd = NULL; 7420 struct ls_rjt stat; 7421 7422 if (phba->sli_rev < LPFC_SLI_REV4 || 7423 bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 7424 LPFC_SLI_INTF_IF_TYPE_2) { 7425 rjt_err = LSRJT_UNABLE_TPC; 7426 rjt_expl = LSEXP_REQ_UNSUPPORTED; 7427 goto error; 7428 } 7429 7430 if (phba->sli_rev < LPFC_SLI_REV4 || (phba->hba_flag & HBA_FCOE_MODE)) { 7431 rjt_err = LSRJT_UNABLE_TPC; 7432 rjt_expl = LSEXP_REQ_UNSUPPORTED; 7433 goto error; 7434 } 7435 7436 pcmd = cmdiocb->cmd_dmabuf; 7437 rdp_req = (struct fc_rdp_req_frame *) pcmd->virt; 7438 7439 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7440 "2422 ELS RDP Request " 7441 "dec len %d tag x%x port_id %d len %d\n", 7442 be32_to_cpu(rdp_req->rdp_des_length), 7443 be32_to_cpu(rdp_req->nport_id_desc.tag), 7444 be32_to_cpu(rdp_req->nport_id_desc.nport_id), 7445 be32_to_cpu(rdp_req->nport_id_desc.length)); 7446 7447 if (sizeof(struct fc_rdp_nport_desc) != 7448 be32_to_cpu(rdp_req->rdp_des_length)) 7449 goto rjt_logerr; 7450 if (RDP_N_PORT_DESC_TAG != be32_to_cpu(rdp_req->nport_id_desc.tag)) 7451 goto rjt_logerr; 7452 if (RDP_NPORT_ID_SIZE != 7453 be32_to_cpu(rdp_req->nport_id_desc.length)) 7454 goto rjt_logerr; 7455 rdp_context = kzalloc(sizeof(struct lpfc_rdp_context), GFP_KERNEL); 7456 if (!rdp_context) { 7457 rjt_err = LSRJT_UNABLE_TPC; 7458 goto error; 7459 } 7460 7461 cmd = &cmdiocb->wqe; 7462 rdp_context->ndlp = lpfc_nlp_get(ndlp); 7463 if (!rdp_context->ndlp) { 7464 kfree(rdp_context); 7465 rjt_err = LSRJT_UNABLE_TPC; 7466 goto error; 7467 } 7468 rdp_context->ox_id = bf_get(wqe_rcvoxid, 7469 &cmd->xmit_els_rsp.wqe_com); 7470 rdp_context->rx_id = bf_get(wqe_ctxt_tag, 7471 &cmd->xmit_els_rsp.wqe_com); 7472 rdp_context->cmpl = lpfc_els_rdp_cmpl; 7473 if (lpfc_get_rdp_info(phba, rdp_context)) { 7474 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_ELS, 7475 "2423 Unable to send mailbox"); 7476 kfree(rdp_context); 7477 rjt_err = LSRJT_UNABLE_TPC; 7478 lpfc_nlp_put(ndlp); 7479 goto error; 7480 } 7481 7482 return 0; 7483 7484 rjt_logerr: 7485 rjt_err = LSRJT_LOGICAL_ERR; 7486 7487 error: 7488 memset(&stat, 0, sizeof(stat)); 7489 stat.un.b.lsRjtRsnCode = rjt_err; 7490 stat.un.b.lsRjtRsnCodeExp = rjt_expl; 7491 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 7492 return 1; 7493 } 7494 7495 7496 static void 7497 lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 7498 { 7499 MAILBOX_t *mb; 7500 IOCB_t *icmd; 7501 union lpfc_wqe128 *wqe; 7502 uint8_t *pcmd; 7503 struct lpfc_iocbq *elsiocb; 7504 struct lpfc_nodelist *ndlp; 7505 struct ls_rjt *stat; 7506 union lpfc_sli4_cfg_shdr *shdr; 7507 struct lpfc_lcb_context *lcb_context; 7508 struct fc_lcb_res_frame *lcb_res; 7509 uint32_t cmdsize, shdr_status, shdr_add_status; 7510 int rc; 7511 7512 mb = &pmb->u.mb; 7513 lcb_context = (struct lpfc_lcb_context *)pmb->ctx_ndlp; 7514 ndlp = lcb_context->ndlp; 7515 pmb->ctx_ndlp = NULL; 7516 pmb->ctx_buf = NULL; 7517 7518 shdr = (union lpfc_sli4_cfg_shdr *) 7519 &pmb->u.mqe.un.beacon_config.header.cfg_shdr; 7520 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 7521 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 7522 7523 
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX, 7524 "0194 SET_BEACON_CONFIG mailbox " 7525 "completed with status x%x add_status x%x," 7526 " mbx status x%x\n", 7527 shdr_status, shdr_add_status, mb->mbxStatus); 7528 7529 if ((mb->mbxStatus != MBX_SUCCESS) || shdr_status || 7530 (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) || 7531 (shdr_add_status == ADD_STATUS_INVALID_REQUEST)) { 7532 mempool_free(pmb, phba->mbox_mem_pool); 7533 goto error; 7534 } 7535 7536 mempool_free(pmb, phba->mbox_mem_pool); 7537 cmdsize = sizeof(struct fc_lcb_res_frame); 7538 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 7539 lpfc_max_els_tries, ndlp, 7540 ndlp->nlp_DID, ELS_CMD_ACC); 7541 7542 /* Decrement the ndlp reference count from previous mbox command */ 7543 lpfc_nlp_put(ndlp); 7544 7545 if (!elsiocb) 7546 goto free_lcb_context; 7547 7548 lcb_res = (struct fc_lcb_res_frame *)elsiocb->cmd_dmabuf->virt; 7549 7550 memset(lcb_res, 0, sizeof(struct fc_lcb_res_frame)); 7551 7552 if (phba->sli_rev == LPFC_SLI_REV4) { 7553 wqe = &elsiocb->wqe; 7554 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, lcb_context->rx_id); 7555 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 7556 lcb_context->ox_id); 7557 } else { 7558 icmd = &elsiocb->iocb; 7559 icmd->ulpContext = lcb_context->rx_id; 7560 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; 7561 } 7562 7563 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 7564 *((uint32_t *)(pcmd)) = ELS_CMD_ACC; 7565 lcb_res->lcb_sub_command = lcb_context->sub_command; 7566 lcb_res->lcb_type = lcb_context->type; 7567 lcb_res->capability = lcb_context->capability; 7568 lcb_res->lcb_frequency = lcb_context->frequency; 7569 lcb_res->lcb_duration = lcb_context->duration; 7570 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 7571 phba->fc_stat.elsXmitACC++; 7572 7573 elsiocb->ndlp = lpfc_nlp_get(ndlp); 7574 if (!elsiocb->ndlp) { 7575 lpfc_els_free_iocb(phba, elsiocb); 7576 goto out; 7577 } 7578 7579 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7580 if (rc == IOCB_ERROR) { 7581 lpfc_els_free_iocb(phba, elsiocb); 7582 lpfc_nlp_put(ndlp); 7583 } 7584 out: 7585 kfree(lcb_context); 7586 return; 7587 7588 error: 7589 cmdsize = sizeof(struct fc_lcb_res_frame); 7590 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 7591 lpfc_max_els_tries, ndlp, 7592 ndlp->nlp_DID, ELS_CMD_LS_RJT); 7593 lpfc_nlp_put(ndlp); 7594 if (!elsiocb) 7595 goto free_lcb_context; 7596 7597 if (phba->sli_rev == LPFC_SLI_REV4) { 7598 wqe = &elsiocb->wqe; 7599 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, lcb_context->rx_id); 7600 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 7601 lcb_context->ox_id); 7602 } else { 7603 icmd = &elsiocb->iocb; 7604 icmd->ulpContext = lcb_context->rx_id; 7605 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; 7606 } 7607 7608 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 7609 7610 *((uint32_t *)(pcmd)) = ELS_CMD_LS_RJT; 7611 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t)); 7612 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 7613 7614 if (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) 7615 stat->un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS; 7616 7617 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 7618 phba->fc_stat.elsXmitLSRJT++; 7619 elsiocb->ndlp = lpfc_nlp_get(ndlp); 7620 if (!elsiocb->ndlp) { 7621 lpfc_els_free_iocb(phba, elsiocb); 7622 goto free_lcb_context; 7623 } 7624 7625 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7626 if (rc == IOCB_ERROR) { 7627 lpfc_els_free_iocb(phba, elsiocb); 7628 lpfc_nlp_put(ndlp); 7629 } 7630 free_lcb_context: 7631 kfree(lcb_context); 7632 } 
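/**
 * lpfc_sli4_set_beacon - Issue a SET_BEACON_CONFIG mailbox for an LCB request
 * @vport: pointer to a host virtual N_Port data structure.
 * @lcb_context: pointer to the LCB context built from the unsolicited LCB.
 * @beacon_state: 1 to turn the beacon on, 0 to turn it off.
 *
 * This routine builds and issues a COMMON_SET_BEACON_CONFIG mailbox command
 * for the physical port. Version 1 of the command is used when the adapter
 * reports the bv1s capability and allows a beacon duration to be specified;
 * otherwise version 0 is used and any request with a non-zero duration is
 * failed. The mailbox completes asynchronously in lpfc_els_lcb_rsp(), which
 * sends the ELS ACC or LS_RJT for the LCB.
 *
 * Return code
 *   0 - Mailbox successfully issued
 *   1 - Failed to allocate or issue the mailbox command
 **/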
7633 7634 static int 7635 lpfc_sli4_set_beacon(struct lpfc_vport *vport, 7636 struct lpfc_lcb_context *lcb_context, 7637 uint32_t beacon_state) 7638 { 7639 struct lpfc_hba *phba = vport->phba; 7640 union lpfc_sli4_cfg_shdr *cfg_shdr; 7641 LPFC_MBOXQ_t *mbox = NULL; 7642 uint32_t len; 7643 int rc; 7644 7645 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7646 if (!mbox) 7647 return 1; 7648 7649 cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr; 7650 len = sizeof(struct lpfc_mbx_set_beacon_config) - 7651 sizeof(struct lpfc_sli4_cfg_mhdr); 7652 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 7653 LPFC_MBOX_OPCODE_SET_BEACON_CONFIG, len, 7654 LPFC_SLI4_MBX_EMBED); 7655 mbox->ctx_ndlp = (void *)lcb_context; 7656 mbox->vport = phba->pport; 7657 mbox->mbox_cmpl = lpfc_els_lcb_rsp; 7658 bf_set(lpfc_mbx_set_beacon_port_num, &mbox->u.mqe.un.beacon_config, 7659 phba->sli4_hba.physical_port); 7660 bf_set(lpfc_mbx_set_beacon_state, &mbox->u.mqe.un.beacon_config, 7661 beacon_state); 7662 mbox->u.mqe.un.beacon_config.word5 = 0; /* Reserved */ 7663 7664 /* 7665 * Check bv1s bit before issuing the mailbox 7666 * if bv1s == 1, LCB V1 supported 7667 * else, LCB V0 supported 7668 */ 7669 7670 if (phba->sli4_hba.pc_sli4_params.bv1s) { 7671 /* COMMON_SET_BEACON_CONFIG_V1 */ 7672 cfg_shdr->request.word9 = BEACON_VERSION_V1; 7673 lcb_context->capability |= LCB_CAPABILITY_DURATION; 7674 bf_set(lpfc_mbx_set_beacon_port_type, 7675 &mbox->u.mqe.un.beacon_config, 0); 7676 bf_set(lpfc_mbx_set_beacon_duration_v1, 7677 &mbox->u.mqe.un.beacon_config, 7678 be16_to_cpu(lcb_context->duration)); 7679 } else { 7680 /* COMMON_SET_BEACON_CONFIG_V0 */ 7681 if (be16_to_cpu(lcb_context->duration) != 0) { 7682 mempool_free(mbox, phba->mbox_mem_pool); 7683 return 1; 7684 } 7685 cfg_shdr->request.word9 = BEACON_VERSION_V0; 7686 lcb_context->capability &= ~(LCB_CAPABILITY_DURATION); 7687 bf_set(lpfc_mbx_set_beacon_state, 7688 &mbox->u.mqe.un.beacon_config, beacon_state); 7689 bf_set(lpfc_mbx_set_beacon_port_type, 7690 &mbox->u.mqe.un.beacon_config, 1); 7691 bf_set(lpfc_mbx_set_beacon_duration, 7692 &mbox->u.mqe.un.beacon_config, 7693 be16_to_cpu(lcb_context->duration)); 7694 } 7695 7696 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 7697 if (rc == MBX_NOT_FINISHED) { 7698 mempool_free(mbox, phba->mbox_mem_pool); 7699 return 1; 7700 } 7701 7702 return 0; 7703 } 7704 7705 7706 /** 7707 * lpfc_els_rcv_lcb - Process an unsolicited LCB 7708 * @vport: pointer to a host virtual N_Port data structure. 7709 * @cmdiocb: pointer to lpfc command iocb data structure. 7710 * @ndlp: pointer to a node-list data structure. 7711 * 7712 * This routine processes an unsolicited LCB(LINK CABLE BEACON) IOCB. 7713 * First, the payload of the unsolicited LCB is checked. 7714 * Then based on Subcommand beacon will either turn on or off. 7715 * 7716 * Return code 7717 * 0 - Sent the acc response 7718 * 1 - Sent the reject response. 
7719 **/ 7720 static int 7721 lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7722 struct lpfc_nodelist *ndlp) 7723 { 7724 struct lpfc_hba *phba = vport->phba; 7725 struct lpfc_dmabuf *pcmd; 7726 uint8_t *lp; 7727 struct fc_lcb_request_frame *beacon; 7728 struct lpfc_lcb_context *lcb_context; 7729 u8 state, rjt_err = 0; 7730 struct ls_rjt stat; 7731 7732 pcmd = cmdiocb->cmd_dmabuf; 7733 lp = (uint8_t *)pcmd->virt; 7734 beacon = (struct fc_lcb_request_frame *)pcmd->virt; 7735 7736 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7737 "0192 ELS LCB Data x%x x%x x%x x%x sub x%x " 7738 "type x%x frequency %x duration x%x\n", 7739 lp[0], lp[1], lp[2], 7740 beacon->lcb_command, 7741 beacon->lcb_sub_command, 7742 beacon->lcb_type, 7743 beacon->lcb_frequency, 7744 be16_to_cpu(beacon->lcb_duration)); 7745 7746 if (beacon->lcb_sub_command != LPFC_LCB_ON && 7747 beacon->lcb_sub_command != LPFC_LCB_OFF) { 7748 rjt_err = LSRJT_CMD_UNSUPPORTED; 7749 goto rjt; 7750 } 7751 7752 if (phba->sli_rev < LPFC_SLI_REV4 || 7753 phba->hba_flag & HBA_FCOE_MODE || 7754 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 7755 LPFC_SLI_INTF_IF_TYPE_2)) { 7756 rjt_err = LSRJT_CMD_UNSUPPORTED; 7757 goto rjt; 7758 } 7759 7760 lcb_context = kmalloc(sizeof(*lcb_context), GFP_KERNEL); 7761 if (!lcb_context) { 7762 rjt_err = LSRJT_UNABLE_TPC; 7763 goto rjt; 7764 } 7765 7766 state = (beacon->lcb_sub_command == LPFC_LCB_ON) ? 1 : 0; 7767 lcb_context->sub_command = beacon->lcb_sub_command; 7768 lcb_context->capability = 0; 7769 lcb_context->type = beacon->lcb_type; 7770 lcb_context->frequency = beacon->lcb_frequency; 7771 lcb_context->duration = beacon->lcb_duration; 7772 lcb_context->ox_id = get_job_rcvoxid(phba, cmdiocb); 7773 lcb_context->rx_id = get_job_ulpcontext(phba, cmdiocb); 7774 lcb_context->ndlp = lpfc_nlp_get(ndlp); 7775 if (!lcb_context->ndlp) { 7776 rjt_err = LSRJT_UNABLE_TPC; 7777 goto rjt_free; 7778 } 7779 7780 if (lpfc_sli4_set_beacon(vport, lcb_context, state)) { 7781 lpfc_printf_vlog(ndlp->vport, KERN_ERR, LOG_TRACE_EVENT, 7782 "0193 failed to send mail box"); 7783 lpfc_nlp_put(ndlp); 7784 rjt_err = LSRJT_UNABLE_TPC; 7785 goto rjt_free; 7786 } 7787 return 0; 7788 7789 rjt_free: 7790 kfree(lcb_context); 7791 rjt: 7792 memset(&stat, 0, sizeof(stat)); 7793 stat.un.b.lsRjtRsnCode = rjt_err; 7794 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 7795 return 1; 7796 } 7797 7798 7799 /** 7800 * lpfc_els_flush_rscn - Clean up any rscn activities with a vport 7801 * @vport: pointer to a host virtual N_Port data structure. 7802 * 7803 * This routine cleans up any Registration State Change Notification 7804 * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the 7805 * @vport together with the host_lock is used to prevent multiple thread 7806 * trying to access the RSCN array on a same @vport at the same time. 
7807 **/ 7808 void 7809 lpfc_els_flush_rscn(struct lpfc_vport *vport) 7810 { 7811 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7812 struct lpfc_hba *phba = vport->phba; 7813 int i; 7814 7815 spin_lock_irq(shost->host_lock); 7816 if (vport->fc_rscn_flush) { 7817 /* Another thread is walking fc_rscn_id_list on this vport */ 7818 spin_unlock_irq(shost->host_lock); 7819 return; 7820 } 7821 /* Indicate we are walking lpfc_els_flush_rscn on this vport */ 7822 vport->fc_rscn_flush = 1; 7823 spin_unlock_irq(shost->host_lock); 7824 7825 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 7826 lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]); 7827 vport->fc_rscn_id_list[i] = NULL; 7828 } 7829 spin_lock_irq(shost->host_lock); 7830 vport->fc_rscn_id_cnt = 0; 7831 vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY); 7832 spin_unlock_irq(shost->host_lock); 7833 lpfc_can_disctmo(vport); 7834 /* Indicate we are done walking this fc_rscn_id_list */ 7835 vport->fc_rscn_flush = 0; 7836 } 7837 7838 /** 7839 * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did 7840 * @vport: pointer to a host virtual N_Port data structure. 7841 * @did: remote destination port identifier. 7842 * 7843 * This routine checks whether there is any pending Registration State 7844 * Configuration Notification (RSCN) to a @did on @vport. 7845 * 7846 * Return code 7847 * None zero - The @did matched with a pending rscn 7848 * 0 - not able to match @did with a pending rscn 7849 **/ 7850 int 7851 lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did) 7852 { 7853 D_ID ns_did; 7854 D_ID rscn_did; 7855 uint32_t *lp; 7856 uint32_t payload_len, i; 7857 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7858 7859 ns_did.un.word = did; 7860 7861 /* Never match fabric nodes for RSCNs */ 7862 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 7863 return 0; 7864 7865 /* If we are doing a FULL RSCN rediscovery, match everything */ 7866 if (vport->fc_flag & FC_RSCN_DISCOVERY) 7867 return did; 7868 7869 spin_lock_irq(shost->host_lock); 7870 if (vport->fc_rscn_flush) { 7871 /* Another thread is walking fc_rscn_id_list on this vport */ 7872 spin_unlock_irq(shost->host_lock); 7873 return 0; 7874 } 7875 /* Indicate we are walking fc_rscn_id_list on this vport */ 7876 vport->fc_rscn_flush = 1; 7877 spin_unlock_irq(shost->host_lock); 7878 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 7879 lp = vport->fc_rscn_id_list[i]->virt; 7880 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); 7881 payload_len -= sizeof(uint32_t); /* take off word 0 */ 7882 while (payload_len) { 7883 rscn_did.un.word = be32_to_cpu(*lp++); 7884 payload_len -= sizeof(uint32_t); 7885 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) { 7886 case RSCN_ADDRESS_FORMAT_PORT: 7887 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 7888 && (ns_did.un.b.area == rscn_did.un.b.area) 7889 && (ns_did.un.b.id == rscn_did.un.b.id)) 7890 goto return_did_out; 7891 break; 7892 case RSCN_ADDRESS_FORMAT_AREA: 7893 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 7894 && (ns_did.un.b.area == rscn_did.un.b.area)) 7895 goto return_did_out; 7896 break; 7897 case RSCN_ADDRESS_FORMAT_DOMAIN: 7898 if (ns_did.un.b.domain == rscn_did.un.b.domain) 7899 goto return_did_out; 7900 break; 7901 case RSCN_ADDRESS_FORMAT_FABRIC: 7902 goto return_did_out; 7903 } 7904 } 7905 } 7906 /* Indicate we are done with walking fc_rscn_id_list on this vport */ 7907 vport->fc_rscn_flush = 0; 7908 return 0; 7909 return_did_out: 7910 /* Indicate we are done with walking fc_rscn_id_list on this 
vport */ 7911 vport->fc_rscn_flush = 0; 7912 return did; 7913 } 7914 7915 /** 7916 * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn 7917 * @vport: pointer to a host virtual N_Port data structure. 7918 * 7919 * This routine sends recovery (NLP_EVT_DEVICE_RECOVERY) event to the 7920 * state machine for a @vport's nodes that are with pending RSCN (Registration 7921 * State Change Notification). 7922 * 7923 * Return code 7924 * 0 - Successful (currently alway return 0) 7925 **/ 7926 static int 7927 lpfc_rscn_recovery_check(struct lpfc_vport *vport) 7928 { 7929 struct lpfc_nodelist *ndlp = NULL, *n; 7930 7931 /* Move all affected nodes by pending RSCNs to NPR state. */ 7932 list_for_each_entry_safe(ndlp, n, &vport->fc_nodes, nlp_listp) { 7933 if ((ndlp->nlp_state == NLP_STE_UNUSED_NODE) || 7934 !lpfc_rscn_payload_check(vport, ndlp->nlp_DID)) 7935 continue; 7936 7937 /* NVME Target mode does not do RSCN Recovery. */ 7938 if (vport->phba->nvmet_support) 7939 continue; 7940 7941 /* If we are in the process of doing discovery on this 7942 * NPort, let it continue on its own. 7943 */ 7944 switch (ndlp->nlp_state) { 7945 case NLP_STE_PLOGI_ISSUE: 7946 case NLP_STE_ADISC_ISSUE: 7947 case NLP_STE_REG_LOGIN_ISSUE: 7948 case NLP_STE_PRLI_ISSUE: 7949 case NLP_STE_LOGO_ISSUE: 7950 continue; 7951 } 7952 7953 lpfc_disc_state_machine(vport, ndlp, NULL, 7954 NLP_EVT_DEVICE_RECOVERY); 7955 lpfc_cancel_retry_delay_tmo(vport, ndlp); 7956 } 7957 return 0; 7958 } 7959 7960 /** 7961 * lpfc_send_rscn_event - Send an RSCN event to management application 7962 * @vport: pointer to a host virtual N_Port data structure. 7963 * @cmdiocb: pointer to lpfc command iocb data structure. 7964 * 7965 * lpfc_send_rscn_event sends an RSCN netlink event to management 7966 * applications. 7967 */ 7968 static void 7969 lpfc_send_rscn_event(struct lpfc_vport *vport, 7970 struct lpfc_iocbq *cmdiocb) 7971 { 7972 struct lpfc_dmabuf *pcmd; 7973 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7974 uint32_t *payload_ptr; 7975 uint32_t payload_len; 7976 struct lpfc_rscn_event_header *rscn_event_data; 7977 7978 pcmd = cmdiocb->cmd_dmabuf; 7979 payload_ptr = (uint32_t *) pcmd->virt; 7980 payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK); 7981 7982 rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) + 7983 payload_len, GFP_KERNEL); 7984 if (!rscn_event_data) { 7985 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 7986 "0147 Failed to allocate memory for RSCN event\n"); 7987 return; 7988 } 7989 rscn_event_data->event_type = FC_REG_RSCN_EVENT; 7990 rscn_event_data->payload_length = payload_len; 7991 memcpy(rscn_event_data->rscn_payload, payload_ptr, 7992 payload_len); 7993 7994 fc_host_post_vendor_event(shost, 7995 fc_get_event_number(), 7996 sizeof(struct lpfc_rscn_event_header) + payload_len, 7997 (char *)rscn_event_data, 7998 LPFC_NL_VENDOR_ID); 7999 8000 kfree(rscn_event_data); 8001 } 8002 8003 /** 8004 * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb 8005 * @vport: pointer to a host virtual N_Port data structure. 8006 * @cmdiocb: pointer to lpfc command iocb data structure. 8007 * @ndlp: pointer to a node-list data structure. 8008 * 8009 * This routine processes an unsolicited RSCN (Registration State Change 8010 * Notification) IOCB. First, the payload of the unsolicited RSCN is walked 8011 * to invoke fc_host_post_event() routine to the FC transport layer. 
If the 8012 * discover state machine is about to begin discovery, it just accepts the 8013 * RSCN and the discovery process will satisfy the RSCN. If this RSCN only 8014 * contains N_Port IDs for other vports on this HBA, it just accepts the 8015 * RSCN and ignore processing it. If the state machine is in the recovery 8016 * state, the fc_rscn_id_list of this @vport is walked and the 8017 * lpfc_rscn_recovery_check() routine is invoked to send recovery event for 8018 * all nodes that match RSCN payload. Otherwise, the lpfc_els_handle_rscn() 8019 * routine is invoked to handle the RSCN event. 8020 * 8021 * Return code 8022 * 0 - Just sent the acc response 8023 * 1 - Sent the acc response and waited for name server completion 8024 **/ 8025 static int 8026 lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8027 struct lpfc_nodelist *ndlp) 8028 { 8029 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 8030 struct lpfc_hba *phba = vport->phba; 8031 struct lpfc_dmabuf *pcmd; 8032 uint32_t *lp, *datap; 8033 uint32_t payload_len, length, nportid, *cmd; 8034 int rscn_cnt; 8035 int rscn_id = 0, hba_id = 0; 8036 int i, tmo; 8037 8038 pcmd = cmdiocb->cmd_dmabuf; 8039 lp = (uint32_t *) pcmd->virt; 8040 8041 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); 8042 payload_len -= sizeof(uint32_t); /* take off word 0 */ 8043 /* RSCN received */ 8044 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 8045 "0214 RSCN received Data: x%x x%x x%x x%x\n", 8046 vport->fc_flag, payload_len, *lp, 8047 vport->fc_rscn_id_cnt); 8048 8049 /* Send an RSCN event to the management application */ 8050 lpfc_send_rscn_event(vport, cmdiocb); 8051 8052 for (i = 0; i < payload_len/sizeof(uint32_t); i++) 8053 fc_host_post_event(shost, fc_get_event_number(), 8054 FCH_EVT_RSCN, lp[i]); 8055 8056 /* Check if RSCN is coming from a direct-connected remote NPort */ 8057 if (vport->fc_flag & FC_PT2PT) { 8058 /* If so, just ACC it, no other action needed for now */ 8059 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8060 "2024 pt2pt RSCN %08x Data: x%x x%x\n", 8061 *lp, vport->fc_flag, payload_len); 8062 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8063 8064 /* Check to see if we need to NVME rescan this target 8065 * remoteport. 8066 */ 8067 if (ndlp->nlp_fc4_type & NLP_FC4_NVME && 8068 ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY)) 8069 lpfc_nvme_rescan_port(vport, ndlp); 8070 return 0; 8071 } 8072 8073 /* If we are about to begin discovery, just ACC the RSCN. 8074 * Discovery processing will satisfy it. 8075 */ 8076 if (vport->port_state <= LPFC_NS_QRY) { 8077 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8078 "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x", 8079 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 8080 8081 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8082 return 0; 8083 } 8084 8085 /* If this RSCN just contains NPortIDs for other vports on this HBA, 8086 * just ACC and ignore it. 
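	 * Each N_Port ID in the payload is looked up with
	 * lpfc_find_vport_by_did(); if every ID resolves to a local vport,
	 * the RSCN is ACC'd and the discovery timer is restarted if it is
	 * already running.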
8087 */ 8088 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 8089 !(vport->cfg_peer_port_login)) { 8090 i = payload_len; 8091 datap = lp; 8092 while (i > 0) { 8093 nportid = *datap++; 8094 nportid = ((be32_to_cpu(nportid)) & Mask_DID); 8095 i -= sizeof(uint32_t); 8096 rscn_id++; 8097 if (lpfc_find_vport_by_did(phba, nportid)) 8098 hba_id++; 8099 } 8100 if (rscn_id == hba_id) { 8101 /* ALL NPortIDs in RSCN are on HBA */ 8102 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 8103 "0219 Ignore RSCN " 8104 "Data: x%x x%x x%x x%x\n", 8105 vport->fc_flag, payload_len, 8106 *lp, vport->fc_rscn_id_cnt); 8107 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8108 "RCV RSCN vport: did:x%x/ste:x%x flg:x%x", 8109 ndlp->nlp_DID, vport->port_state, 8110 ndlp->nlp_flag); 8111 8112 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, 8113 ndlp, NULL); 8114 /* Restart disctmo if its already running */ 8115 if (vport->fc_flag & FC_DISC_TMO) { 8116 tmo = ((phba->fc_ratov * 3) + 3); 8117 mod_timer(&vport->fc_disctmo, 8118 jiffies + 8119 msecs_to_jiffies(1000 * tmo)); 8120 } 8121 return 0; 8122 } 8123 } 8124 8125 spin_lock_irq(shost->host_lock); 8126 if (vport->fc_rscn_flush) { 8127 /* Another thread is walking fc_rscn_id_list on this vport */ 8128 vport->fc_flag |= FC_RSCN_DISCOVERY; 8129 spin_unlock_irq(shost->host_lock); 8130 /* Send back ACC */ 8131 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8132 return 0; 8133 } 8134 /* Indicate we are walking fc_rscn_id_list on this vport */ 8135 vport->fc_rscn_flush = 1; 8136 spin_unlock_irq(shost->host_lock); 8137 /* Get the array count after successfully have the token */ 8138 rscn_cnt = vport->fc_rscn_id_cnt; 8139 /* If we are already processing an RSCN, save the received 8140 * RSCN payload buffer, cmdiocb->cmd_dmabuf to process later. 8141 */ 8142 if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) { 8143 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8144 "RCV RSCN defer: did:x%x/ste:x%x flg:x%x", 8145 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 8146 8147 spin_lock_irq(shost->host_lock); 8148 vport->fc_flag |= FC_RSCN_DEFERRED; 8149 8150 /* Restart disctmo if its already running */ 8151 if (vport->fc_flag & FC_DISC_TMO) { 8152 tmo = ((phba->fc_ratov * 3) + 3); 8153 mod_timer(&vport->fc_disctmo, 8154 jiffies + msecs_to_jiffies(1000 * tmo)); 8155 } 8156 if ((rscn_cnt < FC_MAX_HOLD_RSCN) && 8157 !(vport->fc_flag & FC_RSCN_DISCOVERY)) { 8158 vport->fc_flag |= FC_RSCN_MODE; 8159 spin_unlock_irq(shost->host_lock); 8160 if (rscn_cnt) { 8161 cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt; 8162 length = be32_to_cpu(*cmd & ~ELS_CMD_MASK); 8163 } 8164 if ((rscn_cnt) && 8165 (payload_len + length <= LPFC_BPL_SIZE)) { 8166 *cmd &= ELS_CMD_MASK; 8167 *cmd |= cpu_to_be32(payload_len + length); 8168 memcpy(((uint8_t *)cmd) + length, lp, 8169 payload_len); 8170 } else { 8171 vport->fc_rscn_id_list[rscn_cnt] = pcmd; 8172 vport->fc_rscn_id_cnt++; 8173 /* If we zero, cmdiocb->cmd_dmabuf, the calling 8174 * routine will not try to free it. 
8175 */ 8176 cmdiocb->cmd_dmabuf = NULL; 8177 } 8178 /* Deferred RSCN */ 8179 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 8180 "0235 Deferred RSCN " 8181 "Data: x%x x%x x%x\n", 8182 vport->fc_rscn_id_cnt, vport->fc_flag, 8183 vport->port_state); 8184 } else { 8185 vport->fc_flag |= FC_RSCN_DISCOVERY; 8186 spin_unlock_irq(shost->host_lock); 8187 /* ReDiscovery RSCN */ 8188 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 8189 "0234 ReDiscovery RSCN " 8190 "Data: x%x x%x x%x\n", 8191 vport->fc_rscn_id_cnt, vport->fc_flag, 8192 vport->port_state); 8193 } 8194 /* Indicate we are done walking fc_rscn_id_list on this vport */ 8195 vport->fc_rscn_flush = 0; 8196 /* Send back ACC */ 8197 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8198 /* send RECOVERY event for ALL nodes that match RSCN payload */ 8199 lpfc_rscn_recovery_check(vport); 8200 return 0; 8201 } 8202 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8203 "RCV RSCN: did:x%x/ste:x%x flg:x%x", 8204 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 8205 8206 spin_lock_irq(shost->host_lock); 8207 vport->fc_flag |= FC_RSCN_MODE; 8208 spin_unlock_irq(shost->host_lock); 8209 vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd; 8210 /* Indicate we are done walking fc_rscn_id_list on this vport */ 8211 vport->fc_rscn_flush = 0; 8212 /* 8213 * If we zero, cmdiocb->cmd_dmabuf, the calling routine will 8214 * not try to free it. 8215 */ 8216 cmdiocb->cmd_dmabuf = NULL; 8217 lpfc_set_disctmo(vport); 8218 /* Send back ACC */ 8219 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8220 /* send RECOVERY event for ALL nodes that match RSCN payload */ 8221 lpfc_rscn_recovery_check(vport); 8222 return lpfc_els_handle_rscn(vport); 8223 } 8224 8225 /** 8226 * lpfc_els_handle_rscn - Handle rscn for a vport 8227 * @vport: pointer to a host virtual N_Port data structure. 8228 * 8229 * This routine handles the Registration State Configuration Notification 8230 * (RSCN) for a @vport. If login to NameServer does not exist, a new ndlp shall 8231 * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise, 8232 * if the ndlp to NameServer exists, a Common Transport (CT) command to the 8233 * NameServer shall be issued. If CT command to the NameServer fails to be 8234 * issued, the lpfc_els_flush_rscn() routine shall be invoked to clean up any 8235 * RSCN activities with the @vport. 8236 * 8237 * Return code 8238 * 0 - Cleaned up rscn on the @vport 8239 * 1 - Wait for plogi to name server before proceed 8240 **/ 8241 int 8242 lpfc_els_handle_rscn(struct lpfc_vport *vport) 8243 { 8244 struct lpfc_nodelist *ndlp; 8245 struct lpfc_hba *phba = vport->phba; 8246 8247 /* Ignore RSCN if the port is being torn down. */ 8248 if (vport->load_flag & FC_UNLOADING) { 8249 lpfc_els_flush_rscn(vport); 8250 return 0; 8251 } 8252 8253 /* Start timer for RSCN processing */ 8254 lpfc_set_disctmo(vport); 8255 8256 /* RSCN processed */ 8257 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 8258 "0215 RSCN processed Data: x%x x%x x%x x%x x%x x%x\n", 8259 vport->fc_flag, 0, vport->fc_rscn_id_cnt, 8260 vport->port_state, vport->num_disc_nodes, 8261 vport->gidft_inp); 8262 8263 /* To process RSCN, first compare RSCN data with NameServer */ 8264 vport->fc_ns_retry = 0; 8265 vport->num_disc_nodes = 0; 8266 8267 ndlp = lpfc_findnode_did(vport, NameServer_DID); 8268 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { 8269 /* Good ndlp, issue CT Request to NameServer. Need to 8270 * know how many gidfts were issued. 
If none, then just 8271 * flush the RSCN. Otherwise, the outstanding requests 8272 * need to complete. 8273 */ 8274 if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_FT) { 8275 if (lpfc_issue_gidft(vport) > 0) 8276 return 1; 8277 } else if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_PT) { 8278 if (lpfc_issue_gidpt(vport) > 0) 8279 return 1; 8280 } else { 8281 return 1; 8282 } 8283 } else { 8284 /* Nameserver login in question. Revalidate. */ 8285 if (ndlp) { 8286 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE; 8287 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 8288 } else { 8289 ndlp = lpfc_nlp_init(vport, NameServer_DID); 8290 if (!ndlp) { 8291 lpfc_els_flush_rscn(vport); 8292 return 0; 8293 } 8294 ndlp->nlp_prev_state = ndlp->nlp_state; 8295 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 8296 } 8297 ndlp->nlp_type |= NLP_FABRIC; 8298 lpfc_issue_els_plogi(vport, NameServer_DID, 0); 8299 /* Wait for NameServer login cmpl before we can 8300 * continue 8301 */ 8302 return 1; 8303 } 8304 8305 lpfc_els_flush_rscn(vport); 8306 return 0; 8307 } 8308 8309 /** 8310 * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb 8311 * @vport: pointer to a host virtual N_Port data structure. 8312 * @cmdiocb: pointer to lpfc command iocb data structure. 8313 * @ndlp: pointer to a node-list data structure. 8314 * 8315 * This routine processes Fabric Login (FLOGI) IOCB received as an ELS 8316 * unsolicited event. An unsolicited FLOGI can be received in a point-to- 8317 * point topology. As an unsolicited FLOGI should not be received in a loop 8318 * mode, any unsolicited FLOGI received in loop mode shall be ignored. The 8319 * lpfc_check_sparm() routine is invoked to check the parameters in the 8320 * unsolicited FLOGI. If parameters validation failed, the routine 8321 * lpfc_els_rsp_reject() shall be called with reject reason code set to 8322 * LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the 8323 * FLOGI shall be compared with the Port WWN of the @vport to determine who 8324 * will initiate PLOGI. The higher lexicographical value party shall has 8325 * higher priority (as the winning port) and will initiate PLOGI and 8326 * communicate Port_IDs (Addresses) for both nodes in PLOGI. The result 8327 * of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI 8328 * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI. 
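 *
 * If the remote Port WWN compares equal to the local Port WWN, the FLOGI
 * is this port's own frame returned by an external loopback plug: on SLI-4
 * ports the external loopback flag is set and the outstanding FLOGI is
 * aborted, while older ports bring the link down and re-initialize it.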
8329 * 8330 * Return code 8331 * 0 - Successfully processed the unsolicited flogi 8332 * 1 - Failed to process the unsolicited flogi 8333 **/ 8334 static int 8335 lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8336 struct lpfc_nodelist *ndlp) 8337 { 8338 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 8339 struct lpfc_hba *phba = vport->phba; 8340 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf; 8341 uint32_t *lp = (uint32_t *) pcmd->virt; 8342 union lpfc_wqe128 *wqe = &cmdiocb->wqe; 8343 struct serv_parm *sp; 8344 LPFC_MBOXQ_t *mbox; 8345 uint32_t cmd, did; 8346 int rc; 8347 uint32_t fc_flag = 0; 8348 uint32_t port_state = 0; 8349 8350 /* Clear external loopback plug detected flag */ 8351 phba->link_flag &= ~LS_EXTERNAL_LOOPBACK; 8352 8353 cmd = *lp++; 8354 sp = (struct serv_parm *) lp; 8355 8356 /* FLOGI received */ 8357 8358 lpfc_set_disctmo(vport); 8359 8360 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 8361 /* We should never receive a FLOGI in loop mode, ignore it */ 8362 did = bf_get(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest); 8363 8364 /* An FLOGI ELS command <elsCmd> was received from DID <did> in 8365 Loop Mode */ 8366 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 8367 "0113 An FLOGI ELS command x%x was " 8368 "received from DID x%x in Loop Mode\n", 8369 cmd, did); 8370 return 1; 8371 } 8372 8373 (void) lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1); 8374 8375 /* 8376 * If our portname is greater than the remote portname, 8377 * then we initiate Nport login. 8378 */ 8379 8380 rc = memcmp(&vport->fc_portname, &sp->portName, 8381 sizeof(struct lpfc_name)); 8382 8383 if (!rc) { 8384 if (phba->sli_rev < LPFC_SLI_REV4) { 8385 mbox = mempool_alloc(phba->mbox_mem_pool, 8386 GFP_KERNEL); 8387 if (!mbox) 8388 return 1; 8389 lpfc_linkdown(phba); 8390 lpfc_init_link(phba, mbox, 8391 phba->cfg_topology, 8392 phba->cfg_link_speed); 8393 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; 8394 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 8395 mbox->vport = vport; 8396 rc = lpfc_sli_issue_mbox(phba, mbox, 8397 MBX_NOWAIT); 8398 lpfc_set_loopback_flag(phba); 8399 if (rc == MBX_NOT_FINISHED) 8400 mempool_free(mbox, phba->mbox_mem_pool); 8401 return 1; 8402 } 8403 8404 /* External loopback plug insertion detected */ 8405 phba->link_flag |= LS_EXTERNAL_LOOPBACK; 8406 8407 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_LIBDFC, 8408 "1119 External Loopback plug detected\n"); 8409 8410 /* abort the flogi coming back to ourselves 8411 * due to external loopback on the port. 8412 */ 8413 lpfc_els_abort_flogi(phba); 8414 return 0; 8415 8416 } else if (rc > 0) { /* greater than */ 8417 spin_lock_irq(shost->host_lock); 8418 vport->fc_flag |= FC_PT2PT_PLOGI; 8419 spin_unlock_irq(shost->host_lock); 8420 8421 /* If we have the high WWPN we can assign our own 8422 * myDID; otherwise, we have to WAIT for a PLOGI 8423 * from the remote NPort to find out what it 8424 * will be. 8425 */ 8426 vport->fc_myDID = PT2PT_LocalID; 8427 } else { 8428 vport->fc_myDID = PT2PT_RemoteID; 8429 } 8430 8431 /* 8432 * The vport state should go to LPFC_FLOGI only 8433 * AFTER we issue a FLOGI, not receive one. 8434 */ 8435 spin_lock_irq(shost->host_lock); 8436 fc_flag = vport->fc_flag; 8437 port_state = vport->port_state; 8438 vport->fc_flag |= FC_PT2PT; 8439 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 8440 8441 /* Acking an unsol FLOGI. Count 1 for link bounce 8442 * work-around. 
8443 */ 8444 vport->rcv_flogi_cnt++; 8445 spin_unlock_irq(shost->host_lock); 8446 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8447 "3311 Rcv Flogi PS x%x new PS x%x " 8448 "fc_flag x%x new fc_flag x%x\n", 8449 port_state, vport->port_state, 8450 fc_flag, vport->fc_flag); 8451 8452 /* 8453 * We temporarily set fc_myDID to make it look like we are 8454 * a Fabric. This is done just so we end up with the right 8455 * did / sid on the FLOGI ACC rsp. 8456 */ 8457 did = vport->fc_myDID; 8458 vport->fc_myDID = Fabric_DID; 8459 8460 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); 8461 8462 /* Defer ACC response until AFTER we issue a FLOGI */ 8463 if (!(phba->hba_flag & HBA_FLOGI_ISSUED)) { 8464 phba->defer_flogi_acc_rx_id = bf_get(wqe_ctxt_tag, 8465 &wqe->xmit_els_rsp.wqe_com); 8466 phba->defer_flogi_acc_ox_id = bf_get(wqe_rcvoxid, 8467 &wqe->xmit_els_rsp.wqe_com); 8468 8469 vport->fc_myDID = did; 8470 8471 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8472 "3344 Deferring FLOGI ACC: rx_id: x%x," 8473 " ox_id: x%x, hba_flag x%x\n", 8474 phba->defer_flogi_acc_rx_id, 8475 phba->defer_flogi_acc_ox_id, phba->hba_flag); 8476 8477 phba->defer_flogi_acc_flag = true; 8478 8479 return 0; 8480 } 8481 8482 /* Send back ACC */ 8483 lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, cmdiocb, ndlp, NULL); 8484 8485 /* Now lets put fc_myDID back to what its supposed to be */ 8486 vport->fc_myDID = did; 8487 8488 return 0; 8489 } 8490 8491 /** 8492 * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb 8493 * @vport: pointer to a host virtual N_Port data structure. 8494 * @cmdiocb: pointer to lpfc command iocb data structure. 8495 * @ndlp: pointer to a node-list data structure. 8496 * 8497 * This routine processes Request Node Identification Data (RNID) IOCB 8498 * received as an ELS unsolicited event. Only when the RNID specified format 8499 * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data) 8500 * present, this routine will invoke the lpfc_els_rsp_rnid_acc() routine to 8501 * Accept (ACC) the RNID ELS command. All the other RNID formats are 8502 * rejected by invoking the lpfc_els_rsp_reject() routine. 8503 * 8504 * Return code 8505 * 0 - Successfully processed rnid iocb (currently always return 0) 8506 **/ 8507 static int 8508 lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8509 struct lpfc_nodelist *ndlp) 8510 { 8511 struct lpfc_dmabuf *pcmd; 8512 uint32_t *lp; 8513 RNID *rn; 8514 struct ls_rjt stat; 8515 8516 pcmd = cmdiocb->cmd_dmabuf; 8517 lp = (uint32_t *) pcmd->virt; 8518 8519 lp++; 8520 rn = (RNID *) lp; 8521 8522 /* RNID received */ 8523 8524 switch (rn->Format) { 8525 case 0: 8526 case RNID_TOPOLOGY_DISC: 8527 /* Send back ACC */ 8528 lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp); 8529 break; 8530 default: 8531 /* Reject this request because format not supported */ 8532 stat.un.b.lsRjtRsvd0 = 0; 8533 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8534 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8535 stat.un.b.vendorUnique = 0; 8536 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 8537 NULL); 8538 } 8539 return 0; 8540 } 8541 8542 /** 8543 * lpfc_els_rcv_echo - Process an unsolicited echo iocb 8544 * @vport: pointer to a host virtual N_Port data structure. 8545 * @cmdiocb: pointer to lpfc command iocb data structure. 8546 * @ndlp: pointer to a node-list data structure. 
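 *
 * This routine processes an unsolicited ECHO IOCB received as an ELS
 * unsolicited event. It skips the command word of the payload and invokes
 * lpfc_els_rsp_echo_acc() to send an Accept (ACC) response carrying the
 * received echo data back to the sender.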
8547 * 8548 * Return code 8549 * 0 - Successfully processed echo iocb (currently always return 0) 8550 **/ 8551 static int 8552 lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8553 struct lpfc_nodelist *ndlp) 8554 { 8555 uint8_t *pcmd; 8556 8557 pcmd = (uint8_t *)cmdiocb->cmd_dmabuf->virt; 8558 8559 /* skip over first word of echo command to find echo data */ 8560 pcmd += sizeof(uint32_t); 8561 8562 lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp); 8563 return 0; 8564 } 8565 8566 /** 8567 * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb 8568 * @vport: pointer to a host virtual N_Port data structure. 8569 * @cmdiocb: pointer to lpfc command iocb data structure. 8570 * @ndlp: pointer to a node-list data structure. 8571 * 8572 * This routine processes a Link Incident Report Registration(LIRR) IOCB 8573 * received as an ELS unsolicited event. Currently, this function just invokes 8574 * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally. 8575 * 8576 * Return code 8577 * 0 - Successfully processed lirr iocb (currently always return 0) 8578 **/ 8579 static int 8580 lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8581 struct lpfc_nodelist *ndlp) 8582 { 8583 struct ls_rjt stat; 8584 8585 /* For now, unconditionally reject this command */ 8586 stat.un.b.lsRjtRsvd0 = 0; 8587 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8588 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8589 stat.un.b.vendorUnique = 0; 8590 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 8591 return 0; 8592 } 8593 8594 /** 8595 * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb 8596 * @vport: pointer to a host virtual N_Port data structure. 8597 * @cmdiocb: pointer to lpfc command iocb data structure. 8598 * @ndlp: pointer to a node-list data structure. 8599 * 8600 * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB 8601 * received as an ELS unsolicited event. A request to RRQ shall only 8602 * be accepted if the Originator Nx_Port N_Port_ID or the Responder 8603 * Nx_Port N_Port_ID of the target Exchange is the same as the 8604 * N_Port_ID of the Nx_Port that makes the request. If the RRQ is 8605 * not accepted, an LS_RJT with reason code "Unable to perform 8606 * command request" and reason code explanation "Invalid Originator 8607 * S_ID" shall be returned. For now, we just unconditionally accept 8608 * RRQ from the target. 8609 **/ 8610 static void 8611 lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8612 struct lpfc_nodelist *ndlp) 8613 { 8614 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8615 if (vport->phba->sli_rev == LPFC_SLI_REV4) 8616 lpfc_els_clear_rrq(vport, cmdiocb, ndlp); 8617 } 8618 8619 /** 8620 * lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd 8621 * @phba: pointer to lpfc hba data structure. 8622 * @pmb: pointer to the driver internal queue element for mailbox command. 8623 * 8624 * This routine is the completion callback function for the MBX_READ_LNK_STAT 8625 * mailbox command. This callback function is to actually send the Accept 8626 * (ACC) response to a Read Link Status (RLS) unsolicited IOCB event. It 8627 * collects the link statistics from the completion of the MBX_READ_LNK_STAT 8628 * mailbox command, constructs the RLS response with the link statistics 8629 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC 8630 * response to the RLS. 
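 * The ACC payload carries the link failure, loss-of-sync, loss-of-signal,
 * primitive sequence protocol error, invalid transmission word and CRC error
 * counts reported by the mailbox, each converted to big-endian wire order.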
8631 * 8632 * Note that the ndlp reference count will be incremented by 1 for holding the 8633 * ndlp and the reference to ndlp will be stored into the ndlp field of 8634 * the IOCB for the completion callback function to the RLS Accept Response 8635 * ELS IOCB command. 8636 * 8637 **/ 8638 static void 8639 lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 8640 { 8641 int rc = 0; 8642 MAILBOX_t *mb; 8643 IOCB_t *icmd; 8644 union lpfc_wqe128 *wqe; 8645 struct RLS_RSP *rls_rsp; 8646 uint8_t *pcmd; 8647 struct lpfc_iocbq *elsiocb; 8648 struct lpfc_nodelist *ndlp; 8649 uint16_t oxid; 8650 uint16_t rxid; 8651 uint32_t cmdsize; 8652 u32 ulp_context; 8653 8654 mb = &pmb->u.mb; 8655 8656 ndlp = pmb->ctx_ndlp; 8657 rxid = (uint16_t)((unsigned long)(pmb->ctx_buf) & 0xffff); 8658 oxid = (uint16_t)(((unsigned long)(pmb->ctx_buf) >> 16) & 0xffff); 8659 pmb->ctx_buf = NULL; 8660 pmb->ctx_ndlp = NULL; 8661 8662 if (mb->mbxStatus) { 8663 mempool_free(pmb, phba->mbox_mem_pool); 8664 return; 8665 } 8666 8667 cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t); 8668 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 8669 lpfc_max_els_tries, ndlp, 8670 ndlp->nlp_DID, ELS_CMD_ACC); 8671 8672 /* Decrement the ndlp reference count from previous mbox command */ 8673 lpfc_nlp_put(ndlp); 8674 8675 if (!elsiocb) { 8676 mempool_free(pmb, phba->mbox_mem_pool); 8677 return; 8678 } 8679 8680 ulp_context = get_job_ulpcontext(phba, elsiocb); 8681 if (phba->sli_rev == LPFC_SLI_REV4) { 8682 wqe = &elsiocb->wqe; 8683 /* Xri / rx_id */ 8684 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, rxid); 8685 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, oxid); 8686 } else { 8687 icmd = &elsiocb->iocb; 8688 icmd->ulpContext = rxid; 8689 icmd->unsli3.rcvsli3.ox_id = oxid; 8690 } 8691 8692 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 8693 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 8694 pcmd += sizeof(uint32_t); /* Skip past command */ 8695 rls_rsp = (struct RLS_RSP *)pcmd; 8696 8697 rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt); 8698 rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt); 8699 rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt); 8700 rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt); 8701 rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord); 8702 rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt); 8703 mempool_free(pmb, phba->mbox_mem_pool); 8704 /* Xmit ELS RLS ACC response tag <ulpIoTag> */ 8705 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 8706 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, " 8707 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 8708 elsiocb->iotag, ulp_context, 8709 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 8710 ndlp->nlp_rpi); 8711 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 8712 phba->fc_stat.elsXmitACC++; 8713 elsiocb->ndlp = lpfc_nlp_get(ndlp); 8714 if (!elsiocb->ndlp) { 8715 lpfc_els_free_iocb(phba, elsiocb); 8716 return; 8717 } 8718 8719 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 8720 if (rc == IOCB_ERROR) { 8721 lpfc_els_free_iocb(phba, elsiocb); 8722 lpfc_nlp_put(ndlp); 8723 } 8724 return; 8725 } 8726 8727 /** 8728 * lpfc_els_rcv_rls - Process an unsolicited rls iocb 8729 * @vport: pointer to a host virtual N_Port data structure. 8730 * @cmdiocb: pointer to lpfc command iocb data structure. 8731 * @ndlp: pointer to a node-list data structure. 8732 * 8733 * This routine processes Read Link Status (RLS) IOCB received as an 8734 * ELS unsolicited event. 
It first checks the remote port state. If the
8735 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
8736 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
8737 * response. Otherwise, it issues the MBX_READ_LNK_STAT mailbox command
8738 * for reading the HBA link statistics. The completion callback function,
8739 * lpfc_els_rsp_rls_acc(), registered with the MBX_READ_LNK_STAT mailbox
8740 * command, then sends out the RLS Accept (ACC) response.
8741 *
8742 * Return codes
8743 *   0 - Successfully processed rls iocb (currently always return 0)
8744 **/
8745 static int
8746 lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
8747 		 struct lpfc_nodelist *ndlp)
8748 {
8749 	struct lpfc_hba *phba = vport->phba;
8750 	LPFC_MBOXQ_t *mbox;
8751 	struct ls_rjt stat;
8752 	u32 ctx = get_job_ulpcontext(phba, cmdiocb);
8753 	u32 ox_id = get_job_rcvoxid(phba, cmdiocb);
8754 
8755 	if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
8756 	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
8757 		/* reject the unsolicited RLS request and done with it */
8758 		goto reject_out;
8759 
8760 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
8761 	if (mbox) {
8762 		lpfc_read_lnk_stat(phba, mbox);
8763 		mbox->ctx_buf = (void *)((unsigned long)
8764 			(ox_id << 16 | ctx));
8765 		mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
8766 		if (!mbox->ctx_ndlp)
8767 			goto node_err;
8768 		mbox->vport = vport;
8769 		mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
8770 		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
8771 			!= MBX_NOT_FINISHED)
8772 			/* Mbox completion will send ELS Response */
8773 			return 0;
8774 		/* Decrement reference count used for the failed mbox
8775 		 * command.
8776 		 */
8777 		lpfc_nlp_put(ndlp);
8778 node_err:
8779 		mempool_free(mbox, phba->mbox_mem_pool);
8780 	}
8781 reject_out:
8782 	/* issue rejection response */
8783 	stat.un.b.lsRjtRsvd0 = 0;
8784 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
8785 	stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
8786 	stat.un.b.vendorUnique = 0;
8787 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
8788 	return 0;
8789 }
8790 
8791 /**
8792 * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb
8793 * @vport: pointer to a host virtual N_Port data structure.
8794 * @cmdiocb: pointer to lpfc command iocb data structure.
8795 * @ndlp: pointer to a node-list data structure.
8796 *
8797 * This routine processes Read Timeout Value (RTV) IOCB received as an
8798 * ELS unsolicited event. It first checks the remote port state. If the
8799 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
8800 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
8801 * response. Otherwise, it sends the Accept (ACC) response to a Read Timeout
8802 * Value (RTV) unsolicited IOCB event.
8803 *
8804 * Note that the ndlp reference count will be incremented by 1 for holding the
8805 * ndlp and the reference to ndlp will be stored into the ndlp field of
8806 * the IOCB for the completion callback function to the RTV Accept Response
8807 * ELS IOCB command.
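 * The RTV ACC payload returns R_A_TOV (reported in milliseconds), E_D_TOV,
 * and a qtov word whose E_D_TOV-resolution bit reflects phba->fc_edtovResol.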
8808 * 8809 * Return codes 8810 * 0 - Successfully processed rtv iocb (currently always return 0) 8811 **/ 8812 static int 8813 lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8814 struct lpfc_nodelist *ndlp) 8815 { 8816 int rc = 0; 8817 IOCB_t *icmd; 8818 union lpfc_wqe128 *wqe; 8819 struct lpfc_hba *phba = vport->phba; 8820 struct ls_rjt stat; 8821 struct RTV_RSP *rtv_rsp; 8822 uint8_t *pcmd; 8823 struct lpfc_iocbq *elsiocb; 8824 uint32_t cmdsize; 8825 u32 ulp_context; 8826 8827 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 8828 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) 8829 /* reject the unsolicited RTV request and done with it */ 8830 goto reject_out; 8831 8832 cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t); 8833 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 8834 lpfc_max_els_tries, ndlp, 8835 ndlp->nlp_DID, ELS_CMD_ACC); 8836 8837 if (!elsiocb) 8838 return 1; 8839 8840 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 8841 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 8842 pcmd += sizeof(uint32_t); /* Skip past command */ 8843 8844 ulp_context = get_job_ulpcontext(phba, elsiocb); 8845 /* use the command's xri in the response */ 8846 if (phba->sli_rev == LPFC_SLI_REV4) { 8847 wqe = &elsiocb->wqe; 8848 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 8849 get_job_ulpcontext(phba, cmdiocb)); 8850 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 8851 get_job_rcvoxid(phba, cmdiocb)); 8852 } else { 8853 icmd = &elsiocb->iocb; 8854 icmd->ulpContext = get_job_ulpcontext(phba, cmdiocb); 8855 icmd->unsli3.rcvsli3.ox_id = get_job_rcvoxid(phba, cmdiocb); 8856 } 8857 8858 rtv_rsp = (struct RTV_RSP *)pcmd; 8859 8860 /* populate RTV payload */ 8861 rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */ 8862 rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov); 8863 bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0); 8864 bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */ 8865 rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov); 8866 8867 /* Xmit ELS RLS ACC response tag <ulpIoTag> */ 8868 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 8869 "2875 Xmit ELS RTV ACC response tag x%x xri x%x, " 8870 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, " 8871 "Data: x%x x%x x%x\n", 8872 elsiocb->iotag, ulp_context, 8873 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 8874 ndlp->nlp_rpi, 8875 rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov); 8876 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 8877 phba->fc_stat.elsXmitACC++; 8878 elsiocb->ndlp = lpfc_nlp_get(ndlp); 8879 if (!elsiocb->ndlp) { 8880 lpfc_els_free_iocb(phba, elsiocb); 8881 return 0; 8882 } 8883 8884 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 8885 if (rc == IOCB_ERROR) { 8886 lpfc_els_free_iocb(phba, elsiocb); 8887 lpfc_nlp_put(ndlp); 8888 } 8889 return 0; 8890 8891 reject_out: 8892 /* issue rejection response */ 8893 stat.un.b.lsRjtRsvd0 = 0; 8894 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8895 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8896 stat.un.b.vendorUnique = 0; 8897 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 8898 return 0; 8899 } 8900 8901 /* lpfc_issue_els_rrq - Process an unsolicited rrq iocb 8902 * @vport: pointer to a host virtual N_Port data structure. 8903 * @ndlp: pointer to a node-list data structure. 8904 * @did: DID of the target. 8905 * @rrq: Pointer to the rrq struct. 8906 * 8907 * Build a ELS RRQ command and send it to the target. If the issue_iocb is 8908 * successful, the completion handler will clear the RRQ. 
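 * Per FC-LS, RRQ (Reinstate Recovery Qualifier) releases the Recovery
 * Qualifier held for an aborted exchange so its OX_ID/RX_ID pair can be
 * reused; the payload built here carries the requesting N_Port_ID together
 * with the exchange's OX_ID and RX_ID.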
8909 * 8910 * Return codes 8911 * 0 - Successfully sent rrq els iocb. 8912 * 1 - Failed to send rrq els iocb. 8913 **/ 8914 static int 8915 lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 8916 uint32_t did, struct lpfc_node_rrq *rrq) 8917 { 8918 struct lpfc_hba *phba = vport->phba; 8919 struct RRQ *els_rrq; 8920 struct lpfc_iocbq *elsiocb; 8921 uint8_t *pcmd; 8922 uint16_t cmdsize; 8923 int ret; 8924 8925 if (!ndlp) 8926 return 1; 8927 8928 /* If ndlp is not NULL, we will bump the reference count on it */ 8929 cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ)); 8930 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did, 8931 ELS_CMD_RRQ); 8932 if (!elsiocb) 8933 return 1; 8934 8935 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 8936 8937 /* For RRQ request, remainder of payload is Exchange IDs */ 8938 *((uint32_t *) (pcmd)) = ELS_CMD_RRQ; 8939 pcmd += sizeof(uint32_t); 8940 els_rrq = (struct RRQ *) pcmd; 8941 8942 bf_set(rrq_oxid, els_rrq, phba->sli4_hba.xri_ids[rrq->xritag]); 8943 bf_set(rrq_rxid, els_rrq, rrq->rxid); 8944 bf_set(rrq_did, els_rrq, vport->fc_myDID); 8945 els_rrq->rrq = cpu_to_be32(els_rrq->rrq); 8946 els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg); 8947 8948 8949 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 8950 "Issue RRQ: did:x%x", 8951 did, rrq->xritag, rrq->rxid); 8952 elsiocb->context_un.rrq = rrq; 8953 elsiocb->cmd_cmpl = lpfc_cmpl_els_rrq; 8954 8955 elsiocb->ndlp = lpfc_nlp_get(ndlp); 8956 if (!elsiocb->ndlp) 8957 goto io_err; 8958 8959 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 8960 if (ret == IOCB_ERROR) { 8961 lpfc_nlp_put(ndlp); 8962 goto io_err; 8963 } 8964 return 0; 8965 8966 io_err: 8967 lpfc_els_free_iocb(phba, elsiocb); 8968 return 1; 8969 } 8970 8971 /** 8972 * lpfc_send_rrq - Sends ELS RRQ if needed. 8973 * @phba: pointer to lpfc hba data structure. 8974 * @rrq: pointer to the active rrq. 8975 * 8976 * This routine will call the lpfc_issue_els_rrq if the rrq is 8977 * still active for the xri. If this function returns a failure then 8978 * the caller needs to clean up the RRQ by calling lpfc_clr_active_rrq. 8979 * 8980 * Returns 0 Success. 8981 * 1 Failure. 8982 **/ 8983 int 8984 lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq) 8985 { 8986 struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport, 8987 rrq->nlp_DID); 8988 if (!ndlp) 8989 return 1; 8990 8991 if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag)) 8992 return lpfc_issue_els_rrq(rrq->vport, ndlp, 8993 rrq->nlp_DID, rrq); 8994 else 8995 return 1; 8996 } 8997 8998 /** 8999 * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command 9000 * @vport: pointer to a host virtual N_Port data structure. 9001 * @cmdsize: size of the ELS command. 9002 * @oldiocb: pointer to the original lpfc command iocb data structure. 9003 * @ndlp: pointer to a node-list data structure. 9004 * 9005 * This routine issuees an Accept (ACC) Read Port List (RPL) ELS command. 9006 * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL. 9007 * 9008 * Note that the ndlp reference count will be incremented by 1 for holding the 9009 * ndlp and the reference to ndlp will be stored into the ndlp field of 9010 * the IOCB for the completion callback function to the RPL Accept Response 9011 * ELS command. 
9012 * 9013 * Return code 9014 * 0 - Successfully issued ACC RPL ELS command 9015 * 1 - Failed to issue ACC RPL ELS command 9016 **/ 9017 static int 9018 lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize, 9019 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 9020 { 9021 int rc = 0; 9022 struct lpfc_hba *phba = vport->phba; 9023 IOCB_t *icmd; 9024 union lpfc_wqe128 *wqe; 9025 RPL_RSP rpl_rsp; 9026 struct lpfc_iocbq *elsiocb; 9027 uint8_t *pcmd; 9028 u32 ulp_context; 9029 9030 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 9031 ndlp->nlp_DID, ELS_CMD_ACC); 9032 9033 if (!elsiocb) 9034 return 1; 9035 9036 ulp_context = get_job_ulpcontext(phba, elsiocb); 9037 if (phba->sli_rev == LPFC_SLI_REV4) { 9038 wqe = &elsiocb->wqe; 9039 /* Xri / rx_id */ 9040 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 9041 get_job_ulpcontext(phba, oldiocb)); 9042 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 9043 get_job_rcvoxid(phba, oldiocb)); 9044 } else { 9045 icmd = &elsiocb->iocb; 9046 icmd->ulpContext = get_job_ulpcontext(phba, oldiocb); 9047 icmd->unsli3.rcvsli3.ox_id = get_job_rcvoxid(phba, oldiocb); 9048 } 9049 9050 pcmd = elsiocb->cmd_dmabuf->virt; 9051 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 9052 pcmd += sizeof(uint16_t); 9053 *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize); 9054 pcmd += sizeof(uint16_t); 9055 9056 /* Setup the RPL ACC payload */ 9057 rpl_rsp.listLen = be32_to_cpu(1); 9058 rpl_rsp.index = 0; 9059 rpl_rsp.port_num_blk.portNum = 0; 9060 rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID); 9061 memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname, 9062 sizeof(struct lpfc_name)); 9063 memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t)); 9064 /* Xmit ELS RPL ACC response tag <ulpIoTag> */ 9065 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 9066 "0120 Xmit ELS RPL ACC response tag x%x " 9067 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " 9068 "rpi x%x\n", 9069 elsiocb->iotag, ulp_context, 9070 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 9071 ndlp->nlp_rpi); 9072 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 9073 phba->fc_stat.elsXmitACC++; 9074 elsiocb->ndlp = lpfc_nlp_get(ndlp); 9075 if (!elsiocb->ndlp) { 9076 lpfc_els_free_iocb(phba, elsiocb); 9077 return 1; 9078 } 9079 9080 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 9081 if (rc == IOCB_ERROR) { 9082 lpfc_els_free_iocb(phba, elsiocb); 9083 lpfc_nlp_put(ndlp); 9084 return 1; 9085 } 9086 9087 return 0; 9088 } 9089 9090 /** 9091 * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb 9092 * @vport: pointer to a host virtual N_Port data structure. 9093 * @cmdiocb: pointer to lpfc command iocb data structure. 9094 * @ndlp: pointer to a node-list data structure. 9095 * 9096 * This routine processes Read Port List (RPL) IOCB received as an ELS 9097 * unsolicited event. It first checks the remote port state. If the remote 9098 * port is not in NLP_STE_UNMAPPED_NODE and NLP_STE_MAPPED_NODE states, it 9099 * invokes the lpfc_els_rsp_reject() routine to send reject response. 9100 * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine 9101 * to accept the RPL. 
9102 * 9103 * Return code 9104 * 0 - Successfully processed rpl iocb (currently always return 0) 9105 **/ 9106 static int 9107 lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 9108 struct lpfc_nodelist *ndlp) 9109 { 9110 struct lpfc_dmabuf *pcmd; 9111 uint32_t *lp; 9112 uint32_t maxsize; 9113 uint16_t cmdsize; 9114 RPL *rpl; 9115 struct ls_rjt stat; 9116 9117 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 9118 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { 9119 /* issue rejection response */ 9120 stat.un.b.lsRjtRsvd0 = 0; 9121 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 9122 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 9123 stat.un.b.vendorUnique = 0; 9124 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 9125 NULL); 9126 /* rejected the unsolicited RPL request and done with it */ 9127 return 0; 9128 } 9129 9130 pcmd = cmdiocb->cmd_dmabuf; 9131 lp = (uint32_t *) pcmd->virt; 9132 rpl = (RPL *) (lp + 1); 9133 maxsize = be32_to_cpu(rpl->maxsize); 9134 9135 /* We support only one port */ 9136 if ((rpl->index == 0) && 9137 ((maxsize == 0) || 9138 ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) { 9139 cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP); 9140 } else { 9141 cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t); 9142 } 9143 lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp); 9144 9145 return 0; 9146 } 9147 9148 /** 9149 * lpfc_els_rcv_farp - Process an unsolicited farp request els command 9150 * @vport: pointer to a virtual N_Port data structure. 9151 * @cmdiocb: pointer to lpfc command iocb data structure. 9152 * @ndlp: pointer to a node-list data structure. 9153 * 9154 * This routine processes Fibre Channel Address Resolution Protocol 9155 * (FARP) Request IOCB received as an ELS unsolicited event. Currently, 9156 * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such, 9157 * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the 9158 * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the 9159 * remote PortName is compared against the FC PortName stored in the @vport 9160 * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is 9161 * compared against the FC NodeName stored in the @vport data structure. 9162 * If any of these matches and the FARP_REQUEST_FARPR flag is set in the 9163 * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is 9164 * invoked to send out FARP Response to the remote node. Before sending the 9165 * FARP Response, however, the FARP_REQUEST_PLOGI flag is check in the FARP 9166 * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi() 9167 * routine is invoked to log into the remote port first. 
9168 * 9169 * Return code 9170 * 0 - Either the FARP Match Mode not supported or successfully processed 9171 **/ 9172 static int 9173 lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 9174 struct lpfc_nodelist *ndlp) 9175 { 9176 struct lpfc_dmabuf *pcmd; 9177 uint32_t *lp; 9178 FARP *fp; 9179 uint32_t cnt, did; 9180 9181 did = get_job_els_rsp64_did(vport->phba, cmdiocb); 9182 pcmd = cmdiocb->cmd_dmabuf; 9183 lp = (uint32_t *) pcmd->virt; 9184 9185 lp++; 9186 fp = (FARP *) lp; 9187 /* FARP-REQ received from DID <did> */ 9188 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 9189 "0601 FARP-REQ received from DID x%x\n", did); 9190 /* We will only support match on WWPN or WWNN */ 9191 if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) { 9192 return 0; 9193 } 9194 9195 cnt = 0; 9196 /* If this FARP command is searching for my portname */ 9197 if (fp->Mflags & FARP_MATCH_PORT) { 9198 if (memcmp(&fp->RportName, &vport->fc_portname, 9199 sizeof(struct lpfc_name)) == 0) 9200 cnt = 1; 9201 } 9202 9203 /* If this FARP command is searching for my nodename */ 9204 if (fp->Mflags & FARP_MATCH_NODE) { 9205 if (memcmp(&fp->RnodeName, &vport->fc_nodename, 9206 sizeof(struct lpfc_name)) == 0) 9207 cnt = 1; 9208 } 9209 9210 if (cnt) { 9211 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || 9212 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) { 9213 /* Log back into the node before sending the FARP. */ 9214 if (fp->Rflags & FARP_REQUEST_PLOGI) { 9215 ndlp->nlp_prev_state = ndlp->nlp_state; 9216 lpfc_nlp_set_state(vport, ndlp, 9217 NLP_STE_PLOGI_ISSUE); 9218 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 9219 } 9220 9221 /* Send a FARP response to that node */ 9222 if (fp->Rflags & FARP_REQUEST_FARPR) 9223 lpfc_issue_els_farpr(vport, did, 0); 9224 } 9225 } 9226 return 0; 9227 } 9228 9229 /** 9230 * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb 9231 * @vport: pointer to a host virtual N_Port data structure. 9232 * @cmdiocb: pointer to lpfc command iocb data structure. 9233 * @ndlp: pointer to a node-list data structure. 9234 * 9235 * This routine processes Fibre Channel Address Resolution Protocol 9236 * Response (FARPR) IOCB received as an ELS unsolicited event. It simply 9237 * invokes the lpfc_els_rsp_acc() routine to the remote node to accept 9238 * the FARP response request. 9239 * 9240 * Return code 9241 * 0 - Successfully processed FARPR IOCB (currently always return 0) 9242 **/ 9243 static int 9244 lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 9245 struct lpfc_nodelist *ndlp) 9246 { 9247 uint32_t did; 9248 9249 did = get_job_els_rsp64_did(vport->phba, cmdiocb); 9250 9251 /* FARP-RSP received from DID <did> */ 9252 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 9253 "0600 FARP-RSP received from DID x%x\n", did); 9254 /* ACCEPT the Farp resp request */ 9255 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 9256 9257 return 0; 9258 } 9259 9260 /** 9261 * lpfc_els_rcv_fan - Process an unsolicited fan iocb command 9262 * @vport: pointer to a host virtual N_Port data structure. 9263 * @cmdiocb: pointer to lpfc command iocb data structure. 9264 * @fan_ndlp: pointer to a node-list data structure. 9265 * 9266 * This routine processes a Fabric Address Notification (FAN) IOCB 9267 * command received as an ELS unsolicited event. The FAN ELS command will 9268 * only be processed on a physical port (i.e., the @vport represents the 9269 * physical port). 
The fabric NodeName and PortName from the FAN IOCB are
9270 * compared against those in the phba data structure. If any of those is
9271 * different, the lpfc_issue_init_vfi() routine is invoked to redo the
9272 * Fabric Login (FLOGI) to the fabric and start discovery over. Otherwise,
9273 * if both of those are identical, the lpfc_issue_fabric_reglogin() routine
9274 * is invoked to register login to the fabric.
9275 *
9276 * Return code
9277 *   0 - Successfully processed fan iocb (currently always return 0).
9278 **/
9279 static int
9280 lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
9281 		 struct lpfc_nodelist *fan_ndlp)
9282 {
9283 	struct lpfc_hba *phba = vport->phba;
9284 	uint32_t *lp;
9285 	FAN *fp;
9286 
9287 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n");
9288 	lp = (uint32_t *)cmdiocb->cmd_dmabuf->virt;
9289 	fp = (FAN *) ++lp;
9290 	/* FAN received; FAN does not have a reply sequence */
9291 	if ((vport == phba->pport) &&
9292 	    (vport->port_state == LPFC_LOCAL_CFG_LINK)) {
9293 		if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName,
9294 			    sizeof(struct lpfc_name))) ||
9295 		    (memcmp(&phba->fc_fabparam.portName, &fp->FportName,
9296 			    sizeof(struct lpfc_name)))) {
9297 			/* This port has switched fabrics. FLOGI is required */
9298 			lpfc_issue_init_vfi(vport);
9299 		} else {
9300 			/* FAN verified - skip FLOGI */
9301 			vport->fc_myDID = vport->fc_prevDID;
9302 			if (phba->sli_rev < LPFC_SLI_REV4)
9303 				lpfc_issue_fabric_reglogin(vport);
9304 			else {
9305 				lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
9306 						 "3138 Need register VFI: (x%x/%x)\n",
9307 						 vport->fc_prevDID, vport->fc_myDID);
9308 				lpfc_issue_reg_vfi(vport);
9309 			}
9310 		}
9311 	}
9312 	return 0;
9313 }
9314 
9315 /**
9316 * lpfc_els_rcv_edc - Process an unsolicited EDC iocb
9317 * @vport: pointer to a host virtual N_Port data structure.
9318 * @cmdiocb: pointer to lpfc command iocb data structure.
9319 * @ndlp: pointer to a node-list data structure.
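 *
 * This routine processes an Exchange Diagnostic Capabilities (EDC) ELS
 * request received as an unsolicited event. It walks the diagnostic
 * descriptor TLVs looking for the Link Fault Capability and Congestion
 * Signaling Capability descriptors, records the congestion signaling
 * settings advertised by the peer, and then returns an EDC LS_ACC via
 * lpfc_issue_els_edc_rsp().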
9320 * 9321 * Return code 9322 * 0 - Successfully processed echo iocb (currently always return 0) 9323 **/ 9324 static int 9325 lpfc_els_rcv_edc(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 9326 struct lpfc_nodelist *ndlp) 9327 { 9328 struct lpfc_hba *phba = vport->phba; 9329 struct fc_els_edc *edc_req; 9330 struct fc_tlv_desc *tlv; 9331 uint8_t *payload; 9332 uint32_t *ptr, dtag; 9333 const char *dtag_nm; 9334 int desc_cnt = 0, bytes_remain; 9335 struct fc_diag_lnkflt_desc *plnkflt; 9336 9337 payload = cmdiocb->cmd_dmabuf->virt; 9338 9339 edc_req = (struct fc_els_edc *)payload; 9340 bytes_remain = be32_to_cpu(edc_req->desc_len); 9341 9342 ptr = (uint32_t *)payload; 9343 lpfc_printf_vlog(vport, KERN_INFO, 9344 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, 9345 "3319 Rcv EDC payload len %d: x%x x%x x%x\n", 9346 bytes_remain, be32_to_cpu(*ptr), 9347 be32_to_cpu(*(ptr + 1)), be32_to_cpu(*(ptr + 2))); 9348 9349 /* No signal support unless there is a congestion descriptor */ 9350 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 9351 phba->cgn_sig_freq = 0; 9352 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; 9353 9354 if (bytes_remain <= 0) 9355 goto out; 9356 9357 tlv = edc_req->desc; 9358 9359 /* 9360 * cycle through EDC diagnostic descriptors to find the 9361 * congestion signaling capability descriptor 9362 */ 9363 while (bytes_remain) { 9364 if (bytes_remain < FC_TLV_DESC_HDR_SZ) { 9365 lpfc_printf_log(phba, KERN_WARNING, 9366 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, 9367 "6464 Truncated TLV hdr on " 9368 "Diagnostic descriptor[%d]\n", 9369 desc_cnt); 9370 goto out; 9371 } 9372 9373 dtag = be32_to_cpu(tlv->desc_tag); 9374 switch (dtag) { 9375 case ELS_DTAG_LNK_FAULT_CAP: 9376 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 9377 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 9378 sizeof(struct fc_diag_lnkflt_desc)) { 9379 lpfc_printf_log(phba, KERN_WARNING, 9380 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, 9381 "6465 Truncated Link Fault Diagnostic " 9382 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 9383 desc_cnt, bytes_remain, 9384 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 9385 sizeof(struct fc_diag_lnkflt_desc)); 9386 goto out; 9387 } 9388 plnkflt = (struct fc_diag_lnkflt_desc *)tlv; 9389 lpfc_printf_log(phba, KERN_INFO, 9390 LOG_ELS | LOG_LDS_EVENT, 9391 "4626 Link Fault Desc Data: x%08x len x%x " 9392 "da x%x dd x%x interval x%x\n", 9393 be32_to_cpu(plnkflt->desc_tag), 9394 be32_to_cpu(plnkflt->desc_len), 9395 be32_to_cpu( 9396 plnkflt->degrade_activate_threshold), 9397 be32_to_cpu( 9398 plnkflt->degrade_deactivate_threshold), 9399 be32_to_cpu(plnkflt->fec_degrade_interval)); 9400 break; 9401 case ELS_DTAG_CG_SIGNAL_CAP: 9402 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 9403 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 9404 sizeof(struct fc_diag_cg_sig_desc)) { 9405 lpfc_printf_log( 9406 phba, KERN_WARNING, LOG_CGN_MGMT, 9407 "6466 Truncated cgn signal Diagnostic " 9408 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 9409 desc_cnt, bytes_remain, 9410 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 9411 sizeof(struct fc_diag_cg_sig_desc)); 9412 goto out; 9413 } 9414 9415 phba->cgn_reg_fpin = phba->cgn_init_reg_fpin; 9416 phba->cgn_reg_signal = phba->cgn_init_reg_signal; 9417 9418 /* We start negotiation with lpfc_fabric_cgn_frequency. 9419 * When we process the EDC, we will settle on the 9420 * higher frequency. 
9421 */ 9422 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency; 9423 9424 lpfc_least_capable_settings( 9425 phba, (struct fc_diag_cg_sig_desc *)tlv); 9426 break; 9427 default: 9428 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 9429 lpfc_printf_log(phba, KERN_WARNING, 9430 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, 9431 "6467 unknown Diagnostic " 9432 "Descriptor[%d]: tag x%x (%s)\n", 9433 desc_cnt, dtag, dtag_nm); 9434 } 9435 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); 9436 tlv = fc_tlv_next_desc(tlv); 9437 desc_cnt++; 9438 } 9439 out: 9440 /* Need to send back an ACC */ 9441 lpfc_issue_els_edc_rsp(vport, cmdiocb, ndlp); 9442 9443 lpfc_config_cgn_signal(phba); 9444 return 0; 9445 } 9446 9447 /** 9448 * lpfc_els_timeout - Handler funciton to the els timer 9449 * @t: timer context used to obtain the vport. 9450 * 9451 * This routine is invoked by the ELS timer after timeout. It posts the ELS 9452 * timer timeout event by setting the WORKER_ELS_TMO bit to the work port 9453 * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake 9454 * up the worker thread. It is for the worker thread to invoke the routine 9455 * lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO. 9456 **/ 9457 void 9458 lpfc_els_timeout(struct timer_list *t) 9459 { 9460 struct lpfc_vport *vport = from_timer(vport, t, els_tmofunc); 9461 struct lpfc_hba *phba = vport->phba; 9462 uint32_t tmo_posted; 9463 unsigned long iflag; 9464 9465 spin_lock_irqsave(&vport->work_port_lock, iflag); 9466 tmo_posted = vport->work_port_events & WORKER_ELS_TMO; 9467 if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING))) 9468 vport->work_port_events |= WORKER_ELS_TMO; 9469 spin_unlock_irqrestore(&vport->work_port_lock, iflag); 9470 9471 if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING))) 9472 lpfc_worker_wake_up(phba); 9473 return; 9474 } 9475 9476 9477 /** 9478 * lpfc_els_timeout_handler - Process an els timeout event 9479 * @vport: pointer to a virtual N_Port data structure. 9480 * 9481 * This routine is the actual handler function that processes an ELS timeout 9482 * event. It walks the ELS ring to get and abort all the IOCBs (except the 9483 * ABORT/CLOSE/FARP/FARPR/FDISC), which are associated with the @vport by 9484 * invoking the lpfc_sli_issue_abort_iotag() routine. 
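 * The handler interval is twice the R_A_TOV value; each pass decrements the
 * drvrTimeout of the pending ELS IOCBs by that interval and only aborts the
 * IOCBs whose drvrTimeout has already expired.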
9485 **/ 9486 void 9487 lpfc_els_timeout_handler(struct lpfc_vport *vport) 9488 { 9489 struct lpfc_hba *phba = vport->phba; 9490 struct lpfc_sli_ring *pring; 9491 struct lpfc_iocbq *tmp_iocb, *piocb; 9492 IOCB_t *cmd = NULL; 9493 struct lpfc_dmabuf *pcmd; 9494 uint32_t els_command = 0; 9495 uint32_t timeout; 9496 uint32_t remote_ID = 0xffffffff; 9497 LIST_HEAD(abort_list); 9498 u32 ulp_command = 0, ulp_context = 0, did = 0, iotag = 0; 9499 9500 9501 timeout = (uint32_t)(phba->fc_ratov << 1); 9502 9503 pring = lpfc_phba_elsring(phba); 9504 if (unlikely(!pring)) 9505 return; 9506 9507 if (phba->pport->load_flag & FC_UNLOADING) 9508 return; 9509 9510 spin_lock_irq(&phba->hbalock); 9511 if (phba->sli_rev == LPFC_SLI_REV4) 9512 spin_lock(&pring->ring_lock); 9513 9514 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 9515 ulp_command = get_job_cmnd(phba, piocb); 9516 ulp_context = get_job_ulpcontext(phba, piocb); 9517 did = get_job_els_rsp64_did(phba, piocb); 9518 9519 if (phba->sli_rev == LPFC_SLI_REV4) { 9520 iotag = get_wqe_reqtag(piocb); 9521 } else { 9522 cmd = &piocb->iocb; 9523 iotag = cmd->ulpIoTag; 9524 } 9525 9526 if ((piocb->cmd_flag & LPFC_IO_LIBDFC) != 0 || 9527 ulp_command == CMD_ABORT_XRI_CX || 9528 ulp_command == CMD_ABORT_XRI_CN || 9529 ulp_command == CMD_CLOSE_XRI_CN) 9530 continue; 9531 9532 if (piocb->vport != vport) 9533 continue; 9534 9535 pcmd = piocb->cmd_dmabuf; 9536 if (pcmd) 9537 els_command = *(uint32_t *) (pcmd->virt); 9538 9539 if (els_command == ELS_CMD_FARP || 9540 els_command == ELS_CMD_FARPR || 9541 els_command == ELS_CMD_FDISC) 9542 continue; 9543 9544 if (piocb->drvrTimeout > 0) { 9545 if (piocb->drvrTimeout >= timeout) 9546 piocb->drvrTimeout -= timeout; 9547 else 9548 piocb->drvrTimeout = 0; 9549 continue; 9550 } 9551 9552 remote_ID = 0xffffffff; 9553 if (ulp_command != CMD_GEN_REQUEST64_CR) { 9554 remote_ID = did; 9555 } else { 9556 struct lpfc_nodelist *ndlp; 9557 ndlp = __lpfc_findnode_rpi(vport, ulp_context); 9558 if (ndlp) 9559 remote_ID = ndlp->nlp_DID; 9560 } 9561 list_add_tail(&piocb->dlist, &abort_list); 9562 } 9563 if (phba->sli_rev == LPFC_SLI_REV4) 9564 spin_unlock(&pring->ring_lock); 9565 spin_unlock_irq(&phba->hbalock); 9566 9567 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { 9568 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9569 "0127 ELS timeout Data: x%x x%x x%x " 9570 "x%x\n", els_command, 9571 remote_ID, ulp_command, iotag); 9572 9573 spin_lock_irq(&phba->hbalock); 9574 list_del_init(&piocb->dlist); 9575 lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL); 9576 spin_unlock_irq(&phba->hbalock); 9577 } 9578 9579 /* Make sure HBA is alive */ 9580 lpfc_issue_hb_tmo(phba); 9581 9582 if (!list_empty(&pring->txcmplq)) 9583 if (!(phba->pport->load_flag & FC_UNLOADING)) 9584 mod_timer(&vport->els_tmofunc, 9585 jiffies + msecs_to_jiffies(1000 * timeout)); 9586 } 9587 9588 /** 9589 * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport 9590 * @vport: pointer to a host virtual N_Port data structure. 9591 * 9592 * This routine is used to clean up all the outstanding ELS commands on a 9593 * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport() 9594 * routine. After that, it walks the ELS transmit queue to remove all the 9595 * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. 
For 9596 * the IOCBs with a non-NULL completion callback function, the callback 9597 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and 9598 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion 9599 * callback function, the IOCB will simply be released. Finally, it walks 9600 * the ELS transmit completion queue to issue an abort IOCB to any transmit 9601 * completion queue IOCB that is associated with the @vport and is not 9602 * an IOCB from libdfc (i.e., the management plane IOCBs that are not 9603 * part of the discovery state machine) out to HBA by invoking the 9604 * lpfc_sli_issue_abort_iotag() routine. Note that this function issues the 9605 * abort IOCB to any transmit completion queueed IOCB, it does not guarantee 9606 * the IOCBs are aborted when this function returns. 9607 **/ 9608 void 9609 lpfc_els_flush_cmd(struct lpfc_vport *vport) 9610 { 9611 LIST_HEAD(abort_list); 9612 LIST_HEAD(cancel_list); 9613 struct lpfc_hba *phba = vport->phba; 9614 struct lpfc_sli_ring *pring; 9615 struct lpfc_iocbq *tmp_iocb, *piocb; 9616 u32 ulp_command; 9617 unsigned long iflags = 0; 9618 bool mbx_tmo_err; 9619 9620 lpfc_fabric_abort_vport(vport); 9621 9622 /* 9623 * For SLI3, only the hbalock is required. But SLI4 needs to coordinate 9624 * with the ring insert operation. Because lpfc_sli_issue_abort_iotag 9625 * ultimately grabs the ring_lock, the driver must splice the list into 9626 * a working list and release the locks before calling the abort. 9627 */ 9628 spin_lock_irqsave(&phba->hbalock, iflags); 9629 pring = lpfc_phba_elsring(phba); 9630 9631 /* Bail out if we've no ELS wq, like in PCI error recovery case. */ 9632 if (unlikely(!pring)) { 9633 spin_unlock_irqrestore(&phba->hbalock, iflags); 9634 return; 9635 } 9636 9637 if (phba->sli_rev == LPFC_SLI_REV4) 9638 spin_lock(&pring->ring_lock); 9639 9640 mbx_tmo_err = test_bit(MBX_TMO_ERR, &phba->bit_flags); 9641 /* First we need to issue aborts to outstanding cmds on txcmpl */ 9642 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 9643 if (piocb->cmd_flag & LPFC_IO_LIBDFC && !mbx_tmo_err) 9644 continue; 9645 9646 if (piocb->vport != vport) 9647 continue; 9648 9649 if (piocb->cmd_flag & LPFC_DRIVER_ABORTED && !mbx_tmo_err) 9650 continue; 9651 9652 /* On the ELS ring we can have ELS_REQUESTs, ELS_RSPs, 9653 * or GEN_REQUESTs waiting for a CQE response. 9654 */ 9655 ulp_command = get_job_cmnd(phba, piocb); 9656 if (ulp_command == CMD_ELS_REQUEST64_WQE || 9657 ulp_command == CMD_XMIT_ELS_RSP64_WQE) { 9658 list_add_tail(&piocb->dlist, &abort_list); 9659 9660 /* If the link is down when flushing ELS commands 9661 * the firmware will not complete them till after 9662 * the link comes back up. This may confuse 9663 * discovery for the new link up, so we need to 9664 * change the compl routine to just clean up the iocb 9665 * and avoid any retry logic. 9666 */ 9667 if (phba->link_state == LPFC_LINK_DOWN) 9668 piocb->cmd_cmpl = lpfc_cmpl_els_link_down; 9669 } else if (ulp_command == CMD_GEN_REQUEST64_CR || 9670 mbx_tmo_err) 9671 list_add_tail(&piocb->dlist, &abort_list); 9672 } 9673 9674 if (phba->sli_rev == LPFC_SLI_REV4) 9675 spin_unlock(&pring->ring_lock); 9676 spin_unlock_irqrestore(&phba->hbalock, iflags); 9677 9678 /* Abort each txcmpl iocb on aborted list and remove the dlist links. 
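	 * If the flush was triggered by a mailbox timeout (MBX_TMO_ERR), the
	 * iocbs are instead moved to a local cancel list and completed with
	 * IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED rather than being aborted to
	 * the HBA.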
*/ 9679 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { 9680 spin_lock_irqsave(&phba->hbalock, iflags); 9681 list_del_init(&piocb->dlist); 9682 if (mbx_tmo_err) 9683 list_move_tail(&piocb->list, &cancel_list); 9684 else 9685 lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL); 9686 9687 spin_unlock_irqrestore(&phba->hbalock, iflags); 9688 } 9689 if (!list_empty(&cancel_list)) 9690 lpfc_sli_cancel_iocbs(phba, &cancel_list, IOSTAT_LOCAL_REJECT, 9691 IOERR_SLI_ABORTED); 9692 else 9693 /* Make sure HBA is alive */ 9694 lpfc_issue_hb_tmo(phba); 9695 9696 if (!list_empty(&abort_list)) 9697 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9698 "3387 abort list for txq not empty\n"); 9699 INIT_LIST_HEAD(&abort_list); 9700 9701 spin_lock_irqsave(&phba->hbalock, iflags); 9702 if (phba->sli_rev == LPFC_SLI_REV4) 9703 spin_lock(&pring->ring_lock); 9704 9705 /* No need to abort the txq list, 9706 * just queue them up for lpfc_sli_cancel_iocbs 9707 */ 9708 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) { 9709 ulp_command = get_job_cmnd(phba, piocb); 9710 9711 if (piocb->cmd_flag & LPFC_IO_LIBDFC) 9712 continue; 9713 9714 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */ 9715 if (ulp_command == CMD_QUE_RING_BUF_CN || 9716 ulp_command == CMD_QUE_RING_BUF64_CN || 9717 ulp_command == CMD_CLOSE_XRI_CN || 9718 ulp_command == CMD_ABORT_XRI_CN || 9719 ulp_command == CMD_ABORT_XRI_CX) 9720 continue; 9721 9722 if (piocb->vport != vport) 9723 continue; 9724 9725 list_del_init(&piocb->list); 9726 list_add_tail(&piocb->list, &abort_list); 9727 } 9728 9729 /* The same holds true for any FLOGI/FDISC on the fabric_iocb_list */ 9730 if (vport == phba->pport) { 9731 list_for_each_entry_safe(piocb, tmp_iocb, 9732 &phba->fabric_iocb_list, list) { 9733 list_del_init(&piocb->list); 9734 list_add_tail(&piocb->list, &abort_list); 9735 } 9736 } 9737 9738 if (phba->sli_rev == LPFC_SLI_REV4) 9739 spin_unlock(&pring->ring_lock); 9740 spin_unlock_irqrestore(&phba->hbalock, iflags); 9741 9742 /* Cancel all the IOCBs from the completions list */ 9743 lpfc_sli_cancel_iocbs(phba, &abort_list, 9744 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); 9745 9746 return; 9747 } 9748 9749 /** 9750 * lpfc_els_flush_all_cmd - Clean up all the outstanding els commands to a HBA 9751 * @phba: pointer to lpfc hba data structure. 9752 * 9753 * This routine is used to clean up all the outstanding ELS commands on a 9754 * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba() 9755 * routine. After that, it walks the ELS transmit queue to remove all the 9756 * IOCBs to the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For 9757 * the IOCBs with the completion callback function associated, the callback 9758 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and 9759 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without the completion 9760 * callback function associated, the IOCB will simply be released. Finally, 9761 * it walks the ELS transmit completion queue to issue an abort IOCB to any 9762 * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the 9763 * management plane IOCBs that are not part of the discovery state machine) 9764 * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine. 
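 * Internally this simply walks the HBA port_list under port_list_lock and
 * invokes lpfc_els_flush_cmd() on each vport.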
9765 **/ 9766 void 9767 lpfc_els_flush_all_cmd(struct lpfc_hba *phba) 9768 { 9769 struct lpfc_vport *vport; 9770 9771 spin_lock_irq(&phba->port_list_lock); 9772 list_for_each_entry(vport, &phba->port_list, listentry) 9773 lpfc_els_flush_cmd(vport); 9774 spin_unlock_irq(&phba->port_list_lock); 9775 9776 return; 9777 } 9778 9779 /** 9780 * lpfc_send_els_failure_event - Posts an ELS command failure event 9781 * @phba: Pointer to hba context object. 9782 * @cmdiocbp: Pointer to command iocb which reported error. 9783 * @rspiocbp: Pointer to response iocb which reported error. 9784 * 9785 * This function sends an event when there is an ELS command 9786 * failure. 9787 **/ 9788 void 9789 lpfc_send_els_failure_event(struct lpfc_hba *phba, 9790 struct lpfc_iocbq *cmdiocbp, 9791 struct lpfc_iocbq *rspiocbp) 9792 { 9793 struct lpfc_vport *vport = cmdiocbp->vport; 9794 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 9795 struct lpfc_lsrjt_event lsrjt_event; 9796 struct lpfc_fabric_event_header fabric_event; 9797 struct ls_rjt stat; 9798 struct lpfc_nodelist *ndlp; 9799 uint32_t *pcmd; 9800 u32 ulp_status, ulp_word4; 9801 9802 ndlp = cmdiocbp->ndlp; 9803 if (!ndlp) 9804 return; 9805 9806 ulp_status = get_job_ulpstatus(phba, rspiocbp); 9807 ulp_word4 = get_job_word4(phba, rspiocbp); 9808 9809 if (ulp_status == IOSTAT_LS_RJT) { 9810 lsrjt_event.header.event_type = FC_REG_ELS_EVENT; 9811 lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV; 9812 memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname, 9813 sizeof(struct lpfc_name)); 9814 memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename, 9815 sizeof(struct lpfc_name)); 9816 pcmd = (uint32_t *)cmdiocbp->cmd_dmabuf->virt; 9817 lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0; 9818 stat.un.ls_rjt_error_be = cpu_to_be32(ulp_word4); 9819 lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode; 9820 lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp; 9821 fc_host_post_vendor_event(shost, 9822 fc_get_event_number(), 9823 sizeof(lsrjt_event), 9824 (char *)&lsrjt_event, 9825 LPFC_NL_VENDOR_ID); 9826 return; 9827 } 9828 if (ulp_status == IOSTAT_NPORT_BSY || 9829 ulp_status == IOSTAT_FABRIC_BSY) { 9830 fabric_event.event_type = FC_REG_FABRIC_EVENT; 9831 if (ulp_status == IOSTAT_NPORT_BSY) 9832 fabric_event.subcategory = LPFC_EVENT_PORT_BUSY; 9833 else 9834 fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY; 9835 memcpy(fabric_event.wwpn, &ndlp->nlp_portname, 9836 sizeof(struct lpfc_name)); 9837 memcpy(fabric_event.wwnn, &ndlp->nlp_nodename, 9838 sizeof(struct lpfc_name)); 9839 fc_host_post_vendor_event(shost, 9840 fc_get_event_number(), 9841 sizeof(fabric_event), 9842 (char *)&fabric_event, 9843 LPFC_NL_VENDOR_ID); 9844 return; 9845 } 9846 9847 } 9848 9849 /** 9850 * lpfc_send_els_event - Posts unsolicited els event 9851 * @vport: Pointer to vport object. 9852 * @ndlp: Pointer FC node object. 9853 * @payload: ELS command code type. 9854 * 9855 * This function posts an event when there is an incoming 9856 * unsolicited ELS command. 
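 * The event (PLOGI, PRLO, ADISC or LOGO receive) is delivered to user space
 * as an FC transport vendor-unique event via fc_host_post_vendor_event()
 * with vendor id LPFC_NL_VENDOR_ID; for LOGO the remote WWPN from the LOGO
 * payload is included in the event data.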
9857 **/ 9858 static void 9859 lpfc_send_els_event(struct lpfc_vport *vport, 9860 struct lpfc_nodelist *ndlp, 9861 uint32_t *payload) 9862 { 9863 struct lpfc_els_event_header *els_data = NULL; 9864 struct lpfc_logo_event *logo_data = NULL; 9865 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 9866 9867 if (*payload == ELS_CMD_LOGO) { 9868 logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL); 9869 if (!logo_data) { 9870 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9871 "0148 Failed to allocate memory " 9872 "for LOGO event\n"); 9873 return; 9874 } 9875 els_data = &logo_data->header; 9876 } else { 9877 els_data = kmalloc(sizeof(struct lpfc_els_event_header), 9878 GFP_KERNEL); 9879 if (!els_data) { 9880 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9881 "0149 Failed to allocate memory " 9882 "for ELS event\n"); 9883 return; 9884 } 9885 } 9886 els_data->event_type = FC_REG_ELS_EVENT; 9887 switch (*payload) { 9888 case ELS_CMD_PLOGI: 9889 els_data->subcategory = LPFC_EVENT_PLOGI_RCV; 9890 break; 9891 case ELS_CMD_PRLO: 9892 els_data->subcategory = LPFC_EVENT_PRLO_RCV; 9893 break; 9894 case ELS_CMD_ADISC: 9895 els_data->subcategory = LPFC_EVENT_ADISC_RCV; 9896 break; 9897 case ELS_CMD_LOGO: 9898 els_data->subcategory = LPFC_EVENT_LOGO_RCV; 9899 /* Copy the WWPN in the LOGO payload */ 9900 memcpy(logo_data->logo_wwpn, &payload[2], 9901 sizeof(struct lpfc_name)); 9902 break; 9903 default: 9904 kfree(els_data); 9905 return; 9906 } 9907 memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name)); 9908 memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name)); 9909 if (*payload == ELS_CMD_LOGO) { 9910 fc_host_post_vendor_event(shost, 9911 fc_get_event_number(), 9912 sizeof(struct lpfc_logo_event), 9913 (char *)logo_data, 9914 LPFC_NL_VENDOR_ID); 9915 kfree(logo_data); 9916 } else { 9917 fc_host_post_vendor_event(shost, 9918 fc_get_event_number(), 9919 sizeof(struct lpfc_els_event_header), 9920 (char *)els_data, 9921 LPFC_NL_VENDOR_ID); 9922 kfree(els_data); 9923 } 9924 9925 return; 9926 } 9927 9928 9929 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_li_event_nm, fc_fpin_li_event_types, 9930 FC_FPIN_LI_EVT_TYPES_INIT); 9931 9932 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_deli_event_nm, fc_fpin_deli_event_types, 9933 FC_FPIN_DELI_EVT_TYPES_INIT); 9934 9935 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_event_nm, fc_fpin_congn_event_types, 9936 FC_FPIN_CONGN_EVT_TYPES_INIT); 9937 9938 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_severity_nm, 9939 fc_fpin_congn_severity_types, 9940 FC_FPIN_CONGN_SEVERITY_INIT); 9941 9942 9943 /** 9944 * lpfc_display_fpin_wwpn - Display WWPNs accessible by the attached port 9945 * @phba: Pointer to phba object. 9946 * @wwnlist: Pointer to list of WWPNs in FPIN payload 9947 * @cnt: count of WWPNs in FPIN payload 9948 * 9949 * This routine is called by LI and PC descriptors. 
9950 * Limit the number of WWPNs displayed to 6 log messages, 6 per log message 9951 */ 9952 static void 9953 lpfc_display_fpin_wwpn(struct lpfc_hba *phba, __be64 *wwnlist, u32 cnt) 9954 { 9955 char buf[LPFC_FPIN_WWPN_LINE_SZ]; 9956 __be64 wwn; 9957 u64 wwpn; 9958 int i, len; 9959 int line = 0; 9960 int wcnt = 0; 9961 bool endit = false; 9962 9963 len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ, "Accessible WWPNs:"); 9964 for (i = 0; i < cnt; i++) { 9965 /* Are we on the last WWPN */ 9966 if (i == (cnt - 1)) 9967 endit = true; 9968 9969 /* Extract the next WWPN from the payload */ 9970 wwn = *wwnlist++; 9971 wwpn = be64_to_cpu(wwn); 9972 len += scnprintf(buf + len, LPFC_FPIN_WWPN_LINE_SZ - len, 9973 " %016llx", wwpn); 9974 9975 /* Log a message if we are on the last WWPN 9976 * or if we hit the max allowed per message. 9977 */ 9978 wcnt++; 9979 if (wcnt == LPFC_FPIN_WWPN_LINE_CNT || endit) { 9980 buf[len] = 0; 9981 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9982 "4686 %s\n", buf); 9983 9984 /* Check if we reached the last WWPN */ 9985 if (endit) 9986 return; 9987 9988 /* Limit the number of log message displayed per FPIN */ 9989 line++; 9990 if (line == LPFC_FPIN_WWPN_NUM_LINE) { 9991 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9992 "4687 %d WWPNs Truncated\n", 9993 cnt - i - 1); 9994 return; 9995 } 9996 9997 /* Start over with next log message */ 9998 wcnt = 0; 9999 len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ, 10000 "Additional WWPNs:"); 10001 } 10002 } 10003 } 10004 10005 /** 10006 * lpfc_els_rcv_fpin_li - Process an FPIN Link Integrity Event. 10007 * @phba: Pointer to phba object. 10008 * @tlv: Pointer to the Link Integrity Notification Descriptor. 10009 * 10010 * This function processes a Link Integrity FPIN event by logging a message. 10011 **/ 10012 static void 10013 lpfc_els_rcv_fpin_li(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 10014 { 10015 struct fc_fn_li_desc *li = (struct fc_fn_li_desc *)tlv; 10016 const char *li_evt_str; 10017 u32 li_evt, cnt; 10018 10019 li_evt = be16_to_cpu(li->event_type); 10020 li_evt_str = lpfc_get_fpin_li_event_nm(li_evt); 10021 cnt = be32_to_cpu(li->pname_count); 10022 10023 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 10024 "4680 FPIN Link Integrity %s (x%x) " 10025 "Detecting PN x%016llx Attached PN x%016llx " 10026 "Duration %d mSecs Count %d Port Cnt %d\n", 10027 li_evt_str, li_evt, 10028 be64_to_cpu(li->detecting_wwpn), 10029 be64_to_cpu(li->attached_wwpn), 10030 be32_to_cpu(li->event_threshold), 10031 be32_to_cpu(li->event_count), cnt); 10032 10033 lpfc_display_fpin_wwpn(phba, (__be64 *)&li->pname_list, cnt); 10034 } 10035 10036 /** 10037 * lpfc_els_rcv_fpin_del - Process an FPIN Delivery Event. 10038 * @phba: Pointer to hba object. 10039 * @tlv: Pointer to the Delivery Notification Descriptor TLV 10040 * 10041 * This function processes a Delivery FPIN event by logging a message. 
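 * The logged message includes the delivery reason code and the first six
 * words of the discarded frame header (DiscHdr0-DiscHdr5) that follow the
 * descriptor.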
10042 **/ 10043 static void 10044 lpfc_els_rcv_fpin_del(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 10045 { 10046 struct fc_fn_deli_desc *del = (struct fc_fn_deli_desc *)tlv; 10047 const char *del_rsn_str; 10048 u32 del_rsn; 10049 __be32 *frame; 10050 10051 del_rsn = be16_to_cpu(del->deli_reason_code); 10052 del_rsn_str = lpfc_get_fpin_deli_event_nm(del_rsn); 10053 10054 /* Skip over desc_tag/desc_len header to payload */ 10055 frame = (__be32 *)(del + 1); 10056 10057 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 10058 "4681 FPIN Delivery %s (x%x) " 10059 "Detecting PN x%016llx Attached PN x%016llx " 10060 "DiscHdr0 x%08x " 10061 "DiscHdr1 x%08x DiscHdr2 x%08x DiscHdr3 x%08x " 10062 "DiscHdr4 x%08x DiscHdr5 x%08x\n", 10063 del_rsn_str, del_rsn, 10064 be64_to_cpu(del->detecting_wwpn), 10065 be64_to_cpu(del->attached_wwpn), 10066 be32_to_cpu(frame[0]), 10067 be32_to_cpu(frame[1]), 10068 be32_to_cpu(frame[2]), 10069 be32_to_cpu(frame[3]), 10070 be32_to_cpu(frame[4]), 10071 be32_to_cpu(frame[5])); 10072 } 10073 10074 /** 10075 * lpfc_els_rcv_fpin_peer_cgn - Process a FPIN Peer Congestion Event. 10076 * @phba: Pointer to hba object. 10077 * @tlv: Pointer to the Peer Congestion Notification Descriptor TLV 10078 * 10079 * This function processes a Peer Congestion FPIN event by logging a message. 10080 **/ 10081 static void 10082 lpfc_els_rcv_fpin_peer_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 10083 { 10084 struct fc_fn_peer_congn_desc *pc = (struct fc_fn_peer_congn_desc *)tlv; 10085 const char *pc_evt_str; 10086 u32 pc_evt, cnt; 10087 10088 pc_evt = be16_to_cpu(pc->event_type); 10089 pc_evt_str = lpfc_get_fpin_congn_event_nm(pc_evt); 10090 cnt = be32_to_cpu(pc->pname_count); 10091 10092 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_ELS, 10093 "4684 FPIN Peer Congestion %s (x%x) " 10094 "Duration %d mSecs " 10095 "Detecting PN x%016llx Attached PN x%016llx " 10096 "Impacted Port Cnt %d\n", 10097 pc_evt_str, pc_evt, 10098 be32_to_cpu(pc->event_period), 10099 be64_to_cpu(pc->detecting_wwpn), 10100 be64_to_cpu(pc->attached_wwpn), 10101 cnt); 10102 10103 lpfc_display_fpin_wwpn(phba, (__be64 *)&pc->pname_list, cnt); 10104 } 10105 10106 /** 10107 * lpfc_els_rcv_fpin_cgn - Process an FPIN Congestion notification 10108 * @phba: Pointer to hba object. 10109 * @tlv: Pointer to the Congestion Notification Descriptor TLV 10110 * 10111 * This function processes an FPIN Congestion Notifiction. The notification 10112 * could be an Alarm or Warning. This routine feeds that data into driver's 10113 * running congestion algorithm. It also processes the FPIN by 10114 * logging a message. It returns 1 to indicate deliver this message 10115 * to the upper layer or 0 to indicate don't deliver it. 10116 **/ 10117 static int 10118 lpfc_els_rcv_fpin_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 10119 { 10120 struct lpfc_cgn_info *cp; 10121 struct fc_fn_congn_desc *cgn = (struct fc_fn_congn_desc *)tlv; 10122 const char *cgn_evt_str; 10123 u32 cgn_evt; 10124 const char *cgn_sev_str; 10125 u32 cgn_sev; 10126 uint16_t value; 10127 u32 crc; 10128 bool nm_log = false; 10129 int rc = 1; 10130 10131 cgn_evt = be16_to_cpu(cgn->event_type); 10132 cgn_evt_str = lpfc_get_fpin_congn_event_nm(cgn_evt); 10133 cgn_sev = cgn->severity; 10134 cgn_sev_str = lpfc_get_fpin_congn_severity_nm(cgn_sev); 10135 10136 /* The driver only takes action on a Credit Stall or Oversubscription 10137 * event type to engage the IO algorithm. The driver prints an 10138 * unmaskable message only for Lost Credit and Credit Stall. 
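	 * Lost Credit is only logged; Credit Stall and Oversubscription also
	 * feed the congestion alarm/warning counters used when CMF is enabled.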
10139 * TODO: Still need to have definition of host action on clear, 10140 * lost credit and device specific event types. 10141 */ 10142 switch (cgn_evt) { 10143 case FPIN_CONGN_LOST_CREDIT: 10144 nm_log = true; 10145 break; 10146 case FPIN_CONGN_CREDIT_STALL: 10147 nm_log = true; 10148 fallthrough; 10149 case FPIN_CONGN_OVERSUBSCRIPTION: 10150 if (cgn_evt == FPIN_CONGN_OVERSUBSCRIPTION) 10151 nm_log = false; 10152 switch (cgn_sev) { 10153 case FPIN_CONGN_SEVERITY_ERROR: 10154 /* Take action here for an Alarm event */ 10155 if (phba->cmf_active_mode != LPFC_CFG_OFF) { 10156 if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) { 10157 /* Track of alarm cnt for SYNC_WQE */ 10158 atomic_inc(&phba->cgn_sync_alarm_cnt); 10159 } 10160 /* Track alarm cnt for cgn_info regardless 10161 * of whether CMF is configured for Signals 10162 * or FPINs. 10163 */ 10164 atomic_inc(&phba->cgn_fabric_alarm_cnt); 10165 goto cleanup; 10166 } 10167 break; 10168 case FPIN_CONGN_SEVERITY_WARNING: 10169 /* Take action here for a Warning event */ 10170 if (phba->cmf_active_mode != LPFC_CFG_OFF) { 10171 if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) { 10172 /* Track of warning cnt for SYNC_WQE */ 10173 atomic_inc(&phba->cgn_sync_warn_cnt); 10174 } 10175 /* Track warning cnt and freq for cgn_info 10176 * regardless of whether CMF is configured for 10177 * Signals or FPINs. 10178 */ 10179 atomic_inc(&phba->cgn_fabric_warn_cnt); 10180 cleanup: 10181 /* Save frequency in ms */ 10182 phba->cgn_fpin_frequency = 10183 be32_to_cpu(cgn->event_period); 10184 value = phba->cgn_fpin_frequency; 10185 if (phba->cgn_i) { 10186 cp = (struct lpfc_cgn_info *) 10187 phba->cgn_i->virt; 10188 cp->cgn_alarm_freq = 10189 cpu_to_le16(value); 10190 cp->cgn_warn_freq = 10191 cpu_to_le16(value); 10192 crc = lpfc_cgn_calc_crc32 10193 (cp, 10194 LPFC_CGN_INFO_SZ, 10195 LPFC_CGN_CRC32_SEED); 10196 cp->cgn_info_crc = cpu_to_le32(crc); 10197 } 10198 10199 /* Don't deliver to upper layer since 10200 * driver took action on this tlv. 10201 */ 10202 rc = 0; 10203 } 10204 break; 10205 } 10206 break; 10207 } 10208 10209 /* Change the log level to unmaskable for the following event types. */ 10210 lpfc_printf_log(phba, (nm_log ? KERN_WARNING : KERN_INFO), 10211 LOG_CGN_MGMT | LOG_ELS, 10212 "4683 FPIN CONGESTION %s type %s (x%x) Event " 10213 "Duration %d mSecs\n", 10214 cgn_sev_str, cgn_evt_str, cgn_evt, 10215 be32_to_cpu(cgn->event_period)); 10216 return rc; 10217 } 10218 10219 void 10220 lpfc_els_rcv_fpin(struct lpfc_vport *vport, void *p, u32 fpin_length) 10221 { 10222 struct lpfc_hba *phba = vport->phba; 10223 struct fc_els_fpin *fpin = (struct fc_els_fpin *)p; 10224 struct fc_tlv_desc *tlv, *first_tlv, *current_tlv; 10225 const char *dtag_nm; 10226 int desc_cnt = 0, bytes_remain, cnt; 10227 u32 dtag, deliver = 0; 10228 int len; 10229 10230 /* FPINs handled only if we are in the right discovery state */ 10231 if (vport->port_state < LPFC_DISC_AUTH) 10232 return; 10233 10234 /* make sure there is the full fpin header */ 10235 if (fpin_length < sizeof(struct fc_els_fpin)) 10236 return; 10237 10238 /* Sanity check descriptor length. The desc_len value does not 10239 * include space for the ELS command and the desc_len fields. 
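	 * For reference, the FPIN ELS payload (struct fc_els_fpin) is laid
	 * out roughly as:
	 *   byte  0     : ELS command (0x16)
	 *   bytes 1-3   : reserved
	 *   word  1     : desc_len - total length of the descriptors, in bytes
	 *   words 2...  : one or more TLV descriptors (desc_tag, desc_len, value)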
10240 */ 10241 len = be32_to_cpu(fpin->desc_len); 10242 if (fpin_length < len + sizeof(struct fc_els_fpin)) { 10243 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 10244 "4671 Bad ELS FPIN length %d: %d\n", 10245 len, fpin_length); 10246 return; 10247 } 10248 10249 tlv = (struct fc_tlv_desc *)&fpin->fpin_desc[0]; 10250 first_tlv = tlv; 10251 bytes_remain = fpin_length - offsetof(struct fc_els_fpin, fpin_desc); 10252 bytes_remain = min_t(u32, bytes_remain, be32_to_cpu(fpin->desc_len)); 10253 10254 /* process each descriptor separately */ 10255 while (bytes_remain >= FC_TLV_DESC_HDR_SZ && 10256 bytes_remain >= FC_TLV_DESC_SZ_FROM_LENGTH(tlv)) { 10257 dtag = be32_to_cpu(tlv->desc_tag); 10258 switch (dtag) { 10259 case ELS_DTAG_LNK_INTEGRITY: 10260 lpfc_els_rcv_fpin_li(phba, tlv); 10261 deliver = 1; 10262 break; 10263 case ELS_DTAG_DELIVERY: 10264 lpfc_els_rcv_fpin_del(phba, tlv); 10265 deliver = 1; 10266 break; 10267 case ELS_DTAG_PEER_CONGEST: 10268 lpfc_els_rcv_fpin_peer_cgn(phba, tlv); 10269 deliver = 1; 10270 break; 10271 case ELS_DTAG_CONGESTION: 10272 deliver = lpfc_els_rcv_fpin_cgn(phba, tlv); 10273 break; 10274 default: 10275 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 10276 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 10277 "4678 unknown FPIN descriptor[%d]: " 10278 "tag x%x (%s)\n", 10279 desc_cnt, dtag, dtag_nm); 10280 10281 /* If descriptor is bad, drop the rest of the data */ 10282 return; 10283 } 10284 lpfc_cgn_update_stat(phba, dtag); 10285 cnt = be32_to_cpu(tlv->desc_len); 10286 10287 /* Sanity check descriptor length. The desc_len value does not 10288 * include space for the desc_tag and the desc_len fields. 10289 */ 10290 len -= (cnt + sizeof(struct fc_tlv_desc)); 10291 if (len < 0) { 10292 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 10293 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 10294 "4672 Bad FPIN descriptor TLV length " 10295 "%d: %d %d %s\n", 10296 cnt, len, fpin_length, dtag_nm); 10297 return; 10298 } 10299 10300 current_tlv = tlv; 10301 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); 10302 tlv = fc_tlv_next_desc(tlv); 10303 10304 /* Format payload such that the FPIN delivered to the 10305 * upper layer is a single descriptor FPIN. 10306 */ 10307 if (desc_cnt) 10308 memcpy(first_tlv, current_tlv, 10309 (cnt + sizeof(struct fc_els_fpin))); 10310 10311 /* Adjust the length so that it only reflects a 10312 * single descriptor FPIN. 10313 */ 10314 fpin_length = cnt + sizeof(struct fc_els_fpin); 10315 fpin->desc_len = cpu_to_be32(fpin_length); 10316 fpin_length += sizeof(struct fc_els_fpin); /* the entire FPIN */ 10317 10318 /* Send every descriptor individually to the upper layer */ 10319 if (deliver) 10320 fc_host_fpin_rcv(lpfc_shost_from_vport(vport), 10321 fpin_length, (char *)fpin, 0); 10322 desc_cnt++; 10323 } 10324 } 10325 10326 /** 10327 * lpfc_els_unsol_buffer - Process an unsolicited event data buffer 10328 * @phba: pointer to lpfc hba data structure. 10329 * @pring: pointer to a SLI ring. 10330 * @vport: pointer to a host virtual N_Port data structure. 10331 * @elsiocb: pointer to lpfc els command iocb data structure. 10332 * 10333 * This routine is used for processing the IOCB associated with a unsolicited 10334 * event. It first determines whether there is an existing ndlp that matches 10335 * the DID from the unsolicited IOCB. If not, it will create a new one with 10336 * the DID from the unsolicited IOCB. 
The ELS command from the unsolicited 10337 * IOCB is then used to invoke the proper routine and to set up proper state 10338 * of the discovery state machine. 10339 **/ 10340 static void 10341 lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10342 struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb) 10343 { 10344 struct lpfc_nodelist *ndlp; 10345 struct ls_rjt stat; 10346 u32 *payload, payload_len; 10347 u32 cmd = 0, did = 0, newnode, status = 0; 10348 uint8_t rjt_exp, rjt_err = 0, init_link = 0; 10349 struct lpfc_wcqe_complete *wcqe_cmpl = NULL; 10350 LPFC_MBOXQ_t *mbox; 10351 10352 if (!vport || !elsiocb->cmd_dmabuf) 10353 goto dropit; 10354 10355 newnode = 0; 10356 wcqe_cmpl = &elsiocb->wcqe_cmpl; 10357 payload = elsiocb->cmd_dmabuf->virt; 10358 if (phba->sli_rev == LPFC_SLI_REV4) 10359 payload_len = wcqe_cmpl->total_data_placed; 10360 else 10361 payload_len = elsiocb->iocb.unsli3.rcvsli3.acc_len; 10362 status = get_job_ulpstatus(phba, elsiocb); 10363 cmd = *payload; 10364 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0) 10365 lpfc_sli3_post_buffer(phba, pring, 1); 10366 10367 did = get_job_els_rsp64_did(phba, elsiocb); 10368 if (status) { 10369 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10370 "RCV Unsol ELS: status:x%x/x%x did:x%x", 10371 status, get_job_word4(phba, elsiocb), did); 10372 goto dropit; 10373 } 10374 10375 /* Check to see if link went down during discovery */ 10376 if (lpfc_els_chk_latt(vport)) 10377 goto dropit; 10378 10379 /* Ignore traffic received during vport shutdown. */ 10380 if (vport->load_flag & FC_UNLOADING) 10381 goto dropit; 10382 10383 /* If NPort discovery is delayed drop incoming ELS */ 10384 if ((vport->fc_flag & FC_DISC_DELAYED) && 10385 (cmd != ELS_CMD_PLOGI)) 10386 goto dropit; 10387 10388 ndlp = lpfc_findnode_did(vport, did); 10389 if (!ndlp) { 10390 /* Cannot find existing Fabric ndlp, so allocate a new one */ 10391 ndlp = lpfc_nlp_init(vport, did); 10392 if (!ndlp) 10393 goto dropit; 10394 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 10395 newnode = 1; 10396 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 10397 ndlp->nlp_type |= NLP_FABRIC; 10398 } else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 10399 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 10400 newnode = 1; 10401 } 10402 10403 phba->fc_stat.elsRcvFrame++; 10404 10405 /* 10406 * Do not process any unsolicited ELS commands 10407 * if the ndlp is in DEV_LOSS 10408 */ 10409 spin_lock_irq(&ndlp->lock); 10410 if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) { 10411 spin_unlock_irq(&ndlp->lock); 10412 if (newnode) 10413 lpfc_nlp_put(ndlp); 10414 goto dropit; 10415 } 10416 spin_unlock_irq(&ndlp->lock); 10417 10418 elsiocb->ndlp = lpfc_nlp_get(ndlp); 10419 if (!elsiocb->ndlp) 10420 goto dropit; 10421 elsiocb->vport = vport; 10422 10423 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) { 10424 cmd &= ELS_CMD_MASK; 10425 } 10426 /* ELS command <elsCmd> received from NPORT <did> */ 10427 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 10428 "0112 ELS command x%x received from NPORT x%x " 10429 "refcnt %d Data: x%x x%x x%x x%x\n", 10430 cmd, did, kref_read(&ndlp->kref), vport->port_state, 10431 vport->fc_flag, vport->fc_myDID, vport->fc_prevDID); 10432 10433 /* reject till our FLOGI completes or PLOGI assigned DID via PT2PT */ 10434 if ((vport->port_state < LPFC_FABRIC_CFG_LINK) && 10435 (cmd != ELS_CMD_FLOGI) && 10436 !((cmd == ELS_CMD_PLOGI) && (vport->fc_flag & FC_PT2PT))) { 10437 rjt_err = LSRJT_LOGICAL_BSY; 10438 rjt_exp = LSEXP_NOTHING_MORE; 10439 goto lsrjt; 10440 
}
10441 
10442 switch (cmd) {
10443 case ELS_CMD_PLOGI:
10444 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10445 "RCV PLOGI: did:x%x/ste:x%x flg:x%x",
10446 did, vport->port_state, ndlp->nlp_flag);
10447 
10448 phba->fc_stat.elsRcvPLOGI++;
10449 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
10450 if (phba->sli_rev == LPFC_SLI_REV4 &&
10451 (phba->pport->fc_flag & FC_PT2PT)) {
10452 vport->fc_prevDID = vport->fc_myDID;
10453 /* Our DID needs to be updated before registering
10454 * the vfi. This is done in lpfc_rcv_plogi but
10455 * that is called after the reg_vfi.
10456 */
10457 vport->fc_myDID =
10458 bf_get(els_rsp64_sid,
10459 &elsiocb->wqe.xmit_els_rsp);
10460 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
10461 "3312 Remote port assigned DID x%x "
10462 "%x\n", vport->fc_myDID,
10463 vport->fc_prevDID);
10464 }
10465 
10466 lpfc_send_els_event(vport, ndlp, payload);
10467 
10468 /* If NPort discovery is delayed, reject PLOGIs */
10469 if (vport->fc_flag & FC_DISC_DELAYED) {
10470 rjt_err = LSRJT_UNABLE_TPC;
10471 rjt_exp = LSEXP_NOTHING_MORE;
10472 break;
10473 }
10474 
10475 if (vport->port_state < LPFC_DISC_AUTH) {
10476 if (!(phba->pport->fc_flag & FC_PT2PT) ||
10477 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
10478 rjt_err = LSRJT_UNABLE_TPC;
10479 rjt_exp = LSEXP_NOTHING_MORE;
10480 break;
10481 }
10482 }
10483 
10484 spin_lock_irq(&ndlp->lock);
10485 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
10486 spin_unlock_irq(&ndlp->lock);
10487 
10488 lpfc_disc_state_machine(vport, ndlp, elsiocb,
10489 NLP_EVT_RCV_PLOGI);
10490 
10491 break;
10492 case ELS_CMD_FLOGI:
10493 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10494 "RCV FLOGI: did:x%x/ste:x%x flg:x%x",
10495 did, vport->port_state, ndlp->nlp_flag);
10496 
10497 phba->fc_stat.elsRcvFLOGI++;
10498 
10499 /* If the driver believes fabric discovery is done and is ready,
10500 * bounce the link. There is some discrepancy.
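* The FLOGI is rejected with LSRJT_LOGICAL_BSY and init_link is set, so
* once the LS_RJT has been queued, the init_link block at the end of
* this routine brings the link down and re-initializes it via
* lpfc_linkdown() and lpfc_init_link().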
10501 */ 10502 if (vport->port_state >= LPFC_LOCAL_CFG_LINK && 10503 vport->fc_flag & FC_PT2PT && 10504 vport->rcv_flogi_cnt >= 1) { 10505 rjt_err = LSRJT_LOGICAL_BSY; 10506 rjt_exp = LSEXP_NOTHING_MORE; 10507 init_link++; 10508 goto lsrjt; 10509 } 10510 10511 lpfc_els_rcv_flogi(vport, elsiocb, ndlp); 10512 /* retain node if our response is deferred */ 10513 if (phba->defer_flogi_acc_flag) 10514 break; 10515 if (newnode) 10516 lpfc_disc_state_machine(vport, ndlp, NULL, 10517 NLP_EVT_DEVICE_RM); 10518 break; 10519 case ELS_CMD_LOGO: 10520 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10521 "RCV LOGO: did:x%x/ste:x%x flg:x%x", 10522 did, vport->port_state, ndlp->nlp_flag); 10523 10524 phba->fc_stat.elsRcvLOGO++; 10525 lpfc_send_els_event(vport, ndlp, payload); 10526 if (vport->port_state < LPFC_DISC_AUTH) { 10527 rjt_err = LSRJT_UNABLE_TPC; 10528 rjt_exp = LSEXP_NOTHING_MORE; 10529 break; 10530 } 10531 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO); 10532 if (newnode) 10533 lpfc_disc_state_machine(vport, ndlp, NULL, 10534 NLP_EVT_DEVICE_RM); 10535 break; 10536 case ELS_CMD_PRLO: 10537 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10538 "RCV PRLO: did:x%x/ste:x%x flg:x%x", 10539 did, vport->port_state, ndlp->nlp_flag); 10540 10541 phba->fc_stat.elsRcvPRLO++; 10542 lpfc_send_els_event(vport, ndlp, payload); 10543 if (vport->port_state < LPFC_DISC_AUTH) { 10544 rjt_err = LSRJT_UNABLE_TPC; 10545 rjt_exp = LSEXP_NOTHING_MORE; 10546 break; 10547 } 10548 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO); 10549 break; 10550 case ELS_CMD_LCB: 10551 phba->fc_stat.elsRcvLCB++; 10552 lpfc_els_rcv_lcb(vport, elsiocb, ndlp); 10553 break; 10554 case ELS_CMD_RDP: 10555 phba->fc_stat.elsRcvRDP++; 10556 lpfc_els_rcv_rdp(vport, elsiocb, ndlp); 10557 break; 10558 case ELS_CMD_RSCN: 10559 phba->fc_stat.elsRcvRSCN++; 10560 lpfc_els_rcv_rscn(vport, elsiocb, ndlp); 10561 if (newnode) 10562 lpfc_disc_state_machine(vport, ndlp, NULL, 10563 NLP_EVT_DEVICE_RM); 10564 break; 10565 case ELS_CMD_ADISC: 10566 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10567 "RCV ADISC: did:x%x/ste:x%x flg:x%x", 10568 did, vport->port_state, ndlp->nlp_flag); 10569 10570 lpfc_send_els_event(vport, ndlp, payload); 10571 phba->fc_stat.elsRcvADISC++; 10572 if (vport->port_state < LPFC_DISC_AUTH) { 10573 rjt_err = LSRJT_UNABLE_TPC; 10574 rjt_exp = LSEXP_NOTHING_MORE; 10575 break; 10576 } 10577 lpfc_disc_state_machine(vport, ndlp, elsiocb, 10578 NLP_EVT_RCV_ADISC); 10579 break; 10580 case ELS_CMD_PDISC: 10581 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10582 "RCV PDISC: did:x%x/ste:x%x flg:x%x", 10583 did, vport->port_state, ndlp->nlp_flag); 10584 10585 phba->fc_stat.elsRcvPDISC++; 10586 if (vport->port_state < LPFC_DISC_AUTH) { 10587 rjt_err = LSRJT_UNABLE_TPC; 10588 rjt_exp = LSEXP_NOTHING_MORE; 10589 break; 10590 } 10591 lpfc_disc_state_machine(vport, ndlp, elsiocb, 10592 NLP_EVT_RCV_PDISC); 10593 break; 10594 case ELS_CMD_FARPR: 10595 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10596 "RCV FARPR: did:x%x/ste:x%x flg:x%x", 10597 did, vport->port_state, ndlp->nlp_flag); 10598 10599 phba->fc_stat.elsRcvFARPR++; 10600 lpfc_els_rcv_farpr(vport, elsiocb, ndlp); 10601 break; 10602 case ELS_CMD_FARP: 10603 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10604 "RCV FARP: did:x%x/ste:x%x flg:x%x", 10605 did, vport->port_state, ndlp->nlp_flag); 10606 10607 phba->fc_stat.elsRcvFARP++; 10608 lpfc_els_rcv_farp(vport, elsiocb, ndlp); 10609 break; 10610 case ELS_CMD_FAN: 10611 
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10612 "RCV FAN: did:x%x/ste:x%x flg:x%x", 10613 did, vport->port_state, ndlp->nlp_flag); 10614 10615 phba->fc_stat.elsRcvFAN++; 10616 lpfc_els_rcv_fan(vport, elsiocb, ndlp); 10617 break; 10618 case ELS_CMD_PRLI: 10619 case ELS_CMD_NVMEPRLI: 10620 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10621 "RCV PRLI: did:x%x/ste:x%x flg:x%x", 10622 did, vport->port_state, ndlp->nlp_flag); 10623 10624 phba->fc_stat.elsRcvPRLI++; 10625 if ((vport->port_state < LPFC_DISC_AUTH) && 10626 (vport->fc_flag & FC_FABRIC)) { 10627 rjt_err = LSRJT_UNABLE_TPC; 10628 rjt_exp = LSEXP_NOTHING_MORE; 10629 break; 10630 } 10631 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI); 10632 break; 10633 case ELS_CMD_LIRR: 10634 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10635 "RCV LIRR: did:x%x/ste:x%x flg:x%x", 10636 did, vport->port_state, ndlp->nlp_flag); 10637 10638 phba->fc_stat.elsRcvLIRR++; 10639 lpfc_els_rcv_lirr(vport, elsiocb, ndlp); 10640 if (newnode) 10641 lpfc_disc_state_machine(vport, ndlp, NULL, 10642 NLP_EVT_DEVICE_RM); 10643 break; 10644 case ELS_CMD_RLS: 10645 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10646 "RCV RLS: did:x%x/ste:x%x flg:x%x", 10647 did, vport->port_state, ndlp->nlp_flag); 10648 10649 phba->fc_stat.elsRcvRLS++; 10650 lpfc_els_rcv_rls(vport, elsiocb, ndlp); 10651 if (newnode) 10652 lpfc_disc_state_machine(vport, ndlp, NULL, 10653 NLP_EVT_DEVICE_RM); 10654 break; 10655 case ELS_CMD_RPL: 10656 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10657 "RCV RPL: did:x%x/ste:x%x flg:x%x", 10658 did, vport->port_state, ndlp->nlp_flag); 10659 10660 phba->fc_stat.elsRcvRPL++; 10661 lpfc_els_rcv_rpl(vport, elsiocb, ndlp); 10662 if (newnode) 10663 lpfc_disc_state_machine(vport, ndlp, NULL, 10664 NLP_EVT_DEVICE_RM); 10665 break; 10666 case ELS_CMD_RNID: 10667 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10668 "RCV RNID: did:x%x/ste:x%x flg:x%x", 10669 did, vport->port_state, ndlp->nlp_flag); 10670 10671 phba->fc_stat.elsRcvRNID++; 10672 lpfc_els_rcv_rnid(vport, elsiocb, ndlp); 10673 if (newnode) 10674 lpfc_disc_state_machine(vport, ndlp, NULL, 10675 NLP_EVT_DEVICE_RM); 10676 break; 10677 case ELS_CMD_RTV: 10678 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10679 "RCV RTV: did:x%x/ste:x%x flg:x%x", 10680 did, vport->port_state, ndlp->nlp_flag); 10681 phba->fc_stat.elsRcvRTV++; 10682 lpfc_els_rcv_rtv(vport, elsiocb, ndlp); 10683 if (newnode) 10684 lpfc_disc_state_machine(vport, ndlp, NULL, 10685 NLP_EVT_DEVICE_RM); 10686 break; 10687 case ELS_CMD_RRQ: 10688 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10689 "RCV RRQ: did:x%x/ste:x%x flg:x%x", 10690 did, vport->port_state, ndlp->nlp_flag); 10691 10692 phba->fc_stat.elsRcvRRQ++; 10693 lpfc_els_rcv_rrq(vport, elsiocb, ndlp); 10694 if (newnode) 10695 lpfc_disc_state_machine(vport, ndlp, NULL, 10696 NLP_EVT_DEVICE_RM); 10697 break; 10698 case ELS_CMD_ECHO: 10699 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10700 "RCV ECHO: did:x%x/ste:x%x flg:x%x", 10701 did, vport->port_state, ndlp->nlp_flag); 10702 10703 phba->fc_stat.elsRcvECHO++; 10704 lpfc_els_rcv_echo(vport, elsiocb, ndlp); 10705 if (newnode) 10706 lpfc_disc_state_machine(vport, ndlp, NULL, 10707 NLP_EVT_DEVICE_RM); 10708 break; 10709 case ELS_CMD_REC: 10710 /* receive this due to exchange closed */ 10711 rjt_err = LSRJT_UNABLE_TPC; 10712 rjt_exp = LSEXP_INVALID_OX_RX; 10713 break; 10714 case ELS_CMD_FPIN: 10715 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 
10716 "RCV FPIN: did:x%x/ste:x%x flg:x%x", 10717 did, vport->port_state, ndlp->nlp_flag); 10718 10719 lpfc_els_rcv_fpin(vport, (struct fc_els_fpin *)payload, 10720 payload_len); 10721 10722 /* There are no replies, so no rjt codes */ 10723 break; 10724 case ELS_CMD_EDC: 10725 lpfc_els_rcv_edc(vport, elsiocb, ndlp); 10726 break; 10727 case ELS_CMD_RDF: 10728 phba->fc_stat.elsRcvRDF++; 10729 /* Accept RDF only from fabric controller */ 10730 if (did != Fabric_Cntl_DID) { 10731 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 10732 "1115 Received RDF from invalid DID " 10733 "x%x\n", did); 10734 rjt_err = LSRJT_PROTOCOL_ERR; 10735 rjt_exp = LSEXP_NOTHING_MORE; 10736 goto lsrjt; 10737 } 10738 10739 lpfc_els_rcv_rdf(vport, elsiocb, ndlp); 10740 break; 10741 default: 10742 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10743 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x", 10744 cmd, did, vport->port_state); 10745 10746 /* Unsupported ELS command, reject */ 10747 rjt_err = LSRJT_CMD_UNSUPPORTED; 10748 rjt_exp = LSEXP_NOTHING_MORE; 10749 10750 /* Unknown ELS command <elsCmd> received from NPORT <did> */ 10751 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10752 "0115 Unknown ELS command x%x " 10753 "received from NPORT x%x\n", cmd, did); 10754 if (newnode) 10755 lpfc_disc_state_machine(vport, ndlp, NULL, 10756 NLP_EVT_DEVICE_RM); 10757 break; 10758 } 10759 10760 lsrjt: 10761 /* check if need to LS_RJT received ELS cmd */ 10762 if (rjt_err) { 10763 memset(&stat, 0, sizeof(stat)); 10764 stat.un.b.lsRjtRsnCode = rjt_err; 10765 stat.un.b.lsRjtRsnCodeExp = rjt_exp; 10766 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp, 10767 NULL); 10768 /* Remove the reference from above for new nodes. */ 10769 if (newnode) 10770 lpfc_disc_state_machine(vport, ndlp, NULL, 10771 NLP_EVT_DEVICE_RM); 10772 } 10773 10774 /* Release the reference on this elsiocb, not the ndlp. */ 10775 lpfc_nlp_put(elsiocb->ndlp); 10776 elsiocb->ndlp = NULL; 10777 10778 /* Special case. Driver received an unsolicited command that 10779 * unsupportable given the driver's current state. Reset the 10780 * link and start over. 10781 */ 10782 if (init_link) { 10783 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10784 if (!mbox) 10785 return; 10786 lpfc_linkdown(phba); 10787 lpfc_init_link(phba, mbox, 10788 phba->cfg_topology, 10789 phba->cfg_link_speed); 10790 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; 10791 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 10792 mbox->vport = vport; 10793 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == 10794 MBX_NOT_FINISHED) 10795 mempool_free(mbox, phba->mbox_mem_pool); 10796 } 10797 10798 return; 10799 10800 dropit: 10801 if (vport && !(vport->load_flag & FC_UNLOADING)) 10802 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10803 "0111 Dropping received ELS cmd " 10804 "Data: x%x x%x x%x x%x\n", 10805 cmd, status, get_job_word4(phba, elsiocb), did); 10806 10807 phba->fc_stat.elsRcvDrop++; 10808 } 10809 10810 /** 10811 * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring 10812 * @phba: pointer to lpfc hba data structure. 10813 * @pring: pointer to a SLI ring. 10814 * @elsiocb: pointer to lpfc els iocb data structure. 10815 * 10816 * This routine is used to process an unsolicited event received from a SLI 10817 * (Service Level Interface) ring. 
The actual processing of the data buffer 10818 * associated with the unsolicited event is done by invoking the routine 10819 * lpfc_els_unsol_buffer() after properly set up the iocb buffer from the 10820 * SLI ring on which the unsolicited event was received. 10821 **/ 10822 void 10823 lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10824 struct lpfc_iocbq *elsiocb) 10825 { 10826 struct lpfc_vport *vport = elsiocb->vport; 10827 u32 ulp_command, status, parameter, bde_count = 0; 10828 IOCB_t *icmd; 10829 struct lpfc_wcqe_complete *wcqe_cmpl = NULL; 10830 struct lpfc_dmabuf *bdeBuf1 = elsiocb->cmd_dmabuf; 10831 struct lpfc_dmabuf *bdeBuf2 = elsiocb->bpl_dmabuf; 10832 dma_addr_t paddr; 10833 10834 elsiocb->cmd_dmabuf = NULL; 10835 elsiocb->rsp_dmabuf = NULL; 10836 elsiocb->bpl_dmabuf = NULL; 10837 10838 wcqe_cmpl = &elsiocb->wcqe_cmpl; 10839 ulp_command = get_job_cmnd(phba, elsiocb); 10840 status = get_job_ulpstatus(phba, elsiocb); 10841 parameter = get_job_word4(phba, elsiocb); 10842 if (phba->sli_rev == LPFC_SLI_REV4) 10843 bde_count = wcqe_cmpl->word3; 10844 else 10845 bde_count = elsiocb->iocb.ulpBdeCount; 10846 10847 if (status == IOSTAT_NEED_BUFFER) { 10848 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); 10849 } else if (status == IOSTAT_LOCAL_REJECT && 10850 (parameter & IOERR_PARAM_MASK) == 10851 IOERR_RCV_BUFFER_WAITING) { 10852 phba->fc_stat.NoRcvBuf++; 10853 /* Not enough posted buffers; Try posting more buffers */ 10854 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) 10855 lpfc_sli3_post_buffer(phba, pring, 0); 10856 return; 10857 } 10858 10859 if (phba->sli_rev == LPFC_SLI_REV3) { 10860 icmd = &elsiocb->iocb; 10861 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 10862 (ulp_command == CMD_IOCB_RCV_ELS64_CX || 10863 ulp_command == CMD_IOCB_RCV_SEQ64_CX)) { 10864 if (icmd->unsli3.rcvsli3.vpi == 0xffff) 10865 vport = phba->pport; 10866 else 10867 vport = lpfc_find_vport_by_vpid(phba, 10868 icmd->unsli3.rcvsli3.vpi); 10869 } 10870 } 10871 10872 /* If there are no BDEs associated 10873 * with this IOCB, there is nothing to do. 10874 */ 10875 if (bde_count == 0) 10876 return; 10877 10878 /* Account for SLI2 or SLI3 and later unsolicited buffering */ 10879 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 10880 elsiocb->cmd_dmabuf = bdeBuf1; 10881 if (bde_count == 2) 10882 elsiocb->bpl_dmabuf = bdeBuf2; 10883 } else { 10884 icmd = &elsiocb->iocb; 10885 paddr = getPaddr(icmd->un.cont64[0].addrHigh, 10886 icmd->un.cont64[0].addrLow); 10887 elsiocb->cmd_dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, 10888 paddr); 10889 if (bde_count == 2) { 10890 paddr = getPaddr(icmd->un.cont64[1].addrHigh, 10891 icmd->un.cont64[1].addrLow); 10892 elsiocb->bpl_dmabuf = lpfc_sli_ringpostbuf_get(phba, 10893 pring, 10894 paddr); 10895 } 10896 } 10897 10898 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb); 10899 /* 10900 * The different unsolicited event handlers would tell us 10901 * if they are done with "mp" by setting cmd_dmabuf to NULL. 10902 */ 10903 if (elsiocb->cmd_dmabuf) { 10904 lpfc_in_buf_free(phba, elsiocb->cmd_dmabuf); 10905 elsiocb->cmd_dmabuf = NULL; 10906 } 10907 10908 if (elsiocb->bpl_dmabuf) { 10909 lpfc_in_buf_free(phba, elsiocb->bpl_dmabuf); 10910 elsiocb->bpl_dmabuf = NULL; 10911 } 10912 10913 } 10914 10915 static void 10916 lpfc_start_fdmi(struct lpfc_vport *vport) 10917 { 10918 struct lpfc_nodelist *ndlp; 10919 10920 /* If this is the first time, allocate an ndlp and initialize 10921 * it. 
Otherwise, make sure the node is enabled and then do the
10922 * login.
10923 */
10924 ndlp = lpfc_findnode_did(vport, FDMI_DID);
10925 if (!ndlp) {
10926 ndlp = lpfc_nlp_init(vport, FDMI_DID);
10927 if (ndlp) {
10928 ndlp->nlp_type |= NLP_FABRIC;
10929 } else {
10930 return;
10931 }
10932 }
10933 
10934 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
10935 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
10936 }
10937 
10938 /**
10939 * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr
10940 * @phba: pointer to lpfc hba data structure.
10941 * @vport: pointer to a virtual N_Port data structure.
10942 *
10943 * This routine issues a Port Login (PLOGI) to the Name Server with
10944 * State Change Request (SCR) for a @vport. This routine will create an
10945 * ndlp for the Name Server associated with the @vport if such a node does
10946 * not already exist. The PLOGI to the Name Server is issued by invoking the
10947 * lpfc_issue_els_plogi() routine. If the Fabric-Device Management Interface
10948 * (FDMI) is configured for the @vport, an FDMI node will be created and
10949 * the PLOGI to FDMI is issued by invoking the lpfc_issue_els_plogi() routine.
10950 **/
10951 void
10952 lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
10953 {
10954 struct lpfc_nodelist *ndlp;
10955 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
10956 
10957 /*
10958 * If the lpfc_delay_discovery parameter is set, the clean address
10959 * bit is cleared, and the FC fabric parameters have changed, delay
10960 * FC NPort discovery.
10961 */
10962 spin_lock_irq(shost->host_lock);
10963 if (vport->fc_flag & FC_DISC_DELAYED) {
10964 spin_unlock_irq(shost->host_lock);
10965 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
10966 "3334 Delay fc port discovery for %d secs\n",
10967 phba->fc_ratov);
10968 mod_timer(&vport->delayed_disc_tmo,
10969 jiffies + msecs_to_jiffies(1000 * phba->fc_ratov));
10970 return;
10971 }
10972 spin_unlock_irq(shost->host_lock);
10973 
10974 ndlp = lpfc_findnode_did(vport, NameServer_DID);
10975 if (!ndlp) {
10976 ndlp = lpfc_nlp_init(vport, NameServer_DID);
10977 if (!ndlp) {
10978 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
10979 lpfc_disc_start(vport);
10980 return;
10981 }
10982 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
10983 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
10984 "0251 NameServer login: no memory\n");
10985 return;
10986 }
10987 }
10988 
10989 ndlp->nlp_type |= NLP_FABRIC;
10990 
10991 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
10992 
10993 if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) {
10994 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
10995 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
10996 "0252 Cannot issue NameServer login\n");
10997 return;
10998 }
10999 
11000 if ((phba->cfg_enable_SmartSAN ||
11001 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) &&
11002 (vport->load_flag & FC_ALLOW_FDMI))
11003 lpfc_start_fdmi(vport);
11004 }
11005 
11006 /**
11007 * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport
11008 * @phba: pointer to lpfc hba data structure.
11009 * @pmb: pointer to the driver internal queue element for mailbox command.
11010 *
11011 * This routine is the completion callback function to register new vport
11012 * mailbox command.
If the new vport mailbox command completes successfully, 11013 * the fabric registration login shall be performed on physical port (the 11014 * new vport created is actually a physical port, with VPI 0) or the port 11015 * login to Name Server for State Change Request (SCR) will be performed 11016 * on virtual port (real virtual port, with VPI greater than 0). 11017 **/ 11018 static void 11019 lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 11020 { 11021 struct lpfc_vport *vport = pmb->vport; 11022 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 11023 struct lpfc_nodelist *ndlp = pmb->ctx_ndlp; 11024 MAILBOX_t *mb = &pmb->u.mb; 11025 int rc; 11026 11027 spin_lock_irq(shost->host_lock); 11028 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 11029 spin_unlock_irq(shost->host_lock); 11030 11031 if (mb->mbxStatus) { 11032 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 11033 "0915 Register VPI failed : Status: x%x" 11034 " upd bit: x%x \n", mb->mbxStatus, 11035 mb->un.varRegVpi.upd); 11036 if (phba->sli_rev == LPFC_SLI_REV4 && 11037 mb->un.varRegVpi.upd) 11038 goto mbox_err_exit ; 11039 11040 switch (mb->mbxStatus) { 11041 case 0x11: /* unsupported feature */ 11042 case 0x9603: /* max_vpi exceeded */ 11043 case 0x9602: /* Link event since CLEAR_LA */ 11044 /* giving up on vport registration */ 11045 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 11046 spin_lock_irq(shost->host_lock); 11047 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 11048 spin_unlock_irq(shost->host_lock); 11049 lpfc_can_disctmo(vport); 11050 break; 11051 /* If reg_vpi fail with invalid VPI status, re-init VPI */ 11052 case 0x20: 11053 spin_lock_irq(shost->host_lock); 11054 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 11055 spin_unlock_irq(shost->host_lock); 11056 lpfc_init_vpi(phba, pmb, vport->vpi); 11057 pmb->vport = vport; 11058 pmb->mbox_cmpl = lpfc_init_vpi_cmpl; 11059 rc = lpfc_sli_issue_mbox(phba, pmb, 11060 MBX_NOWAIT); 11061 if (rc == MBX_NOT_FINISHED) { 11062 lpfc_printf_vlog(vport, KERN_ERR, 11063 LOG_TRACE_EVENT, 11064 "2732 Failed to issue INIT_VPI" 11065 " mailbox command\n"); 11066 } else { 11067 lpfc_nlp_put(ndlp); 11068 return; 11069 } 11070 fallthrough; 11071 default: 11072 /* Try to recover from this error */ 11073 if (phba->sli_rev == LPFC_SLI_REV4) 11074 lpfc_sli4_unreg_all_rpis(vport); 11075 lpfc_mbx_unreg_vpi(vport); 11076 spin_lock_irq(shost->host_lock); 11077 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 11078 spin_unlock_irq(shost->host_lock); 11079 if (mb->mbxStatus == MBX_NOT_FINISHED) 11080 break; 11081 if ((vport->port_type == LPFC_PHYSICAL_PORT) && 11082 !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) { 11083 if (phba->sli_rev == LPFC_SLI_REV4) 11084 lpfc_issue_init_vfi(vport); 11085 else 11086 lpfc_initial_flogi(vport); 11087 } else { 11088 lpfc_initial_fdisc(vport); 11089 } 11090 break; 11091 } 11092 } else { 11093 spin_lock_irq(shost->host_lock); 11094 vport->vpi_state |= LPFC_VPI_REGISTERED; 11095 spin_unlock_irq(shost->host_lock); 11096 if (vport == phba->pport) { 11097 if (phba->sli_rev < LPFC_SLI_REV4) 11098 lpfc_issue_fabric_reglogin(vport); 11099 else { 11100 /* 11101 * If the physical port is instantiated using 11102 * FDISC, do not start vport discovery. 
11103 */ 11104 if (vport->port_state != LPFC_FDISC) 11105 lpfc_start_fdiscs(phba); 11106 lpfc_do_scr_ns_plogi(phba, vport); 11107 } 11108 } else { 11109 lpfc_do_scr_ns_plogi(phba, vport); 11110 } 11111 } 11112 mbox_err_exit: 11113 /* Now, we decrement the ndlp reference count held for this 11114 * callback function 11115 */ 11116 lpfc_nlp_put(ndlp); 11117 11118 mempool_free(pmb, phba->mbox_mem_pool); 11119 11120 /* reinitialize the VMID datastructure before returning. 11121 * this is specifically for vport 11122 */ 11123 if (lpfc_is_vmid_enabled(phba)) 11124 lpfc_reinit_vmid(vport); 11125 vport->vmid_flag = vport->phba->pport->vmid_flag; 11126 11127 return; 11128 } 11129 11130 /** 11131 * lpfc_register_new_vport - Register a new vport with a HBA 11132 * @phba: pointer to lpfc hba data structure. 11133 * @vport: pointer to a host virtual N_Port data structure. 11134 * @ndlp: pointer to a node-list data structure. 11135 * 11136 * This routine registers the @vport as a new virtual port with a HBA. 11137 * It is done through a registering vpi mailbox command. 11138 **/ 11139 void 11140 lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport, 11141 struct lpfc_nodelist *ndlp) 11142 { 11143 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 11144 LPFC_MBOXQ_t *mbox; 11145 11146 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 11147 if (mbox) { 11148 lpfc_reg_vpi(vport, mbox); 11149 mbox->vport = vport; 11150 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 11151 if (!mbox->ctx_ndlp) { 11152 mempool_free(mbox, phba->mbox_mem_pool); 11153 goto mbox_err_exit; 11154 } 11155 11156 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport; 11157 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 11158 == MBX_NOT_FINISHED) { 11159 /* mailbox command not success, decrement ndlp 11160 * reference count for this command 11161 */ 11162 lpfc_nlp_put(ndlp); 11163 mempool_free(mbox, phba->mbox_mem_pool); 11164 11165 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 11166 "0253 Register VPI: Can't send mbox\n"); 11167 goto mbox_err_exit; 11168 } 11169 } else { 11170 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 11171 "0254 Register VPI: no memory\n"); 11172 goto mbox_err_exit; 11173 } 11174 return; 11175 11176 mbox_err_exit: 11177 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 11178 spin_lock_irq(shost->host_lock); 11179 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 11180 spin_unlock_irq(shost->host_lock); 11181 return; 11182 } 11183 11184 /** 11185 * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer 11186 * @phba: pointer to lpfc hba data structure. 11187 * 11188 * This routine cancels the retry delay timers to all the vports. 11189 **/ 11190 void 11191 lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba) 11192 { 11193 struct lpfc_vport **vports; 11194 struct lpfc_nodelist *ndlp; 11195 uint32_t link_state; 11196 int i; 11197 11198 /* Treat this failure as linkdown for all vports */ 11199 link_state = phba->link_state; 11200 lpfc_linkdown(phba); 11201 phba->link_state = link_state; 11202 11203 vports = lpfc_create_vport_work_array(phba); 11204 11205 if (vports) { 11206 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 11207 ndlp = lpfc_findnode_did(vports[i], Fabric_DID); 11208 if (ndlp) 11209 lpfc_cancel_retry_delay_tmo(vports[i], ndlp); 11210 lpfc_els_flush_cmd(vports[i]); 11211 } 11212 lpfc_destroy_vport_work_array(phba, vports); 11213 } 11214 } 11215 11216 /** 11217 * lpfc_retry_pport_discovery - Start timer to retry FLOGI. 
11218 * @phba: pointer to lpfc hba data structure.
11219 *
11220 * This routine aborts all pending discovery commands and
11221 * starts a timer to retry FLOGI for the physical port
11222 * discovery.
11223 **/
11224 void
11225 lpfc_retry_pport_discovery(struct lpfc_hba *phba)
11226 {
11227 struct lpfc_nodelist *ndlp;
11228 
11229 /* Cancel the retry delay timers for all vports */
11230 lpfc_cancel_all_vport_retry_delay_timer(phba);
11231 
11232 /* If the fabric requires FLOGI, then re-instantiate the physical login */
11233 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
11234 if (!ndlp)
11235 return;
11236 
11237 mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
11238 spin_lock_irq(&ndlp->lock);
11239 ndlp->nlp_flag |= NLP_DELAY_TMO;
11240 spin_unlock_irq(&ndlp->lock);
11241 ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
11242 phba->pport->port_state = LPFC_FLOGI;
11243 return;
11244 }
11245 
11246 /**
11247 * lpfc_fabric_login_reqd - Check if FLOGI required.
11248 * @phba: pointer to lpfc hba data structure.
11249 * @cmdiocb: pointer to FDISC command iocb.
11250 * @rspiocb: pointer to FDISC response iocb.
11251 *
11252 * This routine checks if a FLOGI is required for FDISC
11253 * to succeed.
11254 **/
11255 static int
11256 lpfc_fabric_login_reqd(struct lpfc_hba *phba,
11257 struct lpfc_iocbq *cmdiocb,
11258 struct lpfc_iocbq *rspiocb)
11259 {
11260 u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
11261 u32 ulp_word4 = get_job_word4(phba, rspiocb);
11262 
11263 if (ulp_status != IOSTAT_FABRIC_RJT ||
11264 ulp_word4 != RJT_LOGIN_REQUIRED)
11265 return 0;
11266 else
11267 return 1;
11268 }
11269 
11270 /**
11271 * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command
11272 * @phba: pointer to lpfc hba data structure.
11273 * @cmdiocb: pointer to lpfc command iocb data structure.
11274 * @rspiocb: pointer to lpfc response iocb data structure.
11275 *
11276 * This routine is the completion callback function for a Fabric Discover
11277 * (FDISC) ELS command. Since all the FDISC ELS commands are issued
11278 * single threaded, each FDISC completion callback function will reset
11279 * the discovery timer for all vports so that the timers will not
11280 * time out unnecessarily. The function checks the FDISC IOCB status. If an error
11281 * is detected, the vport will be set to the FC_VPORT_FAILED state. Otherwise, the
11282 * vport will be set to the FC_VPORT_ACTIVE state. It then checks whether the DID
11283 * assigned to the vport has been changed with the completion of the FDISC
11284 * command. If so, both RPI (Remote Port Index) and VPI (Virtual Port Index)
11285 * are unregistered from the HBA, and then the lpfc_register_new_vport()
11286 * routine is invoked to register the new vport with the HBA. Otherwise, the
11287 * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name
11288 * Server for State Change Request (SCR).
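* On success the routine also resolves the vport registration state: if
* FC_VPORT_NEEDS_INIT_VPI is set an INIT_VPI is issued first, if
* FC_VPORT_NEEDS_REG_VPI is set the VPI is re-registered through
* lpfc_register_new_vport(), and otherwise the Name Server PLOGI is
* issued directly.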
11289 **/ 11290 static void 11291 lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11292 struct lpfc_iocbq *rspiocb) 11293 { 11294 struct lpfc_vport *vport = cmdiocb->vport; 11295 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 11296 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 11297 struct lpfc_nodelist *np; 11298 struct lpfc_nodelist *next_np; 11299 struct lpfc_iocbq *piocb; 11300 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf, *prsp; 11301 struct serv_parm *sp; 11302 uint8_t fabric_param_changed; 11303 u32 ulp_status, ulp_word4; 11304 11305 ulp_status = get_job_ulpstatus(phba, rspiocb); 11306 ulp_word4 = get_job_word4(phba, rspiocb); 11307 11308 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 11309 "0123 FDISC completes. x%x/x%x prevDID: x%x\n", 11310 ulp_status, ulp_word4, 11311 vport->fc_prevDID); 11312 /* Since all FDISCs are being single threaded, we 11313 * must reset the discovery timer for ALL vports 11314 * waiting to send FDISC when one completes. 11315 */ 11316 list_for_each_entry(piocb, &phba->fabric_iocb_list, list) { 11317 lpfc_set_disctmo(piocb->vport); 11318 } 11319 11320 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 11321 "FDISC cmpl: status:x%x/x%x prevdid:x%x", 11322 ulp_status, ulp_word4, vport->fc_prevDID); 11323 11324 if (ulp_status) { 11325 11326 if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) { 11327 lpfc_retry_pport_discovery(phba); 11328 goto out; 11329 } 11330 11331 /* Check for retry */ 11332 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) 11333 goto out; 11334 /* FDISC failed */ 11335 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 11336 "0126 FDISC failed. (x%x/x%x)\n", 11337 ulp_status, ulp_word4); 11338 goto fdisc_failed; 11339 } 11340 11341 lpfc_check_nlp_post_devloss(vport, ndlp); 11342 11343 spin_lock_irq(shost->host_lock); 11344 vport->fc_flag &= ~FC_VPORT_CVL_RCVD; 11345 vport->fc_flag &= ~FC_VPORT_LOGO_RCVD; 11346 vport->fc_flag |= FC_FABRIC; 11347 if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP) 11348 vport->fc_flag |= FC_PUBLIC_LOOP; 11349 spin_unlock_irq(shost->host_lock); 11350 11351 vport->fc_myDID = ulp_word4 & Mask_DID; 11352 lpfc_vport_set_state(vport, FC_VPORT_ACTIVE); 11353 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 11354 if (!prsp) 11355 goto out; 11356 sp = prsp->virt + sizeof(uint32_t); 11357 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp); 11358 memcpy(&vport->fabric_portname, &sp->portName, 11359 sizeof(struct lpfc_name)); 11360 memcpy(&vport->fabric_nodename, &sp->nodeName, 11361 sizeof(struct lpfc_name)); 11362 if (fabric_param_changed && 11363 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 11364 /* If our NportID changed, we need to ensure all 11365 * remaining NPORTs get unreg_login'ed so we can 11366 * issue unreg_vpi. 
11367 */ 11368 list_for_each_entry_safe(np, next_np, 11369 &vport->fc_nodes, nlp_listp) { 11370 if ((np->nlp_state != NLP_STE_NPR_NODE) || 11371 !(np->nlp_flag & NLP_NPR_ADISC)) 11372 continue; 11373 spin_lock_irq(&ndlp->lock); 11374 np->nlp_flag &= ~NLP_NPR_ADISC; 11375 spin_unlock_irq(&ndlp->lock); 11376 lpfc_unreg_rpi(vport, np); 11377 } 11378 lpfc_cleanup_pending_mbox(vport); 11379 11380 if (phba->sli_rev == LPFC_SLI_REV4) 11381 lpfc_sli4_unreg_all_rpis(vport); 11382 11383 lpfc_mbx_unreg_vpi(vport); 11384 spin_lock_irq(shost->host_lock); 11385 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 11386 if (phba->sli_rev == LPFC_SLI_REV4) 11387 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 11388 else 11389 vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG; 11390 spin_unlock_irq(shost->host_lock); 11391 } else if ((phba->sli_rev == LPFC_SLI_REV4) && 11392 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 11393 /* 11394 * Driver needs to re-reg VPI in order for f/w 11395 * to update the MAC address. 11396 */ 11397 lpfc_register_new_vport(phba, vport, ndlp); 11398 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 11399 goto out; 11400 } 11401 11402 if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI) 11403 lpfc_issue_init_vpi(vport); 11404 else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) 11405 lpfc_register_new_vport(phba, vport, ndlp); 11406 else 11407 lpfc_do_scr_ns_plogi(phba, vport); 11408 11409 /* The FDISC completed successfully. Move the fabric ndlp to 11410 * UNMAPPED state and register with the transport. 11411 */ 11412 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 11413 goto out; 11414 11415 fdisc_failed: 11416 if (vport->fc_vport && 11417 (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS)) 11418 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 11419 /* Cancel discovery timer */ 11420 lpfc_can_disctmo(vport); 11421 out: 11422 lpfc_els_free_iocb(phba, cmdiocb); 11423 lpfc_nlp_put(ndlp); 11424 } 11425 11426 /** 11427 * lpfc_issue_els_fdisc - Issue a fdisc iocb command 11428 * @vport: pointer to a virtual N_Port data structure. 11429 * @ndlp: pointer to a node-list data structure. 11430 * @retry: number of retries to the command IOCB. 11431 * 11432 * This routine prepares and issues a Fabric Discover (FDISC) IOCB to 11433 * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb() 11434 * routine to issue the IOCB, which makes sure only one outstanding fabric 11435 * IOCB will be sent off HBA at any given time. 11436 * 11437 * Note that the ndlp reference count will be incremented by 1 for holding the 11438 * ndlp and the reference to ndlp will be stored into the ndlp field of 11439 * the IOCB for the completion callback function to the FDISC ELS command. 
11440 * 11441 * Return code 11442 * 0 - Successfully issued fdisc iocb command 11443 * 1 - Failed to issue fdisc iocb command 11444 **/ 11445 static int 11446 lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 11447 uint8_t retry) 11448 { 11449 struct lpfc_hba *phba = vport->phba; 11450 IOCB_t *icmd; 11451 union lpfc_wqe128 *wqe = NULL; 11452 struct lpfc_iocbq *elsiocb; 11453 struct serv_parm *sp; 11454 uint8_t *pcmd; 11455 uint16_t cmdsize; 11456 int did = ndlp->nlp_DID; 11457 int rc; 11458 11459 vport->port_state = LPFC_FDISC; 11460 vport->fc_myDID = 0; 11461 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 11462 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 11463 ELS_CMD_FDISC); 11464 if (!elsiocb) { 11465 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 11466 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 11467 "0255 Issue FDISC: no IOCB\n"); 11468 return 1; 11469 } 11470 11471 if (phba->sli_rev == LPFC_SLI_REV4) { 11472 wqe = &elsiocb->wqe; 11473 bf_set(els_req64_sid, &wqe->els_req, 0); 11474 bf_set(els_req64_sp, &wqe->els_req, 1); 11475 } else { 11476 icmd = &elsiocb->iocb; 11477 icmd->un.elsreq64.myID = 0; 11478 icmd->un.elsreq64.fl = 1; 11479 icmd->ulpCt_h = 1; 11480 icmd->ulpCt_l = 0; 11481 } 11482 11483 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 11484 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC; 11485 pcmd += sizeof(uint32_t); /* CSP Word 1 */ 11486 memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm)); 11487 sp = (struct serv_parm *) pcmd; 11488 /* Setup CSPs accordingly for Fabric */ 11489 sp->cmn.e_d_tov = 0; 11490 sp->cmn.w2.r_a_tov = 0; 11491 sp->cmn.virtual_fabric_support = 0; 11492 sp->cls1.classValid = 0; 11493 sp->cls2.seqDelivery = 1; 11494 sp->cls3.seqDelivery = 1; 11495 11496 pcmd += sizeof(uint32_t); /* CSP Word 2 */ 11497 pcmd += sizeof(uint32_t); /* CSP Word 3 */ 11498 pcmd += sizeof(uint32_t); /* CSP Word 4 */ 11499 pcmd += sizeof(uint32_t); /* Port Name */ 11500 memcpy(pcmd, &vport->fc_portname, 8); 11501 pcmd += sizeof(uint32_t); /* Node Name */ 11502 pcmd += sizeof(uint32_t); /* Node Name */ 11503 memcpy(pcmd, &vport->fc_nodename, 8); 11504 sp->cmn.valid_vendor_ver_level = 0; 11505 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); 11506 lpfc_set_disctmo(vport); 11507 11508 phba->fc_stat.elsXmitFDISC++; 11509 elsiocb->cmd_cmpl = lpfc_cmpl_els_fdisc; 11510 11511 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 11512 "Issue FDISC: did:x%x", 11513 did, 0, 0); 11514 11515 elsiocb->ndlp = lpfc_nlp_get(ndlp); 11516 if (!elsiocb->ndlp) 11517 goto err_out; 11518 11519 rc = lpfc_issue_fabric_iocb(phba, elsiocb); 11520 if (rc == IOCB_ERROR) { 11521 lpfc_nlp_put(ndlp); 11522 goto err_out; 11523 } 11524 11525 lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING); 11526 return 0; 11527 11528 err_out: 11529 lpfc_els_free_iocb(phba, elsiocb); 11530 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 11531 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 11532 "0256 Issue FDISC: Cannot send IOCB\n"); 11533 return 1; 11534 } 11535 11536 /** 11537 * lpfc_cmpl_els_npiv_logo - Completion function with vport logo 11538 * @phba: pointer to lpfc hba data structure. 11539 * @cmdiocb: pointer to lpfc command iocb data structure. 11540 * @rspiocb: pointer to lpfc response iocb data structure. 11541 * 11542 * This routine is the completion callback function to the issuing of a LOGO 11543 * ELS command off a vport. 
It frees the command IOCB and then decrements the
11544 * reference count held on the ndlp for this completion function, indicating
11545 * that the reference to the ndlp is no longer needed. Note that the
11546 * lpfc_els_free_iocb() routine decrements the ndlp reference held for this
11547 * callback function, and an additional explicit ndlp reference decrement
11548 * will trigger the actual release of the ndlp.
11549 **/
11550 static void
11551 lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11552 struct lpfc_iocbq *rspiocb)
11553 {
11554 struct lpfc_vport *vport = cmdiocb->vport;
11555 IOCB_t *irsp;
11556 struct lpfc_nodelist *ndlp;
11557 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
11558 u32 ulp_status, ulp_word4, did, tmo;
11559 
11560 ndlp = cmdiocb->ndlp;
11561 
11562 ulp_status = get_job_ulpstatus(phba, rspiocb);
11563 ulp_word4 = get_job_word4(phba, rspiocb);
11564 
11565 if (phba->sli_rev == LPFC_SLI_REV4) {
11566 did = get_job_els_rsp64_did(phba, cmdiocb);
11567 tmo = get_wqe_tmo(cmdiocb);
11568 } else {
11569 irsp = &rspiocb->iocb;
11570 did = get_job_els_rsp64_did(phba, rspiocb);
11571 tmo = irsp->ulpTimeout;
11572 }
11573 
11574 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
11575 "LOGO npiv cmpl: status:x%x/x%x did:x%x",
11576 ulp_status, ulp_word4, did);
11577 
11578 /* NPIV LOGO completes to NPort <nlp_DID> */
11579 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
11580 "2928 NPIV LOGO completes to NPort x%x "
11581 "Data: x%x x%x x%x x%x x%x x%x x%x\n",
11582 ndlp->nlp_DID, ulp_status, ulp_word4,
11583 tmo, vport->num_disc_nodes,
11584 kref_read(&ndlp->kref), ndlp->nlp_flag,
11585 ndlp->fc4_xpt_flags);
11586 
11587 if (ulp_status == IOSTAT_SUCCESS) {
11588 spin_lock_irq(shost->host_lock);
11589 vport->fc_flag &= ~FC_NDISC_ACTIVE;
11590 vport->fc_flag &= ~FC_FABRIC;
11591 spin_unlock_irq(shost->host_lock);
11592 lpfc_can_disctmo(vport);
11593 }
11594 
11595 if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) {
11596 /* Wake up lpfc_vport_delete if waiting...*/
11597 if (ndlp->logo_waitq)
11598 wake_up(ndlp->logo_waitq);
11599 spin_lock_irq(&ndlp->lock);
11600 ndlp->nlp_flag &= ~(NLP_ISSUE_LOGO | NLP_LOGO_SND);
11601 ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO;
11602 spin_unlock_irq(&ndlp->lock);
11603 }
11604 
11605 /* Safe to release resources now. */
11606 lpfc_els_free_iocb(phba, cmdiocb);
11607 lpfc_nlp_put(ndlp);
11608 }
11609 
11610 /**
11611 * lpfc_issue_els_npiv_logo - Issue a logo off a vport
11612 * @vport: pointer to a virtual N_Port data structure.
11613 * @ndlp: pointer to a node-list data structure.
11614 *
11615 * This routine issues a LOGO ELS command to an @ndlp off a @vport.
11616 *
11617 * Note that the ndlp reference count will be incremented by 1 for holding the
11618 * ndlp and the reference to ndlp will be stored into the ndlp field of
11619 * the IOCB for the completion callback function to the LOGO ELS command.
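*
* A sketch of how a caller can synchronize with the LOGO (the completion
* handler above wakes logo_waitq and then clears NLP_WAIT_FOR_LOGO; the
* on-stack wait queue and the 30 second timeout are illustrative
* assumptions, not the driver's fixed behavior):
*
*	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
*
*	ndlp->logo_waitq = &waitq;
*	spin_lock_irq(&ndlp->lock);
*	ndlp->save_flags |= NLP_WAIT_FOR_LOGO;
*	spin_unlock_irq(&ndlp->lock);
*	if (!lpfc_issue_els_npiv_logo(vport, ndlp))
*		wait_event_timeout(waitq,
*				   !(ndlp->save_flags & NLP_WAIT_FOR_LOGO),
*				   msecs_to_jiffies(30 * 1000));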
11620 * 11621 * Return codes 11622 * 0 - Successfully issued logo off the @vport 11623 * 1 - Failed to issue logo off the @vport 11624 **/ 11625 int 11626 lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 11627 { 11628 int rc = 0; 11629 struct lpfc_hba *phba = vport->phba; 11630 struct lpfc_iocbq *elsiocb; 11631 uint8_t *pcmd; 11632 uint16_t cmdsize; 11633 11634 cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name); 11635 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID, 11636 ELS_CMD_LOGO); 11637 if (!elsiocb) 11638 return 1; 11639 11640 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 11641 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 11642 pcmd += sizeof(uint32_t); 11643 11644 /* Fill in LOGO payload */ 11645 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 11646 pcmd += sizeof(uint32_t); 11647 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 11648 11649 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 11650 "Issue LOGO npiv did:x%x flg:x%x", 11651 ndlp->nlp_DID, ndlp->nlp_flag, 0); 11652 11653 elsiocb->cmd_cmpl = lpfc_cmpl_els_npiv_logo; 11654 spin_lock_irq(&ndlp->lock); 11655 ndlp->nlp_flag |= NLP_LOGO_SND; 11656 spin_unlock_irq(&ndlp->lock); 11657 elsiocb->ndlp = lpfc_nlp_get(ndlp); 11658 if (!elsiocb->ndlp) { 11659 lpfc_els_free_iocb(phba, elsiocb); 11660 goto err; 11661 } 11662 11663 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 11664 if (rc == IOCB_ERROR) { 11665 lpfc_els_free_iocb(phba, elsiocb); 11666 lpfc_nlp_put(ndlp); 11667 goto err; 11668 } 11669 return 0; 11670 11671 err: 11672 spin_lock_irq(&ndlp->lock); 11673 ndlp->nlp_flag &= ~NLP_LOGO_SND; 11674 spin_unlock_irq(&ndlp->lock); 11675 return 1; 11676 } 11677 11678 /** 11679 * lpfc_fabric_block_timeout - Handler function to the fabric block timer 11680 * @t: timer context used to obtain the lpfc hba. 11681 * 11682 * This routine is invoked by the fabric iocb block timer after 11683 * timeout. It posts the fabric iocb block timeout event by setting the 11684 * WORKER_FABRIC_BLOCK_TMO bit to work port event bitmap and then invokes 11685 * lpfc_worker_wake_up() routine to wake up the worker thread. It is for 11686 * the worker thread to invoke the lpfc_unblock_fabric_iocbs() on the 11687 * posted event WORKER_FABRIC_BLOCK_TMO. 11688 **/ 11689 void 11690 lpfc_fabric_block_timeout(struct timer_list *t) 11691 { 11692 struct lpfc_hba *phba = from_timer(phba, t, fabric_block_timer); 11693 unsigned long iflags; 11694 uint32_t tmo_posted; 11695 11696 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 11697 tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO; 11698 if (!tmo_posted) 11699 phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO; 11700 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); 11701 11702 if (!tmo_posted) 11703 lpfc_worker_wake_up(phba); 11704 return; 11705 } 11706 11707 /** 11708 * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list 11709 * @phba: pointer to lpfc hba data structure. 11710 * 11711 * This routine issues one fabric iocb from the driver internal list to 11712 * the HBA. It first checks whether it's ready to issue one fabric iocb to 11713 * the HBA (whether there is no outstanding fabric iocb). If so, it shall 11714 * remove one pending fabric iocb from the driver internal list and invokes 11715 * lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA. 
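* Together with lpfc_issue_fabric_iocb() and lpfc_cmpl_fabric_iocb(), this
* preserves the invariant that phba->fabric_iocb_count is either 0 (nothing
* outstanding) or 1 (one fabric ELS on the wire): the count is taken before
* the iocb is issued and, if the issue fails, the iocb is completed with a
* local reject, the count is dropped, and the next queued fabric iocb is
* attempted.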
11716 **/ 11717 static void 11718 lpfc_resume_fabric_iocbs(struct lpfc_hba *phba) 11719 { 11720 struct lpfc_iocbq *iocb; 11721 unsigned long iflags; 11722 int ret; 11723 11724 repeat: 11725 iocb = NULL; 11726 spin_lock_irqsave(&phba->hbalock, iflags); 11727 /* Post any pending iocb to the SLI layer */ 11728 if (atomic_read(&phba->fabric_iocb_count) == 0) { 11729 list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb), 11730 list); 11731 if (iocb) 11732 /* Increment fabric iocb count to hold the position */ 11733 atomic_inc(&phba->fabric_iocb_count); 11734 } 11735 spin_unlock_irqrestore(&phba->hbalock, iflags); 11736 if (iocb) { 11737 iocb->fabric_cmd_cmpl = iocb->cmd_cmpl; 11738 iocb->cmd_cmpl = lpfc_cmpl_fabric_iocb; 11739 iocb->cmd_flag |= LPFC_IO_FABRIC; 11740 11741 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD, 11742 "Fabric sched1: ste:x%x", 11743 iocb->vport->port_state, 0, 0); 11744 11745 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); 11746 11747 if (ret == IOCB_ERROR) { 11748 iocb->cmd_cmpl = iocb->fabric_cmd_cmpl; 11749 iocb->fabric_cmd_cmpl = NULL; 11750 iocb->cmd_flag &= ~LPFC_IO_FABRIC; 11751 set_job_ulpstatus(iocb, IOSTAT_LOCAL_REJECT); 11752 iocb->wcqe_cmpl.parameter = IOERR_SLI_ABORTED; 11753 iocb->cmd_cmpl(phba, iocb, iocb); 11754 11755 atomic_dec(&phba->fabric_iocb_count); 11756 goto repeat; 11757 } 11758 } 11759 } 11760 11761 /** 11762 * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command 11763 * @phba: pointer to lpfc hba data structure. 11764 * 11765 * This routine unblocks the issuing fabric iocb command. The function 11766 * will clear the fabric iocb block bit and then invoke the routine 11767 * lpfc_resume_fabric_iocbs() to issue one of the pending fabric iocb 11768 * from the driver internal fabric iocb list. 11769 **/ 11770 void 11771 lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba) 11772 { 11773 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 11774 11775 lpfc_resume_fabric_iocbs(phba); 11776 return; 11777 } 11778 11779 /** 11780 * lpfc_block_fabric_iocbs - Block issuing fabric iocb command 11781 * @phba: pointer to lpfc hba data structure. 11782 * 11783 * This routine blocks the issuing fabric iocb for a specified amount of 11784 * time (currently 100 ms). This is done by set the fabric iocb block bit 11785 * and set up a timeout timer for 100ms. When the block bit is set, no more 11786 * fabric iocb will be issued out of the HBA. 11787 **/ 11788 static void 11789 lpfc_block_fabric_iocbs(struct lpfc_hba *phba) 11790 { 11791 int blocked; 11792 11793 blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 11794 /* Start a timer to unblock fabric iocbs after 100ms */ 11795 if (!blocked) 11796 mod_timer(&phba->fabric_block_timer, 11797 jiffies + msecs_to_jiffies(100)); 11798 11799 return; 11800 } 11801 11802 /** 11803 * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb 11804 * @phba: pointer to lpfc hba data structure. 11805 * @cmdiocb: pointer to lpfc command iocb data structure. 11806 * @rspiocb: pointer to lpfc response iocb data structure. 11807 * 11808 * This routine is the callback function that is put to the fabric iocb's 11809 * callback function pointer (iocb->cmd_cmpl). The original iocb's callback 11810 * function pointer has been stored in iocb->fabric_cmd_cmpl. 
This callback
11811 * function first restores and invokes the original iocb's callback function
11812 * and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next
11813 * fabric bound iocb from the driver internal fabric iocb list onto the wire.
11814 **/
11815 static void
11816 lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11817 struct lpfc_iocbq *rspiocb)
11818 {
11819 struct ls_rjt stat;
11820 u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
11821 u32 ulp_word4 = get_job_word4(phba, rspiocb);
11822 
11823 WARN_ON((cmdiocb->cmd_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC);
11824 
11825 switch (ulp_status) {
11826 case IOSTAT_NPORT_RJT:
11827 case IOSTAT_FABRIC_RJT:
11828 if (ulp_word4 & RJT_UNAVAIL_TEMP)
11829 lpfc_block_fabric_iocbs(phba);
11830 break;
11831 
11832 case IOSTAT_NPORT_BSY:
11833 case IOSTAT_FABRIC_BSY:
11834 lpfc_block_fabric_iocbs(phba);
11835 break;
11836 
11837 case IOSTAT_LS_RJT:
11838 stat.un.ls_rjt_error_be =
11839 cpu_to_be32(ulp_word4);
11840 if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
11841 (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
11842 lpfc_block_fabric_iocbs(phba);
11843 break;
11844 }
11845 
11846 BUG_ON(atomic_read(&phba->fabric_iocb_count) == 0);
11847 
11848 cmdiocb->cmd_cmpl = cmdiocb->fabric_cmd_cmpl;
11849 cmdiocb->fabric_cmd_cmpl = NULL;
11850 cmdiocb->cmd_flag &= ~LPFC_IO_FABRIC;
11851 cmdiocb->cmd_cmpl(phba, cmdiocb, rspiocb);
11852 
11853 atomic_dec(&phba->fabric_iocb_count);
11854 if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
11855 /* Post any pending iocbs to HBA */
11856 lpfc_resume_fabric_iocbs(phba);
11857 }
11858 }
11859 
11860 /**
11861 * lpfc_issue_fabric_iocb - Issue a fabric iocb command
11862 * @phba: pointer to lpfc hba data structure.
11863 * @iocb: pointer to lpfc command iocb data structure.
11864 *
11865 * This routine is used as the top-level API for issuing a fabric iocb command
11866 * such as FLOGI and FDISC. To accommodate certain switch fabrics, this driver
11867 * function makes sure that only one fabric bound iocb will be outstanding at
11868 * any given time. As such, this function will first check to see whether there
11869 * is already an outstanding fabric iocb on the wire. If so, it will put the
11870 * newly issued iocb onto the driver internal fabric iocb list, waiting to be
11871 * issued later. Otherwise, it will issue the iocb on the wire and update the
11872 * fabric iocb count to indicate that there is one fabric iocb on the wire.
11873 *
11874 * Note, this implementation can potentially send fabric IOCBs out of
11875 * order. The problem is that the construction of the "ready" boolean does
11876 * not include the condition that the internal fabric IOCB list is empty. As
11877 * such, it is possible that a fabric IOCB issued by this routine might jump
11878 * ahead of the fabric IOCBs already waiting on the internal list.
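*
* A sketch of a stricter readiness test that would preserve this ordering
* (illustrative only, not what the driver currently does):
*
*	ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
*		!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags) &&
*		list_empty(&phba->fabric_iocb_list);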
11879 * 11880 * Return code 11881 * IOCB_SUCCESS - either fabric iocb put on the list or issued successfully 11882 * IOCB_ERROR - failed to issue fabric iocb 11883 **/ 11884 static int 11885 lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb) 11886 { 11887 unsigned long iflags; 11888 int ready; 11889 int ret; 11890 11891 BUG_ON(atomic_read(&phba->fabric_iocb_count) > 1); 11892 11893 spin_lock_irqsave(&phba->hbalock, iflags); 11894 ready = atomic_read(&phba->fabric_iocb_count) == 0 && 11895 !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 11896 11897 if (ready) 11898 /* Increment fabric iocb count to hold the position */ 11899 atomic_inc(&phba->fabric_iocb_count); 11900 spin_unlock_irqrestore(&phba->hbalock, iflags); 11901 if (ready) { 11902 iocb->fabric_cmd_cmpl = iocb->cmd_cmpl; 11903 iocb->cmd_cmpl = lpfc_cmpl_fabric_iocb; 11904 iocb->cmd_flag |= LPFC_IO_FABRIC; 11905 11906 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD, 11907 "Fabric sched2: ste:x%x", 11908 iocb->vport->port_state, 0, 0); 11909 11910 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); 11911 11912 if (ret == IOCB_ERROR) { 11913 iocb->cmd_cmpl = iocb->fabric_cmd_cmpl; 11914 iocb->fabric_cmd_cmpl = NULL; 11915 iocb->cmd_flag &= ~LPFC_IO_FABRIC; 11916 atomic_dec(&phba->fabric_iocb_count); 11917 } 11918 } else { 11919 spin_lock_irqsave(&phba->hbalock, iflags); 11920 list_add_tail(&iocb->list, &phba->fabric_iocb_list); 11921 spin_unlock_irqrestore(&phba->hbalock, iflags); 11922 ret = IOCB_SUCCESS; 11923 } 11924 return ret; 11925 } 11926 11927 /** 11928 * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list 11929 * @vport: pointer to a virtual N_Port data structure. 11930 * 11931 * This routine aborts all the IOCBs associated with a @vport from the 11932 * driver internal fabric IOCB list. The list contains fabric IOCBs to be 11933 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB 11934 * list, removes each IOCB associated with the @vport off the list, set the 11935 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function 11936 * associated with the IOCB. 11937 **/ 11938 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport) 11939 { 11940 LIST_HEAD(completions); 11941 struct lpfc_hba *phba = vport->phba; 11942 struct lpfc_iocbq *tmp_iocb, *piocb; 11943 11944 spin_lock_irq(&phba->hbalock); 11945 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, 11946 list) { 11947 11948 if (piocb->vport != vport) 11949 continue; 11950 11951 list_move_tail(&piocb->list, &completions); 11952 } 11953 spin_unlock_irq(&phba->hbalock); 11954 11955 /* Cancel all the IOCBs from the completions list */ 11956 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 11957 IOERR_SLI_ABORTED); 11958 } 11959 11960 /** 11961 * lpfc_fabric_abort_nport - Abort a ndlp's iocbs from driver fabric iocb list 11962 * @ndlp: pointer to a node-list data structure. 11963 * 11964 * This routine aborts all the IOCBs associated with an @ndlp from the 11965 * driver internal fabric IOCB list. The list contains fabric IOCBs to be 11966 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB 11967 * list, removes each IOCB associated with the @ndlp off the list, set the 11968 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function 11969 * associated with the IOCB. 
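 * IOCBs are matched to the @ndlp with lpfc_check_sli_ndlp() against the
 * ELS ring rather than by a direct pointer comparison on the IOCB.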
11970 **/ 11971 void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp) 11972 { 11973 LIST_HEAD(completions); 11974 struct lpfc_hba *phba = ndlp->phba; 11975 struct lpfc_iocbq *tmp_iocb, *piocb; 11976 struct lpfc_sli_ring *pring; 11977 11978 pring = lpfc_phba_elsring(phba); 11979 11980 if (unlikely(!pring)) 11981 return; 11982 11983 spin_lock_irq(&phba->hbalock); 11984 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, 11985 list) { 11986 if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) { 11987 11988 list_move_tail(&piocb->list, &completions); 11989 } 11990 } 11991 spin_unlock_irq(&phba->hbalock); 11992 11993 /* Cancel all the IOCBs from the completions list */ 11994 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 11995 IOERR_SLI_ABORTED); 11996 } 11997 11998 /** 11999 * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list 12000 * @phba: pointer to lpfc hba data structure. 12001 * 12002 * This routine aborts all the IOCBs currently on the driver internal 12003 * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS 12004 * IOCB ring. This function takes the entire IOCB list off the fabric IOCB 12005 * list, removes IOCBs off the list, set the status field to 12006 * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with 12007 * the IOCB. 12008 **/ 12009 void lpfc_fabric_abort_hba(struct lpfc_hba *phba) 12010 { 12011 LIST_HEAD(completions); 12012 12013 spin_lock_irq(&phba->hbalock); 12014 list_splice_init(&phba->fabric_iocb_list, &completions); 12015 spin_unlock_irq(&phba->hbalock); 12016 12017 /* Cancel all the IOCBs from the completions list */ 12018 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 12019 IOERR_SLI_ABORTED); 12020 } 12021 12022 /** 12023 * lpfc_sli4_vport_delete_els_xri_aborted -Remove all ndlp references for vport 12024 * @vport: pointer to lpfc vport data structure. 12025 * 12026 * This routine is invoked by the vport cleanup for deletions and the cleanup 12027 * for an ndlp on removal. 12028 **/ 12029 void 12030 lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport) 12031 { 12032 struct lpfc_hba *phba = vport->phba; 12033 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 12034 struct lpfc_nodelist *ndlp = NULL; 12035 unsigned long iflag = 0; 12036 12037 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag); 12038 list_for_each_entry_safe(sglq_entry, sglq_next, 12039 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { 12040 if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport) { 12041 lpfc_nlp_put(sglq_entry->ndlp); 12042 ndlp = sglq_entry->ndlp; 12043 sglq_entry->ndlp = NULL; 12044 12045 /* If the xri on the abts_els_sgl list is for the Fport 12046 * node and the vport is unloading, the xri aborted wcqe 12047 * likely isn't coming back. Just release the sgl. 12048 */ 12049 if ((vport->load_flag & FC_UNLOADING) && 12050 ndlp->nlp_DID == Fabric_DID) { 12051 list_del(&sglq_entry->list); 12052 sglq_entry->state = SGL_FREED; 12053 list_add_tail(&sglq_entry->list, 12054 &phba->sli4_hba.lpfc_els_sgl_list); 12055 } 12056 } 12057 } 12058 spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag); 12059 return; 12060 } 12061 12062 /** 12063 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort 12064 * @phba: pointer to lpfc hba data structure. 12065 * @axri: pointer to the els xri abort wcqe structure. 12066 * 12067 * This routine is invoked by the worker thread to process a SLI4 slow-path 12068 * ELS aborted xri. 
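 * The aborted xri is first looked up on the lpfc_abts_els_sgl_list; a
 * matching sglq is moved back to the free els sgl list and, if an ndlp is
 * still attached, an RRQ is set active for the exchange before the node
 * reference is dropped. If no abts entry matches, the active sglq for the
 * xri is simply marked SGL_XRI_ABORTED.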
12069 **/ 12070 void 12071 lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba, 12072 struct sli4_wcqe_xri_aborted *axri) 12073 { 12074 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); 12075 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); 12076 uint16_t lxri = 0; 12077 12078 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 12079 unsigned long iflag = 0; 12080 struct lpfc_nodelist *ndlp; 12081 struct lpfc_sli_ring *pring; 12082 12083 pring = lpfc_phba_elsring(phba); 12084 12085 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag); 12086 list_for_each_entry_safe(sglq_entry, sglq_next, 12087 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { 12088 if (sglq_entry->sli4_xritag == xri) { 12089 list_del(&sglq_entry->list); 12090 ndlp = sglq_entry->ndlp; 12091 sglq_entry->ndlp = NULL; 12092 list_add_tail(&sglq_entry->list, 12093 &phba->sli4_hba.lpfc_els_sgl_list); 12094 sglq_entry->state = SGL_FREED; 12095 spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, 12096 iflag); 12097 12098 if (ndlp) { 12099 lpfc_set_rrq_active(phba, ndlp, 12100 sglq_entry->sli4_lxritag, 12101 rxid, 1); 12102 lpfc_nlp_put(ndlp); 12103 } 12104 12105 /* Check if TXQ queue needs to be serviced */ 12106 if (pring && !list_empty(&pring->txq)) 12107 lpfc_worker_wake_up(phba); 12108 return; 12109 } 12110 } 12111 spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag); 12112 lxri = lpfc_sli4_xri_inrange(phba, xri); 12113 if (lxri == NO_XRI) 12114 return; 12115 12116 spin_lock_irqsave(&phba->hbalock, iflag); 12117 sglq_entry = __lpfc_get_active_sglq(phba, lxri); 12118 if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) { 12119 spin_unlock_irqrestore(&phba->hbalock, iflag); 12120 return; 12121 } 12122 sglq_entry->state = SGL_XRI_ABORTED; 12123 spin_unlock_irqrestore(&phba->hbalock, iflag); 12124 return; 12125 } 12126 12127 /* lpfc_sli_abts_recover_port - Recover a port that failed a BLS_ABORT req. 12128 * @vport: pointer to virtual port object. 12129 * @ndlp: nodelist pointer for the impacted node. 12130 * 12131 * The driver calls this routine in response to an SLI4 XRI ABORT CQE 12132 * or an SLI3 ASYNC_STATUS_CN event from the port. For either event, 12133 * the driver is required to send a LOGO to the remote node before it 12134 * attempts to recover its login to the remote node. 12135 */ 12136 void 12137 lpfc_sli_abts_recover_port(struct lpfc_vport *vport, 12138 struct lpfc_nodelist *ndlp) 12139 { 12140 struct Scsi_Host *shost; 12141 struct lpfc_hba *phba; 12142 unsigned long flags = 0; 12143 12144 shost = lpfc_shost_from_vport(vport); 12145 phba = vport->phba; 12146 if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) { 12147 lpfc_printf_log(phba, KERN_INFO, 12148 LOG_SLI, "3093 No rport recovery needed. " 12149 "rport in state 0x%x\n", ndlp->nlp_state); 12150 return; 12151 } 12152 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 12153 "3094 Start rport recovery on shost id 0x%x " 12154 "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x " 12155 "flags 0x%x\n", 12156 shost->host_no, ndlp->nlp_DID, 12157 vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state, 12158 ndlp->nlp_flag); 12159 /* 12160 * The rport is not responding. Remove the FCP-2 flag to prevent 12161 * an ADISC in the follow-up recovery code. 
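 * NLP_ISSUE_LOGO is set below so that the recovery path sends the
 * required LOGO before any attempt to log in to the remote node again.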
12162 */ 12163 spin_lock_irqsave(&ndlp->lock, flags); 12164 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 12165 ndlp->nlp_flag |= NLP_ISSUE_LOGO; 12166 spin_unlock_irqrestore(&ndlp->lock, flags); 12167 lpfc_unreg_rpi(vport, ndlp); 12168 } 12169 12170 static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport) 12171 { 12172 bitmap_zero(vport->vmid_priority_range, LPFC_VMID_MAX_PRIORITY_RANGE); 12173 } 12174 12175 static void 12176 lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max) 12177 { 12178 u32 i; 12179 12180 if ((min > max) || (max > LPFC_VMID_MAX_PRIORITY_RANGE)) 12181 return; 12182 12183 for (i = min; i <= max; i++) 12184 set_bit(i, vport->vmid_priority_range); 12185 } 12186 12187 static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid) 12188 { 12189 set_bit(ctcl_vmid, vport->vmid_priority_range); 12190 } 12191 12192 u32 lpfc_vmid_get_cs_ctl(struct lpfc_vport *vport) 12193 { 12194 u32 i; 12195 12196 i = find_first_bit(vport->vmid_priority_range, 12197 LPFC_VMID_MAX_PRIORITY_RANGE); 12198 12199 if (i == LPFC_VMID_MAX_PRIORITY_RANGE) 12200 return 0; 12201 12202 clear_bit(i, vport->vmid_priority_range); 12203 return i; 12204 } 12205 12206 #define MAX_PRIORITY_DESC 255 12207 12208 static void 12209 lpfc_cmpl_els_qfpa(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 12210 struct lpfc_iocbq *rspiocb) 12211 { 12212 struct lpfc_vport *vport = cmdiocb->vport; 12213 struct priority_range_desc *desc; 12214 struct lpfc_dmabuf *prsp = NULL; 12215 struct lpfc_vmid_priority_range *vmid_range = NULL; 12216 u32 *data; 12217 struct lpfc_dmabuf *dmabuf = cmdiocb->cmd_dmabuf; 12218 u32 ulp_status = get_job_ulpstatus(phba, rspiocb); 12219 u32 ulp_word4 = get_job_word4(phba, rspiocb); 12220 u8 *pcmd, max_desc; 12221 u32 len, i; 12222 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 12223 12224 prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list); 12225 if (!prsp) 12226 goto out; 12227 12228 pcmd = prsp->virt; 12229 data = (u32 *)pcmd; 12230 if (data[0] == ELS_CMD_LS_RJT) { 12231 lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI, 12232 "3277 QFPA LS_RJT x%x x%x\n", 12233 data[0], data[1]); 12234 goto out; 12235 } 12236 if (ulp_status) { 12237 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI, 12238 "6529 QFPA failed with status x%x x%x\n", 12239 ulp_status, ulp_word4); 12240 goto out; 12241 } 12242 12243 if (!vport->qfpa_res) { 12244 max_desc = FCELSSIZE / sizeof(*vport->qfpa_res); 12245 vport->qfpa_res = kcalloc(max_desc, sizeof(*vport->qfpa_res), 12246 GFP_KERNEL); 12247 if (!vport->qfpa_res) 12248 goto out; 12249 } 12250 12251 len = *((u32 *)(pcmd + 4)); 12252 len = be32_to_cpu(len); 12253 memcpy(vport->qfpa_res, pcmd, len + 8); 12254 len = len / LPFC_PRIORITY_RANGE_DESC_SIZE; 12255 12256 desc = (struct priority_range_desc *)(pcmd + 8); 12257 vmid_range = vport->vmid_priority.vmid_range; 12258 if (!vmid_range) { 12259 vmid_range = kcalloc(MAX_PRIORITY_DESC, sizeof(*vmid_range), 12260 GFP_KERNEL); 12261 if (!vmid_range) { 12262 kfree(vport->qfpa_res); 12263 goto out; 12264 } 12265 vport->vmid_priority.vmid_range = vmid_range; 12266 } 12267 vport->vmid_priority.num_descriptors = len; 12268 12269 for (i = 0; i < len; i++, vmid_range++, desc++) { 12270 lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS, 12271 "6539 vmid values low=%d, high=%d, qos=%d, " 12272 "local ve id=%d\n", desc->lo_range, 12273 desc->hi_range, desc->qos_priority, 12274 desc->local_ve_id); 12275 12276 vmid_range->low = desc->lo_range << 1; 12277 if (desc->local_ve_id == QFPA_ODD_ONLY) 12278 vmid_range->low++; 
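		/* A descriptor granting QoS priority marks the vport as QoS
		 * enabled; the descriptor's qos value is kept with the range.
		 */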
12279 if (desc->qos_priority) 12280 vport->vmid_flag |= LPFC_VMID_QOS_ENABLED; 12281 vmid_range->qos = desc->qos_priority; 12282 12283 vmid_range->high = desc->hi_range << 1; 12284 if ((desc->local_ve_id == QFPA_ODD_ONLY) || 12285 (desc->local_ve_id == QFPA_EVEN_ODD)) 12286 vmid_range->high++; 12287 } 12288 lpfc_init_cs_ctl_bitmap(vport); 12289 for (i = 0; i < vport->vmid_priority.num_descriptors; i++) { 12290 lpfc_vmid_set_cs_ctl_range(vport, 12291 vport->vmid_priority.vmid_range[i].low, 12292 vport->vmid_priority.vmid_range[i].high); 12293 } 12294 12295 vport->vmid_flag |= LPFC_VMID_QFPA_CMPL; 12296 out: 12297 lpfc_els_free_iocb(phba, cmdiocb); 12298 lpfc_nlp_put(ndlp); 12299 } 12300 12301 int lpfc_issue_els_qfpa(struct lpfc_vport *vport) 12302 { 12303 struct lpfc_hba *phba = vport->phba; 12304 struct lpfc_nodelist *ndlp; 12305 struct lpfc_iocbq *elsiocb; 12306 u8 *pcmd; 12307 int ret; 12308 12309 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); 12310 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 12311 return -ENXIO; 12312 12313 elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_QFPA_SIZE, 2, ndlp, 12314 ndlp->nlp_DID, ELS_CMD_QFPA); 12315 if (!elsiocb) 12316 return -ENOMEM; 12317 12318 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt; 12319 12320 *((u32 *)(pcmd)) = ELS_CMD_QFPA; 12321 pcmd += 4; 12322 12323 elsiocb->cmd_cmpl = lpfc_cmpl_els_qfpa; 12324 12325 elsiocb->ndlp = lpfc_nlp_get(ndlp); 12326 if (!elsiocb->ndlp) { 12327 lpfc_els_free_iocb(vport->phba, elsiocb); 12328 return -ENXIO; 12329 } 12330 12331 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 2); 12332 if (ret != IOCB_SUCCESS) { 12333 lpfc_els_free_iocb(phba, elsiocb); 12334 lpfc_nlp_put(ndlp); 12335 return -EIO; 12336 } 12337 vport->vmid_flag &= ~LPFC_VMID_QOS_ENABLED; 12338 return 0; 12339 } 12340 12341 int 12342 lpfc_vmid_uvem(struct lpfc_vport *vport, 12343 struct lpfc_vmid *vmid, bool instantiated) 12344 { 12345 struct lpfc_vem_id_desc *vem_id_desc; 12346 struct lpfc_nodelist *ndlp; 12347 struct lpfc_iocbq *elsiocb; 12348 struct instantiated_ve_desc *inst_desc; 12349 struct lpfc_vmid_context *vmid_context; 12350 u8 *pcmd; 12351 u32 *len; 12352 int ret = 0; 12353 12354 ndlp = lpfc_findnode_did(vport, Fabric_DID); 12355 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 12356 return -ENXIO; 12357 12358 vmid_context = kmalloc(sizeof(*vmid_context), GFP_KERNEL); 12359 if (!vmid_context) 12360 return -ENOMEM; 12361 elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_UVEM_SIZE, 2, 12362 ndlp, Fabric_DID, ELS_CMD_UVEM); 12363 if (!elsiocb) 12364 goto out; 12365 12366 lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS, 12367 "3427 Host vmid %s %d\n", 12368 vmid->host_vmid, instantiated); 12369 vmid_context->vmp = vmid; 12370 vmid_context->nlp = ndlp; 12371 vmid_context->instantiated = instantiated; 12372 elsiocb->vmid_tag.vmid_context = vmid_context; 12373 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt; 12374 12375 if (!memchr_inv(vport->lpfc_vmid_host_uuid, 0, 12376 sizeof(vport->lpfc_vmid_host_uuid))) 12377 memcpy(vport->lpfc_vmid_host_uuid, vmid->host_vmid, 12378 sizeof(vport->lpfc_vmid_host_uuid)); 12379 12380 *((u32 *)(pcmd)) = ELS_CMD_UVEM; 12381 len = (u32 *)(pcmd + 4); 12382 *len = cpu_to_be32(LPFC_UVEM_SIZE - 8); 12383 12384 vem_id_desc = (struct lpfc_vem_id_desc *)(pcmd + 8); 12385 vem_id_desc->tag = be32_to_cpu(VEM_ID_DESC_TAG); 12386 vem_id_desc->length = be32_to_cpu(LPFC_UVEM_VEM_ID_DESC_SIZE); 12387 memcpy(vem_id_desc->vem_id, vport->lpfc_vmid_host_uuid, 12388 sizeof(vem_id_desc->vem_id)); 12389 12390 inst_desc = (struct 
instantiated_ve_desc *)(pcmd + 32); 12391 inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG); 12392 inst_desc->length = be32_to_cpu(LPFC_UVEM_VE_MAP_DESC_SIZE); 12393 memcpy(inst_desc->global_vem_id, vmid->host_vmid, 12394 sizeof(inst_desc->global_vem_id)); 12395 12396 bf_set(lpfc_instantiated_nport_id, inst_desc, vport->fc_myDID); 12397 bf_set(lpfc_instantiated_local_id, inst_desc, 12398 vmid->un.cs_ctl_vmid); 12399 if (instantiated) { 12400 inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG); 12401 } else { 12402 inst_desc->tag = be32_to_cpu(DEINSTANTIATED_VE_DESC_TAG); 12403 lpfc_vmid_put_cs_ctl(vport, vmid->un.cs_ctl_vmid); 12404 } 12405 inst_desc->word6 = cpu_to_be32(inst_desc->word6); 12406 12407 elsiocb->cmd_cmpl = lpfc_cmpl_els_uvem; 12408 12409 elsiocb->ndlp = lpfc_nlp_get(ndlp); 12410 if (!elsiocb->ndlp) { 12411 lpfc_els_free_iocb(vport->phba, elsiocb); 12412 goto out; 12413 } 12414 12415 ret = lpfc_sli_issue_iocb(vport->phba, LPFC_ELS_RING, elsiocb, 0); 12416 if (ret != IOCB_SUCCESS) { 12417 lpfc_els_free_iocb(vport->phba, elsiocb); 12418 lpfc_nlp_put(ndlp); 12419 goto out; 12420 } 12421 12422 return 0; 12423 out: 12424 kfree(vmid_context); 12425 return -EIO; 12426 } 12427 12428 static void 12429 lpfc_cmpl_els_uvem(struct lpfc_hba *phba, struct lpfc_iocbq *icmdiocb, 12430 struct lpfc_iocbq *rspiocb) 12431 { 12432 struct lpfc_vport *vport = icmdiocb->vport; 12433 struct lpfc_dmabuf *prsp = NULL; 12434 struct lpfc_vmid_context *vmid_context = 12435 icmdiocb->vmid_tag.vmid_context; 12436 struct lpfc_nodelist *ndlp = icmdiocb->ndlp; 12437 u8 *pcmd; 12438 u32 *data; 12439 u32 ulp_status = get_job_ulpstatus(phba, rspiocb); 12440 u32 ulp_word4 = get_job_word4(phba, rspiocb); 12441 struct lpfc_dmabuf *dmabuf = icmdiocb->cmd_dmabuf; 12442 struct lpfc_vmid *vmid; 12443 12444 vmid = vmid_context->vmp; 12445 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 12446 ndlp = NULL; 12447 12448 prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list); 12449 if (!prsp) 12450 goto out; 12451 pcmd = prsp->virt; 12452 data = (u32 *)pcmd; 12453 if (data[0] == ELS_CMD_LS_RJT) { 12454 lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI, 12455 "4532 UVEM LS_RJT %x %x\n", data[0], data[1]); 12456 goto out; 12457 } 12458 if (ulp_status) { 12459 lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI, 12460 "4533 UVEM error status %x: %x\n", 12461 ulp_status, ulp_word4); 12462 goto out; 12463 } 12464 spin_lock(&phba->hbalock); 12465 /* Set IN USE flag */ 12466 vport->vmid_flag |= LPFC_VMID_IN_USE; 12467 phba->pport->vmid_flag |= LPFC_VMID_IN_USE; 12468 spin_unlock(&phba->hbalock); 12469 12470 if (vmid_context->instantiated) { 12471 write_lock(&vport->vmid_lock); 12472 vmid->flag |= LPFC_VMID_REGISTERED; 12473 vmid->flag &= ~LPFC_VMID_REQ_REGISTER; 12474 write_unlock(&vport->vmid_lock); 12475 } 12476 12477 out: 12478 kfree(vmid_context); 12479 lpfc_els_free_iocb(phba, icmdiocb); 12480 lpfc_nlp_put(ndlp); 12481 } 12482
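
/*
 * Illustrative sketch only (assumed call order, not taken from this file):
 * the VMID priority-tagging ELS routines above are meant to be used in
 * sequence. A caller would first issue QFPA to learn the fabric's priority
 * ranges and, once LPFC_VMID_QFPA_CMPL is set on the vport, register a VEM
 * for each new VM identifier with UVEM, deregistering it again on teardown:
 *
 *	ret = lpfc_issue_els_qfpa(vport);          returns negative on failure
 *	...
 *	ret = lpfc_vmid_uvem(vport, vmid, true);   instantiate the VEM
 *	...
 *	ret = lpfc_vmid_uvem(vport, vmid, false);  deinstantiate the VEM
 */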