/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2012 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
/* See Fibre Channel protocol T11 FC-LS for details */
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
			  struct lpfc_iocbq *);
static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
				  struct lpfc_iocbq *);
static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
				struct lpfc_nodelist *ndlp, uint8_t retry);
static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
				  struct lpfc_iocbq *iocb);

static int lpfc_max_els_tries = 3;

/**
 * lpfc_els_chk_latt - Check host link attention event for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine checks whether there is an outstanding host link
 * attention event during the discovery process with the @vport. It is done
 * by reading the HBA's Host Attention (HA) register. If there are any host
 * link attention events during this @vport's discovery process, the @vport
 * shall be marked as FC_ABORT_DISCOVERY, a host link attention clear shall
 * be issued if the link state is not already in host link cleared state,
 * and a return code shall indicate whether the host link attention event
 * had happened.
 *
 * Note that, if either the host link is in state LPFC_LINK_DOWN or the
 * @vport state is LPFC_VPORT_READY, the request for checking host link
 * attention event will be ignored and a return code shall indicate no host
 * link attention event had happened.
 *
 * Return codes
 *   0 - no host link attention event happened
 *   1 - host link attention event happened
 **/
int
lpfc_els_chk_latt(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	uint32_t ha_copy;

	if (vport->port_state >= LPFC_VPORT_READY ||
	    phba->link_state == LPFC_LINK_DOWN ||
	    phba->sli_rev > LPFC_SLI_REV3)
		return 0;

	/* Read the HBA Host Attention Register */
	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		return 1;

	if (!(ha_copy & HA_LATT))
		return 0;

	/* Pending Link Event during Discovery */
	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
			 "0237 Pending Link Event during "
			 "Discovery: State x%x\n",
			 phba->pport->port_state);

	/* CLEAR_LA should re-enable link attention events and
	 * we should then immediately take a LATT event. The
	 * LATT processing should call lpfc_linkdown() which
	 * will cleanup any left over in-progress discovery
	 * events.
	 */
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_ABORT_DISCOVERY;
	spin_unlock_irq(shost->host_lock);

	if (phba->link_state != LPFC_CLEAR_LA)
		lpfc_issue_clear_la(phba, vport);

	return 1;
}

/**
 * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
 * @vport: pointer to a host virtual N_Port data structure.
 * @expectRsp: flag indicating whether response is expected.
 * @cmdSize: size of the ELS command.
 * @retry: number of retries to the command IOCB when it fails.
 * @ndlp: pointer to a node-list data structure.
 * @did: destination identifier.
 * @elscmd: the ELS command code.
 *
 * This routine allocates a lpfc-IOCB data structure from the driver
 * lpfc-IOCB free-list and prepares the IOCB with the parameters passed
 * into the routine, for the discovery state machine to issue an Extended
 * Link Service (ELS) command. It is a generic lpfc-IOCB allocation
 * and preparation routine that is used by all the discovery state machine
 * routines; the ELS command-specific fields will be set up later by
 * the individual discovery machine routines after calling this routine
 * to allocate and prepare a generic IOCB data structure. It fills in the
 * Buffer Descriptor Entries (BDEs) and allocates buffers for both command
 * payload and response payload (if expected). The reference count on the
 * ndlp is incremented by 1 and the reference to the ndlp is put into
 * context1 of the IOCB data structure for this IOCB to hold the ndlp
 * reference for the command's callback function to access later.
 *
 * Return code
 *   Pointer to the newly allocated/prepared els iocb data structure
 *   NULL - when els iocb data structure allocation/preparation failed
 **/
struct lpfc_iocbq *
lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
		   uint16_t cmdSize, uint8_t retry,
		   struct lpfc_nodelist *ndlp, uint32_t did,
		   uint32_t elscmd)
{
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
	struct ulp_bde64 *bpl;
	IOCB_t *icmd;


	if (!lpfc_is_link_up(phba))
		return NULL;

	/* Allocate buffer for command iocb */
	elsiocb = lpfc_sli_get_iocbq(phba);

	if (elsiocb == NULL)
		return NULL;

	/*
	 * If this command is for fabric controller and HBA running
	 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
	 */
	if ((did == Fabric_DID) &&
	    (phba->hba_flag & HBA_FIP_SUPPORT) &&
	    ((elscmd == ELS_CMD_FLOGI) ||
	     (elscmd == ELS_CMD_FDISC) ||
	     (elscmd == ELS_CMD_LOGO)))
		switch (elscmd) {
		case ELS_CMD_FLOGI:
			elsiocb->iocb_flag |=
				((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		case ELS_CMD_FDISC:
			elsiocb->iocb_flag |=
				((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		case ELS_CMD_LOGO:
			elsiocb->iocb_flag |=
				((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		}
	else
		elsiocb->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;

	icmd = &elsiocb->iocb;

	/* fill in BDEs for command */
	/* Allocate buffer for command payload */
	pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (pcmd)
		pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
	if (!pcmd || !pcmd->virt)
		goto els_iocb_free_pcmb_exit;

	INIT_LIST_HEAD(&pcmd->list);

	/* Allocate buffer for response payload */
	if (expectRsp) {
		prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (prsp)
			prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
						     &prsp->phys);
		if (!prsp || !prsp->virt)
			goto els_iocb_free_prsp_exit;
		INIT_LIST_HEAD(&prsp->list);
	} else
		prsp = NULL;

	/* Allocate buffer for Buffer ptr list */
	pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (pbuflist)
		pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
						 &pbuflist->phys);
	if (!pbuflist || !pbuflist->virt)
		goto els_iocb_free_pbuf_exit;

	INIT_LIST_HEAD(&pbuflist->list);

	if (expectRsp) {
		icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
		icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
		icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
		icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));

		icmd->un.elsreq64.remoteID = did;	/* DID */
		icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
		icmd->ulpTimeout = phba->fc_ratov * 2;
	} else {
		icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
		icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
		icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
		icmd->un.xseq64.bdl.bdeSize = sizeof(struct ulp_bde64);
		icmd->un.xseq64.xmit_els_remoteID = did;	/* DID */
		icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
	}
	icmd->ulpBdeCount = 1;
	icmd->ulpLe = 1;
	icmd->ulpClass = CLASS3;

	/*
	 * If we have NPIV enabled, we want to send ELS traffic by VPI.
	 * For SLI4, since the driver controls VPIs we also want to include
	 * all ELS pt2pt protocol traffic as well.
	 */
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
	    ((phba->sli_rev == LPFC_SLI_REV4) &&
	     (vport->fc_flag & FC_PT2PT))) {

		if (expectRsp) {
			icmd->un.elsreq64.myID = vport->fc_myDID;

			/* For ELS_REQUEST64_CR, use the VPI by default */
			icmd->ulpContext = phba->vpi_ids[vport->vpi];
		}

		icmd->ulpCt_h = 0;
		/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
		if (elscmd == ELS_CMD_ECHO)
			icmd->ulpCt_l = 0; /* context = invalid RPI */
		else
			icmd->ulpCt_l = 1; /* context = VPI */
	}

	bpl = (struct ulp_bde64 *) pbuflist->virt;
	bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
	bpl->tus.f.bdeSize = cmdSize;
	bpl->tus.f.bdeFlags = 0;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

	if (expectRsp) {
		bpl++;
		bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
		bpl->tus.f.bdeSize = FCELSSIZE;
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
	}

	/* prevent preparing iocb with NULL ndlp reference */
	elsiocb->context1 = lpfc_nlp_get(ndlp);
	if (!elsiocb->context1)
		goto els_iocb_free_pbuf_exit;
	elsiocb->context2 = pcmd;
	elsiocb->context3 = pbuflist;
	elsiocb->retry = retry;
	elsiocb->vport = vport;
	elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;

	if (prsp) {
		list_add(&prsp->list, &pcmd->list);
	}
	if (expectRsp) {
		/* Xmit ELS command <elsCmd> to remote NPORT <did> */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "0116 Xmit ELS command x%x to remote "
				 "NPORT x%x I/O tag: x%x, port state: x%x\n",
				 elscmd, did, elsiocb->iotag,
				 vport->port_state);
	} else {
		/* Xmit ELS response <elsCmd> to remote NPORT <did> */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "0117 Xmit ELS response x%x to remote "
				 "NPORT x%x I/O tag: x%x, size: x%x\n",
				 elscmd, ndlp->nlp_DID, elsiocb->iotag,
				 cmdSize);
	}
	return elsiocb;

els_iocb_free_pbuf_exit:
	if (expectRsp)
		lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
	kfree(pbuflist);

els_iocb_free_prsp_exit:
	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
	kfree(prsp);

els_iocb_free_pcmb_exit:
	kfree(pcmd);
	lpfc_sli_release_iocbq(phba, elsiocb);
	return NULL;
}

/**
 * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a fabric registration login for a @vport. An
 * active ndlp node with Fabric_DID must already exist for this @vport.
 * The routine invokes two mailbox commands to carry out fabric registration
 * login through the HBA firmware: the first mailbox command requests the
 * HBA to perform link configuration for the @vport; and the second mailbox
 * command requests the HBA to perform the actual fabric registration login
 * with the @vport.
 *
 * Return code
 *   0 - successfully issued fabric registration login for @vport
 *   -ENXIO -- failed to issue fabric registration login for @vport
 **/
int
lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct serv_parm *sp;
	int rc;
	int err = 0;

	sp = &phba->fc_fabparam;
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		err = 1;
		goto fail;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		err = 2;
		goto fail;
	}

	vport->port_state = LPFC_FABRIC_CFG_LINK;
	lpfc_config_link(phba, mbox);
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->vport = vport;

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		err = 3;
		goto fail_free_mbox;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		err = 4;
		goto fail;
	}
	rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
			  ndlp->nlp_rpi);
	if (rc) {
		err = 5;
		goto fail_free_mbox;
	}

	mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
	mbox->vport = vport;
	/* increment the reference count on ndlp to hold reference
	 * for the callback routine.
	 */
	mbox->context2 = lpfc_nlp_get(ndlp);

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		err = 6;
		goto fail_issue_reg_login;
	}

	return 0;

fail_issue_reg_login:
	/* decrement the reference count on ndlp just incremented
	 * for the failed mbox command.
	 */
	lpfc_nlp_put(ndlp);
	mp = (struct lpfc_dmabuf *) mbox->context1;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
fail_free_mbox:
	mempool_free(mbox, phba->mbox_mem_pool);

fail:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
		"0249 Cannot issue Register Fabric login: Err %d\n", err);
	return -ENXIO;
}

/**
 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
 * the @vport. This mailbox command is necessary for SLI4 port only.
 *
 * Return code
 *   0 - successfully issued REG_VFI for @vport
 *   A failure code otherwise.
 **/
int
lpfc_issue_reg_vfi(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_nodelist *ndlp;
	struct serv_parm *sp;
	struct lpfc_dmabuf *dmabuf;
	int rc = 0;

	sp = &phba->fc_fabparam;
	/* move forward in case of SLI4 FC port loopback test and pt2pt mode */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    !(phba->link_flag & LS_LOOPBACK_MODE) &&
	    !(vport->fc_flag & FC_PT2PT)) {
		ndlp = lpfc_findnode_did(vport, Fabric_DID);
		if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
			rc = -ENODEV;
			goto fail;
		}
	}

	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf) {
		rc = -ENOMEM;
		goto fail;
	}
	dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
	if (!dmabuf->virt) {
		rc = -ENOMEM;
		goto fail_free_dmabuf;
	}

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		rc = -ENOMEM;
		goto fail_free_coherent;
	}
	vport->port_state = LPFC_FABRIC_CFG_LINK;
	memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(vport->fc_sparam));
	lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
	mboxq->vport = vport;
	mboxq->context1 = dmabuf;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = -ENXIO;
		goto fail_free_mbox;
	}
	return 0;

fail_free_mbox:
	mempool_free(mboxq, phba->mbox_mem_pool);
fail_free_coherent:
	lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
fail_free_dmabuf:
	kfree(dmabuf);
fail:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
		"0289 Issue Register VFI failed: Err %d\n", rc);
	return rc;
}

/**
 * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for
 * the @vport. This mailbox command is necessary for SLI4 port only.
 *
 * Return code
 *   0 - successfully issued UNREG_VFI for @vport
 *   A failure code otherwise.
 **/
int
lpfc_issue_unreg_vfi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct Scsi_Host *shost;
	LPFC_MBOXQ_t *mboxq;
	int rc;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2556 UNREG_VFI mbox allocation failed "
				"HBA state x%x\n", phba->pport->port_state);
		return -ENOMEM;
	}

	lpfc_unreg_vfi(mboxq, vport);
	mboxq->vport = vport;
	mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl;

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2557 UNREG_VFI issue mbox failed rc x%x "
				"HBA state x%x\n",
				rc, phba->pport->port_state);
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EIO;
	}

	shost = lpfc_shost_from_vport(vport);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VFI_REGISTERED;
	spin_unlock_irq(shost->host_lock);
	return 0;
}

/**
 * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
 * @vport: pointer to a host virtual N_Port data structure.
 * @sp: pointer to service parameter data structure.
 *
 * This routine is called from the FLOGI/FDISC completion handler functions.
 * lpfc_check_clean_addr_bit returns 1 when the FCID, Fabric portname, or
 * Fabric nodename has changed in the completion service parameters;
 * otherwise it returns 0. This function also sets a flag in the vport data
 * structure to delay N_Port discovery after the FLOGI/FDISC completion if
 * the Clean Address bit in the FLOGI/FDISC response is cleared and the
 * FCID, Fabric portname, or Fabric nodename has changed in the completion
 * service parameters.
 *
 * Return code
 *   0 - FCID, Fabric Nodename, and Fabric portname are not changed.
 *   1 - FCID, Fabric Nodename, or Fabric portname is changed.
 *
 **/
static uint8_t
lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
		struct serv_parm *sp)
{
	uint8_t fabric_param_changed = 0;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if ((vport->fc_prevDID != vport->fc_myDID) ||
		memcmp(&vport->fabric_portname, &sp->portName,
			sizeof(struct lpfc_name)) ||
		memcmp(&vport->fabric_nodename, &sp->nodeName,
			sizeof(struct lpfc_name)))
		fabric_param_changed = 1;

	/*
	 * Word 1 Bit 31 in common service parameter is overloaded.
	 * Word 1 Bit 31 in FLOGI request is multiple NPort request
	 * Word 1 Bit 31 in FLOGI response is clean address bit
	 *
	 * If fabric parameter is changed and clean address bit is
	 * cleared delay nport discovery if
	 * - vport->fc_prevDID != 0 (not initial discovery) OR
	 * - lpfc_delay_discovery module parameter is set.
	 */
	if (fabric_param_changed && !sp->cmn.clean_address_bit &&
	    (vport->fc_prevDID || lpfc_delay_discovery)) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_DISC_DELAYED;
		spin_unlock_irq(shost->host_lock);
	}

	return fabric_param_changed;
}


/**
 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 * @irsp: pointer to the IOCB within the lpfc response IOCB.
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into a fabric
 * port in a fabric topology. It properly sets up the parameters to the @ndlp
 * from the IOCB response. It also checks the newly assigned N_Port ID to the
 * @vport against the previously assigned N_Port ID. If it is different from
 * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine
 * is invoked on all the remaining nodes with the @vport to unregister the
 * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin()
 * is invoked to register login to the fabric.
 *
 * Return code
 *   0 - Success (currently, always return 0)
 **/
static int
lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   struct serv_parm *sp, IOCB_t *irsp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_nodelist *np;
	struct lpfc_nodelist *next_np;
	uint8_t fabric_param_changed;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_FABRIC;
	spin_unlock_irq(shost->host_lock);

	phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
	if (sp->cmn.edtovResolution)	/* E_D_TOV ticks are in nanoseconds */
		phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;

	phba->fc_edtovResol = sp->cmn.edtovResolution;
	phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;

	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_PUBLIC_LOOP;
		spin_unlock_irq(shost->host_lock);
	}

	vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
	ndlp->nlp_class_sup = 0;
	if (sp->cls1.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS1;
	if (sp->cls2.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS2;
	if (sp->cls3.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS3;
	if (sp->cls4.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS4;
	ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
				sp->cmn.bbRcvSizeLsb;

	fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
	memcpy(&vport->fabric_portname, &sp->portName,
			sizeof(struct lpfc_name));
	memcpy(&vport->fabric_nodename, &sp->nodeName,
			sizeof(struct lpfc_name));
	memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));

	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
		if (sp->cmn.response_multiple_NPort) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_ELS | LOG_VPORT,
					 "1816 FLOGI NPIV supported, "
					 "response data 0x%x\n",
					 sp->cmn.response_multiple_NPort);
			spin_lock_irq(&phba->hbalock);
			phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
			spin_unlock_irq(&phba->hbalock);
		} else {
			/* Because we asked f/w for NPIV it still expects us
			 * to call reg_vnpid at least for the physical host
			 */
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_ELS | LOG_VPORT,
					 "1817 Fabric does not support NPIV "
					 "- configuring single port mode.\n");
			spin_lock_irq(&phba->hbalock);
			phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
			spin_unlock_irq(&phba->hbalock);
		}
	}

	if (fabric_param_changed &&
		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {

		/* If our NportID changed, we need to ensure all
		 * remaining NPORTs get unreg_login'ed.
		 */
		list_for_each_entry_safe(np, next_np,
					 &vport->fc_nodes, nlp_listp) {
			if (!NLP_CHK_NODE_ACT(np))
				continue;
			if ((np->nlp_state != NLP_STE_NPR_NODE) ||
			    !(np->nlp_flag & NLP_NPR_ADISC))
				continue;
			spin_lock_irq(shost->host_lock);
			np->nlp_flag &= ~NLP_NPR_ADISC;
			spin_unlock_irq(shost->host_lock);
			lpfc_unreg_rpi(vport, np);
		}
		lpfc_cleanup_pending_mbox(vport);

		if (phba->sli_rev == LPFC_SLI_REV4) {
			lpfc_sli4_unreg_all_rpis(vport);
			lpfc_mbx_unreg_vpi(vport);
			spin_lock_irq(shost->host_lock);
			vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
			spin_unlock_irq(shost->host_lock);
		}

		/*
		 * For SLI3 and SLI4, the VPI needs to be reregistered in
		 * response to this fabric parameter change event.
		 */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
		   !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
		/*
		 * Driver needs to re-reg VPI in order for f/w
		 * to update the MAC address.
		 */
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		lpfc_register_new_vport(phba, vport, ndlp);
		return 0;
	}

	if (phba->sli_rev < LPFC_SLI_REV4) {
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
		    vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
			lpfc_register_new_vport(phba, vport, ndlp);
		else
			lpfc_issue_fabric_reglogin(vport);
	} else {
		ndlp->nlp_type |= NLP_FABRIC;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
		    (vport->vpi_state & LPFC_VPI_REGISTERED)) {
			lpfc_start_fdiscs(phba);
			lpfc_do_scr_ns_plogi(phba, vport);
		} else if (vport->fc_flag & FC_VFI_REGISTERED)
			lpfc_issue_init_vpi(vport);
		else {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
					"3135 Need register VFI: (x%x/%x)\n",
					vport->fc_prevDID, vport->fc_myDID);
			lpfc_issue_reg_vfi(vport);
		}
	}
	return 0;
}

/**
 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into an N_Port
 * in a point-to-point topology. First, the @vport's N_Port Name is compared
 * with the received N_Port Name: if the @vport's N_Port Name is greater than
 * the received N_Port Name lexicographically, this node shall assign local
 * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and
 * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise,
 * this node shall just wait for the remote node to issue PLOGI and assign
 * N_Port IDs.
 *
 * Return code
 *   0 - Success
 *   -ENXIO - Fail
 **/
static int
lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  struct serv_parm *sp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
	spin_unlock_irq(shost->host_lock);

	phba->fc_edtov = FF_DEF_EDTOV;
	phba->fc_ratov = FF_DEF_RATOV;
	rc = memcmp(&vport->fc_portname, &sp->portName,
		    sizeof(vport->fc_portname));
	memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));

	if (rc >= 0) {
		/* This side will initiate the PLOGI */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_PT2PT_PLOGI;
		spin_unlock_irq(shost->host_lock);

		/*
		 * N_Port ID cannot be 0; set ours to LocalID, the other
		 * side will be RemoteID.
		 */

		/* not equal */
		if (rc)
			vport->fc_myDID = PT2PT_LocalID;

		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!mbox)
			goto fail;

		lpfc_config_link(phba, mbox);

		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->vport = vport;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
			goto fail;
		}

		/*
		 * For SLI4, the VFI/VPI are registered AFTER the
		 * Nport with the higher WWPN sends the PLOGI with
		 * an assigned NPortId.
		 */

		/* not equal */
		if ((phba->sli_rev == LPFC_SLI_REV4) && rc)
			lpfc_issue_reg_vfi(vport);

		/* Decrement ndlp reference count indicating that ndlp can be
		 * safely released when other references to it are done.
		 */
		lpfc_nlp_put(ndlp);

		ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
		if (!ndlp) {
			/*
			 * Cannot find existing Fabric ndlp, so allocate a
			 * new one
			 */
			ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
			if (!ndlp)
				goto fail;
			lpfc_nlp_init(vport, ndlp, PT2PT_RemoteID);
		} else if (!NLP_CHK_NODE_ACT(ndlp)) {
			ndlp = lpfc_enable_node(vport, ndlp,
						NLP_STE_UNUSED_NODE);
			if (!ndlp)
				goto fail;
		}

		memcpy(&ndlp->nlp_portname, &sp->portName,
		       sizeof(struct lpfc_name));
		memcpy(&ndlp->nlp_nodename, &sp->nodeName,
		       sizeof(struct lpfc_name));
		/* Set state will put ndlp onto node list if not already done */
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
	} else
		/* This side will wait for the PLOGI, decrement ndlp reference
		 * count indicating that ndlp can be released when other
		 * references to it are done.
		 */
		lpfc_nlp_put(ndlp);

	/* If we are pt2pt with another NPort, force NPIV off! */
	phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_PT2PT;
	spin_unlock_irq(shost->host_lock);

	/* Start discovery - this should just do CLEAR_LA */
	lpfc_disc_start(vport);
	return 0;
fail:
	return -ENXIO;
}

/**
 * lpfc_cmpl_els_flogi - Completion callback function for flogi
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is the top-level completion callback function for issuing
 * a Fabric Login (FLOGI) command. If the response IOCB reported error,
 * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If
 * retry has been made (either immediately or delayed with lpfc_els_retry()
 * returning 1), the command IOCB will be released and function returned.
 * If the retry attempt has been given up (possibly reach the maximum
 * number of retries), one additional decrement of ndlp reference shall be
 * invoked before going out after releasing the command IOCB. This will
 * actually release the remote node (Note, lpfc_els_free_iocb() will also
 * invoke one decrement of ndlp reference count). If no error reported in
 * the IOCB status, the command Port ID field is used to determine whether
 * this is a point-to-point topology or a fabric topology: if the Port ID
 * field is assigned, it is a fabric topology; otherwise, it is a
 * point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or
 * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the
 * specific topology completion conditions.
 **/
static void
lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		    struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	IOCB_t *irsp = &rspiocb->iocb;
	struct lpfc_nodelist *ndlp = cmdiocb->context1;
	struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
	struct serv_parm *sp;
	uint16_t fcf_index;
	int rc;

	/* Check to see if link went down during discovery */
	if (lpfc_els_chk_latt(vport)) {
		/* One additional decrement on node reference count to
		 * trigger the release of the node
		 */
		lpfc_nlp_put(ndlp);
		goto out;
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"FLOGI cmpl: status:x%x/x%x state:x%x",
		irsp->ulpStatus, irsp->un.ulpWord[4],
		vport->port_state);

	if (irsp->ulpStatus) {
		/*
		 * In case of FIP mode, perform roundrobin FCF failover
		 * due to new FCF discovery
		 */
		if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
		    (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
			if (phba->link_state < LPFC_LINK_UP)
				goto stop_rr_fcf_flogi;
			if ((phba->fcoe_cvl_eventtag_attn ==
			     phba->fcoe_cvl_eventtag) &&
			    (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
			    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
			     IOERR_SLI_ABORTED))
				goto stop_rr_fcf_flogi;
			else
				phba->fcoe_cvl_eventtag_attn =
					phba->fcoe_cvl_eventtag;
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
					"2611 FLOGI failed on FCF (x%x), "
					"status:x%x/x%x, tmo:x%x, perform "
					"roundrobin FCF failover\n",
					phba->fcf.current_rec.fcf_indx,
					irsp->ulpStatus, irsp->un.ulpWord[4],
					irsp->ulpTimeout);
			lpfc_sli4_set_fcf_flogi_fail(phba,
					phba->fcf.current_rec.fcf_indx);
			fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
			rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
			if (rc)
				goto out;
		}

stop_rr_fcf_flogi:
		/* FLOGI failure */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				"2858 FLOGI failure Status:x%x/x%x TMO:x%x\n",
				irsp->ulpStatus, irsp->un.ulpWord[4],
				irsp->ulpTimeout);

		/* Check for retry */
		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
			goto out;

		/* FLOGI failure */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n",
				 irsp->ulpStatus, irsp->un.ulpWord[4],
				 irsp->ulpTimeout);

		/* FLOGI failed, so there is no fabric */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
		spin_unlock_irq(shost->host_lock);

		/* If private loop, then allow max outstanding els to be
		 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
		 * alpa map would take too long otherwise.
		 */
		if (phba->alpa_map[0] == 0)
			vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
		if ((phba->sli_rev == LPFC_SLI_REV4) &&
		    (!(vport->fc_flag & FC_VFI_REGISTERED) ||
		     (vport->fc_prevDID != vport->fc_myDID))) {
			if (vport->fc_flag & FC_VFI_REGISTERED)
				lpfc_sli4_unreg_all_rpis(vport);
			lpfc_issue_reg_vfi(vport);
			lpfc_nlp_put(ndlp);
			goto out;
		}
		goto flogifail;
	}
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
	vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
	spin_unlock_irq(shost->host_lock);

	/*
	 * The FLogI succeeded. Sync the data for the CPU before
	 * accessing it.
	 */
	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);

	sp = prsp->virt + sizeof(uint32_t);

	/* FLOGI completes successfully */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0101 FLOGI completes successfully "
			 "Data: x%x x%x x%x x%x\n",
			 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
			 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution);

	if (vport->port_state == LPFC_FLOGI) {
		/*
		 * If Common Service Parameters indicate Nport
		 * we are point to point, if Fport we are Fabric.
		 */
		if (sp->cmn.fPort)
			rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
		else if (!(phba->hba_flag & HBA_FCOE_MODE))
			rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
		else {
			lpfc_printf_vlog(vport, KERN_ERR,
				LOG_FIP | LOG_ELS,
				"2831 FLOGI response with cleared Fabric "
				"bit fcf_index 0x%x "
				"Switch Name %02x%02x%02x%02x%02x%02x%02x%02x "
				"Fabric Name "
				"%02x%02x%02x%02x%02x%02x%02x%02x\n",
				phba->fcf.current_rec.fcf_indx,
				phba->fcf.current_rec.switch_name[0],
				phba->fcf.current_rec.switch_name[1],
				phba->fcf.current_rec.switch_name[2],
				phba->fcf.current_rec.switch_name[3],
				phba->fcf.current_rec.switch_name[4],
				phba->fcf.current_rec.switch_name[5],
				phba->fcf.current_rec.switch_name[6],
				phba->fcf.current_rec.switch_name[7],
				phba->fcf.current_rec.fabric_name[0],
				phba->fcf.current_rec.fabric_name[1],
				phba->fcf.current_rec.fabric_name[2],
				phba->fcf.current_rec.fabric_name[3],
				phba->fcf.current_rec.fabric_name[4],
				phba->fcf.current_rec.fabric_name[5],
				phba->fcf.current_rec.fabric_name[6],
				phba->fcf.current_rec.fabric_name[7]);
			lpfc_nlp_put(ndlp);
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
			spin_unlock_irq(&phba->hbalock);
			goto out;
		}
		if (!rc) {
			/* Mark the FCF discovery process done */
			if (phba->hba_flag & HBA_FIP_SUPPORT)
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
						LOG_ELS,
						"2769 FLOGI to FCF (x%x) "
						"completed successfully\n",
						phba->fcf.current_rec.fcf_indx);
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
			spin_unlock_irq(&phba->hbalock);
			goto out;
		}
	}

flogifail:
	lpfc_nlp_put(ndlp);

	if (!lpfc_error_lost_link(irsp)) {
		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(vport);

		/* Start discovery */
		lpfc_disc_start(vport);
	} else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
			(((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
			 IOERR_SLI_ABORTED) &&
			((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
			 IOERR_SLI_DOWN))) &&
			(phba->link_state != LPFC_CLEAR_LA)) {
		/* If FLOGI failed enable link interrupt. */
		lpfc_issue_clear_la(phba, vport);
	}
out:
	lpfc_els_free_iocb(phba, cmdiocb);
}

/**
 * lpfc_issue_els_flogi - Issue a flogi iocb command for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @retry: number of retries to the command IOCB.
 *
 * This routine issues a Fabric Login (FLOGI) Request ELS command
 * for a @vport. The initiator service parameters are put into the payload
 * of the FLOGI Request IOCB and the top-level callback function pointer
 * to the lpfc_cmpl_els_flogi() routine is put into the IOCB completion
 * callback function field. The lpfc_issue_fabric_iocb routine is invoked
 * to send out the FLOGI ELS command with one outstanding fabric IOCB at a
 * time.
 *
 * Note that, in the lpfc_prep_els_iocb() routine, the reference count of
 * ndlp will be incremented by 1 for holding the ndlp and the reference to
 * ndlp will be stored into the context1 field of the IOCB for the
 * completion callback function to the FLOGI ELS command.
 *
 * Return code
 *   0 - successfully issued flogi iocb for @vport
 *   1 - failed to issue flogi iocb for @vport
 **/
static int
lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		     uint8_t retry)
{
	struct lpfc_hba  *phba = vport->phba;
	struct serv_parm *sp;
	IOCB_t *icmd;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_sli_ring *pring;
	uint8_t *pcmd;
	uint16_t cmdsize;
	uint32_t tmo;
	int rc;

	pring = &phba->sli.ring[LPFC_ELS_RING];

	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
				     ndlp->nlp_DID, ELS_CMD_FLOGI);

	if (!elsiocb)
		return 1;

	icmd = &elsiocb->iocb;
	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);

	/* For FLOGI request, remainder of payload is service parameters */
	*((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
	pcmd += sizeof(uint32_t);
	memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
	sp = (struct serv_parm *) pcmd;

	/* Setup CSPs accordingly for Fabric */
	sp->cmn.e_d_tov = 0;
	sp->cmn.w2.r_a_tov = 0;
	sp->cmn.virtual_fabric_support = 0;
	sp->cls1.classValid = 0;
	if (sp->cmn.fcphLow < FC_PH3)
		sp->cmn.fcphLow = FC_PH3;
	if (sp->cmn.fcphHigh < FC_PH3)
		sp->cmn.fcphHigh = FC_PH3;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
		    LPFC_SLI_INTF_IF_TYPE_0) {
			elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
			elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
			/* FLOGI needs to be 3 for WQE FCFI */
			/* Set the fcfi to the fcfi we registered with */
			elsiocb->iocb.ulpContext = phba->fcf.fcfi;
		}
		/* Can't do SLI4 class2 without support sequence coalescing */
		sp->cls2.classValid = 0;
		sp->cls2.seqDelivery = 0;
	} else {
		/* Historical, setting sequential-delivery bit for SLI3 */
		sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0;
		sp->cls3.seqDelivery = (sp->cls3.classValid) ? 1 : 0;
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
			sp->cmn.request_multiple_Nport = 1;
			/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
			icmd->ulpCt_h = 1;
			icmd->ulpCt_l = 0;
		} else
			sp->cmn.request_multiple_Nport = 0;
	}

	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
		icmd->un.elsreq64.myID = 0;
		icmd->un.elsreq64.fl = 1;
	}

	tmo = phba->fc_ratov;
	phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
	lpfc_set_disctmo(vport);
	phba->fc_ratov = tmo;

	phba->fc_stat.elsXmitFLOGI++;
	elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Issue FLOGI: opt:x%x",
		phba->sli3_options, 0, 0);

	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
	if (rc == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
	}
	return 0;
}

/**
 * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs
 * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq
 * list and issues an abort IOCB command on each outstanding IOCB that
 * contains an active Fabric_DID ndlp. Note that this function is to issue
 * the abort IOCB command on all the outstanding IOCBs, thus when this
 * function returns, it does not guarantee all the IOCBs are actually aborted.
 *
 * Return code
 *   0 - Successfully issued abort iocb on all outstanding flogis (Always 0)
 **/
int
lpfc_els_abort_flogi(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	struct lpfc_nodelist *ndlp;
	IOCB_t *icmd;

	/* Abort outstanding I/O on NPort <nlp_DID> */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"0201 Abort outstanding I/O on NPort x%x\n",
			Fabric_DID);

	pring = &phba->sli.ring[LPFC_ELS_RING];

	/*
	 * Check the txcmplq for an iocb that matches the nport the driver is
	 * searching for.
	 */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		icmd = &iocb->iocb;
		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
			ndlp = (struct lpfc_nodelist *)(iocb->context1);
			if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
			    (ndlp->nlp_DID == Fabric_DID))
				lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		}
	}
	spin_unlock_irq(&phba->hbalock);

	return 0;
}

/**
 * lpfc_initial_flogi - Issue an initial fabric login for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues an initial Fabric Login (FLOGI) for the @vport
 * specified. It first searches for the ndlp with the Fabric_DID (0xfffffe)
 * on the @vport's ndlp list. If no such ndlp is found, it will create an
 * ndlp and put it into the @vport's ndlp list. If an inactive ndlp is found
 * on the list, it will just be enabled and made active. The
 * lpfc_issue_els_flogi() routine is then invoked with the @vport and the
 * ndlp to perform the FLOGI for the @vport.
 *
 * Return code
 *   0 - failed to issue initial flogi for @vport
 *   1 - successfully issued initial flogi for @vport
 **/
int
lpfc_initial_flogi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp;

	vport->port_state = LPFC_FLOGI;
	lpfc_set_disctmo(vport);

	/* First look for the Fabric ndlp */
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return 0;
		lpfc_nlp_init(vport, ndlp, Fabric_DID);
		/* Set the node type */
		ndlp->nlp_type |= NLP_FABRIC;
		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		/* re-setup ndlp without removing from node list */
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
		if (!ndlp)
			return 0;
	}

	if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
		/* This decrement of reference count to node shall kick off
		 * the release of the node.
		 */
		lpfc_nlp_put(ndlp);
		return 0;
	}
	return 1;
}

/**
 * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues an initial Fabric Discover (FDISC) for the @vport
 * specified. It first searches for the ndlp with the Fabric_DID (0xfffffe)
 * on the @vport's ndlp list. If no such ndlp is found, it will create an
 * ndlp and put it into the @vport's ndlp list. If an inactive ndlp is found
 * on the list, it will just be enabled and made active. The
 * lpfc_issue_els_fdisc() routine is then invoked with the @vport and the
 * ndlp to perform the FDISC for the @vport.
 *
 * Return code
 *   0 - failed to issue initial fdisc for @vport
 *   1 - successfully issued initial fdisc for @vport
 **/
int
lpfc_initial_fdisc(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp;

	/* First look for the Fabric ndlp */
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return 0;
		lpfc_nlp_init(vport, ndlp, Fabric_DID);
		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		/* re-setup ndlp without removing from node list */
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
		if (!ndlp)
			return 0;
	}

	if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
		/* decrement node reference count to trigger the release of
		 * the node.
		 */
		lpfc_nlp_put(ndlp);
		return 0;
	}
	return 1;
}

/**
 * lpfc_more_plogi - Check and issue remaining plogis for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine checks whether there are more remaining Port Logins
 * (PLOGI) to be issued for the @vport.
 * If so, it will invoke the routine lpfc_els_disc_plogi() to go through the
 * Node Port Recovery (NPR) nodes to issue ELS PLOGIs up to the configured
 * discover threads with the @vport (@vport->cfg_discovery_threads). The
 * function also decrements the @vport's num_disc_node by 1 if it is not
 * already 0.
 **/
void
lpfc_more_plogi(struct lpfc_vport *vport)
{
	int sentplogi;

	if (vport->num_disc_nodes)
		vport->num_disc_nodes--;

	/* Continue discovery with <num_disc_nodes> PLOGIs to go */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0232 Continue discovery with %d PLOGIs to go "
			 "Data: x%x x%x x%x\n",
			 vport->num_disc_nodes, vport->fc_plogi_cnt,
			 vport->fc_flag, vport->port_state);
	/* Check to see if there are more PLOGIs to be sent */
	if (vport->fc_flag & FC_NLP_MORE)
		/* go thru NPR nodes and issue any remaining ELS PLOGIs */
		sentplogi = lpfc_els_disc_plogi(vport);

	return;
}

/**
 * lpfc_plogi_confirm_nport - Confirm plogi wwpn matches stored ndlp
 * @phba: pointer to lpfc hba data structure.
 * @prsp: pointer to response IOCB payload.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine checks and indicates whether the WWPN of an N_Port, retrieved
 * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_Port.
 * The following cases are considered N_Port confirmed:
 * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on vport list and matches
 * the WWPN of the N_Port logged into; 3) The @ndlp is not on vport list but
 * it does not have WWPN assigned either. If the WWPN is confirmed, the
 * pointer to the @ndlp will be returned. If the WWPN is not confirmed:
 * 1) if there is a node on vport list other than the @ndlp with the same
 * WWPN of the N_Port PLOGI logged into, the lpfc_unreg_rpi() will be invoked
 * on that node to release the RPI associated with the node; 2) if there is
 * no node found on vport list with the same WWPN of the N_Port PLOGI logged
 * into, a new node shall be allocated (or activated). In either case, the
 * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall
 * be released and the new_ndlp shall be put on to the vport node list and
 * its pointer returned as the confirmed node.
 *
 * Note that before the @ndlp got "released", the keepDID from not-matching
 * or inactive "new_ndlp" on the vport node list is assigned to the nlp_DID
 * of the @ndlp. This is because the release of @ndlp is actually to put it
 * into an inactive state on the vport node list and the vport node list
 * management algorithm does not allow two nodes with the same DID.
 *
 * Return code
 *   pointer to the PLOGI N_Port @ndlp
 **/
static struct lpfc_nodelist *
lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
			 struct lpfc_nodelist *ndlp)
{
	struct lpfc_vport    *vport = ndlp->vport;
	struct lpfc_nodelist *new_ndlp;
	struct lpfc_rport_data *rdata;
	struct fc_rport *rport;
	struct serv_parm *sp;
	uint8_t  name[sizeof(struct lpfc_name)];
	uint32_t rc, keepDID = 0;
	int  put_node;
	int  put_rport;
	struct lpfc_node_rrqs rrq;

	/* Fabric nodes can have the same WWPN so we don't bother searching
	 * by WWPN. Just return the ndlp that was given to us.
	 */
	if (ndlp->nlp_type & NLP_FABRIC)
		return ndlp;

	sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t));
	memset(name, 0, sizeof(struct lpfc_name));

	/* Now we find out if the NPort we are logging into, matches the WWPN
	 * we have for that ndlp. If not, we have some work to do.
	 */
	new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);

	if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp))
		return ndlp;
	memset(&rrq.xri_bitmap, 0, sizeof(new_ndlp->active_rrqs.xri_bitmap));

	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
		 "3178 PLOGI confirm: ndlp %p x%x: new_ndlp %p\n",
		 ndlp, ndlp->nlp_DID, new_ndlp);

	if (!new_ndlp) {
		rc = memcmp(&ndlp->nlp_portname, name,
			    sizeof(struct lpfc_name));
		if (!rc)
			return ndlp;
		new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
		if (!new_ndlp)
			return ndlp;
		lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID);
	} else if (!NLP_CHK_NODE_ACT(new_ndlp)) {
		rc = memcmp(&ndlp->nlp_portname, name,
			    sizeof(struct lpfc_name));
		if (!rc)
			return ndlp;
		new_ndlp = lpfc_enable_node(vport, new_ndlp,
					    NLP_STE_UNUSED_NODE);
		if (!new_ndlp)
			return ndlp;
		keepDID = new_ndlp->nlp_DID;
		if (phba->sli_rev == LPFC_SLI_REV4)
			memcpy(&rrq.xri_bitmap,
				&new_ndlp->active_rrqs.xri_bitmap,
				sizeof(new_ndlp->active_rrqs.xri_bitmap));
	} else {
		keepDID = new_ndlp->nlp_DID;
		if (phba->sli_rev == LPFC_SLI_REV4)
			memcpy(&rrq.xri_bitmap,
				&new_ndlp->active_rrqs.xri_bitmap,
				sizeof(new_ndlp->active_rrqs.xri_bitmap));
	}

	lpfc_unreg_rpi(vport, new_ndlp);
	new_ndlp->nlp_DID = ndlp->nlp_DID;
	new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
	if (phba->sli_rev == LPFC_SLI_REV4)
		memcpy(new_ndlp->active_rrqs.xri_bitmap,
			&ndlp->active_rrqs.xri_bitmap,
			sizeof(ndlp->active_rrqs.xri_bitmap));

	if (ndlp->nlp_flag & NLP_NPR_2B_DISC)
		new_ndlp->nlp_flag |= NLP_NPR_2B_DISC;
	ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;

	/* Set state will put new_ndlp on to node list if not already done */
	lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state);

	/* Move this back to NPR state */
	if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
		/* The new_ndlp is replacing ndlp totally, so we need
		 * to put ndlp on UNUSED list and try to free it.
		 */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "3179 PLOGI confirm NEW: %x %x\n",
			 new_ndlp->nlp_DID, keepDID);

		/* Fix up the rport accordingly */
		rport = ndlp->rport;
		if (rport) {
			rdata = rport->dd_data;
			if (rdata->pnode == ndlp) {
				lpfc_nlp_put(ndlp);
				ndlp->rport = NULL;
				rdata->pnode = lpfc_nlp_get(new_ndlp);
				new_ndlp->rport = rport;
			}
			new_ndlp->nlp_type = ndlp->nlp_type;
		}
		/* We shall actually free the ndlp with both nlp_DID and
		 * nlp_portname fields equal to 0 to avoid any ndlp on the
		 * nodelist never to be used.
		 */
		if (ndlp->nlp_DID == 0) {
			spin_lock_irq(&phba->ndlp_lock);
			NLP_SET_FREE_REQ(ndlp);
			spin_unlock_irq(&phba->ndlp_lock);
		}

		/* Two ndlps cannot have the same did on the nodelist */
		ndlp->nlp_DID = keepDID;
		if (phba->sli_rev == LPFC_SLI_REV4)
			memcpy(&ndlp->active_rrqs.xri_bitmap,
				&rrq.xri_bitmap,
				sizeof(ndlp->active_rrqs.xri_bitmap));
		lpfc_drop_node(vport, ndlp);
	}
	else {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "3180 PLOGI confirm SWAP: %x %x\n",
			 new_ndlp->nlp_DID, keepDID);

		lpfc_unreg_rpi(vport, ndlp);

		/* Two ndlps cannot have the same did */
		ndlp->nlp_DID = keepDID;
		if (phba->sli_rev == LPFC_SLI_REV4)
			memcpy(&ndlp->active_rrqs.xri_bitmap,
				&rrq.xri_bitmap,
				sizeof(ndlp->active_rrqs.xri_bitmap));

		/* Since we are swapping the ndlp passed in with the new one
		 * and the did has already been swapped, copy over state.
		 * The new WWNs are already in new_ndlp since that's what
		 * we looked it up by at the beginning of this routine.
		 */
		new_ndlp->nlp_state = ndlp->nlp_state;

		/* Since we are switching over to the new_ndlp, the old
		 * ndlp should be put in the NPR state, unless we have
		 * already started re-discovery on it.
		 */
		if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
		    (ndlp->nlp_state == NLP_STE_MAPPED_NODE))
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);

		/* Fix up the rport accordingly */
		rport = ndlp->rport;
		if (rport) {
			rdata = rport->dd_data;
			put_node = rdata->pnode != NULL;
			put_rport = ndlp->rport != NULL;
			rdata->pnode = NULL;
			ndlp->rport = NULL;
			if (put_node)
				lpfc_nlp_put(ndlp);
			if (put_rport)
				put_device(&rport->dev);
		}
	}
	return new_ndlp;
}

/**
 * lpfc_end_rscn - Check and handle more rscn for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine checks whether more Registration State Change
 * Notifications (RSCNs) came in while the discovery state machine was in
 * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be
 * invoked to handle the additional RSCNs for the @vport. Otherwise, the
 * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of
 * handling the RSCNs.
 **/
void
lpfc_end_rscn(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (vport->fc_flag & FC_RSCN_MODE) {
		/*
		 * Check to see if more RSCNs came in while we were
		 * processing this one.
		 */
		if (vport->fc_rscn_id_cnt ||
		    (vport->fc_flag & FC_RSCN_DISCOVERY) != 0)
			lpfc_els_handle_rscn(vport);
		else {
			spin_lock_irq(shost->host_lock);
			vport->fc_flag &= ~FC_RSCN_MODE;
			spin_unlock_irq(shost->host_lock);
		}
	}
}

/**
 * lpfc_cmpl_els_rrq - Completion handler for els RRQs.
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine will call the clear rrq function to free the rrq and
 * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not
 * exist then the clear_rrq is still called because the rrq needs to
 * be freed.
1665 **/ 1666 1667 static void 1668 lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1669 struct lpfc_iocbq *rspiocb) 1670 { 1671 struct lpfc_vport *vport = cmdiocb->vport; 1672 IOCB_t *irsp; 1673 struct lpfc_nodelist *ndlp; 1674 struct lpfc_node_rrq *rrq; 1675 1676 /* we pass cmdiocb to state machine which needs rspiocb as well */ 1677 rrq = cmdiocb->context_un.rrq; 1678 cmdiocb->context_un.rsp_iocb = rspiocb; 1679 1680 irsp = &rspiocb->iocb; 1681 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1682 "RRQ cmpl: status:x%x/x%x did:x%x", 1683 irsp->ulpStatus, irsp->un.ulpWord[4], 1684 irsp->un.elsreq64.remoteID); 1685 1686 ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID); 1687 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || ndlp != rrq->ndlp) { 1688 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 1689 "2882 RRQ completes to NPort x%x " 1690 "with no ndlp. Data: x%x x%x x%x\n", 1691 irsp->un.elsreq64.remoteID, 1692 irsp->ulpStatus, irsp->un.ulpWord[4], 1693 irsp->ulpIoTag); 1694 goto out; 1695 } 1696 1697 /* rrq completes to NPort <nlp_DID> */ 1698 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1699 "2880 RRQ completes to NPort x%x " 1700 "Data: x%x x%x x%x x%x x%x\n", 1701 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 1702 irsp->ulpTimeout, rrq->xritag, rrq->rxid); 1703 1704 if (irsp->ulpStatus) { 1705 /* Check for retry */ 1706 /* RRQ failed; don't print the vport to vport rjts */ 1707 if (irsp->ulpStatus != IOSTAT_LS_RJT || 1708 (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) && 1709 ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) || 1710 (phba)->pport->cfg_log_verbose & LOG_ELS) 1711 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 1712 "2881 RRQ failure DID:%06X Status:x%x/x%x\n", 1713 ndlp->nlp_DID, irsp->ulpStatus, 1714 irsp->un.ulpWord[4]); 1715 } 1716 out: 1717 if (rrq) 1718 lpfc_clr_rrq_active(phba, rrq->xritag, rrq); 1719 lpfc_els_free_iocb(phba, cmdiocb); 1720 return; 1721 } 1722 /** 1723 * lpfc_cmpl_els_plogi - Completion callback function for plogi 1724 * @phba: pointer to lpfc hba data structure. 1725 * @cmdiocb: pointer to lpfc command iocb data structure. 1726 * @rspiocb: pointer to lpfc response iocb data structure. 1727 * 1728 * This routine is the completion callback function for issuing the Port 1729 * Login (PLOGI) command. For PLOGI completion, there must be an active 1730 * ndlp on the vport node list that matches the remote node ID from the 1731 * PLOGI response IOCB. If no such ndlp exists, the PLOGI is simply 1732 * ignored and the command IOCB released. The PLOGI response IOCB status is 1733 * checked for error conditions. If an error status is reported, PLOGI 1734 * retry shall be attempted by invoking the lpfc_els_retry() routine. 1735 * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on 1736 * the ndlp and the NLP_EVT_CMPL_PLOGI event is posted to the Discover State 1737 * Machine (DSM) for this PLOGI completion. Finally, it checks whether 1738 * there are additional N_Port nodes with the vport that need to perform 1739 * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue additional 1740 * PLOGIs.
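 *
 * As a rough sketch (illustrative only, condensed from the handler body
 * below), the good-status path reduces to:
 *
 *   prsp = list_entry(((struct lpfc_dmabuf *)cmdiocb->context2)->list.next,
 *                     struct lpfc_dmabuf, list);
 *   ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
 *   lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_PLOGI);
 *
 * while an error status is first offered to lpfc_els_retry() and only
 * reaches the state machine when no retry is scheduled.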
1741 **/ 1742 static void 1743 lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1744 struct lpfc_iocbq *rspiocb) 1745 { 1746 struct lpfc_vport *vport = cmdiocb->vport; 1747 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1748 IOCB_t *irsp; 1749 struct lpfc_nodelist *ndlp; 1750 struct lpfc_dmabuf *prsp; 1751 int disc, rc, did, type; 1752 1753 /* we pass cmdiocb to state machine which needs rspiocb as well */ 1754 cmdiocb->context_un.rsp_iocb = rspiocb; 1755 1756 irsp = &rspiocb->iocb; 1757 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1758 "PLOGI cmpl: status:x%x/x%x did:x%x", 1759 irsp->ulpStatus, irsp->un.ulpWord[4], 1760 irsp->un.elsreq64.remoteID); 1761 1762 ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID); 1763 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { 1764 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 1765 "0136 PLOGI completes to NPort x%x " 1766 "with no ndlp. Data: x%x x%x x%x\n", 1767 irsp->un.elsreq64.remoteID, 1768 irsp->ulpStatus, irsp->un.ulpWord[4], 1769 irsp->ulpIoTag); 1770 goto out; 1771 } 1772 1773 /* Since ndlp can be freed in the disc state machine, note if this node 1774 * is being used during discovery. 1775 */ 1776 spin_lock_irq(shost->host_lock); 1777 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 1778 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 1779 spin_unlock_irq(shost->host_lock); 1780 rc = 0; 1781 1782 /* PLOGI completes to NPort <nlp_DID> */ 1783 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1784 "0102 PLOGI completes to NPort x%x " 1785 "Data: x%x x%x x%x x%x x%x\n", 1786 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 1787 irsp->ulpTimeout, disc, vport->num_disc_nodes); 1788 /* Check to see if link went down during discovery */ 1789 if (lpfc_els_chk_latt(vport)) { 1790 spin_lock_irq(shost->host_lock); 1791 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 1792 spin_unlock_irq(shost->host_lock); 1793 goto out; 1794 } 1795 1796 /* ndlp could be freed in DSM, save these values now */ 1797 type = ndlp->nlp_type; 1798 did = ndlp->nlp_DID; 1799 1800 if (irsp->ulpStatus) { 1801 /* Check for retry */ 1802 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 1803 /* ELS command is being retried */ 1804 if (disc) { 1805 spin_lock_irq(shost->host_lock); 1806 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 1807 spin_unlock_irq(shost->host_lock); 1808 } 1809 goto out; 1810 } 1811 /* PLOGI failed Don't print the vport to vport rjts */ 1812 if (irsp->ulpStatus != IOSTAT_LS_RJT || 1813 (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) && 1814 ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) || 1815 (phba)->pport->cfg_log_verbose & LOG_ELS) 1816 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 1817 "2753 PLOGI failure DID:%06X Status:x%x/x%x\n", 1818 ndlp->nlp_DID, irsp->ulpStatus, 1819 irsp->un.ulpWord[4]); 1820 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 1821 if (lpfc_error_lost_link(irsp)) 1822 rc = NLP_STE_FREED_NODE; 1823 else 1824 rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1825 NLP_EVT_CMPL_PLOGI); 1826 } else { 1827 /* Good status, call state machine */ 1828 prsp = list_entry(((struct lpfc_dmabuf *) 1829 cmdiocb->context2)->list.next, 1830 struct lpfc_dmabuf, list); 1831 ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp); 1832 rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1833 NLP_EVT_CMPL_PLOGI); 1834 } 1835 1836 if (disc && vport->num_disc_nodes) { 1837 /* Check to see if there are more PLOGIs to be sent */ 1838 lpfc_more_plogi(vport); 1839 1840 if (vport->num_disc_nodes == 0) { 1841 spin_lock_irq(shost->host_lock); 1842 vport->fc_flag &= 
~FC_NDISC_ACTIVE; 1843 spin_unlock_irq(shost->host_lock); 1844 1845 lpfc_can_disctmo(vport); 1846 lpfc_end_rscn(vport); 1847 } 1848 } 1849 1850 out: 1851 lpfc_els_free_iocb(phba, cmdiocb); 1852 return; 1853 } 1854 1855 /** 1856 * lpfc_issue_els_plogi - Issue an plogi iocb command for a vport 1857 * @vport: pointer to a host virtual N_Port data structure. 1858 * @did: destination port identifier. 1859 * @retry: number of retries to the command IOCB. 1860 * 1861 * This routine issues a Port Login (PLOGI) command to a remote N_Port 1862 * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port, 1863 * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list. 1864 * This routine constructs the proper feilds of the PLOGI IOCB and invokes 1865 * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command. 1866 * 1867 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 1868 * will be incremented by 1 for holding the ndlp and the reference to ndlp 1869 * will be stored into the context1 field of the IOCB for the completion 1870 * callback function to the PLOGI ELS command. 1871 * 1872 * Return code 1873 * 0 - Successfully issued a plogi for @vport 1874 * 1 - failed to issue a plogi for @vport 1875 **/ 1876 int 1877 lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) 1878 { 1879 struct lpfc_hba *phba = vport->phba; 1880 struct serv_parm *sp; 1881 IOCB_t *icmd; 1882 struct lpfc_nodelist *ndlp; 1883 struct lpfc_iocbq *elsiocb; 1884 struct lpfc_sli *psli; 1885 uint8_t *pcmd; 1886 uint16_t cmdsize; 1887 int ret; 1888 1889 psli = &phba->sli; 1890 1891 ndlp = lpfc_findnode_did(vport, did); 1892 if (ndlp && !NLP_CHK_NODE_ACT(ndlp)) 1893 ndlp = NULL; 1894 1895 /* If ndlp is not NULL, we will bump the reference count on it */ 1896 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 1897 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 1898 ELS_CMD_PLOGI); 1899 if (!elsiocb) 1900 return 1; 1901 1902 icmd = &elsiocb->iocb; 1903 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 1904 1905 /* For PLOGI request, remainder of payload is service parameters */ 1906 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI; 1907 pcmd += sizeof(uint32_t); 1908 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); 1909 sp = (struct serv_parm *) pcmd; 1910 1911 /* 1912 * If we are a N-port connected to a Fabric, fix-up paramm's so logins 1913 * to device on remote loops work. 1914 */ 1915 if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP)) 1916 sp->cmn.altBbCredit = 1; 1917 1918 if (sp->cmn.fcphLow < FC_PH_4_3) 1919 sp->cmn.fcphLow = FC_PH_4_3; 1920 1921 if (sp->cmn.fcphHigh < FC_PH3) 1922 sp->cmn.fcphHigh = FC_PH3; 1923 1924 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1925 "Issue PLOGI: did:x%x", 1926 did, 0, 0); 1927 1928 phba->fc_stat.elsXmitPLOGI++; 1929 elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi; 1930 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 1931 1932 if (ret == IOCB_ERROR) { 1933 lpfc_els_free_iocb(phba, elsiocb); 1934 return 1; 1935 } 1936 return 0; 1937 } 1938 1939 /** 1940 * lpfc_cmpl_els_prli - Completion callback function for prli 1941 * @phba: pointer to lpfc hba data structure. 1942 * @cmdiocb: pointer to lpfc command iocb data structure. 1943 * @rspiocb: pointer to lpfc response iocb data structure. 1944 * 1945 * This routine is the completion callback function for a Process Login 1946 * (PRLI) ELS command. 
The PRLI response IOCB status is checked for error 1947 * status. If there is error status reported, PRLI retry shall be attempted 1948 * by invoking the lpfc_els_retry() routine. Otherwise, the state 1949 * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this 1950 * ndlp to mark the PRLI completion. 1951 **/ 1952 static void 1953 lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1954 struct lpfc_iocbq *rspiocb) 1955 { 1956 struct lpfc_vport *vport = cmdiocb->vport; 1957 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1958 IOCB_t *irsp; 1959 struct lpfc_sli *psli; 1960 struct lpfc_nodelist *ndlp; 1961 1962 psli = &phba->sli; 1963 /* we pass cmdiocb to state machine which needs rspiocb as well */ 1964 cmdiocb->context_un.rsp_iocb = rspiocb; 1965 1966 irsp = &(rspiocb->iocb); 1967 ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 1968 spin_lock_irq(shost->host_lock); 1969 ndlp->nlp_flag &= ~NLP_PRLI_SND; 1970 spin_unlock_irq(shost->host_lock); 1971 1972 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1973 "PRLI cmpl: status:x%x/x%x did:x%x", 1974 irsp->ulpStatus, irsp->un.ulpWord[4], 1975 ndlp->nlp_DID); 1976 /* PRLI completes to NPort <nlp_DID> */ 1977 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1978 "0103 PRLI completes to NPort x%x " 1979 "Data: x%x x%x x%x x%x\n", 1980 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 1981 irsp->ulpTimeout, vport->num_disc_nodes); 1982 1983 vport->fc_prli_sent--; 1984 /* Check to see if link went down during discovery */ 1985 if (lpfc_els_chk_latt(vport)) 1986 goto out; 1987 1988 if (irsp->ulpStatus) { 1989 /* Check for retry */ 1990 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 1991 /* ELS command is being retried */ 1992 goto out; 1993 } 1994 /* PRLI failed */ 1995 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 1996 "2754 PRLI failure DID:%06X Status:x%x/x%x\n", 1997 ndlp->nlp_DID, irsp->ulpStatus, 1998 irsp->un.ulpWord[4]); 1999 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2000 if (lpfc_error_lost_link(irsp)) 2001 goto out; 2002 else 2003 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2004 NLP_EVT_CMPL_PRLI); 2005 } else 2006 /* Good status, call state machine */ 2007 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2008 NLP_EVT_CMPL_PRLI); 2009 out: 2010 lpfc_els_free_iocb(phba, cmdiocb); 2011 return; 2012 } 2013 2014 /** 2015 * lpfc_issue_els_prli - Issue a prli iocb command for a vport 2016 * @vport: pointer to a host virtual N_Port data structure. 2017 * @ndlp: pointer to a node-list data structure. 2018 * @retry: number of retries to the command IOCB. 2019 * 2020 * This routine issues a Process Login (PRLI) ELS command for the 2021 * @vport. The PRLI service parameters are set up in the payload of the 2022 * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine 2023 * is put to the IOCB completion callback func field before invoking the 2024 * routine lpfc_sli_issue_iocb() to send out PRLI command. 2025 * 2026 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 2027 * will be incremented by 1 for holding the ndlp and the reference to ndlp 2028 * will be stored into the context1 field of the IOCB for the completion 2029 * callback function to the PRLI ELS command. 
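 *
 * A minimal caller sketch (illustrative; the same pattern appears in the
 * delayed-retry handler later in this file):
 *
 *   if (!lpfc_issue_els_prli(vport, ndlp, 0)) {
 *           ndlp->nlp_prev_state = ndlp->nlp_state;
 *           lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
 *   }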
2030 * 2031 * Return code 2032 * 0 - successfully issued prli iocb command for @vport 2033 * 1 - failed to issue prli iocb command for @vport 2034 **/ 2035 int 2036 lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2037 uint8_t retry) 2038 { 2039 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2040 struct lpfc_hba *phba = vport->phba; 2041 PRLI *npr; 2042 IOCB_t *icmd; 2043 struct lpfc_iocbq *elsiocb; 2044 uint8_t *pcmd; 2045 uint16_t cmdsize; 2046 2047 cmdsize = (sizeof(uint32_t) + sizeof(PRLI)); 2048 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2049 ndlp->nlp_DID, ELS_CMD_PRLI); 2050 if (!elsiocb) 2051 return 1; 2052 2053 icmd = &elsiocb->iocb; 2054 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2055 2056 /* For PRLI request, remainder of payload is service parameters */ 2057 memset(pcmd, 0, (sizeof(PRLI) + sizeof(uint32_t))); 2058 *((uint32_t *) (pcmd)) = ELS_CMD_PRLI; 2059 pcmd += sizeof(uint32_t); 2060 2061 /* For PRLI, remainder of payload is PRLI parameter page */ 2062 npr = (PRLI *) pcmd; 2063 /* 2064 * If our firmware version is 3.20 or later, 2065 * set the following bits for FC-TAPE support. 2066 */ 2067 if (phba->vpd.rev.feaLevelHigh >= 0x02) { 2068 npr->ConfmComplAllowed = 1; 2069 npr->Retry = 1; 2070 npr->TaskRetryIdReq = 1; 2071 } 2072 npr->estabImagePair = 1; 2073 npr->readXferRdyDis = 1; 2074 2075 /* For FCP support */ 2076 npr->prliType = PRLI_FCP_TYPE; 2077 npr->initiatorFunc = 1; 2078 2079 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2080 "Issue PRLI: did:x%x", 2081 ndlp->nlp_DID, 0, 0); 2082 2083 phba->fc_stat.elsXmitPRLI++; 2084 elsiocb->iocb_cmpl = lpfc_cmpl_els_prli; 2085 spin_lock_irq(shost->host_lock); 2086 ndlp->nlp_flag |= NLP_PRLI_SND; 2087 spin_unlock_irq(shost->host_lock); 2088 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == 2089 IOCB_ERROR) { 2090 spin_lock_irq(shost->host_lock); 2091 ndlp->nlp_flag &= ~NLP_PRLI_SND; 2092 spin_unlock_irq(shost->host_lock); 2093 lpfc_els_free_iocb(phba, elsiocb); 2094 return 1; 2095 } 2096 vport->fc_prli_sent++; 2097 return 0; 2098 } 2099 2100 /** 2101 * lpfc_rscn_disc - Perform rscn discovery for a vport 2102 * @vport: pointer to a host virtual N_Port data structure. 2103 * 2104 * This routine performs Registration State Change Notification (RSCN) 2105 * discovery for a @vport. If the @vport's node port recovery count is not 2106 * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all 2107 * the nodes that need recovery. If none of the PLOGI were needed through 2108 * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be 2109 * invoked to check and handle possible more RSCN came in during the period 2110 * of processing the current ones. 2111 **/ 2112 static void 2113 lpfc_rscn_disc(struct lpfc_vport *vport) 2114 { 2115 lpfc_can_disctmo(vport); 2116 2117 /* RSCN discovery */ 2118 /* go thru NPR nodes and issue ELS PLOGIs */ 2119 if (vport->fc_npr_cnt) 2120 if (lpfc_els_disc_plogi(vport)) 2121 return; 2122 2123 lpfc_end_rscn(vport); 2124 } 2125 2126 /** 2127 * lpfc_adisc_done - Complete the adisc phase of discovery 2128 * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs. 2129 * 2130 * This function is called when the final ADISC is completed during discovery. 2131 * This function handles clearing link attention or issuing reg_vpi depending 2132 * on whether npiv is enabled. This function also kicks off the PLOGI phase of 2133 * discovery. 
2134 * This function is called with no locks held. 2135 **/ 2136 static void 2137 lpfc_adisc_done(struct lpfc_vport *vport) 2138 { 2139 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2140 struct lpfc_hba *phba = vport->phba; 2141 2142 /* 2143 * For NPIV, cmpl_reg_vpi will set port_state to READY, 2144 * and continue discovery. 2145 */ 2146 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 2147 !(vport->fc_flag & FC_RSCN_MODE) && 2148 (phba->sli_rev < LPFC_SLI_REV4)) { 2149 lpfc_issue_reg_vpi(phba, vport); 2150 return; 2151 } 2152 /* 2153 * For SLI2, we need to set port_state to READY 2154 * and continue discovery. 2155 */ 2156 if (vport->port_state < LPFC_VPORT_READY) { 2157 /* If we get here, there is nothing to ADISC */ 2158 if (vport->port_type == LPFC_PHYSICAL_PORT) 2159 lpfc_issue_clear_la(phba, vport); 2160 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) { 2161 vport->num_disc_nodes = 0; 2162 /* go thru NPR list, issue ELS PLOGIs */ 2163 if (vport->fc_npr_cnt) 2164 lpfc_els_disc_plogi(vport); 2165 if (!vport->num_disc_nodes) { 2166 spin_lock_irq(shost->host_lock); 2167 vport->fc_flag &= ~FC_NDISC_ACTIVE; 2168 spin_unlock_irq(shost->host_lock); 2169 lpfc_can_disctmo(vport); 2170 lpfc_end_rscn(vport); 2171 } 2172 } 2173 vport->port_state = LPFC_VPORT_READY; 2174 } else 2175 lpfc_rscn_disc(vport); 2176 } 2177 2178 /** 2179 * lpfc_more_adisc - Issue more adisc as needed 2180 * @vport: pointer to a host virtual N_Port data structure. 2181 * 2182 * This routine determines whether there are more ndlps on a @vport 2183 * node list need to have Address Discover (ADISC) issued. If so, it will 2184 * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's 2185 * remaining nodes which need to have ADISC sent. 2186 **/ 2187 void 2188 lpfc_more_adisc(struct lpfc_vport *vport) 2189 { 2190 int sentadisc; 2191 2192 if (vport->num_disc_nodes) 2193 vport->num_disc_nodes--; 2194 /* Continue discovery with <num_disc_nodes> ADISCs to go */ 2195 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2196 "0210 Continue discovery with %d ADISCs to go " 2197 "Data: x%x x%x x%x\n", 2198 vport->num_disc_nodes, vport->fc_adisc_cnt, 2199 vport->fc_flag, vport->port_state); 2200 /* Check to see if there are more ADISCs to be sent */ 2201 if (vport->fc_flag & FC_NLP_MORE) { 2202 lpfc_set_disctmo(vport); 2203 /* go thru NPR nodes and issue any remaining ELS ADISCs */ 2204 sentadisc = lpfc_els_disc_adisc(vport); 2205 } 2206 if (!vport->num_disc_nodes) 2207 lpfc_adisc_done(vport); 2208 return; 2209 } 2210 2211 /** 2212 * lpfc_cmpl_els_adisc - Completion callback function for adisc 2213 * @phba: pointer to lpfc hba data structure. 2214 * @cmdiocb: pointer to lpfc command iocb data structure. 2215 * @rspiocb: pointer to lpfc response iocb data structure. 2216 * 2217 * This routine is the completion function for issuing the Address Discover 2218 * (ADISC) command. It first checks to see whether link went down during 2219 * the discovery process. If so, the node will be marked as node port 2220 * recovery for issuing discover IOCB by the link attention handler and 2221 * exit. Otherwise, the response status is checked. If error was reported 2222 * in the response status, the ADISC command shall be retried by invoking 2223 * the lpfc_els_retry() routine. Otherwise, if no error was reported in 2224 * the response status, the state machine is invoked to set transition 2225 * with respect to NLP_EVT_CMPL_ADISC event. 
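 *
 * In outline (condensed from the handler body below):
 *
 *   if (lpfc_els_chk_latt(vport))     re-mark the ndlp with NLP_NPR_2B_DISC
 *   else if (irsp->ulpStatus)         lpfc_els_retry() or NLP_EVT_CMPL_ADISC
 *   else                              NLP_EVT_CMPL_ADISC to the state machine
 *
 * and, when this node was part of discovery, lpfc_more_adisc() is called to
 * send any remaining ADISCs.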
2226 **/ 2227 static void 2228 lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2229 struct lpfc_iocbq *rspiocb) 2230 { 2231 struct lpfc_vport *vport = cmdiocb->vport; 2232 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2233 IOCB_t *irsp; 2234 struct lpfc_nodelist *ndlp; 2235 int disc; 2236 2237 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2238 cmdiocb->context_un.rsp_iocb = rspiocb; 2239 2240 irsp = &(rspiocb->iocb); 2241 ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 2242 2243 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2244 "ADISC cmpl: status:x%x/x%x did:x%x", 2245 irsp->ulpStatus, irsp->un.ulpWord[4], 2246 ndlp->nlp_DID); 2247 2248 /* Since ndlp can be freed in the disc state machine, note if this node 2249 * is being used during discovery. 2250 */ 2251 spin_lock_irq(shost->host_lock); 2252 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 2253 ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC); 2254 spin_unlock_irq(shost->host_lock); 2255 /* ADISC completes to NPort <nlp_DID> */ 2256 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2257 "0104 ADISC completes to NPort x%x " 2258 "Data: x%x x%x x%x x%x x%x\n", 2259 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 2260 irsp->ulpTimeout, disc, vport->num_disc_nodes); 2261 /* Check to see if link went down during discovery */ 2262 if (lpfc_els_chk_latt(vport)) { 2263 spin_lock_irq(shost->host_lock); 2264 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2265 spin_unlock_irq(shost->host_lock); 2266 goto out; 2267 } 2268 2269 if (irsp->ulpStatus) { 2270 /* Check for retry */ 2271 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2272 /* ELS command is being retried */ 2273 if (disc) { 2274 spin_lock_irq(shost->host_lock); 2275 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2276 spin_unlock_irq(shost->host_lock); 2277 lpfc_set_disctmo(vport); 2278 } 2279 goto out; 2280 } 2281 /* ADISC failed */ 2282 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 2283 "2755 ADISC failure DID:%06X Status:x%x/x%x\n", 2284 ndlp->nlp_DID, irsp->ulpStatus, 2285 irsp->un.ulpWord[4]); 2286 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2287 if (!lpfc_error_lost_link(irsp)) 2288 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2289 NLP_EVT_CMPL_ADISC); 2290 } else 2291 /* Good status, call state machine */ 2292 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2293 NLP_EVT_CMPL_ADISC); 2294 2295 /* Check to see if there are more ADISCs to be sent */ 2296 if (disc && vport->num_disc_nodes) 2297 lpfc_more_adisc(vport); 2298 out: 2299 lpfc_els_free_iocb(phba, cmdiocb); 2300 return; 2301 } 2302 2303 /** 2304 * lpfc_issue_els_adisc - Issue an address discover iocb to an node on a vport 2305 * @vport: pointer to a virtual N_Port data structure. 2306 * @ndlp: pointer to a node-list data structure. 2307 * @retry: number of retries to the command IOCB. 2308 * 2309 * This routine issues an Address Discover (ADISC) for an @ndlp on a 2310 * @vport. It prepares the payload of the ADISC ELS command, updates the 2311 * and states of the ndlp, and invokes the lpfc_sli_issue_iocb() routine 2312 * to issue the ADISC ELS command. 2313 * 2314 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 2315 * will be incremented by 1 for holding the ndlp and the reference to ndlp 2316 * will be stored into the context1 field of the IOCB for the completion 2317 * callback function to the ADISC ELS command. 
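 *
 * A minimal caller sketch (illustrative; mirrors the delayed-retry handler
 * in this file):
 *
 *   if (!lpfc_issue_els_adisc(vport, ndlp, 0)) {
 *           ndlp->nlp_prev_state = ndlp->nlp_state;
 *           lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
 *   }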
2318 * 2319 * Return code 2320 * 0 - successfully issued adisc 2321 * 1 - failed to issue adisc 2322 **/ 2323 int 2324 lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2325 uint8_t retry) 2326 { 2327 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2328 struct lpfc_hba *phba = vport->phba; 2329 ADISC *ap; 2330 IOCB_t *icmd; 2331 struct lpfc_iocbq *elsiocb; 2332 uint8_t *pcmd; 2333 uint16_t cmdsize; 2334 2335 cmdsize = (sizeof(uint32_t) + sizeof(ADISC)); 2336 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2337 ndlp->nlp_DID, ELS_CMD_ADISC); 2338 if (!elsiocb) 2339 return 1; 2340 2341 icmd = &elsiocb->iocb; 2342 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2343 2344 /* For ADISC request, remainder of payload is service parameters */ 2345 *((uint32_t *) (pcmd)) = ELS_CMD_ADISC; 2346 pcmd += sizeof(uint32_t); 2347 2348 /* Fill in ADISC payload */ 2349 ap = (ADISC *) pcmd; 2350 ap->hardAL_PA = phba->fc_pref_ALPA; 2351 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 2352 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 2353 ap->DID = be32_to_cpu(vport->fc_myDID); 2354 2355 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2356 "Issue ADISC: did:x%x", 2357 ndlp->nlp_DID, 0, 0); 2358 2359 phba->fc_stat.elsXmitADISC++; 2360 elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc; 2361 spin_lock_irq(shost->host_lock); 2362 ndlp->nlp_flag |= NLP_ADISC_SND; 2363 spin_unlock_irq(shost->host_lock); 2364 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == 2365 IOCB_ERROR) { 2366 spin_lock_irq(shost->host_lock); 2367 ndlp->nlp_flag &= ~NLP_ADISC_SND; 2368 spin_unlock_irq(shost->host_lock); 2369 lpfc_els_free_iocb(phba, elsiocb); 2370 return 1; 2371 } 2372 return 0; 2373 } 2374 2375 /** 2376 * lpfc_cmpl_els_logo - Completion callback function for logo 2377 * @phba: pointer to lpfc hba data structure. 2378 * @cmdiocb: pointer to lpfc command iocb data structure. 2379 * @rspiocb: pointer to lpfc response iocb data structure. 2380 * 2381 * This routine is the completion function for issuing the ELS Logout (LOGO) 2382 * command. If no error status was reported from the LOGO response, the 2383 * state machine of the associated ndlp shall be invoked for transition with 2384 * respect to NLP_EVT_CMPL_LOGO event. Otherwise, if error status was reported, 2385 * the lpfc_els_retry() routine will be invoked to retry the LOGO command. 
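 *
 * Note that after the completion is processed, an FCP target that has not
 * lost the link is recovered: NLP_NPR_2B_DISC is set on the ndlp and
 * lpfc_disc_start() is invoked, as shown at the end of the handler below.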
2386 **/ 2387 static void 2388 lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2389 struct lpfc_iocbq *rspiocb) 2390 { 2391 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 2392 struct lpfc_vport *vport = ndlp->vport; 2393 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2394 IOCB_t *irsp; 2395 struct lpfc_sli *psli; 2396 struct lpfcMboxq *mbox; 2397 unsigned long flags; 2398 uint32_t skip_recovery = 0; 2399 2400 psli = &phba->sli; 2401 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2402 cmdiocb->context_un.rsp_iocb = rspiocb; 2403 2404 irsp = &(rspiocb->iocb); 2405 spin_lock_irq(shost->host_lock); 2406 ndlp->nlp_flag &= ~NLP_LOGO_SND; 2407 spin_unlock_irq(shost->host_lock); 2408 2409 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2410 "LOGO cmpl: status:x%x/x%x did:x%x", 2411 irsp->ulpStatus, irsp->un.ulpWord[4], 2412 ndlp->nlp_DID); 2413 2414 /* LOGO completes to NPort <nlp_DID> */ 2415 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2416 "0105 LOGO completes to NPort x%x " 2417 "Data: x%x x%x x%x x%x\n", 2418 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 2419 irsp->ulpTimeout, vport->num_disc_nodes); 2420 2421 if (lpfc_els_chk_latt(vport)) { 2422 skip_recovery = 1; 2423 goto out; 2424 } 2425 2426 /* Check to see if link went down during discovery */ 2427 if (ndlp->nlp_flag & NLP_TARGET_REMOVE) { 2428 /* NLP_EVT_DEVICE_RM should unregister the RPI 2429 * which should abort all outstanding IOs. 2430 */ 2431 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2432 NLP_EVT_DEVICE_RM); 2433 skip_recovery = 1; 2434 goto out; 2435 } 2436 2437 if (irsp->ulpStatus) { 2438 /* Check for retry */ 2439 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2440 /* ELS command is being retried */ 2441 skip_recovery = 1; 2442 goto out; 2443 } 2444 /* LOGO failed */ 2445 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 2446 "2756 LOGO failure DID:%06X Status:x%x/x%x\n", 2447 ndlp->nlp_DID, irsp->ulpStatus, 2448 irsp->un.ulpWord[4]); 2449 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2450 if (lpfc_error_lost_link(irsp)) { 2451 skip_recovery = 1; 2452 goto out; 2453 } 2454 } 2455 2456 /* Call state machine. This will unregister the rpi if needed. */ 2457 lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO); 2458 2459 out: 2460 lpfc_els_free_iocb(phba, cmdiocb); 2461 /* If we are in pt2pt mode, we could rcv new S_ID on PLOGI */ 2462 if ((vport->fc_flag & FC_PT2PT) && 2463 !(vport->fc_flag & FC_PT2PT_PLOGI)) { 2464 phba->pport->fc_myDID = 0; 2465 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2466 if (mbox) { 2467 lpfc_config_link(phba, mbox); 2468 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 2469 mbox->vport = vport; 2470 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == 2471 MBX_NOT_FINISHED) { 2472 mempool_free(mbox, phba->mbox_mem_pool); 2473 skip_recovery = 1; 2474 } 2475 } 2476 } 2477 2478 /* 2479 * If the node is a target, the handling attempts to recover the port. 2480 * For any other port type, the rpi is unregistered as an implicit 2481 * LOGO. 
2482 */ 2483 if ((ndlp->nlp_type & NLP_FCP_TARGET) && (skip_recovery == 0)) { 2484 lpfc_cancel_retry_delay_tmo(vport, ndlp); 2485 spin_lock_irqsave(shost->host_lock, flags); 2486 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2487 spin_unlock_irqrestore(shost->host_lock, flags); 2488 2489 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2490 "3187 LOGO completes to NPort x%x: Start " 2491 "Recovery Data: x%x x%x x%x x%x\n", 2492 ndlp->nlp_DID, irsp->ulpStatus, 2493 irsp->un.ulpWord[4], irsp->ulpTimeout, 2494 vport->num_disc_nodes); 2495 lpfc_disc_start(vport); 2496 } 2497 return; 2498 } 2499 2500 /** 2501 * lpfc_issue_els_logo - Issue a logo to an node on a vport 2502 * @vport: pointer to a virtual N_Port data structure. 2503 * @ndlp: pointer to a node-list data structure. 2504 * @retry: number of retries to the command IOCB. 2505 * 2506 * This routine constructs and issues an ELS Logout (LOGO) iocb command 2507 * to a remote node, referred by an @ndlp on a @vport. It constructs the 2508 * payload of the IOCB, properly sets up the @ndlp state, and invokes the 2509 * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command. 2510 * 2511 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 2512 * will be incremented by 1 for holding the ndlp and the reference to ndlp 2513 * will be stored into the context1 field of the IOCB for the completion 2514 * callback function to the LOGO ELS command. 2515 * 2516 * Return code 2517 * 0 - successfully issued logo 2518 * 1 - failed to issue logo 2519 **/ 2520 int 2521 lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2522 uint8_t retry) 2523 { 2524 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2525 struct lpfc_hba *phba = vport->phba; 2526 IOCB_t *icmd; 2527 struct lpfc_iocbq *elsiocb; 2528 uint8_t *pcmd; 2529 uint16_t cmdsize; 2530 int rc; 2531 2532 spin_lock_irq(shost->host_lock); 2533 if (ndlp->nlp_flag & NLP_LOGO_SND) { 2534 spin_unlock_irq(shost->host_lock); 2535 return 0; 2536 } 2537 spin_unlock_irq(shost->host_lock); 2538 2539 cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name); 2540 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2541 ndlp->nlp_DID, ELS_CMD_LOGO); 2542 if (!elsiocb) 2543 return 1; 2544 2545 icmd = &elsiocb->iocb; 2546 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2547 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 2548 pcmd += sizeof(uint32_t); 2549 2550 /* Fill in LOGO payload */ 2551 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 2552 pcmd += sizeof(uint32_t); 2553 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 2554 2555 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2556 "Issue LOGO: did:x%x", 2557 ndlp->nlp_DID, 0, 0); 2558 2559 /* 2560 * If we are issuing a LOGO, we may try to recover the remote NPort 2561 * by issuing a PLOGI later. Even though we issue ELS cmds by the 2562 * VPI, if we have a valid RPI, and that RPI gets unreg'ed while 2563 * that ELS command is in-flight, the HBA returns a IOERR_INVALID_RPI 2564 * for that ELS cmd. To avoid this situation, lets get rid of the 2565 * RPI right now, before any ELS cmds are sent. 
2566 */ 2567 spin_lock_irq(shost->host_lock); 2568 ndlp->nlp_flag |= NLP_ISSUE_LOGO; 2569 spin_unlock_irq(shost->host_lock); 2570 if (lpfc_unreg_rpi(vport, ndlp)) { 2571 lpfc_els_free_iocb(phba, elsiocb); 2572 return 0; 2573 } 2574 2575 phba->fc_stat.elsXmitLOGO++; 2576 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo; 2577 spin_lock_irq(shost->host_lock); 2578 ndlp->nlp_flag |= NLP_LOGO_SND; 2579 ndlp->nlp_flag &= ~NLP_ISSUE_LOGO; 2580 spin_unlock_irq(shost->host_lock); 2581 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2582 2583 if (rc == IOCB_ERROR) { 2584 spin_lock_irq(shost->host_lock); 2585 ndlp->nlp_flag &= ~NLP_LOGO_SND; 2586 spin_unlock_irq(shost->host_lock); 2587 lpfc_els_free_iocb(phba, elsiocb); 2588 return 1; 2589 } 2590 return 0; 2591 } 2592 2593 /** 2594 * lpfc_cmpl_els_cmd - Completion callback function for generic els command 2595 * @phba: pointer to lpfc hba data structure. 2596 * @cmdiocb: pointer to lpfc command iocb data structure. 2597 * @rspiocb: pointer to lpfc response iocb data structure. 2598 * 2599 * This routine is a generic completion callback function for ELS commands. 2600 * Specifically, it is the callback function which does not need to perform 2601 * any command specific operations. It is currently used by the ELS command 2602 * issuing routines for the ELS State Change Request (SCR), 2603 * lpfc_issue_els_scr(), and the ELS Fibre Channel Address Resolution 2604 * Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). Other than 2605 * certain debug loggings, this callback function simply invokes the 2606 * lpfc_els_chk_latt() routine to check whether link went down during the 2607 * discovery process. 2608 **/ 2609 static void 2610 lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2611 struct lpfc_iocbq *rspiocb) 2612 { 2613 struct lpfc_vport *vport = cmdiocb->vport; 2614 IOCB_t *irsp; 2615 2616 irsp = &rspiocb->iocb; 2617 2618 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2619 "ELS cmd cmpl: status:x%x/x%x did:x%x", 2620 irsp->ulpStatus, irsp->un.ulpWord[4], 2621 irsp->un.elsreq64.remoteID); 2622 /* ELS cmd tag <ulpIoTag> completes */ 2623 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2624 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n", 2625 irsp->ulpIoTag, irsp->ulpStatus, 2626 irsp->un.ulpWord[4], irsp->ulpTimeout); 2627 /* Check to see if link went down during discovery */ 2628 lpfc_els_chk_latt(vport); 2629 lpfc_els_free_iocb(phba, cmdiocb); 2630 return; 2631 } 2632 2633 /** 2634 * lpfc_issue_els_scr - Issue a scr to an node on a vport 2635 * @vport: pointer to a host virtual N_Port data structure. 2636 * @nportid: N_Port identifier to the remote node. 2637 * @retry: number of retries to the command IOCB. 2638 * 2639 * This routine issues a State Change Request (SCR) to a fabric node 2640 * on a @vport. The remote node @nportid is passed into the function. It 2641 * first search the @vport node list to find the matching ndlp. If no such 2642 * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An 2643 * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb() 2644 * routine is invoked to send the SCR IOCB. 2645 * 2646 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 2647 * will be incremented by 1 for holding the ndlp and the reference to ndlp 2648 * will be stored into the context1 field of the IOCB for the completion 2649 * callback function to the SCR ELS command. 
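 *
 * An illustrative call (assuming the usual case of registering with the
 * fabric controller at the well-known address SCR_DID from lpfc_hw.h):
 *
 *   rc = lpfc_issue_els_scr(vport, SCR_DID, 0);
 *
 * A non-zero rc simply means the SCR could not be issued.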
2650 * 2651 * Return code 2652 * 0 - Successfully issued scr command 2653 * 1 - Failed to issue scr command 2654 **/ 2655 int 2656 lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) 2657 { 2658 struct lpfc_hba *phba = vport->phba; 2659 IOCB_t *icmd; 2660 struct lpfc_iocbq *elsiocb; 2661 struct lpfc_sli *psli; 2662 uint8_t *pcmd; 2663 uint16_t cmdsize; 2664 struct lpfc_nodelist *ndlp; 2665 2666 psli = &phba->sli; 2667 cmdsize = (sizeof(uint32_t) + sizeof(SCR)); 2668 2669 ndlp = lpfc_findnode_did(vport, nportid); 2670 if (!ndlp) { 2671 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 2672 if (!ndlp) 2673 return 1; 2674 lpfc_nlp_init(vport, ndlp, nportid); 2675 lpfc_enqueue_node(vport, ndlp); 2676 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 2677 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 2678 if (!ndlp) 2679 return 1; 2680 } 2681 2682 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2683 ndlp->nlp_DID, ELS_CMD_SCR); 2684 2685 if (!elsiocb) { 2686 /* This will trigger the release of the node just 2687 * allocated 2688 */ 2689 lpfc_nlp_put(ndlp); 2690 return 1; 2691 } 2692 2693 icmd = &elsiocb->iocb; 2694 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2695 2696 *((uint32_t *) (pcmd)) = ELS_CMD_SCR; 2697 pcmd += sizeof(uint32_t); 2698 2699 /* For SCR, remainder of payload is SCR parameter page */ 2700 memset(pcmd, 0, sizeof(SCR)); 2701 ((SCR *) pcmd)->Function = SCR_FUNC_FULL; 2702 2703 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2704 "Issue SCR: did:x%x", 2705 ndlp->nlp_DID, 0, 0); 2706 2707 phba->fc_stat.elsXmitSCR++; 2708 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; 2709 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == 2710 IOCB_ERROR) { 2711 /* The additional lpfc_nlp_put will cause the following 2712 * lpfc_els_free_iocb routine to trigger the rlease of 2713 * the node. 2714 */ 2715 lpfc_nlp_put(ndlp); 2716 lpfc_els_free_iocb(phba, elsiocb); 2717 return 1; 2718 } 2719 /* This will cause the callback-function lpfc_cmpl_els_cmd to 2720 * trigger the release of node. 2721 */ 2722 lpfc_nlp_put(ndlp); 2723 return 0; 2724 } 2725 2726 /** 2727 * lpfc_issue_els_farpr - Issue a farp to an node on a vport 2728 * @vport: pointer to a host virtual N_Port data structure. 2729 * @nportid: N_Port identifier to the remote node. 2730 * @retry: number of retries to the command IOCB. 2731 * 2732 * This routine issues a Fibre Channel Address Resolution Response 2733 * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid) 2734 * is passed into the function. It first search the @vport node list to find 2735 * the matching ndlp. If no such ndlp is found, a new ndlp shall be created 2736 * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the 2737 * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command. 2738 * 2739 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 2740 * will be incremented by 1 for holding the ndlp and the reference to ndlp 2741 * will be stored into the context1 field of the IOCB for the completion 2742 * callback function to the PARPR ELS command. 
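 *
 * The FARPR payload built below carries the two port IDs, this vport's
 * WWPN/WWNN, and, when the destination node is already known locally, its
 * WWPN/WWNN as well. An illustrative call, where @nportid is assumed to be
 * the N_Port ID learned from a received FARP request:
 *
 *   lpfc_issue_els_farpr(vport, nportid, 0);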
2743 * 2744 * Return code 2745 * 0 - Successfully issued farpr command 2746 * 1 - Failed to issue farpr command 2747 **/ 2748 static int 2749 lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) 2750 { 2751 struct lpfc_hba *phba = vport->phba; 2752 IOCB_t *icmd; 2753 struct lpfc_iocbq *elsiocb; 2754 struct lpfc_sli *psli; 2755 FARP *fp; 2756 uint8_t *pcmd; 2757 uint32_t *lp; 2758 uint16_t cmdsize; 2759 struct lpfc_nodelist *ondlp; 2760 struct lpfc_nodelist *ndlp; 2761 2762 psli = &phba->sli; 2763 cmdsize = (sizeof(uint32_t) + sizeof(FARP)); 2764 2765 ndlp = lpfc_findnode_did(vport, nportid); 2766 if (!ndlp) { 2767 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 2768 if (!ndlp) 2769 return 1; 2770 lpfc_nlp_init(vport, ndlp, nportid); 2771 lpfc_enqueue_node(vport, ndlp); 2772 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 2773 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 2774 if (!ndlp) 2775 return 1; 2776 } 2777 2778 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2779 ndlp->nlp_DID, ELS_CMD_RNID); 2780 if (!elsiocb) { 2781 /* This will trigger the release of the node just 2782 * allocated 2783 */ 2784 lpfc_nlp_put(ndlp); 2785 return 1; 2786 } 2787 2788 icmd = &elsiocb->iocb; 2789 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2790 2791 *((uint32_t *) (pcmd)) = ELS_CMD_FARPR; 2792 pcmd += sizeof(uint32_t); 2793 2794 /* Fill in FARPR payload */ 2795 fp = (FARP *) (pcmd); 2796 memset(fp, 0, sizeof(FARP)); 2797 lp = (uint32_t *) pcmd; 2798 *lp++ = be32_to_cpu(nportid); 2799 *lp++ = be32_to_cpu(vport->fc_myDID); 2800 fp->Rflags = 0; 2801 fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE); 2802 2803 memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name)); 2804 memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 2805 ondlp = lpfc_findnode_did(vport, nportid); 2806 if (ondlp && NLP_CHK_NODE_ACT(ondlp)) { 2807 memcpy(&fp->OportName, &ondlp->nlp_portname, 2808 sizeof(struct lpfc_name)); 2809 memcpy(&fp->OnodeName, &ondlp->nlp_nodename, 2810 sizeof(struct lpfc_name)); 2811 } 2812 2813 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2814 "Issue FARPR: did:x%x", 2815 ndlp->nlp_DID, 0, 0); 2816 2817 phba->fc_stat.elsXmitFARPR++; 2818 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; 2819 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == 2820 IOCB_ERROR) { 2821 /* The additional lpfc_nlp_put will cause the following 2822 * lpfc_els_free_iocb routine to trigger the release of 2823 * the node. 2824 */ 2825 lpfc_nlp_put(ndlp); 2826 lpfc_els_free_iocb(phba, elsiocb); 2827 return 1; 2828 } 2829 /* This will cause the callback-function lpfc_cmpl_els_cmd to 2830 * trigger the release of the node. 2831 */ 2832 lpfc_nlp_put(ndlp); 2833 return 0; 2834 } 2835 2836 /** 2837 * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry 2838 * @vport: pointer to a host virtual N_Port data structure. 2839 * @nlp: pointer to a node-list data structure. 2840 * 2841 * This routine cancels the timer with a delayed IOCB-command retry for 2842 * a @vport's @ndlp. It stops the timer for the delayed function retrial and 2843 * removes the ELS retry event if it presents. In addition, if the 2844 * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB 2845 * commands are sent for the @vport's nodes that require issuing discovery 2846 * ADISC. 
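 *
 * In short (see the body below): NLP_DELAY_TMO is cleared, the nlp_delayfunc
 * timer is stopped, any queued els_retry_evt work is removed (dropping the
 * ndlp reference it held), and, if discovery is still pending,
 * lpfc_more_adisc() or lpfc_more_plogi() is used to keep it moving.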
2847 **/ 2848 void 2849 lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) 2850 { 2851 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2852 struct lpfc_work_evt *evtp; 2853 2854 if (!(nlp->nlp_flag & NLP_DELAY_TMO)) 2855 return; 2856 spin_lock_irq(shost->host_lock); 2857 nlp->nlp_flag &= ~NLP_DELAY_TMO; 2858 spin_unlock_irq(shost->host_lock); 2859 del_timer_sync(&nlp->nlp_delayfunc); 2860 nlp->nlp_last_elscmd = 0; 2861 if (!list_empty(&nlp->els_retry_evt.evt_listp)) { 2862 list_del_init(&nlp->els_retry_evt.evt_listp); 2863 /* Decrement nlp reference count held for the delayed retry */ 2864 evtp = &nlp->els_retry_evt; 2865 lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1); 2866 } 2867 if (nlp->nlp_flag & NLP_NPR_2B_DISC) { 2868 spin_lock_irq(shost->host_lock); 2869 nlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2870 spin_unlock_irq(shost->host_lock); 2871 if (vport->num_disc_nodes) { 2872 if (vport->port_state < LPFC_VPORT_READY) { 2873 /* Check if there are more ADISCs to be sent */ 2874 lpfc_more_adisc(vport); 2875 } else { 2876 /* Check if there are more PLOGIs to be sent */ 2877 lpfc_more_plogi(vport); 2878 if (vport->num_disc_nodes == 0) { 2879 spin_lock_irq(shost->host_lock); 2880 vport->fc_flag &= ~FC_NDISC_ACTIVE; 2881 spin_unlock_irq(shost->host_lock); 2882 lpfc_can_disctmo(vport); 2883 lpfc_end_rscn(vport); 2884 } 2885 } 2886 } 2887 } 2888 return; 2889 } 2890 2891 /** 2892 * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer 2893 * @ptr: holder for the pointer to the timer function associated data (ndlp). 2894 * 2895 * This routine is invoked by the ndlp delayed-function timer to check 2896 * whether there is any pending ELS retry event(s) with the node. If not, it 2897 * simply returns. Otherwise, if there is at least one ELS delayed event, it 2898 * adds the delayed events to the HBA work list and invokes the 2899 * lpfc_worker_wake_up() routine to wake up worker thread to process the 2900 * event. Note that lpfc_nlp_get() is called before posting the event to 2901 * the work list to hold reference count of ndlp so that it guarantees the 2902 * reference to ndlp will still be available when the worker thread gets 2903 * to the event associated with the ndlp. 2904 **/ 2905 void 2906 lpfc_els_retry_delay(unsigned long ptr) 2907 { 2908 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr; 2909 struct lpfc_vport *vport = ndlp->vport; 2910 struct lpfc_hba *phba = vport->phba; 2911 unsigned long flags; 2912 struct lpfc_work_evt *evtp = &ndlp->els_retry_evt; 2913 2914 spin_lock_irqsave(&phba->hbalock, flags); 2915 if (!list_empty(&evtp->evt_listp)) { 2916 spin_unlock_irqrestore(&phba->hbalock, flags); 2917 return; 2918 } 2919 2920 /* We need to hold the node by incrementing the reference 2921 * count until the queued work is done 2922 */ 2923 evtp->evt_arg1 = lpfc_nlp_get(ndlp); 2924 if (evtp->evt_arg1) { 2925 evtp->evt = LPFC_EVT_ELS_RETRY; 2926 list_add_tail(&evtp->evt_listp, &phba->work_list); 2927 lpfc_worker_wake_up(phba); 2928 } 2929 spin_unlock_irqrestore(&phba->hbalock, flags); 2930 return; 2931 } 2932 2933 /** 2934 * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function 2935 * @ndlp: pointer to a node-list data structure. 2936 * 2937 * This routine is the worker-thread handler for processing the @ndlp delayed 2938 * event(s), posted by the lpfc_els_retry_delay() routine. 
It simply retrieves 2939 * the last ELS command from the associated ndlp and invokes the proper ELS 2940 * function according to the delayed ELS command to retry the command. 2941 **/ 2942 void 2943 lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) 2944 { 2945 struct lpfc_vport *vport = ndlp->vport; 2946 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2947 uint32_t cmd, did, retry; 2948 2949 spin_lock_irq(shost->host_lock); 2950 did = ndlp->nlp_DID; 2951 cmd = ndlp->nlp_last_elscmd; 2952 ndlp->nlp_last_elscmd = 0; 2953 2954 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { 2955 spin_unlock_irq(shost->host_lock); 2956 return; 2957 } 2958 2959 ndlp->nlp_flag &= ~NLP_DELAY_TMO; 2960 spin_unlock_irq(shost->host_lock); 2961 /* 2962 * If a discovery event re-added nlp_delayfunc after the timer 2963 * fired and before processing the timer, cancel the 2964 * nlp_delayfunc. 2965 */ 2966 del_timer_sync(&ndlp->nlp_delayfunc); 2967 retry = ndlp->nlp_retry; 2968 ndlp->nlp_retry = 0; 2969 2970 switch (cmd) { 2971 case ELS_CMD_FLOGI: 2972 lpfc_issue_els_flogi(vport, ndlp, retry); 2973 break; 2974 case ELS_CMD_PLOGI: 2975 if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) { 2976 ndlp->nlp_prev_state = ndlp->nlp_state; 2977 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 2978 } 2979 break; 2980 case ELS_CMD_ADISC: 2981 if (!lpfc_issue_els_adisc(vport, ndlp, retry)) { 2982 ndlp->nlp_prev_state = ndlp->nlp_state; 2983 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 2984 } 2985 break; 2986 case ELS_CMD_PRLI: 2987 if (!lpfc_issue_els_prli(vport, ndlp, retry)) { 2988 ndlp->nlp_prev_state = ndlp->nlp_state; 2989 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 2990 } 2991 break; 2992 case ELS_CMD_LOGO: 2993 if (!lpfc_issue_els_logo(vport, ndlp, retry)) { 2994 ndlp->nlp_prev_state = ndlp->nlp_state; 2995 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 2996 } 2997 break; 2998 case ELS_CMD_FDISC: 2999 if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)) 3000 lpfc_issue_els_fdisc(vport, ndlp, retry); 3001 break; 3002 } 3003 return; 3004 } 3005 3006 /** 3007 * lpfc_els_retry - Make retry decision on an els command iocb 3008 * @phba: pointer to lpfc hba data structure. 3009 * @cmdiocb: pointer to lpfc command iocb data structure. 3010 * @rspiocb: pointer to lpfc response iocb data structure. 3011 * 3012 * This routine makes a retry decision on an ELS command IOCB, which has 3013 * failed. The following ELS IOCBs use this function for retrying the command 3014 * when the previously issued command responded with an error status: FLOGI, 3015 * PLOGI, PRLI, ADISC, LOGO, and FDISC. Based on the ELS command type and the 3016 * returned error status, it makes the decision whether a retry shall be 3017 * issued for the command, and whether a retry shall be made immediately or 3018 * delayed. In the former case, the corresponding ELS command issuing-function 3019 * is called to retry the command. In the latter case, the ELS command shall 3020 * be posted to the ndlp delayed event and the delayed function timer set on 3021 * the ndlp for the delayed command issuing.
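 *
 * As a rough guide (distilled from the cases handled below): FLOGI on a
 * non-loop topology is retried indefinitely, with the delay growing to 5
 * seconds after many attempts; FDISC is retried every second up to the
 * devloss timeout; a PLOGI or PRLI rejected with LSRJT_UNABLE_TPC is retried
 * with a 1 second delay; and IOERR_NO_RESOURCES permits up to 250 retries.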
3022 * 3023 * Return code 3024 * 0 - No retry of els command is made 3025 * 1 - Immediate or delayed retry of els command is made 3026 **/ 3027 static int 3028 lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3029 struct lpfc_iocbq *rspiocb) 3030 { 3031 struct lpfc_vport *vport = cmdiocb->vport; 3032 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3033 IOCB_t *irsp = &rspiocb->iocb; 3034 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 3035 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 3036 uint32_t *elscmd; 3037 struct ls_rjt stat; 3038 int retry = 0, maxretry = lpfc_max_els_tries, delay = 0; 3039 int logerr = 0; 3040 uint32_t cmd = 0; 3041 uint32_t did; 3042 3043 3044 /* Note: context2 may be 0 for internal driver abort 3045 * of delays ELS command. 3046 */ 3047 3048 if (pcmd && pcmd->virt) { 3049 elscmd = (uint32_t *) (pcmd->virt); 3050 cmd = *elscmd++; 3051 } 3052 3053 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) 3054 did = ndlp->nlp_DID; 3055 else { 3056 /* We should only hit this case for retrying PLOGI */ 3057 did = irsp->un.elsreq64.remoteID; 3058 ndlp = lpfc_findnode_did(vport, did); 3059 if ((!ndlp || !NLP_CHK_NODE_ACT(ndlp)) 3060 && (cmd != ELS_CMD_PLOGI)) 3061 return 1; 3062 } 3063 3064 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3065 "Retry ELS: wd7:x%x wd4:x%x did:x%x", 3066 *(((uint32_t *) irsp) + 7), irsp->un.ulpWord[4], ndlp->nlp_DID); 3067 3068 switch (irsp->ulpStatus) { 3069 case IOSTAT_FCP_RSP_ERROR: 3070 break; 3071 case IOSTAT_REMOTE_STOP: 3072 if (phba->sli_rev == LPFC_SLI_REV4) { 3073 /* This IO was aborted by the target, we don't 3074 * know the rxid and because we did not send the 3075 * ABTS we cannot generate and RRQ. 3076 */ 3077 lpfc_set_rrq_active(phba, ndlp, 3078 cmdiocb->sli4_lxritag, 0, 0); 3079 } 3080 break; 3081 case IOSTAT_LOCAL_REJECT: 3082 switch ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK)) { 3083 case IOERR_LOOP_OPEN_FAILURE: 3084 if (cmd == ELS_CMD_FLOGI) { 3085 if (PCI_DEVICE_ID_HORNET == 3086 phba->pcidev->device) { 3087 phba->fc_topology = LPFC_TOPOLOGY_LOOP; 3088 phba->pport->fc_myDID = 0; 3089 phba->alpa_map[0] = 0; 3090 phba->alpa_map[1] = 0; 3091 } 3092 } 3093 if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0) 3094 delay = 1000; 3095 retry = 1; 3096 break; 3097 3098 case IOERR_ILLEGAL_COMMAND: 3099 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 3100 "0124 Retry illegal cmd x%x " 3101 "retry:x%x delay:x%x\n", 3102 cmd, cmdiocb->retry, delay); 3103 retry = 1; 3104 /* All command's retry policy */ 3105 maxretry = 8; 3106 if (cmdiocb->retry > 2) 3107 delay = 1000; 3108 break; 3109 3110 case IOERR_NO_RESOURCES: 3111 logerr = 1; /* HBA out of resources */ 3112 retry = 1; 3113 if (cmdiocb->retry > 100) 3114 delay = 100; 3115 maxretry = 250; 3116 break; 3117 3118 case IOERR_ILLEGAL_FRAME: 3119 delay = 100; 3120 retry = 1; 3121 break; 3122 3123 case IOERR_SEQUENCE_TIMEOUT: 3124 case IOERR_INVALID_RPI: 3125 retry = 1; 3126 break; 3127 } 3128 break; 3129 3130 case IOSTAT_NPORT_RJT: 3131 case IOSTAT_FABRIC_RJT: 3132 if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) { 3133 retry = 1; 3134 break; 3135 } 3136 break; 3137 3138 case IOSTAT_NPORT_BSY: 3139 case IOSTAT_FABRIC_BSY: 3140 logerr = 1; /* Fabric / Remote NPort out of resources */ 3141 retry = 1; 3142 break; 3143 3144 case IOSTAT_LS_RJT: 3145 stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]); 3146 /* Added for Vendor specifc support 3147 * Just keep retrying for these Rsn / Exp codes 3148 */ 3149 switch (stat.un.b.lsRjtRsnCode) { 3150 case 
LSRJT_UNABLE_TPC: 3151 if (stat.un.b.lsRjtRsnCodeExp == 3152 LSEXP_CMD_IN_PROGRESS) { 3153 if (cmd == ELS_CMD_PLOGI) { 3154 delay = 1000; 3155 maxretry = 48; 3156 } 3157 retry = 1; 3158 break; 3159 } 3160 if (stat.un.b.lsRjtRsnCodeExp == 3161 LSEXP_CANT_GIVE_DATA) { 3162 if (cmd == ELS_CMD_PLOGI) { 3163 delay = 1000; 3164 maxretry = 48; 3165 } 3166 retry = 1; 3167 break; 3168 } 3169 if ((cmd == ELS_CMD_PLOGI) || 3170 (cmd == ELS_CMD_PRLI)) { 3171 delay = 1000; 3172 maxretry = lpfc_max_els_tries + 1; 3173 retry = 1; 3174 break; 3175 } 3176 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 3177 (cmd == ELS_CMD_FDISC) && 3178 (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){ 3179 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 3180 "0125 FDISC Failed (x%x). " 3181 "Fabric out of resources\n", 3182 stat.un.lsRjtError); 3183 lpfc_vport_set_state(vport, 3184 FC_VPORT_NO_FABRIC_RSCS); 3185 } 3186 break; 3187 3188 case LSRJT_LOGICAL_BSY: 3189 if ((cmd == ELS_CMD_PLOGI) || 3190 (cmd == ELS_CMD_PRLI)) { 3191 delay = 1000; 3192 maxretry = 48; 3193 } else if (cmd == ELS_CMD_FDISC) { 3194 /* FDISC retry policy */ 3195 maxretry = 48; 3196 if (cmdiocb->retry >= 32) 3197 delay = 1000; 3198 } 3199 retry = 1; 3200 break; 3201 3202 case LSRJT_LOGICAL_ERR: 3203 /* There are some cases where switches return this 3204 * error when they are not ready and should be returning 3205 * Logical Busy. We should delay every time. 3206 */ 3207 if (cmd == ELS_CMD_FDISC && 3208 stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) { 3209 maxretry = 3; 3210 delay = 1000; 3211 retry = 1; 3212 break; 3213 } 3214 case LSRJT_PROTOCOL_ERR: 3215 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 3216 (cmd == ELS_CMD_FDISC) && 3217 ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) || 3218 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID)) 3219 ) { 3220 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 3221 "0122 FDISC Failed (x%x). 
" 3222 "Fabric Detected Bad WWN\n", 3223 stat.un.lsRjtError); 3224 lpfc_vport_set_state(vport, 3225 FC_VPORT_FABRIC_REJ_WWN); 3226 } 3227 break; 3228 } 3229 break; 3230 3231 case IOSTAT_INTERMED_RSP: 3232 case IOSTAT_BA_RJT: 3233 break; 3234 3235 default: 3236 break; 3237 } 3238 3239 if (did == FDMI_DID) 3240 retry = 1; 3241 3242 if ((cmd == ELS_CMD_FLOGI) && 3243 (phba->fc_topology != LPFC_TOPOLOGY_LOOP) && 3244 !lpfc_error_lost_link(irsp)) { 3245 /* FLOGI retry policy */ 3246 retry = 1; 3247 /* retry FLOGI forever */ 3248 maxretry = 0; 3249 if (cmdiocb->retry >= 100) 3250 delay = 5000; 3251 else if (cmdiocb->retry >= 32) 3252 delay = 1000; 3253 } else if ((cmd == ELS_CMD_FDISC) && !lpfc_error_lost_link(irsp)) { 3254 /* retry FDISCs every second up to devloss */ 3255 retry = 1; 3256 maxretry = vport->cfg_devloss_tmo; 3257 delay = 1000; 3258 } 3259 3260 cmdiocb->retry++; 3261 if (maxretry && (cmdiocb->retry >= maxretry)) { 3262 phba->fc_stat.elsRetryExceeded++; 3263 retry = 0; 3264 } 3265 3266 if ((vport->load_flag & FC_UNLOADING) != 0) 3267 retry = 0; 3268 3269 if (retry) { 3270 if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) { 3271 /* Stop retrying PLOGI and FDISC if in FCF discovery */ 3272 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 3273 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3274 "2849 Stop retry ELS command " 3275 "x%x to remote NPORT x%x, " 3276 "Data: x%x x%x\n", cmd, did, 3277 cmdiocb->retry, delay); 3278 return 0; 3279 } 3280 } 3281 3282 /* Retry ELS command <elsCmd> to remote NPORT <did> */ 3283 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3284 "0107 Retry ELS command x%x to remote " 3285 "NPORT x%x Data: x%x x%x\n", 3286 cmd, did, cmdiocb->retry, delay); 3287 3288 if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) && 3289 ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) || 3290 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) != 3291 IOERR_NO_RESOURCES))) { 3292 /* Don't reset timer for no resources */ 3293 3294 /* If discovery / RSCN timer is running, reset it */ 3295 if (timer_pending(&vport->fc_disctmo) || 3296 (vport->fc_flag & FC_RSCN_MODE)) 3297 lpfc_set_disctmo(vport); 3298 } 3299 3300 phba->fc_stat.elsXmitRetry++; 3301 if (ndlp && NLP_CHK_NODE_ACT(ndlp) && delay) { 3302 phba->fc_stat.elsDelayRetry++; 3303 ndlp->nlp_retry = cmdiocb->retry; 3304 3305 /* delay is specified in milliseconds */ 3306 mod_timer(&ndlp->nlp_delayfunc, 3307 jiffies + msecs_to_jiffies(delay)); 3308 spin_lock_irq(shost->host_lock); 3309 ndlp->nlp_flag |= NLP_DELAY_TMO; 3310 spin_unlock_irq(shost->host_lock); 3311 3312 ndlp->nlp_prev_state = ndlp->nlp_state; 3313 if (cmd == ELS_CMD_PRLI) 3314 lpfc_nlp_set_state(vport, ndlp, 3315 NLP_STE_PRLI_ISSUE); 3316 else 3317 lpfc_nlp_set_state(vport, ndlp, 3318 NLP_STE_NPR_NODE); 3319 ndlp->nlp_last_elscmd = cmd; 3320 3321 return 1; 3322 } 3323 switch (cmd) { 3324 case ELS_CMD_FLOGI: 3325 lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry); 3326 return 1; 3327 case ELS_CMD_FDISC: 3328 lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry); 3329 return 1; 3330 case ELS_CMD_PLOGI: 3331 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { 3332 ndlp->nlp_prev_state = ndlp->nlp_state; 3333 lpfc_nlp_set_state(vport, ndlp, 3334 NLP_STE_PLOGI_ISSUE); 3335 } 3336 lpfc_issue_els_plogi(vport, did, cmdiocb->retry); 3337 return 1; 3338 case ELS_CMD_ADISC: 3339 ndlp->nlp_prev_state = ndlp->nlp_state; 3340 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 3341 lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry); 3342 return 1; 3343 case ELS_CMD_PRLI: 3344 ndlp->nlp_prev_state = ndlp->nlp_state; 
3345 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 3346 lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry); 3347 return 1; 3348 case ELS_CMD_LOGO: 3349 ndlp->nlp_prev_state = ndlp->nlp_state; 3350 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 3351 lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry); 3352 return 1; 3353 } 3354 } 3355 /* No retry ELS command <elsCmd> to remote NPORT <did> */ 3356 if (logerr) { 3357 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 3358 "0137 No retry ELS command x%x to remote " 3359 "NPORT x%x: Out of Resources: Error:x%x/%x\n", 3360 cmd, did, irsp->ulpStatus, 3361 irsp->un.ulpWord[4]); 3362 } 3363 else { 3364 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3365 "0108 No retry ELS command x%x to remote " 3366 "NPORT x%x Retried:%d Error:x%x/%x\n", 3367 cmd, did, cmdiocb->retry, irsp->ulpStatus, 3368 irsp->un.ulpWord[4]); 3369 } 3370 return 0; 3371 } 3372 3373 /** 3374 * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb 3375 * @phba: pointer to lpfc hba data structure. 3376 * @buf_ptr1: pointer to the lpfc DMA buffer data structure. 3377 * 3378 * This routine releases the lpfc DMA (Direct Memory Access) buffer(s) 3379 * associated with a command IOCB back to the lpfc DMA buffer pool. It first 3380 * checks to see whether there is a lpfc DMA buffer associated with the 3381 * response of the command IOCB. If so, it will be released before releasing 3382 * the lpfc DMA buffer associated with the IOCB itself. 3383 * 3384 * Return code 3385 * 0 - Successfully released lpfc DMA buffer (currently, always return 0) 3386 **/ 3387 static int 3388 lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1) 3389 { 3390 struct lpfc_dmabuf *buf_ptr; 3391 3392 /* Free the response before processing the command. */ 3393 if (!list_empty(&buf_ptr1->list)) { 3394 list_remove_head(&buf_ptr1->list, buf_ptr, 3395 struct lpfc_dmabuf, 3396 list); 3397 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 3398 kfree(buf_ptr); 3399 } 3400 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys); 3401 kfree(buf_ptr1); 3402 return 0; 3403 } 3404 3405 /** 3406 * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl 3407 * @phba: pointer to lpfc hba data structure. 3408 * @buf_ptr: pointer to the lpfc dma buffer data structure. 3409 * 3410 * This routine releases the lpfc Direct Memory Access (DMA) buffer 3411 * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer 3412 * pool. 3413 * 3414 * Return code 3415 * 0 - Successfully released lpfc DMA buffer (currently, always return 0) 3416 **/ 3417 static int 3418 lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr) 3419 { 3420 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 3421 kfree(buf_ptr); 3422 return 0; 3423 } 3424 3425 /** 3426 * lpfc_els_free_iocb - Free a command iocb and its associated resources 3427 * @phba: pointer to lpfc hba data structure. 3428 * @elsiocb: pointer to lpfc els command iocb data structure. 3429 * 3430 * This routine frees a command IOCB and its associated resources. The 3431 * command IOCB data structure contains the reference to various associated 3432 * resources, these fields must be set to NULL if the associated reference 3433 * not present: 3434 * context1 - reference to ndlp 3435 * context2 - reference to cmd 3436 * context2->next - reference to rsp 3437 * context3 - reference to bpl 3438 * 3439 * It first properly decrements the reference count held on ndlp for the 3440 * IOCB completion callback function. 
If LPFC_DELAY_MEM_FREE flag is not 3441 * set, it invokes the lpfc_els_free_data() routine to release the Direct 3442 * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it 3443 * adds the DMA buffer the @phba data structure for the delayed release. 3444 * If reference to the Buffer Pointer List (BPL) is present, the 3445 * lpfc_els_free_bpl() routine is invoked to release the DMA memory 3446 * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is 3447 * invoked to release the IOCB data structure back to @phba IOCBQ list. 3448 * 3449 * Return code 3450 * 0 - Success (currently, always return 0) 3451 **/ 3452 int 3453 lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb) 3454 { 3455 struct lpfc_dmabuf *buf_ptr, *buf_ptr1; 3456 struct lpfc_nodelist *ndlp; 3457 3458 ndlp = (struct lpfc_nodelist *)elsiocb->context1; 3459 if (ndlp) { 3460 if (ndlp->nlp_flag & NLP_DEFER_RM) { 3461 lpfc_nlp_put(ndlp); 3462 3463 /* If the ndlp is not being used by another discovery 3464 * thread, free it. 3465 */ 3466 if (!lpfc_nlp_not_used(ndlp)) { 3467 /* If ndlp is being used by another discovery 3468 * thread, just clear NLP_DEFER_RM 3469 */ 3470 ndlp->nlp_flag &= ~NLP_DEFER_RM; 3471 } 3472 } 3473 else 3474 lpfc_nlp_put(ndlp); 3475 elsiocb->context1 = NULL; 3476 } 3477 /* context2 = cmd, context2->next = rsp, context3 = bpl */ 3478 if (elsiocb->context2) { 3479 if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) { 3480 /* Firmware could still be in progress of DMAing 3481 * payload, so don't free data buffer till after 3482 * a hbeat. 3483 */ 3484 elsiocb->iocb_flag &= ~LPFC_DELAY_MEM_FREE; 3485 buf_ptr = elsiocb->context2; 3486 elsiocb->context2 = NULL; 3487 if (buf_ptr) { 3488 buf_ptr1 = NULL; 3489 spin_lock_irq(&phba->hbalock); 3490 if (!list_empty(&buf_ptr->list)) { 3491 list_remove_head(&buf_ptr->list, 3492 buf_ptr1, struct lpfc_dmabuf, 3493 list); 3494 INIT_LIST_HEAD(&buf_ptr1->list); 3495 list_add_tail(&buf_ptr1->list, 3496 &phba->elsbuf); 3497 phba->elsbuf_cnt++; 3498 } 3499 INIT_LIST_HEAD(&buf_ptr->list); 3500 list_add_tail(&buf_ptr->list, &phba->elsbuf); 3501 phba->elsbuf_cnt++; 3502 spin_unlock_irq(&phba->hbalock); 3503 } 3504 } else { 3505 buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2; 3506 lpfc_els_free_data(phba, buf_ptr1); 3507 } 3508 } 3509 3510 if (elsiocb->context3) { 3511 buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3; 3512 lpfc_els_free_bpl(phba, buf_ptr); 3513 } 3514 lpfc_sli_release_iocbq(phba, elsiocb); 3515 return 0; 3516 } 3517 3518 /** 3519 * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response 3520 * @phba: pointer to lpfc hba data structure. 3521 * @cmdiocb: pointer to lpfc command iocb data structure. 3522 * @rspiocb: pointer to lpfc response iocb data structure. 3523 * 3524 * This routine is the completion callback function to the Logout (LOGO) 3525 * Accept (ACC) Response ELS command. This routine is invoked to indicate 3526 * the completion of the LOGO process. It invokes the lpfc_nlp_not_used() to 3527 * release the ndlp if it has the last reference remaining (reference count 3528 * is 1). If succeeded (meaning ndlp released), it sets the IOCB context1 3529 * field to NULL to inform the following lpfc_els_free_iocb() routine no 3530 * ndlp reference count needs to be decremented. Otherwise, the ndlp 3531 * reference use-count shall be decremented by the lpfc_els_free_iocb() 3532 * routine. Finally, the lpfc_els_free_iocb() is invoked to release the 3533 * IOCB data structure. 
3534 **/ 3535 static void 3536 lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3537 struct lpfc_iocbq *rspiocb) 3538 { 3539 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 3540 struct lpfc_vport *vport = cmdiocb->vport; 3541 IOCB_t *irsp; 3542 3543 irsp = &rspiocb->iocb; 3544 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 3545 "ACC LOGO cmpl: status:x%x/x%x did:x%x", 3546 irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID); 3547 /* ACC to LOGO completes to NPort <nlp_DID> */ 3548 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3549 "0109 ACC to LOGO completes to NPort x%x " 3550 "Data: x%x x%x x%x\n", 3551 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 3552 ndlp->nlp_rpi); 3553 3554 if (ndlp->nlp_state == NLP_STE_NPR_NODE) { 3555 /* NPort Recovery mode or node is just allocated */ 3556 if (!lpfc_nlp_not_used(ndlp)) { 3557 /* If the ndlp is being used by another discovery 3558 * thread, just unregister the RPI. 3559 */ 3560 lpfc_unreg_rpi(vport, ndlp); 3561 } else { 3562 /* Indicate the node has already released, should 3563 * not reference to it from within lpfc_els_free_iocb. 3564 */ 3565 cmdiocb->context1 = NULL; 3566 } 3567 } 3568 3569 /* 3570 * The driver received a LOGO from the rport and has ACK'd it. 3571 * At this point, the driver is done so release the IOCB 3572 */ 3573 lpfc_els_free_iocb(phba, cmdiocb); 3574 3575 /* 3576 * Remove the ndlp reference if it's a fabric node that has 3577 * sent us an unsolicted LOGO. 3578 */ 3579 if (ndlp->nlp_type & NLP_FABRIC) 3580 lpfc_nlp_put(ndlp); 3581 3582 return; 3583 } 3584 3585 /** 3586 * lpfc_mbx_cmpl_dflt_rpi - Completion callbk func for unreg dflt rpi mbox cmd 3587 * @phba: pointer to lpfc hba data structure. 3588 * @pmb: pointer to the driver internal queue element for mailbox command. 3589 * 3590 * This routine is the completion callback function for unregister default 3591 * RPI (Remote Port Index) mailbox command to the @phba. It simply releases 3592 * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and 3593 * decrements the ndlp reference count held for this completion callback 3594 * function. After that, it invokes the lpfc_nlp_not_used() to check 3595 * whether there is only one reference left on the ndlp. If so, it will 3596 * perform one more decrement and trigger the release of the ndlp. 3597 **/ 3598 void 3599 lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 3600 { 3601 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 3602 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 3603 3604 pmb->context1 = NULL; 3605 pmb->context2 = NULL; 3606 3607 lpfc_mbuf_free(phba, mp->virt, mp->phys); 3608 kfree(mp); 3609 mempool_free(pmb, phba->mbox_mem_pool); 3610 if (ndlp) { 3611 if (NLP_CHK_NODE_ACT(ndlp)) { 3612 lpfc_nlp_put(ndlp); 3613 /* This is the end of the default RPI cleanup logic for 3614 * this ndlp. If no other discovery threads are using 3615 * this ndlp, free all resources associated with it. 3616 */ 3617 lpfc_nlp_not_used(ndlp); 3618 } else { 3619 lpfc_drop_node(ndlp->vport, ndlp); 3620 } 3621 } 3622 3623 return; 3624 } 3625 3626 /** 3627 * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd 3628 * @phba: pointer to lpfc hba data structure. 3629 * @cmdiocb: pointer to lpfc command iocb data structure. 3630 * @rspiocb: pointer to lpfc response iocb data structure. 3631 * 3632 * This routine is the completion callback function for ELS Response IOCB 3633 * command. 
In the normal case, this callback function just properly sets the
 * nlp_flag bitmap in the ndlp data structure; if the mbox command reference
 * field in the command IOCB is not NULL, the referred mailbox command will
 * be sent out, and then invokes the lpfc_els_free_iocb() routine to release
 * the IOCB. Under error conditions, such as when a LS_RJT is returned or a
 * link down event occurred during the discovery, the lpfc_nlp_not_used()
 * routine shall be invoked to try to release the ndlp if no other threads
 * are currently referring to it.
 **/
static void
lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		  struct lpfc_iocbq *rspiocb)
{
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
	struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
	struct Scsi_Host *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
	IOCB_t *irsp;
	uint8_t *pcmd;
	LPFC_MBOXQ_t *mbox = NULL;
	struct lpfc_dmabuf *mp = NULL;
	uint32_t ls_rjt = 0;

	irsp = &rspiocb->iocb;

	if (cmdiocb->context_un.mbox)
		mbox = cmdiocb->context_un.mbox;

	/* First determine if this is a LS_RJT cmpl. Note, this callback
	 * function can have cmdiocb->context1 (ndlp) field set to NULL.
	 */
	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
	if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
	    (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
		/* A LS_RJT associated with Default RPI cleanup has its own
		 * separate code path.
		 */
		if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI))
			ls_rjt = 1;
	}

	/* Check to see if link went down during discovery */
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || lpfc_els_chk_latt(vport)) {
		if (mbox) {
			mp = (struct lpfc_dmabuf *) mbox->context1;
			if (mp) {
				lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			mempool_free(mbox, phba->mbox_mem_pool);
		}
		if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
		    (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
			if (lpfc_nlp_not_used(ndlp)) {
				ndlp = NULL;
				/* Indicate the node has already released,
				 * should not reference to it from within
				 * the routine lpfc_els_free_iocb.
				 */
				cmdiocb->context1 = NULL;
			}
		goto out;
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
		"ELS rsp cmpl: status:x%x/x%x did:x%x",
		irsp->ulpStatus, irsp->un.ulpWord[4],
		cmdiocb->iocb.un.elsreq64.remoteID);
	/* ELS response tag <ulpIoTag> completes */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0110 ELS response tag x%x completes "
			 "Data: x%x x%x x%x x%x x%x x%x x%x\n",
			 cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
			 rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout,
			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
			 ndlp->nlp_rpi);
	if (mbox) {
		if ((rspiocb->iocb.ulpStatus == 0)
		    && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
			lpfc_unreg_rpi(vport, ndlp);
			/* Increment reference count to ndlp to hold the
			 * reference to ndlp for the callback function.
3714 */ 3715 mbox->context2 = lpfc_nlp_get(ndlp); 3716 mbox->vport = vport; 3717 if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) { 3718 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG; 3719 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 3720 } 3721 else { 3722 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; 3723 ndlp->nlp_prev_state = ndlp->nlp_state; 3724 lpfc_nlp_set_state(vport, ndlp, 3725 NLP_STE_REG_LOGIN_ISSUE); 3726 } 3727 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 3728 != MBX_NOT_FINISHED) 3729 goto out; 3730 else 3731 /* Decrement the ndlp reference count we 3732 * set for this failed mailbox command. 3733 */ 3734 lpfc_nlp_put(ndlp); 3735 3736 /* ELS rsp: Cannot issue reg_login for <NPortid> */ 3737 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 3738 "0138 ELS rsp: Cannot issue reg_login for x%x " 3739 "Data: x%x x%x x%x\n", 3740 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 3741 ndlp->nlp_rpi); 3742 3743 if (lpfc_nlp_not_used(ndlp)) { 3744 ndlp = NULL; 3745 /* Indicate node has already been released, 3746 * should not reference to it from within 3747 * the routine lpfc_els_free_iocb. 3748 */ 3749 cmdiocb->context1 = NULL; 3750 } 3751 } else { 3752 /* Do not drop node for lpfc_els_abort'ed ELS cmds */ 3753 if (!lpfc_error_lost_link(irsp) && 3754 ndlp->nlp_flag & NLP_ACC_REGLOGIN) { 3755 if (lpfc_nlp_not_used(ndlp)) { 3756 ndlp = NULL; 3757 /* Indicate node has already been 3758 * released, should not reference 3759 * to it from within the routine 3760 * lpfc_els_free_iocb. 3761 */ 3762 cmdiocb->context1 = NULL; 3763 } 3764 } 3765 } 3766 mp = (struct lpfc_dmabuf *) mbox->context1; 3767 if (mp) { 3768 lpfc_mbuf_free(phba, mp->virt, mp->phys); 3769 kfree(mp); 3770 } 3771 mempool_free(mbox, phba->mbox_mem_pool); 3772 } 3773 out: 3774 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { 3775 spin_lock_irq(shost->host_lock); 3776 ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI); 3777 spin_unlock_irq(shost->host_lock); 3778 3779 /* If the node is not being used by another discovery thread, 3780 * and we are sending a reject, we are done with it. 3781 * Release driver reference count here and free associated 3782 * resources. 3783 */ 3784 if (ls_rjt) 3785 if (lpfc_nlp_not_used(ndlp)) 3786 /* Indicate node has already been released, 3787 * should not reference to it from within 3788 * the routine lpfc_els_free_iocb. 3789 */ 3790 cmdiocb->context1 = NULL; 3791 } 3792 3793 lpfc_els_free_iocb(phba, cmdiocb); 3794 return; 3795 } 3796 3797 /** 3798 * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command 3799 * @vport: pointer to a host virtual N_Port data structure. 3800 * @flag: the els command code to be accepted. 3801 * @oldiocb: pointer to the original lpfc command iocb data structure. 3802 * @ndlp: pointer to a node-list data structure. 3803 * @mbox: pointer to the driver internal queue element for mailbox command. 3804 * 3805 * This routine prepares and issues an Accept (ACC) response IOCB 3806 * command. It uses the @flag to properly set up the IOCB field for the 3807 * specific ACC response command to be issued and invokes the 3808 * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a 3809 * @mbox pointer is passed in, it will be put into the context_un.mbox 3810 * field of the IOCB for the completion callback function to issue the 3811 * mailbox command to the HBA later when callback is invoked. 
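 *
 * The @flag values handled by this routine are ELS_CMD_ACC, ELS_CMD_PLOGI
 * and ELS_CMD_PRLO; any other value causes the routine to return failure
 * without issuing a response.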
3812 * 3813 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 3814 * will be incremented by 1 for holding the ndlp and the reference to ndlp 3815 * will be stored into the context1 field of the IOCB for the completion 3816 * callback function to the corresponding response ELS IOCB command. 3817 * 3818 * Return code 3819 * 0 - Successfully issued acc response 3820 * 1 - Failed to issue acc response 3821 **/ 3822 int 3823 lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, 3824 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 3825 LPFC_MBOXQ_t *mbox) 3826 { 3827 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3828 struct lpfc_hba *phba = vport->phba; 3829 IOCB_t *icmd; 3830 IOCB_t *oldcmd; 3831 struct lpfc_iocbq *elsiocb; 3832 struct lpfc_sli *psli; 3833 uint8_t *pcmd; 3834 uint16_t cmdsize; 3835 int rc; 3836 ELS_PKT *els_pkt_ptr; 3837 3838 psli = &phba->sli; 3839 oldcmd = &oldiocb->iocb; 3840 3841 switch (flag) { 3842 case ELS_CMD_ACC: 3843 cmdsize = sizeof(uint32_t); 3844 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 3845 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 3846 if (!elsiocb) { 3847 spin_lock_irq(shost->host_lock); 3848 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 3849 spin_unlock_irq(shost->host_lock); 3850 return 1; 3851 } 3852 3853 icmd = &elsiocb->iocb; 3854 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 3855 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 3856 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3857 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 3858 pcmd += sizeof(uint32_t); 3859 3860 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 3861 "Issue ACC: did:x%x flg:x%x", 3862 ndlp->nlp_DID, ndlp->nlp_flag, 0); 3863 break; 3864 case ELS_CMD_PLOGI: 3865 cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t)); 3866 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 3867 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 3868 if (!elsiocb) 3869 return 1; 3870 3871 icmd = &elsiocb->iocb; 3872 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 3873 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 3874 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3875 3876 if (mbox) 3877 elsiocb->context_un.mbox = mbox; 3878 3879 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 3880 pcmd += sizeof(uint32_t); 3881 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); 3882 3883 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 3884 "Issue ACC PLOGI: did:x%x flg:x%x", 3885 ndlp->nlp_DID, ndlp->nlp_flag, 0); 3886 break; 3887 case ELS_CMD_PRLO: 3888 cmdsize = sizeof(uint32_t) + sizeof(PRLO); 3889 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 3890 ndlp, ndlp->nlp_DID, ELS_CMD_PRLO); 3891 if (!elsiocb) 3892 return 1; 3893 3894 icmd = &elsiocb->iocb; 3895 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 3896 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 3897 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3898 3899 memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt, 3900 sizeof(uint32_t) + sizeof(PRLO)); 3901 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC; 3902 els_pkt_ptr = (ELS_PKT *) pcmd; 3903 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED; 3904 3905 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 3906 "Issue ACC PRLO: did:x%x flg:x%x", 3907 ndlp->nlp_DID, ndlp->nlp_flag, 0); 3908 break; 3909 default: 3910 return 1; 3911 } 3912 /* Xmit ELS ACC response tag <ulpIoTag> */ 3913 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3914 "0128 Xmit ELS 
ACC response tag x%x, XRI: x%x, "
			 "DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x "
			 "fc_flag x%x\n",
			 elsiocb->iotag, elsiocb->iocb.ulpContext,
			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
			 ndlp->nlp_rpi, vport->fc_flag);
	if (ndlp->nlp_flag & NLP_LOGO_ACC) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag &= ~NLP_LOGO_ACC;
		spin_unlock_irq(shost->host_lock);
		elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc;
	} else {
		elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
	}

	phba->fc_stat.elsXmitACC++;
	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
	if (rc == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
	}
	return 0;
}

/**
 * lpfc_els_rsp_reject - Prepare and issue a rjt response iocb command
 * @vport: pointer to a virtual N_Port data structure.
 * @rejectError: reject error (reason code and explanation) carried in the
 *               LS_RJT payload.
 * @oldiocb: pointer to the original lpfc command iocb data structure.
 * @ndlp: pointer to a node-list data structure.
 * @mbox: pointer to the driver internal queue element for mailbox command.
 *
 * This routine prepares and issues a Reject (RJT) response IOCB
 * command. If a @mbox pointer is passed in, it will be put into the
 * context_un.mbox field of the IOCB for the completion callback function
 * to issue to the HBA later.
 *
 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 * will be stored into the context1 field of the IOCB for the completion
 * callback function to the reject response ELS IOCB command.
 *
 * Return code
 *   0 - Successfully issued reject response
 *   1 - Failed to issue reject response
 **/
int
lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
		    struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
		    LPFC_MBOXQ_t *mbox)
{
	struct lpfc_hba *phba = vport->phba;
	IOCB_t *icmd;
	IOCB_t *oldcmd;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_sli *psli;
	uint8_t *pcmd;
	uint16_t cmdsize;
	int rc;

	psli = &phba->sli;
	cmdsize = 2 * sizeof(uint32_t);
	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
				     ndlp->nlp_DID, ELS_CMD_LS_RJT);
	if (!elsiocb)
		return 1;

	icmd = &elsiocb->iocb;
	oldcmd = &oldiocb->iocb;
	icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
	icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);

	*((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
	pcmd += sizeof(uint32_t);
	*((uint32_t *) (pcmd)) = rejectError;

	if (mbox)
		elsiocb->context_un.mbox = mbox;

	/* Xmit ELS RJT <err> response tag <ulpIoTag> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0129 Xmit ELS RJT x%x response tag x%x "
			 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
			 "rpi x%x\n",
			 rejectError, elsiocb->iotag,
			 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
			 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
		"Issue LS_RJT: did:x%x flg:x%x err:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, rejectError);

	phba->fc_stat.elsXmitLSRJT++;
	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);

	if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb); 4012 return 1; 4013 } 4014 return 0; 4015 } 4016 4017 /** 4018 * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd 4019 * @vport: pointer to a virtual N_Port data structure. 4020 * @oldiocb: pointer to the original lpfc command iocb data structure. 4021 * @ndlp: pointer to a node-list data structure. 4022 * 4023 * This routine prepares and issues an Accept (ACC) response to Address 4024 * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB 4025 * and invokes the lpfc_sli_issue_iocb() routine to send out the command. 4026 * 4027 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 4028 * will be incremented by 1 for holding the ndlp and the reference to ndlp 4029 * will be stored into the context1 field of the IOCB for the completion 4030 * callback function to the ADISC Accept response ELS IOCB command. 4031 * 4032 * Return code 4033 * 0 - Successfully issued acc adisc response 4034 * 1 - Failed to issue adisc acc response 4035 **/ 4036 int 4037 lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 4038 struct lpfc_nodelist *ndlp) 4039 { 4040 struct lpfc_hba *phba = vport->phba; 4041 ADISC *ap; 4042 IOCB_t *icmd, *oldcmd; 4043 struct lpfc_iocbq *elsiocb; 4044 uint8_t *pcmd; 4045 uint16_t cmdsize; 4046 int rc; 4047 4048 cmdsize = sizeof(uint32_t) + sizeof(ADISC); 4049 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 4050 ndlp->nlp_DID, ELS_CMD_ACC); 4051 if (!elsiocb) 4052 return 1; 4053 4054 icmd = &elsiocb->iocb; 4055 oldcmd = &oldiocb->iocb; 4056 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 4057 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 4058 4059 /* Xmit ADISC ACC response tag <ulpIoTag> */ 4060 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4061 "0130 Xmit ADISC ACC response iotag x%x xri: " 4062 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n", 4063 elsiocb->iotag, elsiocb->iocb.ulpContext, 4064 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 4065 ndlp->nlp_rpi); 4066 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 4067 4068 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 4069 pcmd += sizeof(uint32_t); 4070 4071 ap = (ADISC *) (pcmd); 4072 ap->hardAL_PA = phba->fc_pref_ALPA; 4073 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 4074 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 4075 ap->DID = be32_to_cpu(vport->fc_myDID); 4076 4077 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4078 "Issue ACC ADISC: did:x%x flg:x%x", 4079 ndlp->nlp_DID, ndlp->nlp_flag, 0); 4080 4081 phba->fc_stat.elsXmitACC++; 4082 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 4083 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 4084 if (rc == IOCB_ERROR) { 4085 lpfc_els_free_iocb(phba, elsiocb); 4086 return 1; 4087 } 4088 return 0; 4089 } 4090 4091 /** 4092 * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd 4093 * @vport: pointer to a virtual N_Port data structure. 4094 * @oldiocb: pointer to the original lpfc command iocb data structure. 4095 * @ndlp: pointer to a node-list data structure. 4096 * 4097 * This routine prepares and issues an Accept (ACC) response to Process 4098 * Login (PRLI) ELS command. It simply prepares the payload of the IOCB 4099 * and invokes the lpfc_sli_issue_iocb() routine to send out the command. 
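 *
 * When the remote port is an FCP target and the adapter firmware reports
 * feature level 2 or higher, the Retry, ConfmComplAllowed and TaskRetryIdReq
 * bits are also set in the PRLI parameter page to advertise FC-TAPE support.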
4100 * 4101 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 4102 * will be incremented by 1 for holding the ndlp and the reference to ndlp 4103 * will be stored into the context1 field of the IOCB for the completion 4104 * callback function to the PRLI Accept response ELS IOCB command. 4105 * 4106 * Return code 4107 * 0 - Successfully issued acc prli response 4108 * 1 - Failed to issue acc prli response 4109 **/ 4110 int 4111 lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 4112 struct lpfc_nodelist *ndlp) 4113 { 4114 struct lpfc_hba *phba = vport->phba; 4115 PRLI *npr; 4116 lpfc_vpd_t *vpd; 4117 IOCB_t *icmd; 4118 IOCB_t *oldcmd; 4119 struct lpfc_iocbq *elsiocb; 4120 struct lpfc_sli *psli; 4121 uint8_t *pcmd; 4122 uint16_t cmdsize; 4123 int rc; 4124 4125 psli = &phba->sli; 4126 4127 cmdsize = sizeof(uint32_t) + sizeof(PRLI); 4128 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 4129 ndlp->nlp_DID, (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK))); 4130 if (!elsiocb) 4131 return 1; 4132 4133 icmd = &elsiocb->iocb; 4134 oldcmd = &oldiocb->iocb; 4135 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 4136 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 4137 4138 /* Xmit PRLI ACC response tag <ulpIoTag> */ 4139 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4140 "0131 Xmit PRLI ACC response tag x%x xri x%x, " 4141 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 4142 elsiocb->iotag, elsiocb->iocb.ulpContext, 4143 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 4144 ndlp->nlp_rpi); 4145 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 4146 4147 *((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)); 4148 pcmd += sizeof(uint32_t); 4149 4150 /* For PRLI, remainder of payload is PRLI parameter page */ 4151 memset(pcmd, 0, sizeof(PRLI)); 4152 4153 npr = (PRLI *) pcmd; 4154 vpd = &phba->vpd; 4155 /* 4156 * If the remote port is a target and our firmware version is 3.20 or 4157 * later, set the following bits for FC-TAPE support. 4158 */ 4159 if ((ndlp->nlp_type & NLP_FCP_TARGET) && 4160 (vpd->rev.feaLevelHigh >= 0x02)) { 4161 npr->ConfmComplAllowed = 1; 4162 npr->Retry = 1; 4163 npr->TaskRetryIdReq = 1; 4164 } 4165 4166 npr->acceptRspCode = PRLI_REQ_EXECUTED; 4167 npr->estabImagePair = 1; 4168 npr->readXferRdyDis = 1; 4169 npr->ConfmComplAllowed = 1; 4170 4171 npr->prliType = PRLI_FCP_TYPE; 4172 npr->initiatorFunc = 1; 4173 4174 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4175 "Issue ACC PRLI: did:x%x flg:x%x", 4176 ndlp->nlp_DID, ndlp->nlp_flag, 0); 4177 4178 phba->fc_stat.elsXmitACC++; 4179 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 4180 4181 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 4182 if (rc == IOCB_ERROR) { 4183 lpfc_els_free_iocb(phba, elsiocb); 4184 return 1; 4185 } 4186 return 0; 4187 } 4188 4189 /** 4190 * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command 4191 * @vport: pointer to a virtual N_Port data structure. 4192 * @format: rnid command format. 4193 * @oldiocb: pointer to the original lpfc command iocb data structure. 4194 * @ndlp: pointer to a node-list data structure. 4195 * 4196 * This routine issues a Request Node Identification Data (RNID) Accept 4197 * (ACC) response. It constructs the RNID ACC response command according to 4198 * the proper @format and then calls the lpfc_sli_issue_iocb() routine to 4199 * issue the response. Note that this command does not need to hold the ndlp 4200 * reference count for the callback. 
So, the ndlp reference count taken by 4201 * the lpfc_prep_els_iocb() routine is put back and the context1 field of 4202 * IOCB is set to NULL to indicate to the lpfc_els_free_iocb() routine that 4203 * there is no ndlp reference available. 4204 * 4205 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 4206 * will be incremented by 1 for holding the ndlp and the reference to ndlp 4207 * will be stored into the context1 field of the IOCB for the completion 4208 * callback function. However, for the RNID Accept Response ELS command, 4209 * this is undone later by this routine after the IOCB is allocated. 4210 * 4211 * Return code 4212 * 0 - Successfully issued acc rnid response 4213 * 1 - Failed to issue acc rnid response 4214 **/ 4215 static int 4216 lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, 4217 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 4218 { 4219 struct lpfc_hba *phba = vport->phba; 4220 RNID *rn; 4221 IOCB_t *icmd, *oldcmd; 4222 struct lpfc_iocbq *elsiocb; 4223 struct lpfc_sli *psli; 4224 uint8_t *pcmd; 4225 uint16_t cmdsize; 4226 int rc; 4227 4228 psli = &phba->sli; 4229 cmdsize = sizeof(uint32_t) + sizeof(uint32_t) 4230 + (2 * sizeof(struct lpfc_name)); 4231 if (format) 4232 cmdsize += sizeof(RNID_TOP_DISC); 4233 4234 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 4235 ndlp->nlp_DID, ELS_CMD_ACC); 4236 if (!elsiocb) 4237 return 1; 4238 4239 icmd = &elsiocb->iocb; 4240 oldcmd = &oldiocb->iocb; 4241 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 4242 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 4243 4244 /* Xmit RNID ACC response tag <ulpIoTag> */ 4245 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4246 "0132 Xmit RNID ACC response tag x%x xri x%x\n", 4247 elsiocb->iotag, elsiocb->iocb.ulpContext); 4248 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 4249 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 4250 pcmd += sizeof(uint32_t); 4251 4252 memset(pcmd, 0, sizeof(RNID)); 4253 rn = (RNID *) (pcmd); 4254 rn->Format = format; 4255 rn->CommonLen = (2 * sizeof(struct lpfc_name)); 4256 memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 4257 memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 4258 switch (format) { 4259 case 0: 4260 rn->SpecificLen = 0; 4261 break; 4262 case RNID_TOPOLOGY_DISC: 4263 rn->SpecificLen = sizeof(RNID_TOP_DISC); 4264 memcpy(&rn->un.topologyDisc.portName, 4265 &vport->fc_portname, sizeof(struct lpfc_name)); 4266 rn->un.topologyDisc.unitType = RNID_HBA; 4267 rn->un.topologyDisc.physPort = 0; 4268 rn->un.topologyDisc.attachedNodes = 0; 4269 break; 4270 default: 4271 rn->CommonLen = 0; 4272 rn->SpecificLen = 0; 4273 break; 4274 } 4275 4276 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4277 "Issue ACC RNID: did:x%x flg:x%x", 4278 ndlp->nlp_DID, ndlp->nlp_flag, 0); 4279 4280 phba->fc_stat.elsXmitACC++; 4281 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 4282 4283 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 4284 if (rc == IOCB_ERROR) { 4285 lpfc_els_free_iocb(phba, elsiocb); 4286 return 1; 4287 } 4288 return 0; 4289 } 4290 4291 /** 4292 * lpfc_els_clear_rrq - Clear the rq that this rrq describes. 4293 * @vport: pointer to a virtual N_Port data structure. 4294 * @iocb: pointer to the lpfc command iocb data structure. 4295 * @ndlp: pointer to a node-list data structure. 
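 *
 * This routine parses the RRQ payload of the unsolicited command, selects
 * the OX_ID or RX_ID of the exchange depending on whether the local port
 * originated the exchange, and clears the matching active RRQ, if any.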
4296 * 4297 * Return 4298 **/ 4299 static void 4300 lpfc_els_clear_rrq(struct lpfc_vport *vport, 4301 struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp) 4302 { 4303 struct lpfc_hba *phba = vport->phba; 4304 uint8_t *pcmd; 4305 struct RRQ *rrq; 4306 uint16_t rxid; 4307 uint16_t xri; 4308 struct lpfc_node_rrq *prrq; 4309 4310 4311 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) iocb->context2)->virt); 4312 pcmd += sizeof(uint32_t); 4313 rrq = (struct RRQ *)pcmd; 4314 rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg); 4315 rxid = bf_get(rrq_rxid, rrq); 4316 4317 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4318 "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x" 4319 " x%x x%x\n", 4320 be32_to_cpu(bf_get(rrq_did, rrq)), 4321 bf_get(rrq_oxid, rrq), 4322 rxid, 4323 iocb->iotag, iocb->iocb.ulpContext); 4324 4325 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4326 "Clear RRQ: did:x%x flg:x%x exchg:x%.08x", 4327 ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg); 4328 if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq))) 4329 xri = bf_get(rrq_oxid, rrq); 4330 else 4331 xri = rxid; 4332 prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID); 4333 if (prrq) 4334 lpfc_clr_rrq_active(phba, xri, prrq); 4335 return; 4336 } 4337 4338 /** 4339 * lpfc_els_rsp_echo_acc - Issue echo acc response 4340 * @vport: pointer to a virtual N_Port data structure. 4341 * @data: pointer to echo data to return in the accept. 4342 * @oldiocb: pointer to the original lpfc command iocb data structure. 4343 * @ndlp: pointer to a node-list data structure. 4344 * 4345 * Return code 4346 * 0 - Successfully issued acc echo response 4347 * 1 - Failed to issue acc echo response 4348 **/ 4349 static int 4350 lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data, 4351 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 4352 { 4353 struct lpfc_hba *phba = vport->phba; 4354 struct lpfc_iocbq *elsiocb; 4355 struct lpfc_sli *psli; 4356 uint8_t *pcmd; 4357 uint16_t cmdsize; 4358 int rc; 4359 4360 psli = &phba->sli; 4361 cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len; 4362 4363 /* The accumulated length can exceed the BPL_SIZE. For 4364 * now, use this as the limit 4365 */ 4366 if (cmdsize > LPFC_BPL_SIZE) 4367 cmdsize = LPFC_BPL_SIZE; 4368 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 4369 ndlp->nlp_DID, ELS_CMD_ACC); 4370 if (!elsiocb) 4371 return 1; 4372 4373 elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext; /* Xri / rx_id */ 4374 elsiocb->iocb.unsli3.rcvsli3.ox_id = oldiocb->iocb.unsli3.rcvsli3.ox_id; 4375 4376 /* Xmit ECHO ACC response tag <ulpIoTag> */ 4377 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4378 "2876 Xmit ECHO ACC response tag x%x xri x%x\n", 4379 elsiocb->iotag, elsiocb->iocb.ulpContext); 4380 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 4381 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 4382 pcmd += sizeof(uint32_t); 4383 memcpy(pcmd, data, cmdsize - sizeof(uint32_t)); 4384 4385 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4386 "Issue ACC ECHO: did:x%x flg:x%x", 4387 ndlp->nlp_DID, ndlp->nlp_flag, 0); 4388 4389 phba->fc_stat.elsXmitACC++; 4390 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 4391 4392 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 4393 if (rc == IOCB_ERROR) { 4394 lpfc_els_free_iocb(phba, elsiocb); 4395 return 1; 4396 } 4397 return 0; 4398 } 4399 4400 /** 4401 * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport 4402 * @vport: pointer to a host virtual N_Port data structure. 
 *
 * This routine issues Address Discover (ADISC) ELS commands to those
 * N_Ports which are in node port recovery state and ADISC has not been issued
 * for the @vport. Each time an ELS ADISC IOCB is issued by invoking the
 * lpfc_issue_els_adisc() routine, the per @vport discover count
 * (num_disc_nodes) shall be incremented. If num_disc_nodes reaches a
 * pre-configured threshold (cfg_discovery_threads), the @vport fc_flag will
 * be marked with the FC_NLP_MORE bit and issuing of the remaining ADISC
 * IOCBs is deferred for a later pass. On the other hand, if after walking
 * through all the ndlps on the @vport no ADISC IOCB was issued, the
 * FC_NLP_MORE bit shall be cleared from the @vport fc_flag, indicating
 * there are no more ADISCs to be sent.
 *
 * Return code
 *   The number of N_Ports with adisc issued.
 **/
int
lpfc_els_disc_adisc(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp, *next_ndlp;
	int sentadisc = 0;

	/* go thru NPR nodes and issue any remaining ELS ADISCs */
	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
		    (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
		    (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) {
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
			spin_unlock_irq(shost->host_lock);
			ndlp->nlp_prev_state = ndlp->nlp_state;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
			lpfc_issue_els_adisc(vport, ndlp, 0);
			sentadisc++;
			vport->num_disc_nodes++;
			if (vport->num_disc_nodes >=
			    vport->cfg_discovery_threads) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag |= FC_NLP_MORE;
				spin_unlock_irq(shost->host_lock);
				break;
			}
		}
	}
	if (sentadisc == 0) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_NLP_MORE;
		spin_unlock_irq(shost->host_lock);
	}
	return sentadisc;
}

/**
 * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports
 * which are in node port recovery state on a @vport. Each time an ELS
 * PLOGI IOCB is issued by invoking the lpfc_issue_els_plogi() routine, the
 * per @vport discover count (num_disc_nodes) shall be incremented. If
 * num_disc_nodes reaches a pre-configured threshold
 * (cfg_discovery_threads), the @vport fc_flag will be marked with the
 * FC_NLP_MORE bit and issuing of the remaining PLOGI IOCBs is deferred for
 * a later pass. On the other hand, if after walking through all the ndlps
 * on the @vport no PLOGI IOCB was issued, the FC_NLP_MORE bit shall be
 * cleared from the @vport fc_flag, indicating there are no more PLOGIs to
 * be sent.
 *
 * Return code
 *   The number of N_Ports with plogi issued.
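 *
 * Note: when at least one PLOGI has been issued, the discovery timer is
 * (re)started via lpfc_set_disctmo() before returning.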
 **/
int
lpfc_els_disc_plogi(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp, *next_ndlp;
	int sentplogi = 0;

	/* go thru NPR nodes and issue any remaining ELS PLOGIs */
	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
		    (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
		    (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 &&
		    (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) {
			ndlp->nlp_prev_state = ndlp->nlp_state;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
			sentplogi++;
			vport->num_disc_nodes++;
			if (vport->num_disc_nodes >=
			    vport->cfg_discovery_threads) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag |= FC_NLP_MORE;
				spin_unlock_irq(shost->host_lock);
				break;
			}
		}
	}
	if (sentplogi) {
		lpfc_set_disctmo(vport);
	}
	else {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_NLP_MORE;
		spin_unlock_irq(shost->host_lock);
	}
	return sentplogi;
}

/**
 * lpfc_els_flush_rscn - Clean up any rscn activities with a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine cleans up any Registration State Change Notification
 * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the
 * @vport together with the host_lock is used to prevent multiple threads
 * from trying to access the RSCN array on the same @vport at the same time.
 **/
void
lpfc_els_flush_rscn(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	int i;

	spin_lock_irq(shost->host_lock);
	if (vport->fc_rscn_flush) {
		/* Another thread is walking fc_rscn_id_list on this vport */
		spin_unlock_irq(shost->host_lock);
		return;
	}
	/* Indicate we are walking lpfc_els_flush_rscn on this vport */
	vport->fc_rscn_flush = 1;
	spin_unlock_irq(shost->host_lock);

	for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
		lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]);
		vport->fc_rscn_id_list[i] = NULL;
	}
	spin_lock_irq(shost->host_lock);
	vport->fc_rscn_id_cnt = 0;
	vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
	spin_unlock_irq(shost->host_lock);
	lpfc_can_disctmo(vport);
	/* Indicate we are done walking this fc_rscn_id_list */
	vport->fc_rscn_flush = 0;
}

/**
 * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did
 * @vport: pointer to a host virtual N_Port data structure.
 * @did: remote destination port identifier.
 *
 * This routine checks whether there is any pending Registration State
 * Change Notification (RSCN) to a @did on @vport.
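 *
 * Each entry in a received RSCN payload carries an address format (port,
 * area, domain or fabric) which determines how much of the @did must match
 * the affected address for the RSCN to be considered pending for that @did.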
4563 * 4564 * Return code 4565 * None zero - The @did matched with a pending rscn 4566 * 0 - not able to match @did with a pending rscn 4567 **/ 4568 int 4569 lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did) 4570 { 4571 D_ID ns_did; 4572 D_ID rscn_did; 4573 uint32_t *lp; 4574 uint32_t payload_len, i; 4575 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4576 4577 ns_did.un.word = did; 4578 4579 /* Never match fabric nodes for RSCNs */ 4580 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 4581 return 0; 4582 4583 /* If we are doing a FULL RSCN rediscovery, match everything */ 4584 if (vport->fc_flag & FC_RSCN_DISCOVERY) 4585 return did; 4586 4587 spin_lock_irq(shost->host_lock); 4588 if (vport->fc_rscn_flush) { 4589 /* Another thread is walking fc_rscn_id_list on this vport */ 4590 spin_unlock_irq(shost->host_lock); 4591 return 0; 4592 } 4593 /* Indicate we are walking fc_rscn_id_list on this vport */ 4594 vport->fc_rscn_flush = 1; 4595 spin_unlock_irq(shost->host_lock); 4596 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 4597 lp = vport->fc_rscn_id_list[i]->virt; 4598 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); 4599 payload_len -= sizeof(uint32_t); /* take off word 0 */ 4600 while (payload_len) { 4601 rscn_did.un.word = be32_to_cpu(*lp++); 4602 payload_len -= sizeof(uint32_t); 4603 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) { 4604 case RSCN_ADDRESS_FORMAT_PORT: 4605 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 4606 && (ns_did.un.b.area == rscn_did.un.b.area) 4607 && (ns_did.un.b.id == rscn_did.un.b.id)) 4608 goto return_did_out; 4609 break; 4610 case RSCN_ADDRESS_FORMAT_AREA: 4611 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 4612 && (ns_did.un.b.area == rscn_did.un.b.area)) 4613 goto return_did_out; 4614 break; 4615 case RSCN_ADDRESS_FORMAT_DOMAIN: 4616 if (ns_did.un.b.domain == rscn_did.un.b.domain) 4617 goto return_did_out; 4618 break; 4619 case RSCN_ADDRESS_FORMAT_FABRIC: 4620 goto return_did_out; 4621 } 4622 } 4623 } 4624 /* Indicate we are done with walking fc_rscn_id_list on this vport */ 4625 vport->fc_rscn_flush = 0; 4626 return 0; 4627 return_did_out: 4628 /* Indicate we are done with walking fc_rscn_id_list on this vport */ 4629 vport->fc_rscn_flush = 0; 4630 return did; 4631 } 4632 4633 /** 4634 * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn 4635 * @vport: pointer to a host virtual N_Port data structure. 4636 * 4637 * This routine sends recovery (NLP_EVT_DEVICE_RECOVERY) event to the 4638 * state machine for a @vport's nodes that are with pending RSCN (Registration 4639 * State Change Notification). 4640 * 4641 * Return code 4642 * 0 - Successful (currently alway return 0) 4643 **/ 4644 static int 4645 lpfc_rscn_recovery_check(struct lpfc_vport *vport) 4646 { 4647 struct lpfc_nodelist *ndlp = NULL; 4648 4649 /* Move all affected nodes by pending RSCNs to NPR state. */ 4650 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 4651 if (!NLP_CHK_NODE_ACT(ndlp) || 4652 (ndlp->nlp_state == NLP_STE_UNUSED_NODE) || 4653 !lpfc_rscn_payload_check(vport, ndlp->nlp_DID)) 4654 continue; 4655 lpfc_disc_state_machine(vport, ndlp, NULL, 4656 NLP_EVT_DEVICE_RECOVERY); 4657 lpfc_cancel_retry_delay_tmo(vport, ndlp); 4658 } 4659 return 0; 4660 } 4661 4662 /** 4663 * lpfc_send_rscn_event - Send an RSCN event to management application 4664 * @vport: pointer to a host virtual N_Port data structure. 4665 * @cmdiocb: pointer to lpfc command iocb data structure. 
4666 * 4667 * lpfc_send_rscn_event sends an RSCN netlink event to management 4668 * applications. 4669 */ 4670 static void 4671 lpfc_send_rscn_event(struct lpfc_vport *vport, 4672 struct lpfc_iocbq *cmdiocb) 4673 { 4674 struct lpfc_dmabuf *pcmd; 4675 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4676 uint32_t *payload_ptr; 4677 uint32_t payload_len; 4678 struct lpfc_rscn_event_header *rscn_event_data; 4679 4680 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 4681 payload_ptr = (uint32_t *) pcmd->virt; 4682 payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK); 4683 4684 rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) + 4685 payload_len, GFP_KERNEL); 4686 if (!rscn_event_data) { 4687 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 4688 "0147 Failed to allocate memory for RSCN event\n"); 4689 return; 4690 } 4691 rscn_event_data->event_type = FC_REG_RSCN_EVENT; 4692 rscn_event_data->payload_length = payload_len; 4693 memcpy(rscn_event_data->rscn_payload, payload_ptr, 4694 payload_len); 4695 4696 fc_host_post_vendor_event(shost, 4697 fc_get_event_number(), 4698 sizeof(struct lpfc_els_event_header) + payload_len, 4699 (char *)rscn_event_data, 4700 LPFC_NL_VENDOR_ID); 4701 4702 kfree(rscn_event_data); 4703 } 4704 4705 /** 4706 * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb 4707 * @vport: pointer to a host virtual N_Port data structure. 4708 * @cmdiocb: pointer to lpfc command iocb data structure. 4709 * @ndlp: pointer to a node-list data structure. 4710 * 4711 * This routine processes an unsolicited RSCN (Registration State Change 4712 * Notification) IOCB. First, the payload of the unsolicited RSCN is walked 4713 * to invoke fc_host_post_event() routine to the FC transport layer. If the 4714 * discover state machine is about to begin discovery, it just accepts the 4715 * RSCN and the discovery process will satisfy the RSCN. If this RSCN only 4716 * contains N_Port IDs for other vports on this HBA, it just accepts the 4717 * RSCN and ignore processing it. If the state machine is in the recovery 4718 * state, the fc_rscn_id_list of this @vport is walked and the 4719 * lpfc_rscn_recovery_check() routine is invoked to send recovery event for 4720 * all nodes that match RSCN payload. Otherwise, the lpfc_els_handle_rscn() 4721 * routine is invoked to handle the RSCN event. 
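 *
 * While a previous RSCN is still being processed, a newly received RSCN
 * payload is deferred: up to FC_MAX_HOLD_RSCN payloads are saved on the
 * @vport fc_rscn_id_list, and beyond that limit the FC_RSCN_DISCOVERY flag
 * is set to force a full rediscovery instead.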
4722 * 4723 * Return code 4724 * 0 - Just sent the acc response 4725 * 1 - Sent the acc response and waited for name server completion 4726 **/ 4727 static int 4728 lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 4729 struct lpfc_nodelist *ndlp) 4730 { 4731 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4732 struct lpfc_hba *phba = vport->phba; 4733 struct lpfc_dmabuf *pcmd; 4734 uint32_t *lp, *datap; 4735 IOCB_t *icmd; 4736 uint32_t payload_len, length, nportid, *cmd; 4737 int rscn_cnt; 4738 int rscn_id = 0, hba_id = 0; 4739 int i; 4740 4741 icmd = &cmdiocb->iocb; 4742 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 4743 lp = (uint32_t *) pcmd->virt; 4744 4745 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); 4746 payload_len -= sizeof(uint32_t); /* take off word 0 */ 4747 /* RSCN received */ 4748 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 4749 "0214 RSCN received Data: x%x x%x x%x x%x\n", 4750 vport->fc_flag, payload_len, *lp, 4751 vport->fc_rscn_id_cnt); 4752 4753 /* Send an RSCN event to the management application */ 4754 lpfc_send_rscn_event(vport, cmdiocb); 4755 4756 for (i = 0; i < payload_len/sizeof(uint32_t); i++) 4757 fc_host_post_event(shost, fc_get_event_number(), 4758 FCH_EVT_RSCN, lp[i]); 4759 4760 /* If we are about to begin discovery, just ACC the RSCN. 4761 * Discovery processing will satisfy it. 4762 */ 4763 if (vport->port_state <= LPFC_NS_QRY) { 4764 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 4765 "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x", 4766 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 4767 4768 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 4769 return 0; 4770 } 4771 4772 /* If this RSCN just contains NPortIDs for other vports on this HBA, 4773 * just ACC and ignore it. 4774 */ 4775 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 4776 !(vport->cfg_peer_port_login)) { 4777 i = payload_len; 4778 datap = lp; 4779 while (i > 0) { 4780 nportid = *datap++; 4781 nportid = ((be32_to_cpu(nportid)) & Mask_DID); 4782 i -= sizeof(uint32_t); 4783 rscn_id++; 4784 if (lpfc_find_vport_by_did(phba, nportid)) 4785 hba_id++; 4786 } 4787 if (rscn_id == hba_id) { 4788 /* ALL NPortIDs in RSCN are on HBA */ 4789 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 4790 "0219 Ignore RSCN " 4791 "Data: x%x x%x x%x x%x\n", 4792 vport->fc_flag, payload_len, 4793 *lp, vport->fc_rscn_id_cnt); 4794 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 4795 "RCV RSCN vport: did:x%x/ste:x%x flg:x%x", 4796 ndlp->nlp_DID, vport->port_state, 4797 ndlp->nlp_flag); 4798 4799 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, 4800 ndlp, NULL); 4801 return 0; 4802 } 4803 } 4804 4805 spin_lock_irq(shost->host_lock); 4806 if (vport->fc_rscn_flush) { 4807 /* Another thread is walking fc_rscn_id_list on this vport */ 4808 vport->fc_flag |= FC_RSCN_DISCOVERY; 4809 spin_unlock_irq(shost->host_lock); 4810 /* Send back ACC */ 4811 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 4812 return 0; 4813 } 4814 /* Indicate we are walking fc_rscn_id_list on this vport */ 4815 vport->fc_rscn_flush = 1; 4816 spin_unlock_irq(shost->host_lock); 4817 /* Get the array count after successfully have the token */ 4818 rscn_cnt = vport->fc_rscn_id_cnt; 4819 /* If we are already processing an RSCN, save the received 4820 * RSCN payload buffer, cmdiocb->context2 to process later. 
4821 */ 4822 if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) { 4823 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 4824 "RCV RSCN defer: did:x%x/ste:x%x flg:x%x", 4825 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 4826 4827 spin_lock_irq(shost->host_lock); 4828 vport->fc_flag |= FC_RSCN_DEFERRED; 4829 if ((rscn_cnt < FC_MAX_HOLD_RSCN) && 4830 !(vport->fc_flag & FC_RSCN_DISCOVERY)) { 4831 vport->fc_flag |= FC_RSCN_MODE; 4832 spin_unlock_irq(shost->host_lock); 4833 if (rscn_cnt) { 4834 cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt; 4835 length = be32_to_cpu(*cmd & ~ELS_CMD_MASK); 4836 } 4837 if ((rscn_cnt) && 4838 (payload_len + length <= LPFC_BPL_SIZE)) { 4839 *cmd &= ELS_CMD_MASK; 4840 *cmd |= cpu_to_be32(payload_len + length); 4841 memcpy(((uint8_t *)cmd) + length, lp, 4842 payload_len); 4843 } else { 4844 vport->fc_rscn_id_list[rscn_cnt] = pcmd; 4845 vport->fc_rscn_id_cnt++; 4846 /* If we zero, cmdiocb->context2, the calling 4847 * routine will not try to free it. 4848 */ 4849 cmdiocb->context2 = NULL; 4850 } 4851 /* Deferred RSCN */ 4852 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 4853 "0235 Deferred RSCN " 4854 "Data: x%x x%x x%x\n", 4855 vport->fc_rscn_id_cnt, vport->fc_flag, 4856 vport->port_state); 4857 } else { 4858 vport->fc_flag |= FC_RSCN_DISCOVERY; 4859 spin_unlock_irq(shost->host_lock); 4860 /* ReDiscovery RSCN */ 4861 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 4862 "0234 ReDiscovery RSCN " 4863 "Data: x%x x%x x%x\n", 4864 vport->fc_rscn_id_cnt, vport->fc_flag, 4865 vport->port_state); 4866 } 4867 /* Indicate we are done walking fc_rscn_id_list on this vport */ 4868 vport->fc_rscn_flush = 0; 4869 /* Send back ACC */ 4870 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 4871 /* send RECOVERY event for ALL nodes that match RSCN payload */ 4872 lpfc_rscn_recovery_check(vport); 4873 spin_lock_irq(shost->host_lock); 4874 vport->fc_flag &= ~FC_RSCN_DEFERRED; 4875 spin_unlock_irq(shost->host_lock); 4876 return 0; 4877 } 4878 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 4879 "RCV RSCN: did:x%x/ste:x%x flg:x%x", 4880 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 4881 4882 spin_lock_irq(shost->host_lock); 4883 vport->fc_flag |= FC_RSCN_MODE; 4884 spin_unlock_irq(shost->host_lock); 4885 vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd; 4886 /* Indicate we are done walking fc_rscn_id_list on this vport */ 4887 vport->fc_rscn_flush = 0; 4888 /* 4889 * If we zero, cmdiocb->context2, the calling routine will 4890 * not try to free it. 4891 */ 4892 cmdiocb->context2 = NULL; 4893 lpfc_set_disctmo(vport); 4894 /* Send back ACC */ 4895 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 4896 /* send RECOVERY event for ALL nodes that match RSCN payload */ 4897 lpfc_rscn_recovery_check(vport); 4898 return lpfc_els_handle_rscn(vport); 4899 } 4900 4901 /** 4902 * lpfc_els_handle_rscn - Handle rscn for a vport 4903 * @vport: pointer to a host virtual N_Port data structure. 4904 * 4905 * This routine handles the Registration State Configuration Notification 4906 * (RSCN) for a @vport. If login to NameServer does not exist, a new ndlp shall 4907 * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise, 4908 * if the ndlp to NameServer exists, a Common Transport (CT) command to the 4909 * NameServer shall be issued. If CT command to the NameServer fails to be 4910 * issued, the lpfc_els_flush_rscn() routine shall be invoked to clean up any 4911 * RSCN activities with the @vport. 
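 *
 * If the @vport is being unloaded (FC_UNLOADING), the RSCN is simply flushed
 * and no discovery is started.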
4912 * 4913 * Return code 4914 * 0 - Cleaned up rscn on the @vport 4915 * 1 - Wait for plogi to name server before proceed 4916 **/ 4917 int 4918 lpfc_els_handle_rscn(struct lpfc_vport *vport) 4919 { 4920 struct lpfc_nodelist *ndlp; 4921 struct lpfc_hba *phba = vport->phba; 4922 4923 /* Ignore RSCN if the port is being torn down. */ 4924 if (vport->load_flag & FC_UNLOADING) { 4925 lpfc_els_flush_rscn(vport); 4926 return 0; 4927 } 4928 4929 /* Start timer for RSCN processing */ 4930 lpfc_set_disctmo(vport); 4931 4932 /* RSCN processed */ 4933 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 4934 "0215 RSCN processed Data: x%x x%x x%x x%x\n", 4935 vport->fc_flag, 0, vport->fc_rscn_id_cnt, 4936 vport->port_state); 4937 4938 /* To process RSCN, first compare RSCN data with NameServer */ 4939 vport->fc_ns_retry = 0; 4940 vport->num_disc_nodes = 0; 4941 4942 ndlp = lpfc_findnode_did(vport, NameServer_DID); 4943 if (ndlp && NLP_CHK_NODE_ACT(ndlp) 4944 && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { 4945 /* Good ndlp, issue CT Request to NameServer */ 4946 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0) == 0) 4947 /* Wait for NameServer query cmpl before we can 4948 continue */ 4949 return 1; 4950 } else { 4951 /* If login to NameServer does not exist, issue one */ 4952 /* Good status, issue PLOGI to NameServer */ 4953 ndlp = lpfc_findnode_did(vport, NameServer_DID); 4954 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) 4955 /* Wait for NameServer login cmpl before we can 4956 continue */ 4957 return 1; 4958 4959 if (ndlp) { 4960 ndlp = lpfc_enable_node(vport, ndlp, 4961 NLP_STE_PLOGI_ISSUE); 4962 if (!ndlp) { 4963 lpfc_els_flush_rscn(vport); 4964 return 0; 4965 } 4966 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE; 4967 } else { 4968 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 4969 if (!ndlp) { 4970 lpfc_els_flush_rscn(vport); 4971 return 0; 4972 } 4973 lpfc_nlp_init(vport, ndlp, NameServer_DID); 4974 ndlp->nlp_prev_state = ndlp->nlp_state; 4975 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 4976 } 4977 ndlp->nlp_type |= NLP_FABRIC; 4978 lpfc_issue_els_plogi(vport, NameServer_DID, 0); 4979 /* Wait for NameServer login cmpl before we can 4980 * continue 4981 */ 4982 return 1; 4983 } 4984 4985 lpfc_els_flush_rscn(vport); 4986 return 0; 4987 } 4988 4989 /** 4990 * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb 4991 * @vport: pointer to a host virtual N_Port data structure. 4992 * @cmdiocb: pointer to lpfc command iocb data structure. 4993 * @ndlp: pointer to a node-list data structure. 4994 * 4995 * This routine processes Fabric Login (FLOGI) IOCB received as an ELS 4996 * unsolicited event. An unsolicited FLOGI can be received in a point-to- 4997 * point topology. As an unsolicited FLOGI should not be received in a loop 4998 * mode, any unsolicited FLOGI received in loop mode shall be ignored. The 4999 * lpfc_check_sparm() routine is invoked to check the parameters in the 5000 * unsolicited FLOGI. If parameters validation failed, the routine 5001 * lpfc_els_rsp_reject() shall be called with reject reason code set to 5002 * LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the 5003 * FLOGI shall be compared with the Port WWN of the @vport to determine who 5004 * will initiate PLOGI. The higher lexicographical value party shall has 5005 * higher priority (as the winning port) and will initiate PLOGI and 5006 * communicate Port_IDs (Addresses) for both nodes in PLOGI. 
The result 5007 * of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI 5008 * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI. 5009 * 5010 * Return code 5011 * 0 - Successfully processed the unsolicited flogi 5012 * 1 - Failed to process the unsolicited flogi 5013 **/ 5014 static int 5015 lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5016 struct lpfc_nodelist *ndlp) 5017 { 5018 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 5019 struct lpfc_hba *phba = vport->phba; 5020 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 5021 uint32_t *lp = (uint32_t *) pcmd->virt; 5022 IOCB_t *icmd = &cmdiocb->iocb; 5023 struct serv_parm *sp; 5024 LPFC_MBOXQ_t *mbox; 5025 struct ls_rjt stat; 5026 uint32_t cmd, did; 5027 int rc; 5028 5029 cmd = *lp++; 5030 sp = (struct serv_parm *) lp; 5031 5032 /* FLOGI received */ 5033 5034 lpfc_set_disctmo(vport); 5035 5036 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 5037 /* We should never receive a FLOGI in loop mode, ignore it */ 5038 did = icmd->un.elsreq64.remoteID; 5039 5040 /* An FLOGI ELS command <elsCmd> was received from DID <did> in 5041 Loop Mode */ 5042 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 5043 "0113 An FLOGI ELS command x%x was " 5044 "received from DID x%x in Loop Mode\n", 5045 cmd, did); 5046 return 1; 5047 } 5048 5049 if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1))) { 5050 /* For a FLOGI we accept, then if our portname is greater 5051 * then the remote portname we initiate Nport login. 5052 */ 5053 5054 rc = memcmp(&vport->fc_portname, &sp->portName, 5055 sizeof(struct lpfc_name)); 5056 5057 if (!rc) { 5058 if (phba->sli_rev < LPFC_SLI_REV4) { 5059 mbox = mempool_alloc(phba->mbox_mem_pool, 5060 GFP_KERNEL); 5061 if (!mbox) 5062 return 1; 5063 lpfc_linkdown(phba); 5064 lpfc_init_link(phba, mbox, 5065 phba->cfg_topology, 5066 phba->cfg_link_speed); 5067 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; 5068 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 5069 mbox->vport = vport; 5070 rc = lpfc_sli_issue_mbox(phba, mbox, 5071 MBX_NOWAIT); 5072 lpfc_set_loopback_flag(phba); 5073 if (rc == MBX_NOT_FINISHED) 5074 mempool_free(mbox, phba->mbox_mem_pool); 5075 return 1; 5076 } else { 5077 /* abort the flogi coming back to ourselves 5078 * due to external loopback on the port. 5079 */ 5080 lpfc_els_abort_flogi(phba); 5081 return 0; 5082 } 5083 } else if (rc > 0) { /* greater than */ 5084 spin_lock_irq(shost->host_lock); 5085 vport->fc_flag |= FC_PT2PT_PLOGI; 5086 spin_unlock_irq(shost->host_lock); 5087 5088 /* If we have the high WWPN we can assign our own 5089 * myDID; otherwise, we have to WAIT for a PLOGI 5090 * from the remote NPort to find out what it 5091 * will be. 5092 */ 5093 vport->fc_myDID = PT2PT_LocalID; 5094 } 5095 5096 /* 5097 * The vport state should go to LPFC_FLOGI only 5098 * AFTER we issue a FLOGI, not receive one. 5099 */ 5100 spin_lock_irq(shost->host_lock); 5101 vport->fc_flag |= FC_PT2PT; 5102 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 5103 spin_unlock_irq(shost->host_lock); 5104 5105 /* 5106 * We temporarily set fc_myDID to make it look like we are 5107 * a Fabric. This is done just so we end up with the right 5108 * did / sid on the FLOGI ACC rsp. 
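 * (The original fc_myDID is saved in the local variable did and restored
 * right after the FLOGI ACC response has been sent.)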
5109 */ 5110 did = vport->fc_myDID; 5111 vport->fc_myDID = Fabric_DID; 5112 5113 } else { 5114 /* Reject this request because invalid parameters */ 5115 stat.un.b.lsRjtRsvd0 = 0; 5116 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 5117 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS; 5118 stat.un.b.vendorUnique = 0; 5119 5120 /* 5121 * We temporarily set fc_myDID to make it look like we are 5122 * a Fabric. This is done just so we end up with the right 5123 * did / sid on the FLOGI LS_RJT rsp. 5124 */ 5125 did = vport->fc_myDID; 5126 vport->fc_myDID = Fabric_DID; 5127 5128 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 5129 NULL); 5130 5131 /* Now lets put fc_myDID back to what its supposed to be */ 5132 vport->fc_myDID = did; 5133 5134 return 1; 5135 } 5136 5137 /* Send back ACC */ 5138 lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL); 5139 5140 /* Now lets put fc_myDID back to what its supposed to be */ 5141 vport->fc_myDID = did; 5142 5143 if (!(vport->fc_flag & FC_PT2PT_PLOGI)) { 5144 5145 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5146 if (!mbox) 5147 goto fail; 5148 5149 lpfc_config_link(phba, mbox); 5150 5151 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 5152 mbox->vport = vport; 5153 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 5154 if (rc == MBX_NOT_FINISHED) { 5155 mempool_free(mbox, phba->mbox_mem_pool); 5156 goto fail; 5157 } 5158 } 5159 5160 return 0; 5161 fail: 5162 return 1; 5163 } 5164 5165 /** 5166 * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb 5167 * @vport: pointer to a host virtual N_Port data structure. 5168 * @cmdiocb: pointer to lpfc command iocb data structure. 5169 * @ndlp: pointer to a node-list data structure. 5170 * 5171 * This routine processes Request Node Identification Data (RNID) IOCB 5172 * received as an ELS unsolicited event. Only when the RNID specified format 5173 * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data) 5174 * present, this routine will invoke the lpfc_els_rsp_rnid_acc() routine to 5175 * Accept (ACC) the RNID ELS command. All the other RNID formats are 5176 * rejected by invoking the lpfc_els_rsp_reject() routine. 5177 * 5178 * Return code 5179 * 0 - Successfully processed rnid iocb (currently always return 0) 5180 **/ 5181 static int 5182 lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5183 struct lpfc_nodelist *ndlp) 5184 { 5185 struct lpfc_dmabuf *pcmd; 5186 uint32_t *lp; 5187 IOCB_t *icmd; 5188 RNID *rn; 5189 struct ls_rjt stat; 5190 uint32_t cmd, did; 5191 5192 icmd = &cmdiocb->iocb; 5193 did = icmd->un.elsreq64.remoteID; 5194 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 5195 lp = (uint32_t *) pcmd->virt; 5196 5197 cmd = *lp++; 5198 rn = (RNID *) lp; 5199 5200 /* RNID received */ 5201 5202 switch (rn->Format) { 5203 case 0: 5204 case RNID_TOPOLOGY_DISC: 5205 /* Send back ACC */ 5206 lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp); 5207 break; 5208 default: 5209 /* Reject this request because format not supported */ 5210 stat.un.b.lsRjtRsvd0 = 0; 5211 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 5212 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 5213 stat.un.b.vendorUnique = 0; 5214 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 5215 NULL); 5216 } 5217 return 0; 5218 } 5219 5220 /** 5221 * lpfc_els_rcv_echo - Process an unsolicited echo iocb 5222 * @vport: pointer to a host virtual N_Port data structure. 5223 * @cmdiocb: pointer to lpfc command iocb data structure. 5224 * @ndlp: pointer to a node-list data structure. 
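 *
 * This routine processes an unsolicited ECHO ELS command. It skips over the
 * first word of the received payload to locate the echo data and then invokes
 * the lpfc_els_rsp_echo_acc() routine to send back the Accept (ACC) response
 * carrying that echo data to the originator.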
5225 * 5226 * Return code 5227 * 0 - Successfully processed echo iocb (currently always return 0) 5228 **/ 5229 static int 5230 lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5231 struct lpfc_nodelist *ndlp) 5232 { 5233 uint8_t *pcmd; 5234 5235 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt); 5236 5237 /* skip over first word of echo command to find echo data */ 5238 pcmd += sizeof(uint32_t); 5239 5240 lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp); 5241 return 0; 5242 } 5243 5244 /** 5245 * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb 5246 * @vport: pointer to a host virtual N_Port data structure. 5247 * @cmdiocb: pointer to lpfc command iocb data structure. 5248 * @ndlp: pointer to a node-list data structure. 5249 * 5250 * This routine processes a Link Incident Report Registration(LIRR) IOCB 5251 * received as an ELS unsolicited event. Currently, this function just invokes 5252 * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally. 5253 * 5254 * Return code 5255 * 0 - Successfully processed lirr iocb (currently always return 0) 5256 **/ 5257 static int 5258 lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5259 struct lpfc_nodelist *ndlp) 5260 { 5261 struct ls_rjt stat; 5262 5263 /* For now, unconditionally reject this command */ 5264 stat.un.b.lsRjtRsvd0 = 0; 5265 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 5266 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 5267 stat.un.b.vendorUnique = 0; 5268 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 5269 return 0; 5270 } 5271 5272 /** 5273 * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb 5274 * @vport: pointer to a host virtual N_Port data structure. 5275 * @cmdiocb: pointer to lpfc command iocb data structure. 5276 * @ndlp: pointer to a node-list data structure. 5277 * 5278 * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB 5279 * received as an ELS unsolicited event. A request to RRQ shall only 5280 * be accepted if the Originator Nx_Port N_Port_ID or the Responder 5281 * Nx_Port N_Port_ID of the target Exchange is the same as the 5282 * N_Port_ID of the Nx_Port that makes the request. If the RRQ is 5283 * not accepted, an LS_RJT with reason code "Unable to perform 5284 * command request" and reason code explanation "Invalid Originator 5285 * S_ID" shall be returned. For now, we just unconditionally accept 5286 * RRQ from the target. 5287 **/ 5288 static void 5289 lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5290 struct lpfc_nodelist *ndlp) 5291 { 5292 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 5293 if (vport->phba->sli_rev == LPFC_SLI_REV4) 5294 lpfc_els_clear_rrq(vport, cmdiocb, ndlp); 5295 } 5296 5297 /** 5298 * lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd 5299 * @phba: pointer to lpfc hba data structure. 5300 * @pmb: pointer to the driver internal queue element for mailbox command. 5301 * 5302 * This routine is the completion callback function for the MBX_READ_LNK_STAT 5303 * mailbox command. This callback function is to actually send the Accept 5304 * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It 5305 * collects the link statistics from the completion of the MBX_READ_LNK_STAT 5306 * mailbox command, constructs the RPS response with the link statistics 5307 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC 5308 * response to the RPS. 
5309 * 5310 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 5311 * will be incremented by 1 for holding the ndlp and the reference to ndlp 5312 * will be stored into the context1 field of the IOCB for the completion 5313 * callback function to the RPS Accept Response ELS IOCB command. 5314 * 5315 **/ 5316 static void 5317 lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 5318 { 5319 MAILBOX_t *mb; 5320 IOCB_t *icmd; 5321 struct RLS_RSP *rls_rsp; 5322 uint8_t *pcmd; 5323 struct lpfc_iocbq *elsiocb; 5324 struct lpfc_nodelist *ndlp; 5325 uint16_t oxid; 5326 uint16_t rxid; 5327 uint32_t cmdsize; 5328 5329 mb = &pmb->u.mb; 5330 5331 ndlp = (struct lpfc_nodelist *) pmb->context2; 5332 rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff); 5333 oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff); 5334 pmb->context1 = NULL; 5335 pmb->context2 = NULL; 5336 5337 if (mb->mbxStatus) { 5338 mempool_free(pmb, phba->mbox_mem_pool); 5339 return; 5340 } 5341 5342 cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t); 5343 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 5344 lpfc_max_els_tries, ndlp, 5345 ndlp->nlp_DID, ELS_CMD_ACC); 5346 5347 /* Decrement the ndlp reference count from previous mbox command */ 5348 lpfc_nlp_put(ndlp); 5349 5350 if (!elsiocb) { 5351 mempool_free(pmb, phba->mbox_mem_pool); 5352 return; 5353 } 5354 5355 icmd = &elsiocb->iocb; 5356 icmd->ulpContext = rxid; 5357 icmd->unsli3.rcvsli3.ox_id = oxid; 5358 5359 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5360 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5361 pcmd += sizeof(uint32_t); /* Skip past command */ 5362 rls_rsp = (struct RLS_RSP *)pcmd; 5363 5364 rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt); 5365 rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt); 5366 rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt); 5367 rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt); 5368 rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord); 5369 rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt); 5370 mempool_free(pmb, phba->mbox_mem_pool); 5371 /* Xmit ELS RLS ACC response tag <ulpIoTag> */ 5372 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 5373 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, " 5374 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 5375 elsiocb->iotag, elsiocb->iocb.ulpContext, 5376 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5377 ndlp->nlp_rpi); 5378 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5379 phba->fc_stat.elsXmitACC++; 5380 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR) 5381 lpfc_els_free_iocb(phba, elsiocb); 5382 } 5383 5384 /** 5385 * lpfc_els_rsp_rps_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd 5386 * @phba: pointer to lpfc hba data structure. 5387 * @pmb: pointer to the driver internal queue element for mailbox command. 5388 * 5389 * This routine is the completion callback function for the MBX_READ_LNK_STAT 5390 * mailbox command. This callback function is to actually send the Accept 5391 * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It 5392 * collects the link statistics from the completion of the MBX_READ_LNK_STAT 5393 * mailbox command, constructs the RPS response with the link statistics 5394 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC 5395 * response to the RPS. 
5396 * 5397 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 5398 * will be incremented by 1 for holding the ndlp and the reference to ndlp 5399 * will be stored into the context1 field of the IOCB for the completion 5400 * callback function to the RPS Accept Response ELS IOCB command. 5401 * 5402 **/ 5403 static void 5404 lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 5405 { 5406 MAILBOX_t *mb; 5407 IOCB_t *icmd; 5408 RPS_RSP *rps_rsp; 5409 uint8_t *pcmd; 5410 struct lpfc_iocbq *elsiocb; 5411 struct lpfc_nodelist *ndlp; 5412 uint16_t status; 5413 uint16_t oxid; 5414 uint16_t rxid; 5415 uint32_t cmdsize; 5416 5417 mb = &pmb->u.mb; 5418 5419 ndlp = (struct lpfc_nodelist *) pmb->context2; 5420 rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff); 5421 oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff); 5422 pmb->context1 = NULL; 5423 pmb->context2 = NULL; 5424 5425 if (mb->mbxStatus) { 5426 mempool_free(pmb, phba->mbox_mem_pool); 5427 return; 5428 } 5429 5430 cmdsize = sizeof(RPS_RSP) + sizeof(uint32_t); 5431 mempool_free(pmb, phba->mbox_mem_pool); 5432 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 5433 lpfc_max_els_tries, ndlp, 5434 ndlp->nlp_DID, ELS_CMD_ACC); 5435 5436 /* Decrement the ndlp reference count from previous mbox command */ 5437 lpfc_nlp_put(ndlp); 5438 5439 if (!elsiocb) 5440 return; 5441 5442 icmd = &elsiocb->iocb; 5443 icmd->ulpContext = rxid; 5444 icmd->unsli3.rcvsli3.ox_id = oxid; 5445 5446 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5447 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5448 pcmd += sizeof(uint32_t); /* Skip past command */ 5449 rps_rsp = (RPS_RSP *)pcmd; 5450 5451 if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) 5452 status = 0x10; 5453 else 5454 status = 0x8; 5455 if (phba->pport->fc_flag & FC_FABRIC) 5456 status |= 0x4; 5457 5458 rps_rsp->rsvd1 = 0; 5459 rps_rsp->portStatus = cpu_to_be16(status); 5460 rps_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt); 5461 rps_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt); 5462 rps_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt); 5463 rps_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt); 5464 rps_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord); 5465 rps_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt); 5466 /* Xmit ELS RPS ACC response tag <ulpIoTag> */ 5467 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 5468 "0118 Xmit ELS RPS ACC response tag x%x xri x%x, " 5469 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 5470 elsiocb->iotag, elsiocb->iocb.ulpContext, 5471 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5472 ndlp->nlp_rpi); 5473 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5474 phba->fc_stat.elsXmitACC++; 5475 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR) 5476 lpfc_els_free_iocb(phba, elsiocb); 5477 return; 5478 } 5479 5480 /** 5481 * lpfc_els_rcv_rls - Process an unsolicited rls iocb 5482 * @vport: pointer to a host virtual N_Port data structure. 5483 * @cmdiocb: pointer to lpfc command iocb data structure. 5484 * @ndlp: pointer to a node-list data structure. 5485 * 5486 * This routine processes Read Port Status (RPL) IOCB received as an 5487 * ELS unsolicited event. It first checks the remote port state. If the 5488 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE 5489 * state, it invokes the lpfc_els_rsl_reject() routine to send the reject 5490 * response. 
Otherwise, it issues the MBX_READ_LNK_STAT mailbox command
5491 * for reading the HBA link statistics. The callback function,
5492 * lpfc_els_rsp_rls_acc(), set on the MBX_READ_LNK_STAT mailbox command,
5493 * actually sends out the RLS Accept (ACC) response.
5494 *
5495 * Return codes
5496 * 0 - Successfully processed rls iocb (currently always return 0)
5497 **/
5498 static int
5499 lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5500 struct lpfc_nodelist *ndlp)
5501 {
5502 struct lpfc_hba *phba = vport->phba;
5503 LPFC_MBOXQ_t *mbox;
5504 struct lpfc_dmabuf *pcmd;
5505 struct ls_rjt stat;
5506
5507 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
5508 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
5509 /* reject the unsolicited RLS request and done with it */
5510 goto reject_out;
5511
5512 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
5513
5514 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
5515 if (mbox) {
5516 lpfc_read_lnk_stat(phba, mbox);
5517 mbox->context1 = (void *)((unsigned long)
5518 ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
5519 cmdiocb->iocb.ulpContext)); /* rx_id */
5520 mbox->context2 = lpfc_nlp_get(ndlp);
5521 mbox->vport = vport;
5522 mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
5523 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
5524 != MBX_NOT_FINISHED)
5525 /* Mbox completion will send ELS Response */
5526 return 0;
5527 /* Decrement reference count used for the failed mbox
5528 * command.
5529 */
5530 lpfc_nlp_put(ndlp);
5531 mempool_free(mbox, phba->mbox_mem_pool);
5532 }
5533 reject_out:
5534 /* issue rejection response */
5535 stat.un.b.lsRjtRsvd0 = 0;
5536 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
5537 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
5538 stat.un.b.vendorUnique = 0;
5539 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
5540 return 0;
5541 }
5542
5543 /**
5544 * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb
5545 * @vport: pointer to a host virtual N_Port data structure.
5546 * @cmdiocb: pointer to lpfc command iocb data structure.
5547 * @ndlp: pointer to a node-list data structure.
5548 *
5549 * This routine processes Read Timeout Value (RTV) IOCB received as an
5550 * ELS unsolicited event. It first checks the remote port state. If the
5551 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
5552 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
5553 * response. Otherwise, it sends the Accept (ACC) response to a Read Timeout
5554 * Value (RTV) unsolicited IOCB event.
5555 *
5556 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
5557 * will be incremented by 1 for holding the ndlp and the reference to ndlp
5558 * will be stored into the context1 field of the IOCB for the completion
5559 * callback function to the RTV Accept Response ELS IOCB command.
5560 * 5561 * Return codes 5562 * 0 - Successfully processed rtv iocb (currently always return 0) 5563 **/ 5564 static int 5565 lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5566 struct lpfc_nodelist *ndlp) 5567 { 5568 struct lpfc_hba *phba = vport->phba; 5569 struct ls_rjt stat; 5570 struct RTV_RSP *rtv_rsp; 5571 uint8_t *pcmd; 5572 struct lpfc_iocbq *elsiocb; 5573 uint32_t cmdsize; 5574 5575 5576 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 5577 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) 5578 /* reject the unsolicited RPS request and done with it */ 5579 goto reject_out; 5580 5581 cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t); 5582 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 5583 lpfc_max_els_tries, ndlp, 5584 ndlp->nlp_DID, ELS_CMD_ACC); 5585 5586 if (!elsiocb) 5587 return 1; 5588 5589 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5590 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5591 pcmd += sizeof(uint32_t); /* Skip past command */ 5592 5593 /* use the command's xri in the response */ 5594 elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext; /* Xri / rx_id */ 5595 elsiocb->iocb.unsli3.rcvsli3.ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id; 5596 5597 rtv_rsp = (struct RTV_RSP *)pcmd; 5598 5599 /* populate RTV payload */ 5600 rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */ 5601 rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov); 5602 bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0); 5603 bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */ 5604 rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov); 5605 5606 /* Xmit ELS RLS ACC response tag <ulpIoTag> */ 5607 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 5608 "2875 Xmit ELS RTV ACC response tag x%x xri x%x, " 5609 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, " 5610 "Data: x%x x%x x%x\n", 5611 elsiocb->iotag, elsiocb->iocb.ulpContext, 5612 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5613 ndlp->nlp_rpi, 5614 rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov); 5615 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5616 phba->fc_stat.elsXmitACC++; 5617 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR) 5618 lpfc_els_free_iocb(phba, elsiocb); 5619 return 0; 5620 5621 reject_out: 5622 /* issue rejection response */ 5623 stat.un.b.lsRjtRsvd0 = 0; 5624 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 5625 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 5626 stat.un.b.vendorUnique = 0; 5627 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 5628 return 0; 5629 } 5630 5631 /* lpfc_els_rcv_rps - Process an unsolicited rps iocb 5632 * @vport: pointer to a host virtual N_Port data structure. 5633 * @cmdiocb: pointer to lpfc command iocb data structure. 5634 * @ndlp: pointer to a node-list data structure. 5635 * 5636 * This routine processes Read Port Status (RPS) IOCB received as an 5637 * ELS unsolicited event. It first checks the remote port state. If the 5638 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE 5639 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject 5640 * response. Otherwise, it issue the MBX_READ_LNK_STAT mailbox command 5641 * for reading the HBA link statistics. It is for the callback function, 5642 * lpfc_els_rsp_rps_acc(), set to the MBX_READ_LNK_STAT mailbox command 5643 * to actually sending out RPS Accept (ACC) response. 
5644 *
5645 * Return codes
5646 * 0 - Successfully processed rps iocb (currently always return 0)
5647 **/
5648 static int
5649 lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5650 struct lpfc_nodelist *ndlp)
5651 {
5652 struct lpfc_hba *phba = vport->phba;
5653 uint32_t *lp;
5654 uint8_t flag;
5655 LPFC_MBOXQ_t *mbox;
5656 struct lpfc_dmabuf *pcmd;
5657 RPS *rps;
5658 struct ls_rjt stat;
5659
5660 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
5661 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
5662 /* reject the unsolicited RPS request and done with it */
5663 goto reject_out;
5664
5665 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
5666 lp = (uint32_t *) pcmd->virt;
5667 flag = (be32_to_cpu(*lp++) & 0xf);
5668 rps = (RPS *) lp;
5669
5670 if ((flag == 0) ||
5671 ((flag == 1) && (be32_to_cpu(rps->un.portNum) == 0)) ||
5672 ((flag == 2) && (memcmp(&rps->un.portName, &vport->fc_portname,
5673 sizeof(struct lpfc_name)) == 0))) {
5674
5675 printk("Fix me....\n");
5676 dump_stack();
5677 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
5678 if (mbox) {
5679 lpfc_read_lnk_stat(phba, mbox);
5680 mbox->context1 = (void *)((unsigned long)
5681 ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
5682 cmdiocb->iocb.ulpContext)); /* rx_id */
5683 mbox->context2 = lpfc_nlp_get(ndlp);
5684 mbox->vport = vport;
5685 mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;
5686 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
5687 != MBX_NOT_FINISHED)
5688 /* Mbox completion will send ELS Response */
5689 return 0;
5690 /* Decrement reference count used for the failed mbox
5691 * command.
5692 */
5693 lpfc_nlp_put(ndlp);
5694 mempool_free(mbox, phba->mbox_mem_pool);
5695 }
5696 }
5697
5698 reject_out:
5699 /* issue rejection response */
5700 stat.un.b.lsRjtRsvd0 = 0;
5701 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
5702 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
5703 stat.un.b.vendorUnique = 0;
5704 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
5705 return 0;
5706 }
5707
5708 /* lpfc_issue_els_rrq - Issue an els rrq command iocb
5709 * @vport: pointer to a host virtual N_Port data structure.
5710 * @ndlp: pointer to a node-list data structure.
5711 * @did: DID of the target.
5712 * @rrq: Pointer to the rrq struct.
5713 *
5714 * Build an ELS RRQ command and send it to the target. If the issue_iocb is
5715 * successful, the completion handler will clear the RRQ.
5716 *
5717 * Return codes
5718 * 0 - Successfully sent rrq els iocb.
5719 * 1 - Failed to send rrq els iocb.
5720 **/ 5721 static int 5722 lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 5723 uint32_t did, struct lpfc_node_rrq *rrq) 5724 { 5725 struct lpfc_hba *phba = vport->phba; 5726 struct RRQ *els_rrq; 5727 IOCB_t *icmd; 5728 struct lpfc_iocbq *elsiocb; 5729 uint8_t *pcmd; 5730 uint16_t cmdsize; 5731 int ret; 5732 5733 5734 if (ndlp != rrq->ndlp) 5735 ndlp = rrq->ndlp; 5736 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) 5737 return 1; 5738 5739 /* If ndlp is not NULL, we will bump the reference count on it */ 5740 cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ)); 5741 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did, 5742 ELS_CMD_RRQ); 5743 if (!elsiocb) 5744 return 1; 5745 5746 icmd = &elsiocb->iocb; 5747 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5748 5749 /* For RRQ request, remainder of payload is Exchange IDs */ 5750 *((uint32_t *) (pcmd)) = ELS_CMD_RRQ; 5751 pcmd += sizeof(uint32_t); 5752 els_rrq = (struct RRQ *) pcmd; 5753 5754 bf_set(rrq_oxid, els_rrq, phba->sli4_hba.xri_ids[rrq->xritag]); 5755 bf_set(rrq_rxid, els_rrq, rrq->rxid); 5756 bf_set(rrq_did, els_rrq, vport->fc_myDID); 5757 els_rrq->rrq = cpu_to_be32(els_rrq->rrq); 5758 els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg); 5759 5760 5761 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 5762 "Issue RRQ: did:x%x", 5763 did, rrq->xritag, rrq->rxid); 5764 elsiocb->context_un.rrq = rrq; 5765 elsiocb->iocb_cmpl = lpfc_cmpl_els_rrq; 5766 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5767 5768 if (ret == IOCB_ERROR) { 5769 lpfc_els_free_iocb(phba, elsiocb); 5770 return 1; 5771 } 5772 return 0; 5773 } 5774 5775 /** 5776 * lpfc_send_rrq - Sends ELS RRQ if needed. 5777 * @phba: pointer to lpfc hba data structure. 5778 * @rrq: pointer to the active rrq. 5779 * 5780 * This routine will call the lpfc_issue_els_rrq if the rrq is 5781 * still active for the xri. If this function returns a failure then 5782 * the caller needs to clean up the RRQ by calling lpfc_clr_active_rrq. 5783 * 5784 * Returns 0 Success. 5785 * 1 Failure. 5786 **/ 5787 int 5788 lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq) 5789 { 5790 struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport, 5791 rrq->nlp_DID); 5792 if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag)) 5793 return lpfc_issue_els_rrq(rrq->vport, ndlp, 5794 rrq->nlp_DID, rrq); 5795 else 5796 return 1; 5797 } 5798 5799 /** 5800 * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command 5801 * @vport: pointer to a host virtual N_Port data structure. 5802 * @cmdsize: size of the ELS command. 5803 * @oldiocb: pointer to the original lpfc command iocb data structure. 5804 * @ndlp: pointer to a node-list data structure. 5805 * 5806 * This routine issuees an Accept (ACC) Read Port List (RPL) ELS command. 5807 * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL. 5808 * 5809 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 5810 * will be incremented by 1 for holding the ndlp and the reference to ndlp 5811 * will be stored into the context1 field of the IOCB for the completion 5812 * callback function to the RPL Accept Response ELS command. 
5813 * 5814 * Return code 5815 * 0 - Successfully issued ACC RPL ELS command 5816 * 1 - Failed to issue ACC RPL ELS command 5817 **/ 5818 static int 5819 lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize, 5820 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 5821 { 5822 struct lpfc_hba *phba = vport->phba; 5823 IOCB_t *icmd, *oldcmd; 5824 RPL_RSP rpl_rsp; 5825 struct lpfc_iocbq *elsiocb; 5826 uint8_t *pcmd; 5827 5828 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 5829 ndlp->nlp_DID, ELS_CMD_ACC); 5830 5831 if (!elsiocb) 5832 return 1; 5833 5834 icmd = &elsiocb->iocb; 5835 oldcmd = &oldiocb->iocb; 5836 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5837 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 5838 5839 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5840 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5841 pcmd += sizeof(uint16_t); 5842 *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize); 5843 pcmd += sizeof(uint16_t); 5844 5845 /* Setup the RPL ACC payload */ 5846 rpl_rsp.listLen = be32_to_cpu(1); 5847 rpl_rsp.index = 0; 5848 rpl_rsp.port_num_blk.portNum = 0; 5849 rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID); 5850 memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname, 5851 sizeof(struct lpfc_name)); 5852 memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t)); 5853 /* Xmit ELS RPL ACC response tag <ulpIoTag> */ 5854 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5855 "0120 Xmit ELS RPL ACC response tag x%x " 5856 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " 5857 "rpi x%x\n", 5858 elsiocb->iotag, elsiocb->iocb.ulpContext, 5859 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5860 ndlp->nlp_rpi); 5861 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5862 phba->fc_stat.elsXmitACC++; 5863 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == 5864 IOCB_ERROR) { 5865 lpfc_els_free_iocb(phba, elsiocb); 5866 return 1; 5867 } 5868 return 0; 5869 } 5870 5871 /** 5872 * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb 5873 * @vport: pointer to a host virtual N_Port data structure. 5874 * @cmdiocb: pointer to lpfc command iocb data structure. 5875 * @ndlp: pointer to a node-list data structure. 5876 * 5877 * This routine processes Read Port List (RPL) IOCB received as an ELS 5878 * unsolicited event. It first checks the remote port state. If the remote 5879 * port is not in NLP_STE_UNMAPPED_NODE and NLP_STE_MAPPED_NODE states, it 5880 * invokes the lpfc_els_rsp_reject() routine to send reject response. 5881 * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine 5882 * to accept the RPL. 
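 * Since only one port entry is reported, the size of the RPL Accept payload
 * is derived from the maxsize field advertised in the received RPL request.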
5883 * 5884 * Return code 5885 * 0 - Successfully processed rpl iocb (currently always return 0) 5886 **/ 5887 static int 5888 lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5889 struct lpfc_nodelist *ndlp) 5890 { 5891 struct lpfc_dmabuf *pcmd; 5892 uint32_t *lp; 5893 uint32_t maxsize; 5894 uint16_t cmdsize; 5895 RPL *rpl; 5896 struct ls_rjt stat; 5897 5898 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 5899 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { 5900 /* issue rejection response */ 5901 stat.un.b.lsRjtRsvd0 = 0; 5902 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 5903 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 5904 stat.un.b.vendorUnique = 0; 5905 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 5906 NULL); 5907 /* rejected the unsolicited RPL request and done with it */ 5908 return 0; 5909 } 5910 5911 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 5912 lp = (uint32_t *) pcmd->virt; 5913 rpl = (RPL *) (lp + 1); 5914 maxsize = be32_to_cpu(rpl->maxsize); 5915 5916 /* We support only one port */ 5917 if ((rpl->index == 0) && 5918 ((maxsize == 0) || 5919 ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) { 5920 cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP); 5921 } else { 5922 cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t); 5923 } 5924 lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp); 5925 5926 return 0; 5927 } 5928 5929 /** 5930 * lpfc_els_rcv_farp - Process an unsolicited farp request els command 5931 * @vport: pointer to a virtual N_Port data structure. 5932 * @cmdiocb: pointer to lpfc command iocb data structure. 5933 * @ndlp: pointer to a node-list data structure. 5934 * 5935 * This routine processes Fibre Channel Address Resolution Protocol 5936 * (FARP) Request IOCB received as an ELS unsolicited event. Currently, 5937 * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such, 5938 * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the 5939 * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the 5940 * remote PortName is compared against the FC PortName stored in the @vport 5941 * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is 5942 * compared against the FC NodeName stored in the @vport data structure. 5943 * If any of these matches and the FARP_REQUEST_FARPR flag is set in the 5944 * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is 5945 * invoked to send out FARP Response to the remote node. Before sending the 5946 * FARP Response, however, the FARP_REQUEST_PLOGI flag is check in the FARP 5947 * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi() 5948 * routine is invoked to log into the remote port first. 
5949 * 5950 * Return code 5951 * 0 - Either the FARP Match Mode not supported or successfully processed 5952 **/ 5953 static int 5954 lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5955 struct lpfc_nodelist *ndlp) 5956 { 5957 struct lpfc_dmabuf *pcmd; 5958 uint32_t *lp; 5959 IOCB_t *icmd; 5960 FARP *fp; 5961 uint32_t cmd, cnt, did; 5962 5963 icmd = &cmdiocb->iocb; 5964 did = icmd->un.elsreq64.remoteID; 5965 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 5966 lp = (uint32_t *) pcmd->virt; 5967 5968 cmd = *lp++; 5969 fp = (FARP *) lp; 5970 /* FARP-REQ received from DID <did> */ 5971 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5972 "0601 FARP-REQ received from DID x%x\n", did); 5973 /* We will only support match on WWPN or WWNN */ 5974 if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) { 5975 return 0; 5976 } 5977 5978 cnt = 0; 5979 /* If this FARP command is searching for my portname */ 5980 if (fp->Mflags & FARP_MATCH_PORT) { 5981 if (memcmp(&fp->RportName, &vport->fc_portname, 5982 sizeof(struct lpfc_name)) == 0) 5983 cnt = 1; 5984 } 5985 5986 /* If this FARP command is searching for my nodename */ 5987 if (fp->Mflags & FARP_MATCH_NODE) { 5988 if (memcmp(&fp->RnodeName, &vport->fc_nodename, 5989 sizeof(struct lpfc_name)) == 0) 5990 cnt = 1; 5991 } 5992 5993 if (cnt) { 5994 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || 5995 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) { 5996 /* Log back into the node before sending the FARP. */ 5997 if (fp->Rflags & FARP_REQUEST_PLOGI) { 5998 ndlp->nlp_prev_state = ndlp->nlp_state; 5999 lpfc_nlp_set_state(vport, ndlp, 6000 NLP_STE_PLOGI_ISSUE); 6001 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 6002 } 6003 6004 /* Send a FARP response to that node */ 6005 if (fp->Rflags & FARP_REQUEST_FARPR) 6006 lpfc_issue_els_farpr(vport, did, 0); 6007 } 6008 } 6009 return 0; 6010 } 6011 6012 /** 6013 * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb 6014 * @vport: pointer to a host virtual N_Port data structure. 6015 * @cmdiocb: pointer to lpfc command iocb data structure. 6016 * @ndlp: pointer to a node-list data structure. 6017 * 6018 * This routine processes Fibre Channel Address Resolution Protocol 6019 * Response (FARPR) IOCB received as an ELS unsolicited event. It simply 6020 * invokes the lpfc_els_rsp_acc() routine to the remote node to accept 6021 * the FARP response request. 6022 * 6023 * Return code 6024 * 0 - Successfully processed FARPR IOCB (currently always return 0) 6025 **/ 6026 static int 6027 lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 6028 struct lpfc_nodelist *ndlp) 6029 { 6030 struct lpfc_dmabuf *pcmd; 6031 uint32_t *lp; 6032 IOCB_t *icmd; 6033 uint32_t cmd, did; 6034 6035 icmd = &cmdiocb->iocb; 6036 did = icmd->un.elsreq64.remoteID; 6037 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 6038 lp = (uint32_t *) pcmd->virt; 6039 6040 cmd = *lp++; 6041 /* FARP-RSP received from DID <did> */ 6042 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6043 "0600 FARP-RSP received from DID x%x\n", did); 6044 /* ACCEPT the Farp resp request */ 6045 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 6046 6047 return 0; 6048 } 6049 6050 /** 6051 * lpfc_els_rcv_fan - Process an unsolicited fan iocb command 6052 * @vport: pointer to a host virtual N_Port data structure. 6053 * @cmdiocb: pointer to lpfc command iocb data structure. 6054 * @fan_ndlp: pointer to a node-list data structure. 
6055 *
6056 * This routine processes a Fabric Address Notification (FAN) IOCB
6057 * command received as an ELS unsolicited event. The FAN ELS command will
6058 * only be processed on a physical port (i.e., the @vport represents the
6059 * physical port). The fabric NodeName and PortName from the FAN IOCB are
6060 * compared against those in the phba data structure. If either of them is
6061 * different, the lpfc_initial_flogi() routine is invoked to initialize
6062 * Fabric Login (FLOGI) to the fabric to start the discovery over. Otherwise,
6063 * if both of those are identical, the lpfc_issue_fabric_reglogin() routine
6064 * is invoked to register login to the fabric.
6065 *
6066 * Return code
6067 * 0 - Successfully processed fan iocb (currently always return 0).
6068 **/
6069 static int
6070 lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6071 struct lpfc_nodelist *fan_ndlp)
6072 {
6073 struct lpfc_hba *phba = vport->phba;
6074 uint32_t *lp;
6075 FAN *fp;
6076
6077 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n");
6078 lp = (uint32_t *)((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
6079 fp = (FAN *) ++lp;
6080 /* FAN received; Fan does not have a reply sequence */
6081 if ((vport == phba->pport) &&
6082 (vport->port_state == LPFC_LOCAL_CFG_LINK)) {
6083 if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName,
6084 sizeof(struct lpfc_name))) ||
6085 (memcmp(&phba->fc_fabparam.portName, &fp->FportName,
6086 sizeof(struct lpfc_name)))) {
6087 /* This port has switched fabrics. FLOGI is required */
6088 lpfc_issue_init_vfi(vport);
6089 } else {
6090 /* FAN verified - skip FLOGI */
6091 vport->fc_myDID = vport->fc_prevDID;
6092 if (phba->sli_rev < LPFC_SLI_REV4)
6093 lpfc_issue_fabric_reglogin(vport);
6094 else {
6095 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6096 "3138 Need register VFI: (x%x/%x)\n",
6097 vport->fc_prevDID, vport->fc_myDID);
6098 lpfc_issue_reg_vfi(vport);
6099 }
6100 }
6101 }
6102 return 0;
6103 }
6104
6105 /**
6106 * lpfc_els_timeout - Handler function for the els timer
6107 * @ptr: holder for the timer function associated data.
6108 *
6109 * This routine is invoked by the ELS timer after timeout. It posts the ELS
6110 * timer timeout event by setting the WORKER_ELS_TMO bit to the work port
6111 * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake
6112 * up the worker thread. The worker thread will then invoke the routine
6113 * lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO.
6114 **/
6115 void
6116 lpfc_els_timeout(unsigned long ptr)
6117 {
6118 struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
6119 struct lpfc_hba *phba = vport->phba;
6120 uint32_t tmo_posted;
6121 unsigned long iflag;
6122
6123 spin_lock_irqsave(&vport->work_port_lock, iflag);
6124 tmo_posted = vport->work_port_events & WORKER_ELS_TMO;
6125 if (!tmo_posted)
6126 vport->work_port_events |= WORKER_ELS_TMO;
6127 spin_unlock_irqrestore(&vport->work_port_lock, iflag);
6128
6129 if (!tmo_posted)
6130 lpfc_worker_wake_up(phba);
6131 return;
6132 }
6133
6134
6135 /**
6136 * lpfc_els_timeout_handler - Process an els timeout event
6137 * @vport: pointer to a virtual N_Port data structure.
6138 *
6139 * This routine is the actual handler function that processes an ELS timeout
6140 * event. It walks the ELS ring to get and abort all the IOCBs (except the
6141 * ABORT/CLOSE/FARP/FARPR/FDISC), which are associated with the @vport by
6142 * invoking the lpfc_sli_issue_abort_iotag() routine.
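 * ELS IOCBs whose driver timeout has not yet expired are not aborted; their
 * drvrTimeout is decremented instead. The ELS timer is re-armed as long as
 * the ELS transmit completion queue is not empty.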
6143 **/ 6144 void 6145 lpfc_els_timeout_handler(struct lpfc_vport *vport) 6146 { 6147 struct lpfc_hba *phba = vport->phba; 6148 struct lpfc_sli_ring *pring; 6149 struct lpfc_iocbq *tmp_iocb, *piocb; 6150 IOCB_t *cmd = NULL; 6151 struct lpfc_dmabuf *pcmd; 6152 uint32_t els_command = 0; 6153 uint32_t timeout; 6154 uint32_t remote_ID = 0xffffffff; 6155 LIST_HEAD(txcmplq_completions); 6156 LIST_HEAD(abort_list); 6157 6158 6159 timeout = (uint32_t)(phba->fc_ratov << 1); 6160 6161 pring = &phba->sli.ring[LPFC_ELS_RING]; 6162 6163 spin_lock_irq(&phba->hbalock); 6164 list_splice_init(&pring->txcmplq, &txcmplq_completions); 6165 spin_unlock_irq(&phba->hbalock); 6166 6167 list_for_each_entry_safe(piocb, tmp_iocb, &txcmplq_completions, list) { 6168 cmd = &piocb->iocb; 6169 6170 if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 || 6171 piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || 6172 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) 6173 continue; 6174 6175 if (piocb->vport != vport) 6176 continue; 6177 6178 pcmd = (struct lpfc_dmabuf *) piocb->context2; 6179 if (pcmd) 6180 els_command = *(uint32_t *) (pcmd->virt); 6181 6182 if (els_command == ELS_CMD_FARP || 6183 els_command == ELS_CMD_FARPR || 6184 els_command == ELS_CMD_FDISC) 6185 continue; 6186 6187 if (piocb->drvrTimeout > 0) { 6188 if (piocb->drvrTimeout >= timeout) 6189 piocb->drvrTimeout -= timeout; 6190 else 6191 piocb->drvrTimeout = 0; 6192 continue; 6193 } 6194 6195 remote_ID = 0xffffffff; 6196 if (cmd->ulpCommand != CMD_GEN_REQUEST64_CR) 6197 remote_ID = cmd->un.elsreq64.remoteID; 6198 else { 6199 struct lpfc_nodelist *ndlp; 6200 ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext); 6201 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) 6202 remote_ID = ndlp->nlp_DID; 6203 } 6204 list_add_tail(&piocb->dlist, &abort_list); 6205 } 6206 spin_lock_irq(&phba->hbalock); 6207 list_splice(&txcmplq_completions, &pring->txcmplq); 6208 spin_unlock_irq(&phba->hbalock); 6209 6210 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { 6211 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 6212 "0127 ELS timeout Data: x%x x%x x%x " 6213 "x%x\n", els_command, 6214 remote_ID, cmd->ulpCommand, cmd->ulpIoTag); 6215 spin_lock_irq(&phba->hbalock); 6216 list_del_init(&piocb->dlist); 6217 lpfc_sli_issue_abort_iotag(phba, pring, piocb); 6218 spin_unlock_irq(&phba->hbalock); 6219 } 6220 6221 if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt) 6222 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout); 6223 } 6224 6225 /** 6226 * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport 6227 * @vport: pointer to a host virtual N_Port data structure. 6228 * 6229 * This routine is used to clean up all the outstanding ELS commands on a 6230 * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport() 6231 * routine. After that, it walks the ELS transmit queue to remove all the 6232 * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. For 6233 * the IOCBs with a non-NULL completion callback function, the callback 6234 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and 6235 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion 6236 * callback function, the IOCB will simply be released. 
Finally, it walks
6237 * the ELS transmit completion queue to issue an abort IOCB to any transmit
6238 * completion queue IOCB that is associated with the @vport and is not
6239 * an IOCB from libdfc (i.e., the management plane IOCBs that are not
6240 * part of the discovery state machine) out to HBA by invoking the
6241 * lpfc_sli_issue_abort_iotag() routine. Note that although this function
6242 * issues an abort IOCB for each transmit completion queued IOCB, it does not
6243 * guarantee that the IOCBs are aborted when this function returns.
6244 **/
6245 void
6246 lpfc_els_flush_cmd(struct lpfc_vport *vport)
6247 {
6248 LIST_HEAD(completions);
6249 struct lpfc_hba *phba = vport->phba;
6250 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
6251 struct lpfc_iocbq *tmp_iocb, *piocb;
6252 IOCB_t *cmd = NULL;
6253
6254 lpfc_fabric_abort_vport(vport);
6255
6256 spin_lock_irq(&phba->hbalock);
6257 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
6258 cmd = &piocb->iocb;
6259
6260 if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
6261 continue;
6262 }
6263
6264 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
6265 if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
6266 cmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
6267 cmd->ulpCommand == CMD_CLOSE_XRI_CN ||
6268 cmd->ulpCommand == CMD_ABORT_XRI_CN)
6269 continue;
6270
6271 if (piocb->vport != vport)
6272 continue;
6273
6274 list_move_tail(&piocb->list, &completions);
6275 pring->txq_cnt--;
6276 }
6277
6278 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
6279 if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
6280 continue;
6281 }
6282
6283 if (piocb->vport != vport)
6284 continue;
6285
6286 lpfc_sli_issue_abort_iotag(phba, pring, piocb);
6287 }
6288 spin_unlock_irq(&phba->hbalock);
6289
6290 /* Cancel all the IOCBs from the completions list */
6291 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
6292 IOERR_SLI_ABORTED);
6293
6294 return;
6295 }
6296
6297 /**
6298 * lpfc_els_flush_all_cmd - Clean up all the outstanding els commands to a HBA
6299 * @phba: pointer to lpfc hba data structure.
6300 *
6301 * This routine is used to clean up all the outstanding ELS commands on a
6302 * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba()
6303 * routine. After that, it walks the ELS transmit queue to remove all the
6304 * IOCBs to the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For
6305 * the IOCBs with the completion callback function associated, the callback
6306 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
6307 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without the completion
6308 * callback function associated, the IOCB will simply be released. Finally,
6309 * it walks the ELS transmit completion queue to issue an abort IOCB to any
6310 * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the
6311 * management plane IOCBs that are not part of the discovery state machine)
6312 * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine.
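 * As with lpfc_els_flush_cmd(), issuing these abort IOCBs does not guarantee
 * that the affected IOCBs have completed aborting by the time this routine
 * returns.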
6313 **/ 6314 void 6315 lpfc_els_flush_all_cmd(struct lpfc_hba *phba) 6316 { 6317 LIST_HEAD(completions); 6318 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 6319 struct lpfc_iocbq *tmp_iocb, *piocb; 6320 IOCB_t *cmd = NULL; 6321 6322 lpfc_fabric_abort_hba(phba); 6323 spin_lock_irq(&phba->hbalock); 6324 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) { 6325 cmd = &piocb->iocb; 6326 if (piocb->iocb_flag & LPFC_IO_LIBDFC) 6327 continue; 6328 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */ 6329 if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN || 6330 cmd->ulpCommand == CMD_QUE_RING_BUF64_CN || 6331 cmd->ulpCommand == CMD_CLOSE_XRI_CN || 6332 cmd->ulpCommand == CMD_ABORT_XRI_CN) 6333 continue; 6334 list_move_tail(&piocb->list, &completions); 6335 pring->txq_cnt--; 6336 } 6337 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 6338 if (piocb->iocb_flag & LPFC_IO_LIBDFC) 6339 continue; 6340 lpfc_sli_issue_abort_iotag(phba, pring, piocb); 6341 } 6342 spin_unlock_irq(&phba->hbalock); 6343 6344 /* Cancel all the IOCBs from the completions list */ 6345 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 6346 IOERR_SLI_ABORTED); 6347 6348 return; 6349 } 6350 6351 /** 6352 * lpfc_send_els_failure_event - Posts an ELS command failure event 6353 * @phba: Pointer to hba context object. 6354 * @cmdiocbp: Pointer to command iocb which reported error. 6355 * @rspiocbp: Pointer to response iocb which reported error. 6356 * 6357 * This function sends an event when there is an ELS command 6358 * failure. 6359 **/ 6360 void 6361 lpfc_send_els_failure_event(struct lpfc_hba *phba, 6362 struct lpfc_iocbq *cmdiocbp, 6363 struct lpfc_iocbq *rspiocbp) 6364 { 6365 struct lpfc_vport *vport = cmdiocbp->vport; 6366 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6367 struct lpfc_lsrjt_event lsrjt_event; 6368 struct lpfc_fabric_event_header fabric_event; 6369 struct ls_rjt stat; 6370 struct lpfc_nodelist *ndlp; 6371 uint32_t *pcmd; 6372 6373 ndlp = cmdiocbp->context1; 6374 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) 6375 return; 6376 6377 if (rspiocbp->iocb.ulpStatus == IOSTAT_LS_RJT) { 6378 lsrjt_event.header.event_type = FC_REG_ELS_EVENT; 6379 lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV; 6380 memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname, 6381 sizeof(struct lpfc_name)); 6382 memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename, 6383 sizeof(struct lpfc_name)); 6384 pcmd = (uint32_t *) (((struct lpfc_dmabuf *) 6385 cmdiocbp->context2)->virt); 6386 lsrjt_event.command = (pcmd != NULL) ? 
*pcmd : 0; 6387 stat.un.lsRjtError = be32_to_cpu(rspiocbp->iocb.un.ulpWord[4]); 6388 lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode; 6389 lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp; 6390 fc_host_post_vendor_event(shost, 6391 fc_get_event_number(), 6392 sizeof(lsrjt_event), 6393 (char *)&lsrjt_event, 6394 LPFC_NL_VENDOR_ID); 6395 return; 6396 } 6397 if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) || 6398 (rspiocbp->iocb.ulpStatus == IOSTAT_FABRIC_BSY)) { 6399 fabric_event.event_type = FC_REG_FABRIC_EVENT; 6400 if (rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) 6401 fabric_event.subcategory = LPFC_EVENT_PORT_BUSY; 6402 else 6403 fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY; 6404 memcpy(fabric_event.wwpn, &ndlp->nlp_portname, 6405 sizeof(struct lpfc_name)); 6406 memcpy(fabric_event.wwnn, &ndlp->nlp_nodename, 6407 sizeof(struct lpfc_name)); 6408 fc_host_post_vendor_event(shost, 6409 fc_get_event_number(), 6410 sizeof(fabric_event), 6411 (char *)&fabric_event, 6412 LPFC_NL_VENDOR_ID); 6413 return; 6414 } 6415 6416 } 6417 6418 /** 6419 * lpfc_send_els_event - Posts unsolicited els event 6420 * @vport: Pointer to vport object. 6421 * @ndlp: Pointer FC node object. 6422 * @cmd: ELS command code. 6423 * 6424 * This function posts an event when there is an incoming 6425 * unsolicited ELS command. 6426 **/ 6427 static void 6428 lpfc_send_els_event(struct lpfc_vport *vport, 6429 struct lpfc_nodelist *ndlp, 6430 uint32_t *payload) 6431 { 6432 struct lpfc_els_event_header *els_data = NULL; 6433 struct lpfc_logo_event *logo_data = NULL; 6434 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6435 6436 if (*payload == ELS_CMD_LOGO) { 6437 logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL); 6438 if (!logo_data) { 6439 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 6440 "0148 Failed to allocate memory " 6441 "for LOGO event\n"); 6442 return; 6443 } 6444 els_data = &logo_data->header; 6445 } else { 6446 els_data = kmalloc(sizeof(struct lpfc_els_event_header), 6447 GFP_KERNEL); 6448 if (!els_data) { 6449 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 6450 "0149 Failed to allocate memory " 6451 "for ELS event\n"); 6452 return; 6453 } 6454 } 6455 els_data->event_type = FC_REG_ELS_EVENT; 6456 switch (*payload) { 6457 case ELS_CMD_PLOGI: 6458 els_data->subcategory = LPFC_EVENT_PLOGI_RCV; 6459 break; 6460 case ELS_CMD_PRLO: 6461 els_data->subcategory = LPFC_EVENT_PRLO_RCV; 6462 break; 6463 case ELS_CMD_ADISC: 6464 els_data->subcategory = LPFC_EVENT_ADISC_RCV; 6465 break; 6466 case ELS_CMD_LOGO: 6467 els_data->subcategory = LPFC_EVENT_LOGO_RCV; 6468 /* Copy the WWPN in the LOGO payload */ 6469 memcpy(logo_data->logo_wwpn, &payload[2], 6470 sizeof(struct lpfc_name)); 6471 break; 6472 default: 6473 kfree(els_data); 6474 return; 6475 } 6476 memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name)); 6477 memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name)); 6478 if (*payload == ELS_CMD_LOGO) { 6479 fc_host_post_vendor_event(shost, 6480 fc_get_event_number(), 6481 sizeof(struct lpfc_logo_event), 6482 (char *)logo_data, 6483 LPFC_NL_VENDOR_ID); 6484 kfree(logo_data); 6485 } else { 6486 fc_host_post_vendor_event(shost, 6487 fc_get_event_number(), 6488 sizeof(struct lpfc_els_event_header), 6489 (char *)els_data, 6490 LPFC_NL_VENDOR_ID); 6491 kfree(els_data); 6492 } 6493 6494 return; 6495 } 6496 6497 6498 /** 6499 * lpfc_els_unsol_buffer - Process an unsolicited event data buffer 6500 * @phba: pointer to lpfc hba data structure. 6501 * @pring: pointer to a SLI ring. 
6502 * @vport: pointer to a host virtual N_Port data structure. 6503 * @elsiocb: pointer to lpfc els command iocb data structure. 6504 * 6505 * This routine is used for processing the IOCB associated with a unsolicited 6506 * event. It first determines whether there is an existing ndlp that matches 6507 * the DID from the unsolicited IOCB. If not, it will create a new one with 6508 * the DID from the unsolicited IOCB. The ELS command from the unsolicited 6509 * IOCB is then used to invoke the proper routine and to set up proper state 6510 * of the discovery state machine. 6511 **/ 6512 static void 6513 lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 6514 struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb) 6515 { 6516 struct Scsi_Host *shost; 6517 struct lpfc_nodelist *ndlp; 6518 struct ls_rjt stat; 6519 uint32_t *payload; 6520 uint32_t cmd, did, newnode, rjt_err = 0; 6521 IOCB_t *icmd = &elsiocb->iocb; 6522 6523 if (!vport || !(elsiocb->context2)) 6524 goto dropit; 6525 6526 newnode = 0; 6527 payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt; 6528 cmd = *payload; 6529 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0) 6530 lpfc_post_buffer(phba, pring, 1); 6531 6532 did = icmd->un.rcvels.remoteID; 6533 if (icmd->ulpStatus) { 6534 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6535 "RCV Unsol ELS: status:x%x/x%x did:x%x", 6536 icmd->ulpStatus, icmd->un.ulpWord[4], did); 6537 goto dropit; 6538 } 6539 6540 /* Check to see if link went down during discovery */ 6541 if (lpfc_els_chk_latt(vport)) 6542 goto dropit; 6543 6544 /* Ignore traffic received during vport shutdown. */ 6545 if (vport->load_flag & FC_UNLOADING) 6546 goto dropit; 6547 6548 /* If NPort discovery is delayed drop incoming ELS */ 6549 if ((vport->fc_flag & FC_DISC_DELAYED) && 6550 (cmd != ELS_CMD_PLOGI)) 6551 goto dropit; 6552 6553 ndlp = lpfc_findnode_did(vport, did); 6554 if (!ndlp) { 6555 /* Cannot find existing Fabric ndlp, so allocate a new one */ 6556 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 6557 if (!ndlp) 6558 goto dropit; 6559 6560 lpfc_nlp_init(vport, ndlp, did); 6561 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 6562 newnode = 1; 6563 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 6564 ndlp->nlp_type |= NLP_FABRIC; 6565 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 6566 ndlp = lpfc_enable_node(vport, ndlp, 6567 NLP_STE_UNUSED_NODE); 6568 if (!ndlp) 6569 goto dropit; 6570 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 6571 newnode = 1; 6572 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 6573 ndlp->nlp_type |= NLP_FABRIC; 6574 } else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 6575 /* This is similar to the new node path */ 6576 ndlp = lpfc_nlp_get(ndlp); 6577 if (!ndlp) 6578 goto dropit; 6579 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 6580 newnode = 1; 6581 } 6582 6583 phba->fc_stat.elsRcvFrame++; 6584 6585 elsiocb->context1 = lpfc_nlp_get(ndlp); 6586 elsiocb->vport = vport; 6587 6588 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) { 6589 cmd &= ELS_CMD_MASK; 6590 } 6591 /* ELS command <elsCmd> received from NPORT <did> */ 6592 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6593 "0112 ELS command x%x received from NPORT x%x " 6594 "Data: x%x\n", cmd, did, vport->port_state); 6595 switch (cmd) { 6596 case ELS_CMD_PLOGI: 6597 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6598 "RCV PLOGI: did:x%x/ste:x%x flg:x%x", 6599 did, vport->port_state, ndlp->nlp_flag); 6600 6601 phba->fc_stat.elsRcvPLOGI++; 6602 ndlp = lpfc_plogi_confirm_nport(phba, payload, 
ndlp); 6603 6604 lpfc_send_els_event(vport, ndlp, payload); 6605 6606 /* If Nport discovery is delayed, reject PLOGIs */ 6607 if (vport->fc_flag & FC_DISC_DELAYED) { 6608 rjt_err = LSRJT_UNABLE_TPC; 6609 break; 6610 } 6611 if (vport->port_state < LPFC_DISC_AUTH) { 6612 if (!(phba->pport->fc_flag & FC_PT2PT) || 6613 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { 6614 rjt_err = LSRJT_UNABLE_TPC; 6615 break; 6616 } 6617 /* We get here, and drop thru, if we are PT2PT with 6618 * another NPort and the other side has initiated 6619 * the PLOGI before responding to our FLOGI. 6620 */ 6621 } 6622 6623 shost = lpfc_shost_from_vport(vport); 6624 spin_lock_irq(shost->host_lock); 6625 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE; 6626 spin_unlock_irq(shost->host_lock); 6627 6628 lpfc_disc_state_machine(vport, ndlp, elsiocb, 6629 NLP_EVT_RCV_PLOGI); 6630 6631 break; 6632 case ELS_CMD_FLOGI: 6633 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6634 "RCV FLOGI: did:x%x/ste:x%x flg:x%x", 6635 did, vport->port_state, ndlp->nlp_flag); 6636 6637 phba->fc_stat.elsRcvFLOGI++; 6638 lpfc_els_rcv_flogi(vport, elsiocb, ndlp); 6639 if (newnode) 6640 lpfc_nlp_put(ndlp); 6641 break; 6642 case ELS_CMD_LOGO: 6643 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6644 "RCV LOGO: did:x%x/ste:x%x flg:x%x", 6645 did, vport->port_state, ndlp->nlp_flag); 6646 6647 phba->fc_stat.elsRcvLOGO++; 6648 lpfc_send_els_event(vport, ndlp, payload); 6649 if (vport->port_state < LPFC_DISC_AUTH) { 6650 rjt_err = LSRJT_UNABLE_TPC; 6651 break; 6652 } 6653 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO); 6654 break; 6655 case ELS_CMD_PRLO: 6656 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6657 "RCV PRLO: did:x%x/ste:x%x flg:x%x", 6658 did, vport->port_state, ndlp->nlp_flag); 6659 6660 phba->fc_stat.elsRcvPRLO++; 6661 lpfc_send_els_event(vport, ndlp, payload); 6662 if (vport->port_state < LPFC_DISC_AUTH) { 6663 rjt_err = LSRJT_UNABLE_TPC; 6664 break; 6665 } 6666 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO); 6667 break; 6668 case ELS_CMD_RSCN: 6669 phba->fc_stat.elsRcvRSCN++; 6670 lpfc_els_rcv_rscn(vport, elsiocb, ndlp); 6671 if (newnode) 6672 lpfc_nlp_put(ndlp); 6673 break; 6674 case ELS_CMD_ADISC: 6675 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6676 "RCV ADISC: did:x%x/ste:x%x flg:x%x", 6677 did, vport->port_state, ndlp->nlp_flag); 6678 6679 lpfc_send_els_event(vport, ndlp, payload); 6680 phba->fc_stat.elsRcvADISC++; 6681 if (vport->port_state < LPFC_DISC_AUTH) { 6682 rjt_err = LSRJT_UNABLE_TPC; 6683 break; 6684 } 6685 lpfc_disc_state_machine(vport, ndlp, elsiocb, 6686 NLP_EVT_RCV_ADISC); 6687 break; 6688 case ELS_CMD_PDISC: 6689 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6690 "RCV PDISC: did:x%x/ste:x%x flg:x%x", 6691 did, vport->port_state, ndlp->nlp_flag); 6692 6693 phba->fc_stat.elsRcvPDISC++; 6694 if (vport->port_state < LPFC_DISC_AUTH) { 6695 rjt_err = LSRJT_UNABLE_TPC; 6696 break; 6697 } 6698 lpfc_disc_state_machine(vport, ndlp, elsiocb, 6699 NLP_EVT_RCV_PDISC); 6700 break; 6701 case ELS_CMD_FARPR: 6702 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6703 "RCV FARPR: did:x%x/ste:x%x flg:x%x", 6704 did, vport->port_state, ndlp->nlp_flag); 6705 6706 phba->fc_stat.elsRcvFARPR++; 6707 lpfc_els_rcv_farpr(vport, elsiocb, ndlp); 6708 break; 6709 case ELS_CMD_FARP: 6710 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6711 "RCV FARP: did:x%x/ste:x%x flg:x%x", 6712 did, vport->port_state, ndlp->nlp_flag); 6713 6714 phba->fc_stat.elsRcvFARP++; 6715 
lpfc_els_rcv_farp(vport, elsiocb, ndlp); 6716 break; 6717 case ELS_CMD_FAN: 6718 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6719 "RCV FAN: did:x%x/ste:x%x flg:x%x", 6720 did, vport->port_state, ndlp->nlp_flag); 6721 6722 phba->fc_stat.elsRcvFAN++; 6723 lpfc_els_rcv_fan(vport, elsiocb, ndlp); 6724 break; 6725 case ELS_CMD_PRLI: 6726 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6727 "RCV PRLI: did:x%x/ste:x%x flg:x%x", 6728 did, vport->port_state, ndlp->nlp_flag); 6729 6730 phba->fc_stat.elsRcvPRLI++; 6731 if (vport->port_state < LPFC_DISC_AUTH) { 6732 rjt_err = LSRJT_UNABLE_TPC; 6733 break; 6734 } 6735 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI); 6736 break; 6737 case ELS_CMD_LIRR: 6738 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6739 "RCV LIRR: did:x%x/ste:x%x flg:x%x", 6740 did, vport->port_state, ndlp->nlp_flag); 6741 6742 phba->fc_stat.elsRcvLIRR++; 6743 lpfc_els_rcv_lirr(vport, elsiocb, ndlp); 6744 if (newnode) 6745 lpfc_nlp_put(ndlp); 6746 break; 6747 case ELS_CMD_RLS: 6748 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6749 "RCV RLS: did:x%x/ste:x%x flg:x%x", 6750 did, vport->port_state, ndlp->nlp_flag); 6751 6752 phba->fc_stat.elsRcvRLS++; 6753 lpfc_els_rcv_rls(vport, elsiocb, ndlp); 6754 if (newnode) 6755 lpfc_nlp_put(ndlp); 6756 break; 6757 case ELS_CMD_RPS: 6758 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6759 "RCV RPS: did:x%x/ste:x%x flg:x%x", 6760 did, vport->port_state, ndlp->nlp_flag); 6761 6762 phba->fc_stat.elsRcvRPS++; 6763 lpfc_els_rcv_rps(vport, elsiocb, ndlp); 6764 if (newnode) 6765 lpfc_nlp_put(ndlp); 6766 break; 6767 case ELS_CMD_RPL: 6768 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6769 "RCV RPL: did:x%x/ste:x%x flg:x%x", 6770 did, vport->port_state, ndlp->nlp_flag); 6771 6772 phba->fc_stat.elsRcvRPL++; 6773 lpfc_els_rcv_rpl(vport, elsiocb, ndlp); 6774 if (newnode) 6775 lpfc_nlp_put(ndlp); 6776 break; 6777 case ELS_CMD_RNID: 6778 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6779 "RCV RNID: did:x%x/ste:x%x flg:x%x", 6780 did, vport->port_state, ndlp->nlp_flag); 6781 6782 phba->fc_stat.elsRcvRNID++; 6783 lpfc_els_rcv_rnid(vport, elsiocb, ndlp); 6784 if (newnode) 6785 lpfc_nlp_put(ndlp); 6786 break; 6787 case ELS_CMD_RTV: 6788 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6789 "RCV RTV: did:x%x/ste:x%x flg:x%x", 6790 did, vport->port_state, ndlp->nlp_flag); 6791 phba->fc_stat.elsRcvRTV++; 6792 lpfc_els_rcv_rtv(vport, elsiocb, ndlp); 6793 if (newnode) 6794 lpfc_nlp_put(ndlp); 6795 break; 6796 case ELS_CMD_RRQ: 6797 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6798 "RCV RRQ: did:x%x/ste:x%x flg:x%x", 6799 did, vport->port_state, ndlp->nlp_flag); 6800 6801 phba->fc_stat.elsRcvRRQ++; 6802 lpfc_els_rcv_rrq(vport, elsiocb, ndlp); 6803 if (newnode) 6804 lpfc_nlp_put(ndlp); 6805 break; 6806 case ELS_CMD_ECHO: 6807 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6808 "RCV ECHO: did:x%x/ste:x%x flg:x%x", 6809 did, vport->port_state, ndlp->nlp_flag); 6810 6811 phba->fc_stat.elsRcvECHO++; 6812 lpfc_els_rcv_echo(vport, elsiocb, ndlp); 6813 if (newnode) 6814 lpfc_nlp_put(ndlp); 6815 break; 6816 default: 6817 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6818 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x", 6819 cmd, did, vport->port_state); 6820 6821 /* Unsupported ELS command, reject */ 6822 rjt_err = LSRJT_CMD_UNSUPPORTED; 6823 6824 /* Unknown ELS command <elsCmd> received from NPORT <did> */ 6825 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 6826 "0115 Unknown ELS command x%x 
" 6827 "received from NPORT x%x\n", cmd, did); 6828 if (newnode) 6829 lpfc_nlp_put(ndlp); 6830 break; 6831 } 6832 6833 /* check if need to LS_RJT received ELS cmd */ 6834 if (rjt_err) { 6835 memset(&stat, 0, sizeof(stat)); 6836 stat.un.b.lsRjtRsnCode = rjt_err; 6837 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; 6838 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp, 6839 NULL); 6840 } 6841 6842 lpfc_nlp_put(elsiocb->context1); 6843 elsiocb->context1 = NULL; 6844 return; 6845 6846 dropit: 6847 if (vport && !(vport->load_flag & FC_UNLOADING)) 6848 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 6849 "0111 Dropping received ELS cmd " 6850 "Data: x%x x%x x%x\n", 6851 icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout); 6852 phba->fc_stat.elsRcvDrop++; 6853 } 6854 6855 /** 6856 * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring 6857 * @phba: pointer to lpfc hba data structure. 6858 * @pring: pointer to a SLI ring. 6859 * @elsiocb: pointer to lpfc els iocb data structure. 6860 * 6861 * This routine is used to process an unsolicited event received from a SLI 6862 * (Service Level Interface) ring. The actual processing of the data buffer 6863 * associated with the unsolicited event is done by invoking the routine 6864 * lpfc_els_unsol_buffer() after properly set up the iocb buffer from the 6865 * SLI ring on which the unsolicited event was received. 6866 **/ 6867 void 6868 lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 6869 struct lpfc_iocbq *elsiocb) 6870 { 6871 struct lpfc_vport *vport = phba->pport; 6872 IOCB_t *icmd = &elsiocb->iocb; 6873 dma_addr_t paddr; 6874 struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2; 6875 struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3; 6876 6877 elsiocb->context1 = NULL; 6878 elsiocb->context2 = NULL; 6879 elsiocb->context3 = NULL; 6880 6881 if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) { 6882 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); 6883 } else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT && 6884 (icmd->un.ulpWord[4] & IOERR_PARAM_MASK) == 6885 IOERR_RCV_BUFFER_WAITING) { 6886 phba->fc_stat.NoRcvBuf++; 6887 /* Not enough posted buffers; Try posting more buffers */ 6888 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) 6889 lpfc_post_buffer(phba, pring, 0); 6890 return; 6891 } 6892 6893 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 6894 (icmd->ulpCommand == CMD_IOCB_RCV_ELS64_CX || 6895 icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 6896 if (icmd->unsli3.rcvsli3.vpi == 0xffff) 6897 vport = phba->pport; 6898 else 6899 vport = lpfc_find_vport_by_vpid(phba, 6900 icmd->unsli3.rcvsli3.vpi); 6901 } 6902 6903 /* If there are no BDEs associated 6904 * with this IOCB, there is nothing to do. 6905 */ 6906 if (icmd->ulpBdeCount == 0) 6907 return; 6908 6909 /* type of ELS cmd is first 32bit word 6910 * in packet 6911 */ 6912 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 6913 elsiocb->context2 = bdeBuf1; 6914 } else { 6915 paddr = getPaddr(icmd->un.cont64[0].addrHigh, 6916 icmd->un.cont64[0].addrLow); 6917 elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring, 6918 paddr); 6919 } 6920 6921 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb); 6922 /* 6923 * The different unsolicited event handlers would tell us 6924 * if they are done with "mp" by setting context2 to NULL. 
 */
	if (elsiocb->context2) {
		lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2);
		elsiocb->context2 = NULL;
	}

	/* RCV_ELS64_CX provides for 2 BDEs - process 2nd if included */
	if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) &&
	    icmd->ulpBdeCount == 2) {
		elsiocb->context2 = bdeBuf2;
		lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
		/* free mp if we are done with it */
		if (elsiocb->context2) {
			lpfc_in_buf_free(phba, elsiocb->context2);
			elsiocb->context2 = NULL;
		}
	}
}

/**
 * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr
 * @phba: pointer to lpfc hba data structure.
 * @vport: pointer to a virtual N_Port data structure.
 *
 * This routine issues a Port Login (PLOGI) to the Name Server with
 * State Change Request (SCR) for a @vport. This routine will create an
 * ndlp for the Name Server associated with the @vport if such a node does
 * not already exist. The PLOGI to the Name Server is issued by invoking the
 * lpfc_issue_els_plogi() routine. If the Fabric-Device Management Interface
 * (FDMI) is configured for the @vport, an FDMI node will be created and
 * the PLOGI to FDMI is issued by invoking the lpfc_issue_els_plogi() routine.
 **/
void
lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp, *ndlp_fdmi;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	/*
	 * If the lpfc_delay_discovery parameter is set, the clean address
	 * bit is cleared, and the fc fabric parameters have changed, delay
	 * FC NPort discovery.
	 */
	spin_lock_irq(shost->host_lock);
	if (vport->fc_flag & FC_DISC_DELAYED) {
		spin_unlock_irq(shost->host_lock);
		mod_timer(&vport->delayed_disc_tmo,
			jiffies + HZ * phba->fc_ratov);
		return;
	}
	spin_unlock_irq(shost->host_lock);

	ndlp = lpfc_findnode_did(vport, NameServer_DID);
	if (!ndlp) {
		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp) {
			if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
				lpfc_disc_start(vport);
				return;
			}
			lpfc_vport_set_state(vport, FC_VPORT_FAILED);
			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
					"0251 NameServer login: no memory\n");
			return;
		}
		lpfc_nlp_init(vport, ndlp, NameServer_DID);
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
		if (!ndlp) {
			if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
				lpfc_disc_start(vport);
				return;
			}
			lpfc_vport_set_state(vport, FC_VPORT_FAILED);
			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
					"0348 NameServer login: node freed\n");
			return;
		}
	}
	ndlp->nlp_type |= NLP_FABRIC;

	lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);

	if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) {
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				"0252 Cannot issue NameServer login\n");
		return;
	}

	if (vport->cfg_fdmi_on) {
		/* If this is the first time, allocate an ndlp and initialize
		 * it. Otherwise, make sure the node is enabled and then do the
		 * login.
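		 *
		 * A rough sketch of that flow, mirroring the code below
		 * (not a separate API contract):
		 *
		 *	ndlp_fdmi = lpfc_findnode_did(vport, FDMI_DID);
		 *	if (!ndlp_fdmi)
		 *		allocate from phba->nlp_mem_pool + lpfc_nlp_init()
		 *	else if (!NLP_CHK_NODE_ACT(ndlp_fdmi))
		 *		ndlp_fdmi = lpfc_enable_node(vport, ndlp_fdmi,
		 *					     NLP_STE_NPR_NODE);
		 *	lpfc_nlp_set_state(vport, ndlp_fdmi, NLP_STE_PLOGI_ISSUE);
		 *	lpfc_issue_els_plogi(vport, ndlp_fdmi->nlp_DID, 0);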
7019 */ 7020 ndlp_fdmi = lpfc_findnode_did(vport, FDMI_DID); 7021 if (!ndlp_fdmi) { 7022 ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool, 7023 GFP_KERNEL); 7024 if (ndlp_fdmi) { 7025 lpfc_nlp_init(vport, ndlp_fdmi, FDMI_DID); 7026 ndlp_fdmi->nlp_type |= NLP_FABRIC; 7027 } else 7028 return; 7029 } 7030 if (!NLP_CHK_NODE_ACT(ndlp_fdmi)) 7031 ndlp_fdmi = lpfc_enable_node(vport, 7032 ndlp_fdmi, 7033 NLP_STE_NPR_NODE); 7034 7035 if (ndlp_fdmi) { 7036 lpfc_nlp_set_state(vport, ndlp_fdmi, 7037 NLP_STE_PLOGI_ISSUE); 7038 lpfc_issue_els_plogi(vport, ndlp_fdmi->nlp_DID, 0); 7039 } 7040 } 7041 } 7042 7043 /** 7044 * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport 7045 * @phba: pointer to lpfc hba data structure. 7046 * @pmb: pointer to the driver internal queue element for mailbox command. 7047 * 7048 * This routine is the completion callback function to register new vport 7049 * mailbox command. If the new vport mailbox command completes successfully, 7050 * the fabric registration login shall be performed on physical port (the 7051 * new vport created is actually a physical port, with VPI 0) or the port 7052 * login to Name Server for State Change Request (SCR) will be performed 7053 * on virtual port (real virtual port, with VPI greater than 0). 7054 **/ 7055 static void 7056 lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 7057 { 7058 struct lpfc_vport *vport = pmb->vport; 7059 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7060 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 7061 MAILBOX_t *mb = &pmb->u.mb; 7062 int rc; 7063 7064 spin_lock_irq(shost->host_lock); 7065 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 7066 spin_unlock_irq(shost->host_lock); 7067 7068 if (mb->mbxStatus) { 7069 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 7070 "0915 Register VPI failed : Status: x%x" 7071 " upd bit: x%x \n", mb->mbxStatus, 7072 mb->un.varRegVpi.upd); 7073 if (phba->sli_rev == LPFC_SLI_REV4 && 7074 mb->un.varRegVpi.upd) 7075 goto mbox_err_exit ; 7076 7077 switch (mb->mbxStatus) { 7078 case 0x11: /* unsupported feature */ 7079 case 0x9603: /* max_vpi exceeded */ 7080 case 0x9602: /* Link event since CLEAR_LA */ 7081 /* giving up on vport registration */ 7082 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 7083 spin_lock_irq(shost->host_lock); 7084 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 7085 spin_unlock_irq(shost->host_lock); 7086 lpfc_can_disctmo(vport); 7087 break; 7088 /* If reg_vpi fail with invalid VPI status, re-init VPI */ 7089 case 0x20: 7090 spin_lock_irq(shost->host_lock); 7091 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 7092 spin_unlock_irq(shost->host_lock); 7093 lpfc_init_vpi(phba, pmb, vport->vpi); 7094 pmb->vport = vport; 7095 pmb->mbox_cmpl = lpfc_init_vpi_cmpl; 7096 rc = lpfc_sli_issue_mbox(phba, pmb, 7097 MBX_NOWAIT); 7098 if (rc == MBX_NOT_FINISHED) { 7099 lpfc_printf_vlog(vport, 7100 KERN_ERR, LOG_MBOX, 7101 "2732 Failed to issue INIT_VPI" 7102 " mailbox command\n"); 7103 } else { 7104 lpfc_nlp_put(ndlp); 7105 return; 7106 } 7107 7108 default: 7109 /* Try to recover from this error */ 7110 if (phba->sli_rev == LPFC_SLI_REV4) 7111 lpfc_sli4_unreg_all_rpis(vport); 7112 lpfc_mbx_unreg_vpi(vport); 7113 spin_lock_irq(shost->host_lock); 7114 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 7115 spin_unlock_irq(shost->host_lock); 7116 if (vport->port_type == LPFC_PHYSICAL_PORT 7117 && !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) 7118 lpfc_issue_init_vfi(vport); 7119 else 7120 lpfc_initial_fdisc(vport); 7121 break; 7122 } 7123 } 
else { 7124 spin_lock_irq(shost->host_lock); 7125 vport->vpi_state |= LPFC_VPI_REGISTERED; 7126 spin_unlock_irq(shost->host_lock); 7127 if (vport == phba->pport) { 7128 if (phba->sli_rev < LPFC_SLI_REV4) 7129 lpfc_issue_fabric_reglogin(vport); 7130 else { 7131 /* 7132 * If the physical port is instantiated using 7133 * FDISC, do not start vport discovery. 7134 */ 7135 if (vport->port_state != LPFC_FDISC) 7136 lpfc_start_fdiscs(phba); 7137 lpfc_do_scr_ns_plogi(phba, vport); 7138 } 7139 } else 7140 lpfc_do_scr_ns_plogi(phba, vport); 7141 } 7142 mbox_err_exit: 7143 /* Now, we decrement the ndlp reference count held for this 7144 * callback function 7145 */ 7146 lpfc_nlp_put(ndlp); 7147 7148 mempool_free(pmb, phba->mbox_mem_pool); 7149 return; 7150 } 7151 7152 /** 7153 * lpfc_register_new_vport - Register a new vport with a HBA 7154 * @phba: pointer to lpfc hba data structure. 7155 * @vport: pointer to a host virtual N_Port data structure. 7156 * @ndlp: pointer to a node-list data structure. 7157 * 7158 * This routine registers the @vport as a new virtual port with a HBA. 7159 * It is done through a registering vpi mailbox command. 7160 **/ 7161 void 7162 lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport, 7163 struct lpfc_nodelist *ndlp) 7164 { 7165 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7166 LPFC_MBOXQ_t *mbox; 7167 7168 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7169 if (mbox) { 7170 lpfc_reg_vpi(vport, mbox); 7171 mbox->vport = vport; 7172 mbox->context2 = lpfc_nlp_get(ndlp); 7173 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport; 7174 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 7175 == MBX_NOT_FINISHED) { 7176 /* mailbox command not success, decrement ndlp 7177 * reference count for this command 7178 */ 7179 lpfc_nlp_put(ndlp); 7180 mempool_free(mbox, phba->mbox_mem_pool); 7181 7182 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 7183 "0253 Register VPI: Can't send mbox\n"); 7184 goto mbox_err_exit; 7185 } 7186 } else { 7187 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 7188 "0254 Register VPI: no memory\n"); 7189 goto mbox_err_exit; 7190 } 7191 return; 7192 7193 mbox_err_exit: 7194 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 7195 spin_lock_irq(shost->host_lock); 7196 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 7197 spin_unlock_irq(shost->host_lock); 7198 return; 7199 } 7200 7201 /** 7202 * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer 7203 * @phba: pointer to lpfc hba data structure. 7204 * 7205 * This routine cancels the retry delay timers to all the vports. 7206 **/ 7207 void 7208 lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba) 7209 { 7210 struct lpfc_vport **vports; 7211 struct lpfc_nodelist *ndlp; 7212 uint32_t link_state; 7213 int i; 7214 7215 /* Treat this failure as linkdown for all vports */ 7216 link_state = phba->link_state; 7217 lpfc_linkdown(phba); 7218 phba->link_state = link_state; 7219 7220 vports = lpfc_create_vport_work_array(phba); 7221 7222 if (vports) { 7223 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 7224 ndlp = lpfc_findnode_did(vports[i], Fabric_DID); 7225 if (ndlp) 7226 lpfc_cancel_retry_delay_tmo(vports[i], ndlp); 7227 lpfc_els_flush_cmd(vports[i]); 7228 } 7229 lpfc_destroy_vport_work_array(phba, vports); 7230 } 7231 } 7232 7233 /** 7234 * lpfc_retry_pport_discovery - Start timer to retry FLOGI. 7235 * @phba: pointer to lpfc hba data structure. 
 *
 * This routine aborts all pending discovery commands and
 * starts a timer to retry FLOGI for the physical port
 * discovery.
 **/
void
lpfc_retry_pport_discovery(struct lpfc_hba *phba)
{
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;

	/* Cancel all the vports' retry delay timers */
	lpfc_cancel_all_vport_retry_delay_timer(phba);

	/* If the fabric requires FLOGI, re-instantiate the physical login */
	ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
	if (!ndlp)
		return;

	shost = lpfc_shost_from_vport(phba->pport);
	mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_DELAY_TMO;
	spin_unlock_irq(shost->host_lock);
	ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
	phba->pport->port_state = LPFC_FLOGI;
	return;
}

/**
 * lpfc_fabric_login_reqd - Check if FLOGI required.
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to FDISC command iocb.
 * @rspiocb: pointer to FDISC response iocb.
 *
 * This routine checks if a FLOGI is required for FDISC
 * to succeed.
 **/
static int
lpfc_fabric_login_reqd(struct lpfc_hba *phba,
		       struct lpfc_iocbq *cmdiocb,
		       struct lpfc_iocbq *rspiocb)
{

	if ((rspiocb->iocb.ulpStatus != IOSTAT_FABRIC_RJT) ||
	    (rspiocb->iocb.un.ulpWord[4] != RJT_LOGIN_REQUIRED))
		return 0;
	else
		return 1;
}

/**
 * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is the completion callback function to a Fabric Discover
 * (FDISC) ELS command. Since all the FDISC ELS commands are issued
 * single threaded, each FDISC completion callback function will reset
 * the discovery timer for all vports so that the timers do not time out
 * unnecessarily. The function checks the FDISC IOCB status. If an error is
 * detected, the vport will be set to the FC_VPORT_FAILED state. Otherwise,
 * the vport will be set to the FC_VPORT_ACTIVE state. It then checks whether
 * the DID assigned to the vport has been changed with the completion of the
 * FDISC command. If so, both RPI (Remote Port Index) and VPI (Virtual Port
 * Index) are unregistered from the HBA, and then the lpfc_register_new_vport()
 * routine is invoked to register the new vport with the HBA. Otherwise, the
 * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name
 * Server for State Change Request (SCR).
 **/
static void
lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		    struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
	struct lpfc_nodelist *np;
	struct lpfc_nodelist *next_np;
	IOCB_t *irsp = &rspiocb->iocb;
	struct lpfc_iocbq *piocb;
	struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
	struct serv_parm *sp;
	uint8_t fabric_param_changed;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0123 FDISC completes.
x%x/x%x prevDID: x%x\n", 7324 irsp->ulpStatus, irsp->un.ulpWord[4], 7325 vport->fc_prevDID); 7326 /* Since all FDISCs are being single threaded, we 7327 * must reset the discovery timer for ALL vports 7328 * waiting to send FDISC when one completes. 7329 */ 7330 list_for_each_entry(piocb, &phba->fabric_iocb_list, list) { 7331 lpfc_set_disctmo(piocb->vport); 7332 } 7333 7334 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 7335 "FDISC cmpl: status:x%x/x%x prevdid:x%x", 7336 irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID); 7337 7338 if (irsp->ulpStatus) { 7339 7340 if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) { 7341 lpfc_retry_pport_discovery(phba); 7342 goto out; 7343 } 7344 7345 /* Check for retry */ 7346 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) 7347 goto out; 7348 /* FDISC failed */ 7349 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 7350 "0126 FDISC failed. (x%x/x%x)\n", 7351 irsp->ulpStatus, irsp->un.ulpWord[4]); 7352 goto fdisc_failed; 7353 } 7354 spin_lock_irq(shost->host_lock); 7355 vport->fc_flag &= ~FC_VPORT_CVL_RCVD; 7356 vport->fc_flag &= ~FC_VPORT_LOGO_RCVD; 7357 vport->fc_flag |= FC_FABRIC; 7358 if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP) 7359 vport->fc_flag |= FC_PUBLIC_LOOP; 7360 spin_unlock_irq(shost->host_lock); 7361 7362 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID; 7363 lpfc_vport_set_state(vport, FC_VPORT_ACTIVE); 7364 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 7365 sp = prsp->virt + sizeof(uint32_t); 7366 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp); 7367 memcpy(&vport->fabric_portname, &sp->portName, 7368 sizeof(struct lpfc_name)); 7369 memcpy(&vport->fabric_nodename, &sp->nodeName, 7370 sizeof(struct lpfc_name)); 7371 if (fabric_param_changed && 7372 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 7373 /* If our NportID changed, we need to ensure all 7374 * remaining NPORTs get unreg_login'ed so we can 7375 * issue unreg_vpi. 7376 */ 7377 list_for_each_entry_safe(np, next_np, 7378 &vport->fc_nodes, nlp_listp) { 7379 if (!NLP_CHK_NODE_ACT(ndlp) || 7380 (np->nlp_state != NLP_STE_NPR_NODE) || 7381 !(np->nlp_flag & NLP_NPR_ADISC)) 7382 continue; 7383 spin_lock_irq(shost->host_lock); 7384 np->nlp_flag &= ~NLP_NPR_ADISC; 7385 spin_unlock_irq(shost->host_lock); 7386 lpfc_unreg_rpi(vport, np); 7387 } 7388 lpfc_cleanup_pending_mbox(vport); 7389 7390 if (phba->sli_rev == LPFC_SLI_REV4) 7391 lpfc_sli4_unreg_all_rpis(vport); 7392 7393 lpfc_mbx_unreg_vpi(vport); 7394 spin_lock_irq(shost->host_lock); 7395 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 7396 if (phba->sli_rev == LPFC_SLI_REV4) 7397 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 7398 else 7399 vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG; 7400 spin_unlock_irq(shost->host_lock); 7401 } else if ((phba->sli_rev == LPFC_SLI_REV4) && 7402 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 7403 /* 7404 * Driver needs to re-reg VPI in order for f/w 7405 * to update the MAC address. 
7406 */ 7407 lpfc_register_new_vport(phba, vport, ndlp); 7408 goto out; 7409 } 7410 7411 if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI) 7412 lpfc_issue_init_vpi(vport); 7413 else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) 7414 lpfc_register_new_vport(phba, vport, ndlp); 7415 else 7416 lpfc_do_scr_ns_plogi(phba, vport); 7417 goto out; 7418 fdisc_failed: 7419 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 7420 /* Cancel discovery timer */ 7421 lpfc_can_disctmo(vport); 7422 lpfc_nlp_put(ndlp); 7423 out: 7424 lpfc_els_free_iocb(phba, cmdiocb); 7425 } 7426 7427 /** 7428 * lpfc_issue_els_fdisc - Issue a fdisc iocb command 7429 * @vport: pointer to a virtual N_Port data structure. 7430 * @ndlp: pointer to a node-list data structure. 7431 * @retry: number of retries to the command IOCB. 7432 * 7433 * This routine prepares and issues a Fabric Discover (FDISC) IOCB to 7434 * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb() 7435 * routine to issue the IOCB, which makes sure only one outstanding fabric 7436 * IOCB will be sent off HBA at any given time. 7437 * 7438 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 7439 * will be incremented by 1 for holding the ndlp and the reference to ndlp 7440 * will be stored into the context1 field of the IOCB for the completion 7441 * callback function to the FDISC ELS command. 7442 * 7443 * Return code 7444 * 0 - Successfully issued fdisc iocb command 7445 * 1 - Failed to issue fdisc iocb command 7446 **/ 7447 static int 7448 lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 7449 uint8_t retry) 7450 { 7451 struct lpfc_hba *phba = vport->phba; 7452 IOCB_t *icmd; 7453 struct lpfc_iocbq *elsiocb; 7454 struct serv_parm *sp; 7455 uint8_t *pcmd; 7456 uint16_t cmdsize; 7457 int did = ndlp->nlp_DID; 7458 int rc; 7459 7460 vport->port_state = LPFC_FDISC; 7461 vport->fc_myDID = 0; 7462 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 7463 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 7464 ELS_CMD_FDISC); 7465 if (!elsiocb) { 7466 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 7467 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 7468 "0255 Issue FDISC: no IOCB\n"); 7469 return 1; 7470 } 7471 7472 icmd = &elsiocb->iocb; 7473 icmd->un.elsreq64.myID = 0; 7474 icmd->un.elsreq64.fl = 1; 7475 7476 /* 7477 * SLI3 ports require a different context type value than SLI4. 7478 * Catch SLI3 ports here and override the prep. 
 */
	if (phba->sli_rev == LPFC_SLI_REV3) {
		icmd->ulpCt_h = 1;
		icmd->ulpCt_l = 0;
	}

	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
	*((uint32_t *) (pcmd)) = ELS_CMD_FDISC;
	pcmd += sizeof(uint32_t); /* CSP Word 1 */
	memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm));
	sp = (struct serv_parm *) pcmd;
	/* Setup CSPs accordingly for Fabric */
	sp->cmn.e_d_tov = 0;
	sp->cmn.w2.r_a_tov = 0;
	sp->cmn.virtual_fabric_support = 0;
	sp->cls1.classValid = 0;
	sp->cls2.seqDelivery = 1;
	sp->cls3.seqDelivery = 1;

	pcmd += sizeof(uint32_t); /* CSP Word 2 */
	pcmd += sizeof(uint32_t); /* CSP Word 3 */
	pcmd += sizeof(uint32_t); /* CSP Word 4 */
	pcmd += sizeof(uint32_t); /* Port Name */
	memcpy(pcmd, &vport->fc_portname, 8);
	pcmd += sizeof(uint32_t); /* Node Name */
	pcmd += sizeof(uint32_t); /* Node Name */
	memcpy(pcmd, &vport->fc_nodename, 8);

	lpfc_set_disctmo(vport);

	phba->fc_stat.elsXmitFDISC++;
	elsiocb->iocb_cmpl = lpfc_cmpl_els_fdisc;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			      "Issue FDISC: did:x%x",
			      did, 0, 0);

	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
	if (rc == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0256 Issue FDISC: Cannot send IOCB\n");
		return 1;
	}
	lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING);
	return 0;
}

/**
 * lpfc_cmpl_els_npiv_logo - Completion function for a vport LOGO command
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is the completion callback function for the issuing of a LOGO
 * ELS command off a vport. It frees the command IOCB and then decrements the
 * reference count held on the ndlp for this completion function, indicating
 * that the reference to the ndlp is no longer needed. Note that the
 * lpfc_els_free_iocb() routine decrements the ndlp reference held for this
 * callback function, and the additional explicit ndlp reference decrement
 * will trigger the actual release of the ndlp.
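 *
 * Reference-count sketch (informal, mirroring the body of this routine):
 *
 *	lpfc_els_free_iocb(phba, cmdiocb);	releases the IOCB's ndlp hold
 *	lpfc_nlp_put(ndlp);			the explicit put described above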
7541 **/ 7542 static void 7543 lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 7544 struct lpfc_iocbq *rspiocb) 7545 { 7546 struct lpfc_vport *vport = cmdiocb->vport; 7547 IOCB_t *irsp; 7548 struct lpfc_nodelist *ndlp; 7549 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7550 7551 ndlp = (struct lpfc_nodelist *)cmdiocb->context1; 7552 irsp = &rspiocb->iocb; 7553 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 7554 "LOGO npiv cmpl: status:x%x/x%x did:x%x", 7555 irsp->ulpStatus, irsp->un.ulpWord[4], irsp->un.rcvels.remoteID); 7556 7557 lpfc_els_free_iocb(phba, cmdiocb); 7558 vport->unreg_vpi_cmpl = VPORT_ERROR; 7559 7560 /* Trigger the release of the ndlp after logo */ 7561 lpfc_nlp_put(ndlp); 7562 7563 /* NPIV LOGO completes to NPort <nlp_DID> */ 7564 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7565 "2928 NPIV LOGO completes to NPort x%x " 7566 "Data: x%x x%x x%x x%x\n", 7567 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 7568 irsp->ulpTimeout, vport->num_disc_nodes); 7569 7570 if (irsp->ulpStatus == IOSTAT_SUCCESS) { 7571 spin_lock_irq(shost->host_lock); 7572 vport->fc_flag &= ~FC_FABRIC; 7573 spin_unlock_irq(shost->host_lock); 7574 } 7575 } 7576 7577 /** 7578 * lpfc_issue_els_npiv_logo - Issue a logo off a vport 7579 * @vport: pointer to a virtual N_Port data structure. 7580 * @ndlp: pointer to a node-list data structure. 7581 * 7582 * This routine issues a LOGO ELS command to an @ndlp off a @vport. 7583 * 7584 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 7585 * will be incremented by 1 for holding the ndlp and the reference to ndlp 7586 * will be stored into the context1 field of the IOCB for the completion 7587 * callback function to the LOGO ELS command. 7588 * 7589 * Return codes 7590 * 0 - Successfully issued logo off the @vport 7591 * 1 - Failed to issue logo off the @vport 7592 **/ 7593 int 7594 lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 7595 { 7596 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7597 struct lpfc_hba *phba = vport->phba; 7598 IOCB_t *icmd; 7599 struct lpfc_iocbq *elsiocb; 7600 uint8_t *pcmd; 7601 uint16_t cmdsize; 7602 7603 cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name); 7604 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID, 7605 ELS_CMD_LOGO); 7606 if (!elsiocb) 7607 return 1; 7608 7609 icmd = &elsiocb->iocb; 7610 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 7611 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 7612 pcmd += sizeof(uint32_t); 7613 7614 /* Fill in LOGO payload */ 7615 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 7616 pcmd += sizeof(uint32_t); 7617 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 7618 7619 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 7620 "Issue LOGO npiv did:x%x flg:x%x", 7621 ndlp->nlp_DID, ndlp->nlp_flag, 0); 7622 7623 elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo; 7624 spin_lock_irq(shost->host_lock); 7625 ndlp->nlp_flag |= NLP_LOGO_SND; 7626 spin_unlock_irq(shost->host_lock); 7627 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == 7628 IOCB_ERROR) { 7629 spin_lock_irq(shost->host_lock); 7630 ndlp->nlp_flag &= ~NLP_LOGO_SND; 7631 spin_unlock_irq(shost->host_lock); 7632 lpfc_els_free_iocb(phba, elsiocb); 7633 return 1; 7634 } 7635 return 0; 7636 } 7637 7638 /** 7639 * lpfc_fabric_block_timeout - Handler function to the fabric block timer 7640 * @ptr: holder for the timer function associated data. 
7641 * 7642 * This routine is invoked by the fabric iocb block timer after 7643 * timeout. It posts the fabric iocb block timeout event by setting the 7644 * WORKER_FABRIC_BLOCK_TMO bit to work port event bitmap and then invokes 7645 * lpfc_worker_wake_up() routine to wake up the worker thread. It is for 7646 * the worker thread to invoke the lpfc_unblock_fabric_iocbs() on the 7647 * posted event WORKER_FABRIC_BLOCK_TMO. 7648 **/ 7649 void 7650 lpfc_fabric_block_timeout(unsigned long ptr) 7651 { 7652 struct lpfc_hba *phba = (struct lpfc_hba *) ptr; 7653 unsigned long iflags; 7654 uint32_t tmo_posted; 7655 7656 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 7657 tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO; 7658 if (!tmo_posted) 7659 phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO; 7660 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); 7661 7662 if (!tmo_posted) 7663 lpfc_worker_wake_up(phba); 7664 return; 7665 } 7666 7667 /** 7668 * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list 7669 * @phba: pointer to lpfc hba data structure. 7670 * 7671 * This routine issues one fabric iocb from the driver internal list to 7672 * the HBA. It first checks whether it's ready to issue one fabric iocb to 7673 * the HBA (whether there is no outstanding fabric iocb). If so, it shall 7674 * remove one pending fabric iocb from the driver internal list and invokes 7675 * lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA. 7676 **/ 7677 static void 7678 lpfc_resume_fabric_iocbs(struct lpfc_hba *phba) 7679 { 7680 struct lpfc_iocbq *iocb; 7681 unsigned long iflags; 7682 int ret; 7683 IOCB_t *cmd; 7684 7685 repeat: 7686 iocb = NULL; 7687 spin_lock_irqsave(&phba->hbalock, iflags); 7688 /* Post any pending iocb to the SLI layer */ 7689 if (atomic_read(&phba->fabric_iocb_count) == 0) { 7690 list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb), 7691 list); 7692 if (iocb) 7693 /* Increment fabric iocb count to hold the position */ 7694 atomic_inc(&phba->fabric_iocb_count); 7695 } 7696 spin_unlock_irqrestore(&phba->hbalock, iflags); 7697 if (iocb) { 7698 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl; 7699 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb; 7700 iocb->iocb_flag |= LPFC_IO_FABRIC; 7701 7702 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD, 7703 "Fabric sched1: ste:x%x", 7704 iocb->vport->port_state, 0, 0); 7705 7706 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); 7707 7708 if (ret == IOCB_ERROR) { 7709 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl; 7710 iocb->fabric_iocb_cmpl = NULL; 7711 iocb->iocb_flag &= ~LPFC_IO_FABRIC; 7712 cmd = &iocb->iocb; 7713 cmd->ulpStatus = IOSTAT_LOCAL_REJECT; 7714 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED; 7715 iocb->iocb_cmpl(phba, iocb, iocb); 7716 7717 atomic_dec(&phba->fabric_iocb_count); 7718 goto repeat; 7719 } 7720 } 7721 7722 return; 7723 } 7724 7725 /** 7726 * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command 7727 * @phba: pointer to lpfc hba data structure. 7728 * 7729 * This routine unblocks the issuing fabric iocb command. The function 7730 * will clear the fabric iocb block bit and then invoke the routine 7731 * lpfc_resume_fabric_iocbs() to issue one of the pending fabric iocb 7732 * from the driver internal fabric iocb list. 
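 *
 * Informal sketch of the block/unblock cycle as implemented in this file:
 *
 *	lpfc_block_fabric_iocbs(phba);
 *		- sets FABRIC_COMANDS_BLOCKED and arms fabric_block_timer
 *		  (roughly 100 ms, see lpfc_block_fabric_iocbs())
 *	lpfc_fabric_block_timeout() -> worker thread ->
 *	lpfc_unblock_fabric_iocbs(phba);
 *		- clears the bit and calls lpfc_resume_fabric_iocbs()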
 **/
void
lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba)
{
	clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);

	lpfc_resume_fabric_iocbs(phba);
	return;
}

/**
 * lpfc_block_fabric_iocbs - Block issuing fabric iocb command
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine blocks the issuing of fabric iocbs for a specified amount of
 * time (currently 100 ms). This is done by setting the fabric iocb block bit
 * and setting up a timeout timer for 100 ms. While the block bit is set, no
 * more fabric iocbs will be issued out of the HBA.
 **/
static void
lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
{
	int blocked;

	blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
	/* Start a timer to unblock fabric iocbs after 100ms */
	if (!blocked)
		mod_timer(&phba->fabric_block_timer, jiffies + HZ/10);

	return;
}

/**
 * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is the callback function that is installed as the fabric
 * iocb's callback function pointer (iocb->iocb_cmpl). The original iocb's
 * callback function pointer has been stored in iocb->fabric_iocb_cmpl. This
 * callback function first restores and invokes the original iocb's callback
 * function and then invokes the lpfc_resume_fabric_iocbs() routine to issue
 * the next fabric-bound iocb from the driver internal fabric iocb list onto
 * the wire.
 **/
static void
lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		      struct lpfc_iocbq *rspiocb)
{
	struct ls_rjt stat;

	if ((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC)
		BUG();

	switch (rspiocb->iocb.ulpStatus) {
	case IOSTAT_NPORT_RJT:
	case IOSTAT_FABRIC_RJT:
		if (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
			lpfc_block_fabric_iocbs(phba);
		}
		break;

	case IOSTAT_NPORT_BSY:
	case IOSTAT_FABRIC_BSY:
		lpfc_block_fabric_iocbs(phba);
		break;

	case IOSTAT_LS_RJT:
		stat.un.lsRjtError =
			be32_to_cpu(rspiocb->iocb.un.ulpWord[4]);
		if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
		    (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
			lpfc_block_fabric_iocbs(phba);
		break;
	}

	if (atomic_read(&phba->fabric_iocb_count) == 0)
		BUG();

	cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl;
	cmdiocb->fabric_iocb_cmpl = NULL;
	cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
	cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb);

	atomic_dec(&phba->fabric_iocb_count);
	if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
		/* Post any pending iocbs to HBA */
		lpfc_resume_fabric_iocbs(phba);
	}
}

/**
 * lpfc_issue_fabric_iocb - Issue a fabric iocb command
 * @phba: pointer to lpfc hba data structure.
 * @iocb: pointer to lpfc command iocb data structure.
 *
 * This routine is used as the top-level API for issuing a fabric iocb command
 * such as FLOGI and FDISC. To accommodate certain switch fabrics, this driver
 * function makes sure that only one fabric-bound iocb will be outstanding at
 * any given time.
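 *
 * Typical use, as a hedged example taken from lpfc_issue_els_fdisc() in
 * this file:
 *
 *	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
 *				     ELS_CMD_FDISC);
 *	...
 *	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
 *	if (rc == IOCB_ERROR)
 *		lpfc_els_free_iocb(phba, elsiocb);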
 * To enforce this, the function first checks whether there is already an
 * outstanding fabric iocb on the wire. If so, it will put the newly issued
 * iocb onto the driver internal fabric iocb list, waiting to be issued later.
 * Otherwise, it will issue the iocb on the wire and update the fabric iocb
 * count to indicate that there is one fabric iocb on the wire.
 *
 * Note that this implementation can potentially send fabric IOCBs out of
 * order: the "ready" boolean is computed without checking that the internal
 * fabric IOCB list is empty. As a result, a fabric IOCB issued by this
 * routine may "jump" ahead of the fabric IOCBs already on the internal list.
 *
 * Return code
 * IOCB_SUCCESS - either fabric iocb put on the list or issued successfully
 * IOCB_ERROR - failed to issue fabric iocb
 **/
static int
lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
{
	unsigned long iflags;
	int ready;
	int ret;

	if (atomic_read(&phba->fabric_iocb_count) > 1)
		BUG();

	spin_lock_irqsave(&phba->hbalock, iflags);
	ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
		!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);

	if (ready)
		/* Increment fabric iocb count to hold the position */
		atomic_inc(&phba->fabric_iocb_count);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if (ready) {
		iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
		iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
		iocb->iocb_flag |= LPFC_IO_FABRIC;

		lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
				      "Fabric sched2: ste:x%x",
				      iocb->vport->port_state, 0, 0);

		ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);

		if (ret == IOCB_ERROR) {
			iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
			iocb->fabric_iocb_cmpl = NULL;
			iocb->iocb_flag &= ~LPFC_IO_FABRIC;
			atomic_dec(&phba->fabric_iocb_count);
		}
	} else {
		spin_lock_irqsave(&phba->hbalock, iflags);
		list_add_tail(&iocb->list, &phba->fabric_iocb_list);
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		ret = IOCB_SUCCESS;
	}
	return ret;
}

/**
 * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list
 * @vport: pointer to a virtual N_Port data structure.
 *
 * This routine aborts all the IOCBs associated with a @vport from the
 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
 * list, removes each IOCB associated with the @vport from the list, sets the
 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
 * associated with the IOCB.
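 *
 * In the cancelled IOCB's completion handler this shows up, roughly, as:
 *
 *	iocb.ulpStatus      == IOSTAT_LOCAL_REJECT
 *	iocb.un.ulpWord[4]  == IOERR_SLI_ABORTED
 *
 * (see the lpfc_sli_cancel_iocbs() call below).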
 **/
static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
{
	LIST_HEAD(completions);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *tmp_iocb, *piocb;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
				 list) {

		if (piocb->vport != vport)
			continue;

		list_move_tail(&piocb->list, &completions);
	}
	spin_unlock_irq(&phba->hbalock);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);
}

/**
 * lpfc_fabric_abort_nport - Abort an ndlp's iocbs from driver fabric iocb list
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine aborts all the IOCBs associated with an @ndlp from the
 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
 * list, removes each IOCB associated with the @ndlp from the list, sets the
 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
 * associated with the IOCB.
 **/
void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_hba *phba = ndlp->phba;
	struct lpfc_iocbq *tmp_iocb, *piocb;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
				 list) {
		if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) {

			list_move_tail(&piocb->list, &completions);
		}
	}
	spin_unlock_irq(&phba->hbalock);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);
}

/**
 * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine aborts all the IOCBs currently on the driver internal
 * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS
 * IOCB ring. This function takes the entire IOCB list off the fabric IOCB
 * list, removes the IOCBs from the list, sets the status field to
 * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with
 * each IOCB.
 **/
void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);

	spin_lock_irq(&phba->hbalock);
	list_splice_init(&phba->fabric_iocb_list, &completions);
	spin_unlock_irq(&phba->hbalock);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);
}

/**
 * lpfc_sli4_vport_delete_els_xri_aborted - Remove all ndlp references for vport
 * @vport: pointer to lpfc vport data structure.
 *
 * This routine is invoked by the vport cleanup for deletions and the cleanup
 * for an ndlp on removal.
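 *
 * Effectively, for each entry on the aborted ELS sgl list (a sketch of the
 * loop below):
 *
 *	if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport)
 *		sglq_entry->ndlp = NULL;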
7988 **/ 7989 void 7990 lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport) 7991 { 7992 struct lpfc_hba *phba = vport->phba; 7993 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 7994 unsigned long iflag = 0; 7995 7996 spin_lock_irqsave(&phba->hbalock, iflag); 7997 spin_lock(&phba->sli4_hba.abts_sgl_list_lock); 7998 list_for_each_entry_safe(sglq_entry, sglq_next, 7999 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { 8000 if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport) 8001 sglq_entry->ndlp = NULL; 8002 } 8003 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); 8004 spin_unlock_irqrestore(&phba->hbalock, iflag); 8005 return; 8006 } 8007 8008 /** 8009 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort 8010 * @phba: pointer to lpfc hba data structure. 8011 * @axri: pointer to the els xri abort wcqe structure. 8012 * 8013 * This routine is invoked by the worker thread to process a SLI4 slow-path 8014 * ELS aborted xri. 8015 **/ 8016 void 8017 lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba, 8018 struct sli4_wcqe_xri_aborted *axri) 8019 { 8020 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); 8021 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); 8022 uint16_t lxri = 0; 8023 8024 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 8025 unsigned long iflag = 0; 8026 struct lpfc_nodelist *ndlp; 8027 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 8028 8029 spin_lock_irqsave(&phba->hbalock, iflag); 8030 spin_lock(&phba->sli4_hba.abts_sgl_list_lock); 8031 list_for_each_entry_safe(sglq_entry, sglq_next, 8032 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { 8033 if (sglq_entry->sli4_xritag == xri) { 8034 list_del(&sglq_entry->list); 8035 ndlp = sglq_entry->ndlp; 8036 sglq_entry->ndlp = NULL; 8037 list_add_tail(&sglq_entry->list, 8038 &phba->sli4_hba.lpfc_sgl_list); 8039 sglq_entry->state = SGL_FREED; 8040 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); 8041 spin_unlock_irqrestore(&phba->hbalock, iflag); 8042 lpfc_set_rrq_active(phba, ndlp, 8043 sglq_entry->sli4_lxritag, 8044 rxid, 1); 8045 8046 /* Check if TXQ queue needs to be serviced */ 8047 if (pring->txq_cnt) 8048 lpfc_worker_wake_up(phba); 8049 return; 8050 } 8051 } 8052 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); 8053 lxri = lpfc_sli4_xri_inrange(phba, xri); 8054 if (lxri == NO_XRI) { 8055 spin_unlock_irqrestore(&phba->hbalock, iflag); 8056 return; 8057 } 8058 sglq_entry = __lpfc_get_active_sglq(phba, lxri); 8059 if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) { 8060 spin_unlock_irqrestore(&phba->hbalock, iflag); 8061 return; 8062 } 8063 sglq_entry->state = SGL_XRI_ABORTED; 8064 spin_unlock_irqrestore(&phba->hbalock, iflag); 8065 return; 8066 } 8067 8068 /* lpfc_sli_abts_recover_port - Recover a port that failed a BLS_ABORT req. 8069 * @vport: pointer to virtual port object. 8070 * @ndlp: nodelist pointer for the impacted node. 8071 * 8072 * The driver calls this routine in response to an SLI4 XRI ABORT CQE 8073 * or an SLI3 ASYNC_STATUS_CN event from the port. For either event, 8074 * the driver is required to send a LOGO to the remote node before it 8075 * attempts to recover its login to the remote node. 
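 *
 * Recovery sequence sketch (this is what the routine below does for a
 * MAPPED node):
 *
 *	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;   avoid ADISC on recovery
 *	lpfc_issue_els_logo(vport, ndlp, 0);       send the LOGO first
 *	lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);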
8076 */ 8077 void 8078 lpfc_sli_abts_recover_port(struct lpfc_vport *vport, 8079 struct lpfc_nodelist *ndlp) 8080 { 8081 struct Scsi_Host *shost; 8082 struct lpfc_hba *phba; 8083 unsigned long flags = 0; 8084 8085 shost = lpfc_shost_from_vport(vport); 8086 phba = vport->phba; 8087 if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) { 8088 lpfc_printf_log(phba, KERN_INFO, 8089 LOG_SLI, "3093 No rport recovery needed. " 8090 "rport in state 0x%x\n", ndlp->nlp_state); 8091 return; 8092 } 8093 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8094 "3094 Start rport recovery on shost id 0x%x " 8095 "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x " 8096 "flags 0x%x\n", 8097 shost->host_no, ndlp->nlp_DID, 8098 vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state, 8099 ndlp->nlp_flag); 8100 /* 8101 * The rport is not responding. Remove the FCP-2 flag to prevent 8102 * an ADISC in the follow-up recovery code. 8103 */ 8104 spin_lock_irqsave(shost->host_lock, flags); 8105 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 8106 spin_unlock_irqrestore(shost->host_lock, flags); 8107 lpfc_issue_els_logo(vport, ndlp, 0); 8108 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 8109 } 8110 8111