/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2012 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
/* See Fibre Channel protocol T11 FC-LS for details */
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
			  struct lpfc_iocbq *);
static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
			struct lpfc_iocbq *);
static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
				struct lpfc_nodelist *ndlp, uint8_t retry);
static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
				  struct lpfc_iocbq *iocb);

static int lpfc_max_els_tries = 3;

/**
 * lpfc_els_chk_latt - Check host link attention event for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine checks whether there is an outstanding host link
 * attention event during the discovery process with the @vport. It is done
 * by reading the HBA's Host Attention (HA) register. If there is any host
 * link attention event during this @vport's discovery process, the @vport
 * shall be marked as FC_ABORT_DISCOVERY, a host link attention clear shall
 * be issued if the link state is not already in host link cleared state,
 * and a return code shall indicate whether the host link attention event
 * had happened.
 *
 * Note that, if either the host link is in state LPFC_LINK_DOWN or the
 * @vport is in state LPFC_VPORT_READY, the request for checking host link
 * attention event will be ignored and a return code shall indicate no host
 * link attention event had happened.
 *
 * Return codes
 *   0 - no host link attention event happened
 *   1 - host link attention event happened
 **/
int
lpfc_els_chk_latt(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	uint32_t ha_copy;

	if (vport->port_state >= LPFC_VPORT_READY ||
	    phba->link_state == LPFC_LINK_DOWN ||
	    phba->sli_rev > LPFC_SLI_REV3)
		return 0;

	/* Read the HBA Host Attention Register */
	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		return 1;

	if (!(ha_copy & HA_LATT))
		return 0;

	/* Pending Link Event during Discovery */
	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
			 "0237 Pending Link Event during "
			 "Discovery: State x%x\n",
			 phba->pport->port_state);

	/* CLEAR_LA should re-enable link attention events and
	 * we should then immediately take a LATT event. The
	 * LATT processing should call lpfc_linkdown() which
	 * will cleanup any left over in-progress discovery
	 * events.
	 */
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_ABORT_DISCOVERY;
	spin_unlock_irq(shost->host_lock);

	if (phba->link_state != LPFC_CLEAR_LA)
		lpfc_issue_clear_la(phba, vport);

	return 1;
}

/**
 * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
 * @vport: pointer to a host virtual N_Port data structure.
 * @expectRsp: flag indicating whether response is expected.
 * @cmdSize: size of the ELS command.
 * @retry: number of retries to the command IOCB when it fails.
 * @ndlp: pointer to a node-list data structure.
 * @did: destination identifier.
 * @elscmd: the ELS command code.
 *
 * This routine allocates a lpfc-IOCB data structure from the driver
 * lpfc-IOCB free-list and prepares the IOCB with the parameters passed into
 * the routine, so that the discovery state machine can issue an Extended
 * Link Service (ELS) command. It is a generic lpfc-IOCB allocation and
 * preparation routine used by all the discovery state machine routines;
 * the ELS command-specific fields are set up later by the individual
 * discovery machine routines after calling this routine to allocate and
 * prepare a generic IOCB data structure. It fills in the
 * Buffer Descriptor Entries (BDEs), allocates buffers for both command
 * payload and response payload (if expected). The reference count on the
 * ndlp is incremented by 1 and the reference to the ndlp is put into
 * context1 of the IOCB data structure for this IOCB to hold the ndlp
 * reference for the command's callback function to access later.
 *
 * Return code
 *   Pointer to the newly allocated/prepared els iocb data structure
 *   NULL - when els iocb data structure allocation/preparation failed
 **/
struct lpfc_iocbq *
lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
		   uint16_t cmdSize, uint8_t retry,
		   struct lpfc_nodelist *ndlp, uint32_t did,
		   uint32_t elscmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
	struct ulp_bde64 *bpl;
	IOCB_t *icmd;


	if (!lpfc_is_link_up(phba))
		return NULL;

	/* Allocate buffer for command iocb */
	elsiocb = lpfc_sli_get_iocbq(phba);

	if (elsiocb == NULL)
		return NULL;

	/*
	 * If this command is for fabric controller and HBA running
	 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
	 */
	if ((did == Fabric_DID) &&
		(phba->hba_flag & HBA_FIP_SUPPORT) &&
		((elscmd == ELS_CMD_FLOGI) ||
		 (elscmd == ELS_CMD_FDISC) ||
		 (elscmd == ELS_CMD_LOGO)))
		switch (elscmd) {
		case ELS_CMD_FLOGI:
			elsiocb->iocb_flag |=
				((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
					& LPFC_FIP_ELS_ID_MASK);
			break;
		case ELS_CMD_FDISC:
			elsiocb->iocb_flag |=
				((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
					& LPFC_FIP_ELS_ID_MASK);
			break;
		case ELS_CMD_LOGO:
			elsiocb->iocb_flag |=
				((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
					& LPFC_FIP_ELS_ID_MASK);
			break;
		}
	else
		elsiocb->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;

	icmd = &elsiocb->iocb;

	/* fill in BDEs for command */
	/* Allocate buffer for command payload */
	pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (pcmd)
		pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
	if (!pcmd || !pcmd->virt)
		goto els_iocb_free_pcmb_exit;

	INIT_LIST_HEAD(&pcmd->list);

	/* Allocate buffer for response payload */
	if (expectRsp) {
		prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (prsp)
			prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
						     &prsp->phys);
		if (!prsp || !prsp->virt)
			goto els_iocb_free_prsp_exit;
		INIT_LIST_HEAD(&prsp->list);
	} else
		prsp = NULL;

	/* Allocate buffer for Buffer ptr list */
	pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (pbuflist)
		pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
						 &pbuflist->phys);
	if (!pbuflist || !pbuflist->virt)
		goto els_iocb_free_pbuf_exit;

	INIT_LIST_HEAD(&pbuflist->list);

	if (expectRsp) {
		icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
		icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
		icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
		icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));

		icmd->un.elsreq64.remoteID = did;	/* DID */
		icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
		icmd->ulpTimeout = phba->fc_ratov * 2;
	} else {
		icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
		icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
		icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
		icmd->un.xseq64.bdl.bdeSize = sizeof(struct ulp_bde64);
		icmd->un.xseq64.xmit_els_remoteID = did;	/* DID */
		icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
	}
	icmd->ulpBdeCount = 1;
	icmd->ulpLe = 1;
	icmd->ulpClass = CLASS3;

	/*
	 * If we have NPIV enabled, we want to send ELS traffic by VPI.
	 * For SLI4, since the driver controls VPIs we also want to include
	 * all ELS pt2pt protocol traffic as well.
	 */
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
		((phba->sli_rev == LPFC_SLI_REV4) &&
		    (vport->fc_flag & FC_PT2PT))) {

		if (expectRsp) {
			icmd->un.elsreq64.myID = vport->fc_myDID;

			/* For ELS_REQUEST64_CR, use the VPI by default */
			icmd->ulpContext = phba->vpi_ids[vport->vpi];
		}

		icmd->ulpCt_h = 0;
		/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
		if (elscmd == ELS_CMD_ECHO)
			icmd->ulpCt_l = 0; /* context = invalid RPI */
		else
			icmd->ulpCt_l = 1; /* context = VPI */
	}

	bpl = (struct ulp_bde64 *) pbuflist->virt;
	bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
	bpl->tus.f.bdeSize = cmdSize;
	bpl->tus.f.bdeFlags = 0;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

	if (expectRsp) {
		bpl++;
		bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
		bpl->tus.f.bdeSize = FCELSSIZE;
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
	}

	/* prevent preparing iocb with NULL ndlp reference */
	elsiocb->context1 = lpfc_nlp_get(ndlp);
	if (!elsiocb->context1)
		goto els_iocb_free_pbuf_exit;
	elsiocb->context2 = pcmd;
	elsiocb->context3 = pbuflist;
	elsiocb->retry = retry;
	elsiocb->vport = vport;
	elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;

	if (prsp) {
		list_add(&prsp->list, &pcmd->list);
	}
	if (expectRsp) {
		/* Xmit ELS command <elsCmd> to remote NPORT <did> */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "0116 Xmit ELS command x%x to remote "
				 "NPORT x%x I/O tag: x%x, port state: x%x\n",
				 elscmd, did, elsiocb->iotag,
				 vport->port_state);
	} else {
		/* Xmit ELS response <elsCmd> to remote NPORT <did> */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "0117 Xmit ELS response x%x to remote "
				 "NPORT x%x I/O tag: x%x, size: x%x\n",
				 elscmd, ndlp->nlp_DID, elsiocb->iotag,
				 cmdSize);
	}
	return elsiocb;

els_iocb_free_pbuf_exit:
	if (expectRsp)
		lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
	kfree(pbuflist);

els_iocb_free_prsp_exit:
	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
	kfree(prsp);

els_iocb_free_pcmb_exit:
	kfree(pcmd);
	lpfc_sli_release_iocbq(phba, elsiocb);
	return NULL;
}

/**
 * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a fabric registration login for a @vport. An
 * active ndlp node with Fabric_DID must already exist for this @vport.
 * The routine invokes two mailbox commands to carry out fabric registration
 * login through the HBA firmware: the first mailbox command requests the
 * HBA to perform link configuration for the @vport; and the second mailbox
 * command requests the HBA to perform the actual fabric registration login
 * with the @vport.
 *
 * Return code
 *   0 - successfully issued fabric registration login for @vport
 *   -ENXIO -- failed to issue fabric registration login for @vport
 **/
int
lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct serv_parm *sp;
	int rc;
	int err = 0;

	sp = &phba->fc_fabparam;
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		err = 1;
		goto fail;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		err = 2;
		goto fail;
	}

	vport->port_state = LPFC_FABRIC_CFG_LINK;
	lpfc_config_link(phba, mbox);
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->vport = vport;

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		err = 3;
		goto fail_free_mbox;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		err = 4;
		goto fail;
	}
	rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
			  ndlp->nlp_rpi);
	if (rc) {
		err = 5;
		goto fail_free_mbox;
	}

	mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
	mbox->vport = vport;
	/* increment the reference count on ndlp to hold reference
	 * for the callback routine.
	 */
	mbox->context2 = lpfc_nlp_get(ndlp);

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		err = 6;
		goto fail_issue_reg_login;
	}

	return 0;

fail_issue_reg_login:
	/* decrement the reference count on ndlp just incremented
	 * for the failed mbox command.
	 */
	lpfc_nlp_put(ndlp);
	mp = (struct lpfc_dmabuf *) mbox->context1;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
fail_free_mbox:
	mempool_free(mbox, phba->mbox_mem_pool);

fail:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
		"0249 Cannot issue Register Fabric login: Err %d\n", err);
	return -ENXIO;
}

/**
 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
 * the @vport. This mailbox command is necessary for SLI4 port only.
 *
 * Return code
 *   0 - successfully issued REG_VFI for @vport
 *   A failure code otherwise.
 **/
int
lpfc_issue_reg_vfi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_nodelist *ndlp;
	struct serv_parm *sp;
	struct lpfc_dmabuf *dmabuf;
	int rc = 0;

	sp = &phba->fc_fabparam;
	/* move forward in case of SLI4 FC port loopback test and pt2pt mode */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    !(phba->link_flag & LS_LOOPBACK_MODE) &&
	    !(vport->fc_flag & FC_PT2PT)) {
		ndlp = lpfc_findnode_did(vport, Fabric_DID);
		if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
			rc = -ENODEV;
			goto fail;
		}
	}

	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf) {
		rc = -ENOMEM;
		goto fail;
	}
	dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
	if (!dmabuf->virt) {
		rc = -ENOMEM;
		goto fail_free_dmabuf;
	}

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		rc = -ENOMEM;
		goto fail_free_coherent;
	}
	vport->port_state = LPFC_FABRIC_CFG_LINK;
	memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(vport->fc_sparam));
	lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
	mboxq->vport = vport;
	mboxq->context1 = dmabuf;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = -ENXIO;
		goto fail_free_mbox;
	}
	return 0;

fail_free_mbox:
	mempool_free(mboxq, phba->mbox_mem_pool);
fail_free_coherent:
	lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
fail_free_dmabuf:
	kfree(dmabuf);
fail:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
		"0289 Issue Register VFI failed: Err %d\n", rc);
	return rc;
}

/**
 * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for
 * the @vport. This mailbox command is necessary for SLI4 port only.
 *
 * Return code
 *   0 - successfully issued UNREG_VFI for @vport
 *   A failure code otherwise.
 **/
int
lpfc_issue_unreg_vfi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct Scsi_Host *shost;
	LPFC_MBOXQ_t *mboxq;
	int rc;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2556 UNREG_VFI mbox allocation failed "
				"HBA state x%x\n", phba->pport->port_state);
		return -ENOMEM;
	}

	lpfc_unreg_vfi(mboxq, vport);
	mboxq->vport = vport;
	mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl;

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2557 UNREG_VFI issue mbox failed rc x%x "
				"HBA state x%x\n",
				rc, phba->pport->port_state);
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EIO;
	}

	shost = lpfc_shost_from_vport(vport);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VFI_REGISTERED;
	spin_unlock_irq(shost->host_lock);
	return 0;
}

/**
 * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
 * @vport: pointer to a host virtual N_Port data structure.
 * @sp: pointer to service parameter data structure.
 *
 * This routine is called from FLOGI/FDISC completion handler functions.
 * It returns 1 when the FCID, Fabric portname, or Fabric nodename in the
 * completion service parameters has changed; otherwise it returns 0. This
 * function also sets a flag in the vport data structure to delay N_Port
 * discovery after the FLOGI/FDISC completion if the Clean Address bit in
 * the FLOGI/FDISC response is cleared and the FCID, Fabric portname, or
 * Fabric nodename has changed in the completion service parameters.
 *
 * Return code
 *   0 - FCID, Fabric Nodename, and Fabric portname are unchanged.
 *   1 - FCID, Fabric Nodename, or Fabric portname has changed.
 *
 **/
static uint8_t
lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
		struct serv_parm *sp)
{
	uint8_t fabric_param_changed = 0;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if ((vport->fc_prevDID != vport->fc_myDID) ||
		memcmp(&vport->fabric_portname, &sp->portName,
			sizeof(struct lpfc_name)) ||
		memcmp(&vport->fabric_nodename, &sp->nodeName,
			sizeof(struct lpfc_name)))
		fabric_param_changed = 1;

	/*
	 * Word 1 Bit 31 in common service parameter is overloaded.
	 * Word 1 Bit 31 in FLOGI request is multiple NPort request
	 * Word 1 Bit 31 in FLOGI response is clean address bit
	 *
	 * If fabric parameter is changed and clean address bit is
	 * cleared delay nport discovery if
	 * - vport->fc_prevDID != 0 (not initial discovery) OR
	 * - lpfc_delay_discovery module parameter is set.
	 */
	if (fabric_param_changed && !sp->cmn.clean_address_bit &&
	    (vport->fc_prevDID || lpfc_delay_discovery)) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_DISC_DELAYED;
		spin_unlock_irq(shost->host_lock);
	}

	return fabric_param_changed;
}


/**
 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 * @irsp: pointer to the IOCB within the lpfc response IOCB.
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into a fabric
 * port in a fabric topology. It properly sets up the parameters to the @ndlp
 * from the IOCB response. It also checks the newly assigned N_Port ID to the
 * @vport against the previously assigned N_Port ID. If it is different from
 * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine
 * is invoked on all the remaining nodes with the @vport to unregister the
 * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin()
 * is invoked to register login to the fabric.
 *
 * Return code
 *   0 - Success (currently, always return 0)
 **/
static int
lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   struct serv_parm *sp, IOCB_t *irsp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *np;
	struct lpfc_nodelist *next_np;
	uint8_t fabric_param_changed;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_FABRIC;
	spin_unlock_irq(shost->host_lock);

	phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
	if (sp->cmn.edtovResolution)	/* E_D_TOV ticks are in nanoseconds */
		phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;

	phba->fc_edtovResol = sp->cmn.edtovResolution;
	phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;

	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_PUBLIC_LOOP;
		spin_unlock_irq(shost->host_lock);
	}

	vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
	ndlp->nlp_class_sup = 0;
	if (sp->cls1.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS1;
	if (sp->cls2.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS2;
	if (sp->cls3.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS3;
	if (sp->cls4.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS4;
	ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
				sp->cmn.bbRcvSizeLsb;

	fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
	memcpy(&vport->fabric_portname, &sp->portName,
			sizeof(struct lpfc_name));
	memcpy(&vport->fabric_nodename, &sp->nodeName,
			sizeof(struct lpfc_name));
	memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));

	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
		if (sp->cmn.response_multiple_NPort) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_ELS | LOG_VPORT,
					 "1816 FLOGI NPIV supported, "
					 "response data 0x%x\n",
					 sp->cmn.response_multiple_NPort);
			spin_lock_irq(&phba->hbalock);
			phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
			spin_unlock_irq(&phba->hbalock);
		} else {
			/* Because we asked f/w for NPIV it still expects us
			   to call reg_vnpid at least for the physical host */
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_ELS | LOG_VPORT,
					 "1817 Fabric does not support NPIV "
					 "- configuring single port mode.\n");
			spin_lock_irq(&phba->hbalock);
			phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
			spin_unlock_irq(&phba->hbalock);
		}
	}

	if (fabric_param_changed &&
		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {

		/* If our NportID changed, we need to ensure all
		 * remaining NPORTs get unreg_login'ed.
		 */
		list_for_each_entry_safe(np, next_np,
					&vport->fc_nodes, nlp_listp) {
			if (!NLP_CHK_NODE_ACT(np))
				continue;
			if ((np->nlp_state != NLP_STE_NPR_NODE) ||
				   !(np->nlp_flag & NLP_NPR_ADISC))
				continue;
			spin_lock_irq(shost->host_lock);
			np->nlp_flag &= ~NLP_NPR_ADISC;
			spin_unlock_irq(shost->host_lock);
			lpfc_unreg_rpi(vport, np);
		}
		lpfc_cleanup_pending_mbox(vport);

		if (phba->sli_rev == LPFC_SLI_REV4) {
			lpfc_sli4_unreg_all_rpis(vport);
			lpfc_mbx_unreg_vpi(vport);
			spin_lock_irq(shost->host_lock);
			vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
			spin_unlock_irq(shost->host_lock);
		}

		/*
		 * For SLI3 and SLI4, the VPI needs to be reregistered in
		 * response to this fabric parameter change event.
		 */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
			/*
			 * Driver needs to re-reg VPI in order for f/w
			 * to update the MAC address.
			 */
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
			lpfc_register_new_vport(phba, vport, ndlp);
			return 0;
	}

	if (phba->sli_rev < LPFC_SLI_REV4) {
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
		    vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
			lpfc_register_new_vport(phba, vport, ndlp);
		else
			lpfc_issue_fabric_reglogin(vport);
	} else {
		ndlp->nlp_type |= NLP_FABRIC;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
			(vport->vpi_state & LPFC_VPI_REGISTERED)) {
			lpfc_start_fdiscs(phba);
			lpfc_do_scr_ns_plogi(phba, vport);
		} else if (vport->fc_flag & FC_VFI_REGISTERED)
			lpfc_issue_init_vpi(vport);
		else {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
					"3135 Need register VFI: (x%x/%x)\n",
					vport->fc_prevDID, vport->fc_myDID);
			lpfc_issue_reg_vfi(vport);
		}
	}
	return 0;
}

/**
 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into an N_Port
 * in a point-to-point topology. First, the @vport's N_Port Name is compared
 * with the received N_Port Name: if the @vport's N_Port Name is greater than
 * the received N_Port Name lexicographically, this node shall assign local
 * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and
 * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise,
 * this node shall just wait for the remote node to issue PLOGI and assign
 * N_Port IDs.
 *
 * Return code
 *   0 - Success
 *   -ENXIO - Fail
 **/
static int
lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  struct serv_parm *sp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
	spin_unlock_irq(shost->host_lock);

	phba->fc_edtov = FF_DEF_EDTOV;
	phba->fc_ratov = FF_DEF_RATOV;
	rc = memcmp(&vport->fc_portname, &sp->portName,
		    sizeof(vport->fc_portname));
	if (rc >= 0) {
		/* This side will initiate the PLOGI */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_PT2PT_PLOGI;
		spin_unlock_irq(shost->host_lock);

		/*
		 * N_Port ID cannot be 0, set ours to LocalID; the other
		 * side will be RemoteID.
		 */

		/* not equal */
		if (rc)
			vport->fc_myDID = PT2PT_LocalID;

		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!mbox)
			goto fail;

		lpfc_config_link(phba, mbox);

		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->vport = vport;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
			goto fail;
		}

		/*
		 * For SLI4, the VFI/VPI are registered AFTER the
		 * Nport with the higher WWPN sends the PLOGI with
		 * an assigned NPortId.
		 */

		/* not equal */
		if ((phba->sli_rev == LPFC_SLI_REV4) && rc)
			lpfc_issue_reg_vfi(vport);

		/* Decrement ndlp reference count indicating that ndlp can be
		 * safely released when other references to it are done.
		 */
		lpfc_nlp_put(ndlp);

		ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
		if (!ndlp) {
			/*
			 * Cannot find existing Fabric ndlp, so allocate a
			 * new one
			 */
			ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
			if (!ndlp)
				goto fail;
			lpfc_nlp_init(vport, ndlp, PT2PT_RemoteID);
		} else if (!NLP_CHK_NODE_ACT(ndlp)) {
			ndlp = lpfc_enable_node(vport, ndlp,
						NLP_STE_UNUSED_NODE);
			if (!ndlp)
				goto fail;
		}

		memcpy(&ndlp->nlp_portname, &sp->portName,
		       sizeof(struct lpfc_name));
		memcpy(&ndlp->nlp_nodename, &sp->nodeName,
		       sizeof(struct lpfc_name));
		/* Set state will put ndlp onto node list if not already done */
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
	} else
		/* This side will wait for the PLOGI, decrement ndlp reference
		 * count indicating that ndlp can be released when other
		 * references to it are done.
		 */
		lpfc_nlp_put(ndlp);

	/* If we are pt2pt with another NPort, force NPIV off! */
	phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_PT2PT;
	spin_unlock_irq(shost->host_lock);

	/* Start discovery - this should just do CLEAR_LA */
	lpfc_disc_start(vport);
	return 0;
fail:
	return -ENXIO;
}

/**
 * lpfc_cmpl_els_flogi - Completion callback function for flogi
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is the top-level completion callback function for issuing
 * a Fabric Login (FLOGI) command.
 * If the response IOCB reported an error, the lpfc_els_retry() routine
 * shall be invoked to retry the FLOGI. If a retry has been made (either
 * immediately or delayed, with lpfc_els_retry() returning 1), the command
 * IOCB is released and the function returns. If the retry attempt has been
 * given up (possibly having reached the maximum number of retries), one
 * additional decrement of the ndlp reference shall be invoked before going
 * out after releasing the command IOCB. This will actually release the
 * remote node (Note, lpfc_els_free_iocb() will also invoke one decrement of
 * the ndlp reference count). If no error is reported in the IOCB status,
 * the command Port ID field is used to determine whether this is a
 * point-to-point topology or a fabric topology: if the Port ID field is
 * assigned, it is a fabric topology; otherwise, it is a point-to-point
 * topology. The routine lpfc_cmpl_els_flogi_fabric() or
 * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the
 * topology-specific completion conditions.
 **/
static void
lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		    struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	IOCB_t *irsp = &rspiocb->iocb;
	struct lpfc_nodelist *ndlp = cmdiocb->context1;
	struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
	struct serv_parm *sp;
	uint16_t fcf_index;
	int rc;

	/* Check to see if link went down during discovery */
	if (lpfc_els_chk_latt(vport)) {
		/* One additional decrement on node reference count to
		 * trigger the release of the node
		 */
		lpfc_nlp_put(ndlp);
		goto out;
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"FLOGI cmpl: status:x%x/x%x state:x%x",
		irsp->ulpStatus, irsp->un.ulpWord[4],
		vport->port_state);

	if (irsp->ulpStatus) {
		/*
		 * In case of FIP mode, perform roundrobin FCF failover
		 * due to new FCF discovery
		 */
		if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
		    (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
			if (phba->link_state < LPFC_LINK_UP)
				goto stop_rr_fcf_flogi;
			if ((phba->fcoe_cvl_eventtag_attn ==
			     phba->fcoe_cvl_eventtag) &&
			    (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
			    (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED))
				goto stop_rr_fcf_flogi;
			else
				phba->fcoe_cvl_eventtag_attn =
					phba->fcoe_cvl_eventtag;
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
					"2611 FLOGI failed on FCF (x%x), "
					"status:x%x/x%x, tmo:x%x, perform "
					"roundrobin FCF failover\n",
					phba->fcf.current_rec.fcf_indx,
					irsp->ulpStatus, irsp->un.ulpWord[4],
					irsp->ulpTimeout);
			lpfc_sli4_set_fcf_flogi_fail(phba,
					phba->fcf.current_rec.fcf_indx);
			fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
			rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
			if (rc)
				goto out;
		}

stop_rr_fcf_flogi:
		/* FLOGI failure */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				"2858 FLOGI failure Status:x%x/x%x TMO:x%x\n",
				irsp->ulpStatus, irsp->un.ulpWord[4],
				irsp->ulpTimeout);

		/* Check for retry */
		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
			goto out;

		/* FLOGI failure */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n",
				 irsp->ulpStatus, irsp->un.ulpWord[4],
				 irsp->ulpTimeout);

		/* FLOGI failed, so there is no fabric */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
		spin_unlock_irq(shost->host_lock);

		/* If private loop, then allow max outstanding els to be
		 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
		 * alpa map would take too long otherwise.
		 */
		if (phba->alpa_map[0] == 0)
			vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
		if ((phba->sli_rev == LPFC_SLI_REV4) &&
		    (!(vport->fc_flag & FC_VFI_REGISTERED) ||
		     (vport->fc_prevDID != vport->fc_myDID))) {
			if (vport->fc_flag & FC_VFI_REGISTERED)
				lpfc_sli4_unreg_all_rpis(vport);
			lpfc_issue_reg_vfi(vport);
			lpfc_nlp_put(ndlp);
			goto out;
		}
		goto flogifail;
	}
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
	vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
	spin_unlock_irq(shost->host_lock);

	/*
	 * The FLogI succeeded. Sync the data for the CPU before
	 * accessing it.
	 */
	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);

	sp = prsp->virt + sizeof(uint32_t);

	/* FLOGI completes successfully */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0101 FLOGI completes successfully "
			 "Data: x%x x%x x%x x%x\n",
			 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
			 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution);

	if (vport->port_state == LPFC_FLOGI) {
		/*
		 * If Common Service Parameters indicate Nport
		 * we are point to point, if Fport we are Fabric.
		 */
		if (sp->cmn.fPort)
			rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
		else if (!(phba->hba_flag & HBA_FCOE_MODE))
			rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
		else {
			lpfc_printf_vlog(vport, KERN_ERR,
				LOG_FIP | LOG_ELS,
				"2831 FLOGI response with cleared Fabric "
				"bit fcf_index 0x%x "
				"Switch Name %02x%02x%02x%02x%02x%02x%02x%02x "
				"Fabric Name "
				"%02x%02x%02x%02x%02x%02x%02x%02x\n",
				phba->fcf.current_rec.fcf_indx,
				phba->fcf.current_rec.switch_name[0],
				phba->fcf.current_rec.switch_name[1],
				phba->fcf.current_rec.switch_name[2],
				phba->fcf.current_rec.switch_name[3],
				phba->fcf.current_rec.switch_name[4],
				phba->fcf.current_rec.switch_name[5],
				phba->fcf.current_rec.switch_name[6],
				phba->fcf.current_rec.switch_name[7],
				phba->fcf.current_rec.fabric_name[0],
				phba->fcf.current_rec.fabric_name[1],
				phba->fcf.current_rec.fabric_name[2],
				phba->fcf.current_rec.fabric_name[3],
				phba->fcf.current_rec.fabric_name[4],
				phba->fcf.current_rec.fabric_name[5],
				phba->fcf.current_rec.fabric_name[6],
				phba->fcf.current_rec.fabric_name[7]);
			lpfc_nlp_put(ndlp);
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
			spin_unlock_irq(&phba->hbalock);
			goto out;
		}
		if (!rc) {
			/* Mark the FCF discovery process done */
			if (phba->hba_flag & HBA_FIP_SUPPORT)
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
						LOG_ELS,
						"2769 FLOGI to FCF (x%x) "
						"completed successfully\n",
						phba->fcf.current_rec.fcf_indx);
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
			spin_unlock_irq(&phba->hbalock);
			goto out;
		}
	}

flogifail:
	lpfc_nlp_put(ndlp);

	if (!lpfc_error_lost_link(irsp)) {
		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(vport);

		/* Start discovery */
		lpfc_disc_start(vport);
	} else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
			((irsp->un.ulpWord[4] != IOERR_SLI_ABORTED) &&
			(irsp->un.ulpWord[4] != IOERR_SLI_DOWN))) &&
			(phba->link_state != LPFC_CLEAR_LA)) {
		/* If FLOGI failed enable link interrupt. */
		lpfc_issue_clear_la(phba, vport);
	}
out:
	lpfc_els_free_iocb(phba, cmdiocb);
}

/**
 * lpfc_issue_els_flogi - Issue a flogi iocb command for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @retry: number of retries to the command IOCB.
 *
 * This routine issues a Fabric Login (FLOGI) Request ELS command
 * for a @vport. The initiator service parameters are put into the payload
 * of the FLOGI Request IOCB and the top-level callback function pointer
 * to the lpfc_cmpl_els_flogi() routine is put into the IOCB completion
 * callback function field. The lpfc_issue_fabric_iocb routine is invoked to
 * send out the FLOGI ELS command with one outstanding fabric IOCB at a time.
 *
 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 * will be stored into the context1 field of the IOCB for the completion
 * callback function to the FLOGI ELS command.
 *
 * Return code
 *   0 - successfully issued flogi iocb for @vport
 *   1 - failed to issue flogi iocb for @vport
 **/
static int
lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		     uint8_t retry)
{
	struct lpfc_hba *phba = vport->phba;
	struct serv_parm *sp;
	IOCB_t *icmd;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_sli_ring *pring;
	uint8_t *pcmd;
	uint16_t cmdsize;
	uint32_t tmo;
	int rc;

	pring = &phba->sli.ring[LPFC_ELS_RING];

	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
				     ndlp->nlp_DID, ELS_CMD_FLOGI);

	if (!elsiocb)
		return 1;

	icmd = &elsiocb->iocb;
	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);

	/* For FLOGI request, remainder of payload is service parameters */
	*((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
	pcmd += sizeof(uint32_t);
	memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
	sp = (struct serv_parm *) pcmd;

	/* Setup CSPs accordingly for Fabric */
	sp->cmn.e_d_tov = 0;
	sp->cmn.w2.r_a_tov = 0;
	sp->cmn.virtual_fabric_support = 0;
	sp->cls1.classValid = 0;
	sp->cls2.seqDelivery = 1;
	sp->cls3.seqDelivery = 1;
	if (sp->cmn.fcphLow < FC_PH3)
		sp->cmn.fcphLow = FC_PH3;
	if (sp->cmn.fcphHigh < FC_PH3)
		sp->cmn.fcphHigh = FC_PH3;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
		    LPFC_SLI_INTF_IF_TYPE_0) {
			elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
			elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
			/* FLOGI needs to be 3 for WQE FCFI */
			/* Set the fcfi to the fcfi we registered with */
			elsiocb->iocb.ulpContext = phba->fcf.fcfi;
		}
	} else {
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
			sp->cmn.request_multiple_Nport = 1;
			/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
			icmd->ulpCt_h = 1;
			icmd->ulpCt_l = 0;
		} else
			sp->cmn.request_multiple_Nport = 0;
	}

	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
		icmd->un.elsreq64.myID = 0;
		icmd->un.elsreq64.fl = 1;
	}

	tmo = phba->fc_ratov;
	phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
	lpfc_set_disctmo(vport);
	phba->fc_ratov = tmo;

	phba->fc_stat.elsXmitFLOGI++;
	elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Issue FLOGI: opt:x%x",
		phba->sli3_options, 0, 0);

	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
	if (rc == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
	}
	return 0;
}

/**
 * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs
 * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq
 * list and issues an abort IOCB command on each outstanding IOCB that
 * contains an active Fabric_DID ndlp. Note that this function is to issue
 * the abort IOCB command on all the outstanding IOCBs, thus when this
 * function returns, it does not guarantee all the IOCBs are actually aborted.
 *
 * Return code
 *   0 - Successfully issued abort iocb on all outstanding flogis (Always 0)
 **/
int
lpfc_els_abort_flogi(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	struct lpfc_nodelist *ndlp;
	IOCB_t *icmd;

	/* Abort outstanding I/O on NPort <nlp_DID> */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"0201 Abort outstanding I/O on NPort x%x\n",
			Fabric_DID);

	pring = &phba->sli.ring[LPFC_ELS_RING];

	/*
	 * Check the txcmplq for an iocb that matches the nport the driver is
	 * searching for.
	 */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		icmd = &iocb->iocb;
		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
			ndlp = (struct lpfc_nodelist *)(iocb->context1);
			if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
			    (ndlp->nlp_DID == Fabric_DID))
				lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		}
	}
	spin_unlock_irq(&phba->hbalock);

	return 0;
}

/**
 * lpfc_initial_flogi - Issue an initial fabric login for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues an initial Fabric Login (FLOGI) for the @vport
 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
 * the @vport's ndlp list. If no such ndlp is found, it will create an ndlp
 * and put it into the @vport's ndlp list. If an inactive ndlp is found on
 * the list, it will just be enabled and made active. The
 * lpfc_issue_els_flogi() routine is then invoked with the @vport and the
 * ndlp to perform the FLOGI for the @vport.
 *
 * Return code
 *   0 - failed to issue initial flogi for @vport
 *   1 - successfully issued initial flogi for @vport
 **/
int
lpfc_initial_flogi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp;

	vport->port_state = LPFC_FLOGI;
	lpfc_set_disctmo(vport);

	/* First look for the Fabric ndlp */
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return 0;
		lpfc_nlp_init(vport, ndlp, Fabric_DID);
		/* Set the node type */
		ndlp->nlp_type |= NLP_FABRIC;
		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		/* re-setup ndlp without removing from node list */
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
		if (!ndlp)
			return 0;
	}

	if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
		/* This decrement of reference count to node shall kick off
		 * the release of the node.
		 */
		lpfc_nlp_put(ndlp);
		return 0;
	}
	return 1;
}

/**
 * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues an initial Fabric Discover (FDISC) for the @vport
 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
 * the @vport's ndlp list. If no such ndlp is found, it will create an ndlp
 * and put it into the @vport's ndlp list. If an inactive ndlp is found on
 * the list, it will just be enabled and made active. The
 * lpfc_issue_els_fdisc() routine is then invoked with the @vport and the
 * ndlp to perform the FDISC for the @vport.
 *
 * Return code
 *   0 - failed to issue initial fdisc for @vport
 *   1 - successfully issued initial fdisc for @vport
 **/
int
lpfc_initial_fdisc(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp;

	/* First look for the Fabric ndlp */
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return 0;
		lpfc_nlp_init(vport, ndlp, Fabric_DID);
		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		/* re-setup ndlp without removing from node list */
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
		if (!ndlp)
			return 0;
	}

	if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
		/* decrement node reference count to trigger the release of
		 * the node.
		 */
		lpfc_nlp_put(ndlp);
		return 0;
	}
	return 1;
}

/**
 * lpfc_more_plogi - Check and issue remaining plogis for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine checks whether there are more remaining Port Logins
 * (PLOGI) to be issued for the @vport. If so, it will invoke the routine
 * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes
 * to issue ELS PLOGIs up to the configured discover threads with the
 * @vport (@vport->cfg_discovery_threads).
 * The function also decrements the @vport's num_disc_nodes by 1 if it is
 * not already 0.
 **/
void
lpfc_more_plogi(struct lpfc_vport *vport)
{
	int sentplogi;

	if (vport->num_disc_nodes)
		vport->num_disc_nodes--;

	/* Continue discovery with <num_disc_nodes> PLOGIs to go */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0232 Continue discovery with %d PLOGIs to go "
			 "Data: x%x x%x x%x\n",
			 vport->num_disc_nodes, vport->fc_plogi_cnt,
			 vport->fc_flag, vport->port_state);
	/* Check to see if there are more PLOGIs to be sent */
	if (vport->fc_flag & FC_NLP_MORE)
		/* go thru NPR nodes and issue any remaining ELS PLOGIs */
		sentplogi = lpfc_els_disc_plogi(vport);

	return;
}

/**
 * lpfc_plogi_confirm_nport - Confirm plogi wwpn matches stored ndlp
 * @phba: pointer to lpfc hba data structure.
 * @prsp: pointer to response IOCB payload.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine checks and indicates whether the WWPN of an N_Port, retrieved
 * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_Port.
 * The following cases are considered N_Port confirmed:
 * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on vport list and matches
 * the WWPN of the N_Port logged into; 3) The @ndlp is not on vport list but
 * it does not have WWPN assigned either. If the WWPN is confirmed, the
 * pointer to the @ndlp will be returned. If the WWPN is not confirmed:
 * 1) if there is a node on vport list other than the @ndlp with the same
 * WWPN of the N_Port PLOGI logged into, the lpfc_unreg_rpi() will be invoked
 * on that node to release the RPI associated with the node; 2) if there is
 * no node found on vport list with the same WWPN of the N_Port PLOGI logged
 * into, a new node shall be allocated (or activated). In either case, the
 * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall
 * be released and the new_ndlp shall be put on to the vport node list and
 * its pointer returned as the confirmed node.
 *
 * Note that before the @ndlp got "released", the keepDID from the
 * not-matching or inactive "new_ndlp" on the vport node list is assigned to
 * the nlp_DID of the @ndlp. This is because the release of @ndlp is actually
 * to put it into an inactive state on the vport node list and the vport node
 * list management algorithm does not allow two nodes with the same DID.
 *
 * Return code
 *   pointer to the PLOGI N_Port @ndlp
 **/
static struct lpfc_nodelist *
lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
			 struct lpfc_nodelist *ndlp)
{
	struct lpfc_vport *vport = ndlp->vport;
	struct lpfc_nodelist *new_ndlp;
	struct lpfc_rport_data *rdata;
	struct fc_rport *rport;
	struct serv_parm *sp;
	uint8_t name[sizeof(struct lpfc_name)];
	uint32_t rc, keepDID = 0;
	int put_node;
	int put_rport;
	struct lpfc_node_rrqs rrq;

	/* Fabric nodes can have the same WWPN so we don't bother searching
	 * by WWPN. Just return the ndlp that was given to us.
	 */
	if (ndlp->nlp_type & NLP_FABRIC)
		return ndlp;

	sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t));
	memset(name, 0, sizeof(struct lpfc_name));

	/* Now we find out if the NPort we are logging into, matches the WWPN
	 * we have for that ndlp. If not, we have some work to do.
	 */
	new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);

	if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp))
		return ndlp;
	memset(&rrq.xri_bitmap, 0, sizeof(new_ndlp->active_rrqs.xri_bitmap));

	if (!new_ndlp) {
		rc = memcmp(&ndlp->nlp_portname, name,
			    sizeof(struct lpfc_name));
		if (!rc)
			return ndlp;
		new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
		if (!new_ndlp)
			return ndlp;
		lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID);
	} else if (!NLP_CHK_NODE_ACT(new_ndlp)) {
		rc = memcmp(&ndlp->nlp_portname, name,
			    sizeof(struct lpfc_name));
		if (!rc)
			return ndlp;
		new_ndlp = lpfc_enable_node(vport, new_ndlp,
						NLP_STE_UNUSED_NODE);
		if (!new_ndlp)
			return ndlp;
		keepDID = new_ndlp->nlp_DID;
		if (phba->sli_rev == LPFC_SLI_REV4)
			memcpy(&rrq.xri_bitmap,
				&new_ndlp->active_rrqs.xri_bitmap,
				sizeof(new_ndlp->active_rrqs.xri_bitmap));
	} else {
		keepDID = new_ndlp->nlp_DID;
		if (phba->sli_rev == LPFC_SLI_REV4)
			memcpy(&rrq.xri_bitmap,
				&new_ndlp->active_rrqs.xri_bitmap,
				sizeof(new_ndlp->active_rrqs.xri_bitmap));
	}

	lpfc_unreg_rpi(vport, new_ndlp);
	new_ndlp->nlp_DID = ndlp->nlp_DID;
	new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
	if (phba->sli_rev == LPFC_SLI_REV4)
		memcpy(new_ndlp->active_rrqs.xri_bitmap,
			&ndlp->active_rrqs.xri_bitmap,
			sizeof(ndlp->active_rrqs.xri_bitmap));

	if (ndlp->nlp_flag & NLP_NPR_2B_DISC)
		new_ndlp->nlp_flag |= NLP_NPR_2B_DISC;
	ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;

	/* Set state will put new_ndlp on to node list if not already done */
	lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state);

	/* Move this back to NPR state */
	if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
		/* The new_ndlp is replacing ndlp totally, so we need
		 * to put ndlp on UNUSED list and try to free it.
		 */

		/* Fix up the rport accordingly */
		rport = ndlp->rport;
		if (rport) {
			rdata = rport->dd_data;
			if (rdata->pnode == ndlp) {
				lpfc_nlp_put(ndlp);
				ndlp->rport = NULL;
				rdata->pnode = lpfc_nlp_get(new_ndlp);
				new_ndlp->rport = rport;
			}
			new_ndlp->nlp_type = ndlp->nlp_type;
		}
		/* We shall actually free the ndlp with both nlp_DID and
		 * nlp_portname fields equals 0 to avoid any ndlp on the
		 * nodelist never to be used.
		 */
		if (ndlp->nlp_DID == 0) {
			spin_lock_irq(&phba->ndlp_lock);
			NLP_SET_FREE_REQ(ndlp);
			spin_unlock_irq(&phba->ndlp_lock);
		}

		/* Two ndlps cannot have the same did on the nodelist */
		ndlp->nlp_DID = keepDID;
		if (phba->sli_rev == LPFC_SLI_REV4)
			memcpy(&ndlp->active_rrqs.xri_bitmap,
				&rrq.xri_bitmap,
				sizeof(ndlp->active_rrqs.xri_bitmap));
		lpfc_drop_node(vport, ndlp);
	}
	else {
		lpfc_unreg_rpi(vport, ndlp);
		/* Two ndlps cannot have the same did */
		ndlp->nlp_DID = keepDID;
		if (phba->sli_rev == LPFC_SLI_REV4)
			memcpy(&ndlp->active_rrqs.xri_bitmap,
				&rrq.xri_bitmap,
				sizeof(ndlp->active_rrqs.xri_bitmap));
		/* Since we are swapping the ndlp passed in with the new one
		 * and the did has already been swapped, copy over the
		 * state and names.
		 */
		memcpy(&new_ndlp->nlp_portname, &ndlp->nlp_portname,
			sizeof(struct lpfc_name));
		memcpy(&new_ndlp->nlp_nodename, &ndlp->nlp_nodename,
			sizeof(struct lpfc_name));
		new_ndlp->nlp_state = ndlp->nlp_state;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		/* Fix up the rport accordingly */
		rport = ndlp->rport;
		if (rport) {
			rdata = rport->dd_data;
			put_node = rdata->pnode != NULL;
			put_rport = ndlp->rport != NULL;
			rdata->pnode = NULL;
			ndlp->rport = NULL;
			if (put_node)
				lpfc_nlp_put(ndlp);
			if (put_rport)
				put_device(&rport->dev);
		}
	}
	return new_ndlp;
}

/**
 * lpfc_end_rscn - Check and handle more rscn for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine checks whether more Registration State Change
 * Notifications (RSCNs) came in while the discovery state machine was in
 * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be
 * invoked to handle the additional RSCNs for the @vport. Otherwise, the
 * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of
 * handling the RSCNs.
 **/
void
lpfc_end_rscn(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (vport->fc_flag & FC_RSCN_MODE) {
		/*
		 * Check to see if more RSCNs came in while we were
		 * processing this one.
		 */
		if (vport->fc_rscn_id_cnt ||
		    (vport->fc_flag & FC_RSCN_DISCOVERY) != 0)
			lpfc_els_handle_rscn(vport);
		else {
			spin_lock_irq(shost->host_lock);
			vport->fc_flag &= ~FC_RSCN_MODE;
			spin_unlock_irq(shost->host_lock);
		}
	}
}

/**
 * lpfc_cmpl_els_rrq - Completion handler for els RRQs.
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine will call the clear rrq function to free the rrq and
 * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not
 * exist then the clear_rrq is still called because the rrq needs to
 * be freed.
 **/

static void
lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		  struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	IOCB_t *irsp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_node_rrq *rrq;

	/* we pass cmdiocb to state machine which needs rspiocb as well */
	rrq = cmdiocb->context_un.rrq;
	cmdiocb->context_un.rsp_iocb = rspiocb;

	irsp = &rspiocb->iocb;
	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"RRQ cmpl: status:x%x/x%x did:x%x",
		irsp->ulpStatus, irsp->un.ulpWord[4],
		irsp->un.elsreq64.remoteID);

	ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || ndlp != rrq->ndlp) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "2882 RRQ completes to NPort x%x "
				 "with no ndlp. Data: x%x x%x x%x\n",
Data: x%x x%x x%x\n", 1664 irsp->un.elsreq64.remoteID, 1665 irsp->ulpStatus, irsp->un.ulpWord[4], 1666 irsp->ulpIoTag); 1667 goto out; 1668 } 1669 1670 /* rrq completes to NPort <nlp_DID> */ 1671 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1672 "2880 RRQ completes to NPort x%x " 1673 "Data: x%x x%x x%x x%x x%x\n", 1674 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 1675 irsp->ulpTimeout, rrq->xritag, rrq->rxid); 1676 1677 if (irsp->ulpStatus) { 1678 /* Check for retry */ 1679 /* RRQ failed Don't print the vport to vport rjts */ 1680 if (irsp->ulpStatus != IOSTAT_LS_RJT || 1681 (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) && 1682 ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) || 1683 (phba)->pport->cfg_log_verbose & LOG_ELS) 1684 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 1685 "2881 RRQ failure DID:%06X Status:x%x/x%x\n", 1686 ndlp->nlp_DID, irsp->ulpStatus, 1687 irsp->un.ulpWord[4]); 1688 } 1689 out: 1690 if (rrq) 1691 lpfc_clr_rrq_active(phba, rrq->xritag, rrq); 1692 lpfc_els_free_iocb(phba, cmdiocb); 1693 return; 1694 } 1695 /** 1696 * lpfc_cmpl_els_plogi - Completion callback function for plogi 1697 * @phba: pointer to lpfc hba data structure. 1698 * @cmdiocb: pointer to lpfc command iocb data structure. 1699 * @rspiocb: pointer to lpfc response iocb data structure. 1700 * 1701 * This routine is the completion callback function for issuing the Port 1702 * Login (PLOGI) command. For PLOGI completion, there must be an active 1703 * ndlp on the vport node list that matches the remote node ID from the 1704 * PLOGI response IOCB. If such ndlp does not exist, the PLOGI is simply 1705 * ignored and command IOCB released. The PLOGI response IOCB status is 1706 * checked for error conditons. If there is error status reported, PLOGI 1707 * retry shall be attempted by invoking the lpfc_els_retry() routine. 1708 * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on 1709 * the ndlp and the NLP_EVT_CMPL_PLOGI state to the Discover State Machine 1710 * (DSM) is set for this PLOGI completion. Finally, it checks whether 1711 * there are additional N_Port nodes with the vport that need to perform 1712 * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue addition 1713 * PLOGIs. 1714 **/ 1715 static void 1716 lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1717 struct lpfc_iocbq *rspiocb) 1718 { 1719 struct lpfc_vport *vport = cmdiocb->vport; 1720 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1721 IOCB_t *irsp; 1722 struct lpfc_nodelist *ndlp; 1723 struct lpfc_dmabuf *prsp; 1724 int disc, rc, did, type; 1725 1726 /* we pass cmdiocb to state machine which needs rspiocb as well */ 1727 cmdiocb->context_un.rsp_iocb = rspiocb; 1728 1729 irsp = &rspiocb->iocb; 1730 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1731 "PLOGI cmpl: status:x%x/x%x did:x%x", 1732 irsp->ulpStatus, irsp->un.ulpWord[4], 1733 irsp->un.elsreq64.remoteID); 1734 1735 ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID); 1736 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { 1737 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 1738 "0136 PLOGI completes to NPort x%x " 1739 "with no ndlp. Data: x%x x%x x%x\n", 1740 irsp->un.elsreq64.remoteID, 1741 irsp->ulpStatus, irsp->un.ulpWord[4], 1742 irsp->ulpIoTag); 1743 goto out; 1744 } 1745 1746 /* Since ndlp can be freed in the disc state machine, note if this node 1747 * is being used during discovery. 
1748 */ 1749 spin_lock_irq(shost->host_lock); 1750 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 1751 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 1752 spin_unlock_irq(shost->host_lock); 1753 rc = 0; 1754 1755 /* PLOGI completes to NPort <nlp_DID> */ 1756 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1757 "0102 PLOGI completes to NPort x%x " 1758 "Data: x%x x%x x%x x%x x%x\n", 1759 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 1760 irsp->ulpTimeout, disc, vport->num_disc_nodes); 1761 /* Check to see if link went down during discovery */ 1762 if (lpfc_els_chk_latt(vport)) { 1763 spin_lock_irq(shost->host_lock); 1764 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 1765 spin_unlock_irq(shost->host_lock); 1766 goto out; 1767 } 1768 1769 /* ndlp could be freed in DSM, save these values now */ 1770 type = ndlp->nlp_type; 1771 did = ndlp->nlp_DID; 1772 1773 if (irsp->ulpStatus) { 1774 /* Check for retry */ 1775 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 1776 /* ELS command is being retried */ 1777 if (disc) { 1778 spin_lock_irq(shost->host_lock); 1779 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 1780 spin_unlock_irq(shost->host_lock); 1781 } 1782 goto out; 1783 } 1784 /* PLOGI failed Don't print the vport to vport rjts */ 1785 if (irsp->ulpStatus != IOSTAT_LS_RJT || 1786 (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) && 1787 ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) || 1788 (phba)->pport->cfg_log_verbose & LOG_ELS) 1789 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 1790 "2753 PLOGI failure DID:%06X Status:x%x/x%x\n", 1791 ndlp->nlp_DID, irsp->ulpStatus, 1792 irsp->un.ulpWord[4]); 1793 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 1794 if (lpfc_error_lost_link(irsp)) 1795 rc = NLP_STE_FREED_NODE; 1796 else 1797 rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1798 NLP_EVT_CMPL_PLOGI); 1799 } else { 1800 /* Good status, call state machine */ 1801 prsp = list_entry(((struct lpfc_dmabuf *) 1802 cmdiocb->context2)->list.next, 1803 struct lpfc_dmabuf, list); 1804 ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp); 1805 rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1806 NLP_EVT_CMPL_PLOGI); 1807 } 1808 1809 if (disc && vport->num_disc_nodes) { 1810 /* Check to see if there are more PLOGIs to be sent */ 1811 lpfc_more_plogi(vport); 1812 1813 if (vport->num_disc_nodes == 0) { 1814 spin_lock_irq(shost->host_lock); 1815 vport->fc_flag &= ~FC_NDISC_ACTIVE; 1816 spin_unlock_irq(shost->host_lock); 1817 1818 lpfc_can_disctmo(vport); 1819 lpfc_end_rscn(vport); 1820 } 1821 } 1822 1823 out: 1824 lpfc_els_free_iocb(phba, cmdiocb); 1825 return; 1826 } 1827 1828 /** 1829 * lpfc_issue_els_plogi - Issue an plogi iocb command for a vport 1830 * @vport: pointer to a host virtual N_Port data structure. 1831 * @did: destination port identifier. 1832 * @retry: number of retries to the command IOCB. 1833 * 1834 * This routine issues a Port Login (PLOGI) command to a remote N_Port 1835 * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port, 1836 * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list. 1837 * This routine constructs the proper feilds of the PLOGI IOCB and invokes 1838 * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command. 1839 * 1840 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 1841 * will be incremented by 1 for holding the ndlp and the reference to ndlp 1842 * will be stored into the context1 field of the IOCB for the completion 1843 * callback function to the PLOGI ELS command. 
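 *
 * As an illustrative sketch only (the wrapper shape mirrors the delayed
 * retry handler later in this file; it is not a required calling sequence),
 * a discovery caller typically issues the PLOGI and, on success, records
 * the previous state and moves the node to PLOGI_ISSUE:
 *
 *   if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) {
 *           ndlp->nlp_prev_state = ndlp->nlp_state;
 *           lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
 *   }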
1844 * 1845 * Return code 1846 * 0 - Successfully issued a plogi for @vport 1847 * 1 - failed to issue a plogi for @vport 1848 **/ 1849 int 1850 lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) 1851 { 1852 struct lpfc_hba *phba = vport->phba; 1853 struct serv_parm *sp; 1854 IOCB_t *icmd; 1855 struct lpfc_nodelist *ndlp; 1856 struct lpfc_iocbq *elsiocb; 1857 struct lpfc_sli *psli; 1858 uint8_t *pcmd; 1859 uint16_t cmdsize; 1860 int ret; 1861 1862 psli = &phba->sli; 1863 1864 ndlp = lpfc_findnode_did(vport, did); 1865 if (ndlp && !NLP_CHK_NODE_ACT(ndlp)) 1866 ndlp = NULL; 1867 1868 /* If ndlp is not NULL, we will bump the reference count on it */ 1869 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 1870 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 1871 ELS_CMD_PLOGI); 1872 if (!elsiocb) 1873 return 1; 1874 1875 icmd = &elsiocb->iocb; 1876 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 1877 1878 /* For PLOGI request, remainder of payload is service parameters */ 1879 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI; 1880 pcmd += sizeof(uint32_t); 1881 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); 1882 sp = (struct serv_parm *) pcmd; 1883 1884 /* 1885 * If we are a N-port connected to a Fabric, fix-up paramm's so logins 1886 * to device on remote loops work. 1887 */ 1888 if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP)) 1889 sp->cmn.altBbCredit = 1; 1890 1891 if (sp->cmn.fcphLow < FC_PH_4_3) 1892 sp->cmn.fcphLow = FC_PH_4_3; 1893 1894 if (sp->cmn.fcphHigh < FC_PH3) 1895 sp->cmn.fcphHigh = FC_PH3; 1896 1897 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1898 "Issue PLOGI: did:x%x", 1899 did, 0, 0); 1900 1901 phba->fc_stat.elsXmitPLOGI++; 1902 elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi; 1903 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 1904 1905 if (ret == IOCB_ERROR) { 1906 lpfc_els_free_iocb(phba, elsiocb); 1907 return 1; 1908 } 1909 return 0; 1910 } 1911 1912 /** 1913 * lpfc_cmpl_els_prli - Completion callback function for prli 1914 * @phba: pointer to lpfc hba data structure. 1915 * @cmdiocb: pointer to lpfc command iocb data structure. 1916 * @rspiocb: pointer to lpfc response iocb data structure. 1917 * 1918 * This routine is the completion callback function for a Process Login 1919 * (PRLI) ELS command. The PRLI response IOCB status is checked for error 1920 * status. If there is error status reported, PRLI retry shall be attempted 1921 * by invoking the lpfc_els_retry() routine. Otherwise, the state 1922 * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this 1923 * ndlp to mark the PRLI completion. 
1924 **/ 1925 static void 1926 lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1927 struct lpfc_iocbq *rspiocb) 1928 { 1929 struct lpfc_vport *vport = cmdiocb->vport; 1930 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1931 IOCB_t *irsp; 1932 struct lpfc_sli *psli; 1933 struct lpfc_nodelist *ndlp; 1934 1935 psli = &phba->sli; 1936 /* we pass cmdiocb to state machine which needs rspiocb as well */ 1937 cmdiocb->context_un.rsp_iocb = rspiocb; 1938 1939 irsp = &(rspiocb->iocb); 1940 ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 1941 spin_lock_irq(shost->host_lock); 1942 ndlp->nlp_flag &= ~NLP_PRLI_SND; 1943 spin_unlock_irq(shost->host_lock); 1944 1945 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1946 "PRLI cmpl: status:x%x/x%x did:x%x", 1947 irsp->ulpStatus, irsp->un.ulpWord[4], 1948 ndlp->nlp_DID); 1949 /* PRLI completes to NPort <nlp_DID> */ 1950 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1951 "0103 PRLI completes to NPort x%x " 1952 "Data: x%x x%x x%x x%x\n", 1953 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 1954 irsp->ulpTimeout, vport->num_disc_nodes); 1955 1956 vport->fc_prli_sent--; 1957 /* Check to see if link went down during discovery */ 1958 if (lpfc_els_chk_latt(vport)) 1959 goto out; 1960 1961 if (irsp->ulpStatus) { 1962 /* Check for retry */ 1963 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 1964 /* ELS command is being retried */ 1965 goto out; 1966 } 1967 /* PRLI failed */ 1968 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 1969 "2754 PRLI failure DID:%06X Status:x%x/x%x\n", 1970 ndlp->nlp_DID, irsp->ulpStatus, 1971 irsp->un.ulpWord[4]); 1972 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 1973 if (lpfc_error_lost_link(irsp)) 1974 goto out; 1975 else 1976 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1977 NLP_EVT_CMPL_PRLI); 1978 } else 1979 /* Good status, call state machine */ 1980 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1981 NLP_EVT_CMPL_PRLI); 1982 out: 1983 lpfc_els_free_iocb(phba, cmdiocb); 1984 return; 1985 } 1986 1987 /** 1988 * lpfc_issue_els_prli - Issue a prli iocb command for a vport 1989 * @vport: pointer to a host virtual N_Port data structure. 1990 * @ndlp: pointer to a node-list data structure. 1991 * @retry: number of retries to the command IOCB. 1992 * 1993 * This routine issues a Process Login (PRLI) ELS command for the 1994 * @vport. The PRLI service parameters are set up in the payload of the 1995 * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine 1996 * is put to the IOCB completion callback func field before invoking the 1997 * routine lpfc_sli_issue_iocb() to send out PRLI command. 1998 * 1999 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 2000 * will be incremented by 1 for holding the ndlp and the reference to ndlp 2001 * will be stored into the context1 field of the IOCB for the completion 2002 * callback function to the PRLI ELS command. 
2003 * 2004 * Return code 2005 * 0 - successfully issued prli iocb command for @vport 2006 * 1 - failed to issue prli iocb command for @vport 2007 **/ 2008 int 2009 lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2010 uint8_t retry) 2011 { 2012 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2013 struct lpfc_hba *phba = vport->phba; 2014 PRLI *npr; 2015 IOCB_t *icmd; 2016 struct lpfc_iocbq *elsiocb; 2017 uint8_t *pcmd; 2018 uint16_t cmdsize; 2019 2020 cmdsize = (sizeof(uint32_t) + sizeof(PRLI)); 2021 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2022 ndlp->nlp_DID, ELS_CMD_PRLI); 2023 if (!elsiocb) 2024 return 1; 2025 2026 icmd = &elsiocb->iocb; 2027 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2028 2029 /* For PRLI request, remainder of payload is service parameters */ 2030 memset(pcmd, 0, (sizeof(PRLI) + sizeof(uint32_t))); 2031 *((uint32_t *) (pcmd)) = ELS_CMD_PRLI; 2032 pcmd += sizeof(uint32_t); 2033 2034 /* For PRLI, remainder of payload is PRLI parameter page */ 2035 npr = (PRLI *) pcmd; 2036 /* 2037 * If our firmware version is 3.20 or later, 2038 * set the following bits for FC-TAPE support. 2039 */ 2040 if (phba->vpd.rev.feaLevelHigh >= 0x02) { 2041 npr->ConfmComplAllowed = 1; 2042 npr->Retry = 1; 2043 npr->TaskRetryIdReq = 1; 2044 } 2045 npr->estabImagePair = 1; 2046 npr->readXferRdyDis = 1; 2047 2048 /* For FCP support */ 2049 npr->prliType = PRLI_FCP_TYPE; 2050 npr->initiatorFunc = 1; 2051 2052 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2053 "Issue PRLI: did:x%x", 2054 ndlp->nlp_DID, 0, 0); 2055 2056 phba->fc_stat.elsXmitPRLI++; 2057 elsiocb->iocb_cmpl = lpfc_cmpl_els_prli; 2058 spin_lock_irq(shost->host_lock); 2059 ndlp->nlp_flag |= NLP_PRLI_SND; 2060 spin_unlock_irq(shost->host_lock); 2061 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == 2062 IOCB_ERROR) { 2063 spin_lock_irq(shost->host_lock); 2064 ndlp->nlp_flag &= ~NLP_PRLI_SND; 2065 spin_unlock_irq(shost->host_lock); 2066 lpfc_els_free_iocb(phba, elsiocb); 2067 return 1; 2068 } 2069 vport->fc_prli_sent++; 2070 return 0; 2071 } 2072 2073 /** 2074 * lpfc_rscn_disc - Perform rscn discovery for a vport 2075 * @vport: pointer to a host virtual N_Port data structure. 2076 * 2077 * This routine performs Registration State Change Notification (RSCN) 2078 * discovery for a @vport. If the @vport's node port recovery count is not 2079 * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all 2080 * the nodes that need recovery. If none of the PLOGI were needed through 2081 * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be 2082 * invoked to check and handle possible more RSCN came in during the period 2083 * of processing the current ones. 2084 **/ 2085 static void 2086 lpfc_rscn_disc(struct lpfc_vport *vport) 2087 { 2088 lpfc_can_disctmo(vport); 2089 2090 /* RSCN discovery */ 2091 /* go thru NPR nodes and issue ELS PLOGIs */ 2092 if (vport->fc_npr_cnt) 2093 if (lpfc_els_disc_plogi(vport)) 2094 return; 2095 2096 lpfc_end_rscn(vport); 2097 } 2098 2099 /** 2100 * lpfc_adisc_done - Complete the adisc phase of discovery 2101 * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs. 2102 * 2103 * This function is called when the final ADISC is completed during discovery. 2104 * This function handles clearing link attention or issuing reg_vpi depending 2105 * on whether npiv is enabled. This function also kicks off the PLOGI phase of 2106 * discovery. 
2107 * This function is called with no locks held. 2108 **/ 2109 static void 2110 lpfc_adisc_done(struct lpfc_vport *vport) 2111 { 2112 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2113 struct lpfc_hba *phba = vport->phba; 2114 2115 /* 2116 * For NPIV, cmpl_reg_vpi will set port_state to READY, 2117 * and continue discovery. 2118 */ 2119 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 2120 !(vport->fc_flag & FC_RSCN_MODE) && 2121 (phba->sli_rev < LPFC_SLI_REV4)) { 2122 lpfc_issue_reg_vpi(phba, vport); 2123 return; 2124 } 2125 /* 2126 * For SLI2, we need to set port_state to READY 2127 * and continue discovery. 2128 */ 2129 if (vport->port_state < LPFC_VPORT_READY) { 2130 /* If we get here, there is nothing to ADISC */ 2131 if (vport->port_type == LPFC_PHYSICAL_PORT) 2132 lpfc_issue_clear_la(phba, vport); 2133 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) { 2134 vport->num_disc_nodes = 0; 2135 /* go thru NPR list, issue ELS PLOGIs */ 2136 if (vport->fc_npr_cnt) 2137 lpfc_els_disc_plogi(vport); 2138 if (!vport->num_disc_nodes) { 2139 spin_lock_irq(shost->host_lock); 2140 vport->fc_flag &= ~FC_NDISC_ACTIVE; 2141 spin_unlock_irq(shost->host_lock); 2142 lpfc_can_disctmo(vport); 2143 lpfc_end_rscn(vport); 2144 } 2145 } 2146 vport->port_state = LPFC_VPORT_READY; 2147 } else 2148 lpfc_rscn_disc(vport); 2149 } 2150 2151 /** 2152 * lpfc_more_adisc - Issue more adisc as needed 2153 * @vport: pointer to a host virtual N_Port data structure. 2154 * 2155 * This routine determines whether there are more ndlps on a @vport 2156 * node list need to have Address Discover (ADISC) issued. If so, it will 2157 * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's 2158 * remaining nodes which need to have ADISC sent. 2159 **/ 2160 void 2161 lpfc_more_adisc(struct lpfc_vport *vport) 2162 { 2163 int sentadisc; 2164 2165 if (vport->num_disc_nodes) 2166 vport->num_disc_nodes--; 2167 /* Continue discovery with <num_disc_nodes> ADISCs to go */ 2168 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2169 "0210 Continue discovery with %d ADISCs to go " 2170 "Data: x%x x%x x%x\n", 2171 vport->num_disc_nodes, vport->fc_adisc_cnt, 2172 vport->fc_flag, vport->port_state); 2173 /* Check to see if there are more ADISCs to be sent */ 2174 if (vport->fc_flag & FC_NLP_MORE) { 2175 lpfc_set_disctmo(vport); 2176 /* go thru NPR nodes and issue any remaining ELS ADISCs */ 2177 sentadisc = lpfc_els_disc_adisc(vport); 2178 } 2179 if (!vport->num_disc_nodes) 2180 lpfc_adisc_done(vport); 2181 return; 2182 } 2183 2184 /** 2185 * lpfc_cmpl_els_adisc - Completion callback function for adisc 2186 * @phba: pointer to lpfc hba data structure. 2187 * @cmdiocb: pointer to lpfc command iocb data structure. 2188 * @rspiocb: pointer to lpfc response iocb data structure. 2189 * 2190 * This routine is the completion function for issuing the Address Discover 2191 * (ADISC) command. It first checks to see whether link went down during 2192 * the discovery process. If so, the node will be marked as node port 2193 * recovery for issuing discover IOCB by the link attention handler and 2194 * exit. Otherwise, the response status is checked. If error was reported 2195 * in the response status, the ADISC command shall be retried by invoking 2196 * the lpfc_els_retry() routine. Otherwise, if no error was reported in 2197 * the response status, the state machine is invoked to set transition 2198 * with respect to NLP_EVT_CMPL_ADISC event. 
2199 **/ 2200 static void 2201 lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2202 struct lpfc_iocbq *rspiocb) 2203 { 2204 struct lpfc_vport *vport = cmdiocb->vport; 2205 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2206 IOCB_t *irsp; 2207 struct lpfc_nodelist *ndlp; 2208 int disc; 2209 2210 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2211 cmdiocb->context_un.rsp_iocb = rspiocb; 2212 2213 irsp = &(rspiocb->iocb); 2214 ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 2215 2216 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2217 "ADISC cmpl: status:x%x/x%x did:x%x", 2218 irsp->ulpStatus, irsp->un.ulpWord[4], 2219 ndlp->nlp_DID); 2220 2221 /* Since ndlp can be freed in the disc state machine, note if this node 2222 * is being used during discovery. 2223 */ 2224 spin_lock_irq(shost->host_lock); 2225 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 2226 ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC); 2227 spin_unlock_irq(shost->host_lock); 2228 /* ADISC completes to NPort <nlp_DID> */ 2229 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2230 "0104 ADISC completes to NPort x%x " 2231 "Data: x%x x%x x%x x%x x%x\n", 2232 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 2233 irsp->ulpTimeout, disc, vport->num_disc_nodes); 2234 /* Check to see if link went down during discovery */ 2235 if (lpfc_els_chk_latt(vport)) { 2236 spin_lock_irq(shost->host_lock); 2237 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2238 spin_unlock_irq(shost->host_lock); 2239 goto out; 2240 } 2241 2242 if (irsp->ulpStatus) { 2243 /* Check for retry */ 2244 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2245 /* ELS command is being retried */ 2246 if (disc) { 2247 spin_lock_irq(shost->host_lock); 2248 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2249 spin_unlock_irq(shost->host_lock); 2250 lpfc_set_disctmo(vport); 2251 } 2252 goto out; 2253 } 2254 /* ADISC failed */ 2255 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 2256 "2755 ADISC failure DID:%06X Status:x%x/x%x\n", 2257 ndlp->nlp_DID, irsp->ulpStatus, 2258 irsp->un.ulpWord[4]); 2259 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2260 if (!lpfc_error_lost_link(irsp)) 2261 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2262 NLP_EVT_CMPL_ADISC); 2263 } else 2264 /* Good status, call state machine */ 2265 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2266 NLP_EVT_CMPL_ADISC); 2267 2268 /* Check to see if there are more ADISCs to be sent */ 2269 if (disc && vport->num_disc_nodes) 2270 lpfc_more_adisc(vport); 2271 out: 2272 lpfc_els_free_iocb(phba, cmdiocb); 2273 return; 2274 } 2275 2276 /** 2277 * lpfc_issue_els_adisc - Issue an address discover iocb to an node on a vport 2278 * @vport: pointer to a virtual N_Port data structure. 2279 * @ndlp: pointer to a node-list data structure. 2280 * @retry: number of retries to the command IOCB. 2281 * 2282 * This routine issues an Address Discover (ADISC) for an @ndlp on a 2283 * @vport. It prepares the payload of the ADISC ELS command, updates the 2284 * and states of the ndlp, and invokes the lpfc_sli_issue_iocb() routine 2285 * to issue the ADISC ELS command. 2286 * 2287 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 2288 * will be incremented by 1 for holding the ndlp and the reference to ndlp 2289 * will be stored into the context1 field of the IOCB for the completion 2290 * callback function to the ADISC ELS command. 
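 *
 * For illustration only: during address rediscovery, nodes that still hold
 * a valid login are re-validated with ADISC instead of a fresh PLOGI. A
 * caller such as lpfc_els_disc_adisc() essentially does:
 *
 *   ndlp->nlp_prev_state = ndlp->nlp_state;
 *   lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
 *   lpfc_issue_els_adisc(vport, ndlp, 0);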
2291 * 2292 * Return code 2293 * 0 - successfully issued adisc 2294 * 1 - failed to issue adisc 2295 **/ 2296 int 2297 lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2298 uint8_t retry) 2299 { 2300 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2301 struct lpfc_hba *phba = vport->phba; 2302 ADISC *ap; 2303 IOCB_t *icmd; 2304 struct lpfc_iocbq *elsiocb; 2305 uint8_t *pcmd; 2306 uint16_t cmdsize; 2307 2308 cmdsize = (sizeof(uint32_t) + sizeof(ADISC)); 2309 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2310 ndlp->nlp_DID, ELS_CMD_ADISC); 2311 if (!elsiocb) 2312 return 1; 2313 2314 icmd = &elsiocb->iocb; 2315 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2316 2317 /* For ADISC request, remainder of payload is service parameters */ 2318 *((uint32_t *) (pcmd)) = ELS_CMD_ADISC; 2319 pcmd += sizeof(uint32_t); 2320 2321 /* Fill in ADISC payload */ 2322 ap = (ADISC *) pcmd; 2323 ap->hardAL_PA = phba->fc_pref_ALPA; 2324 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 2325 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 2326 ap->DID = be32_to_cpu(vport->fc_myDID); 2327 2328 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2329 "Issue ADISC: did:x%x", 2330 ndlp->nlp_DID, 0, 0); 2331 2332 phba->fc_stat.elsXmitADISC++; 2333 elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc; 2334 spin_lock_irq(shost->host_lock); 2335 ndlp->nlp_flag |= NLP_ADISC_SND; 2336 spin_unlock_irq(shost->host_lock); 2337 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == 2338 IOCB_ERROR) { 2339 spin_lock_irq(shost->host_lock); 2340 ndlp->nlp_flag &= ~NLP_ADISC_SND; 2341 spin_unlock_irq(shost->host_lock); 2342 lpfc_els_free_iocb(phba, elsiocb); 2343 return 1; 2344 } 2345 return 0; 2346 } 2347 2348 /** 2349 * lpfc_cmpl_els_logo - Completion callback function for logo 2350 * @phba: pointer to lpfc hba data structure. 2351 * @cmdiocb: pointer to lpfc command iocb data structure. 2352 * @rspiocb: pointer to lpfc response iocb data structure. 2353 * 2354 * This routine is the completion function for issuing the ELS Logout (LOGO) 2355 * command. If no error status was reported from the LOGO response, the 2356 * state machine of the associated ndlp shall be invoked for transition with 2357 * respect to NLP_EVT_CMPL_LOGO event. Otherwise, if error status was reported, 2358 * the lpfc_els_retry() routine will be invoked to retry the LOGO command. 
2359 **/ 2360 static void 2361 lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2362 struct lpfc_iocbq *rspiocb) 2363 { 2364 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 2365 struct lpfc_vport *vport = ndlp->vport; 2366 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2367 IOCB_t *irsp; 2368 struct lpfc_sli *psli; 2369 struct lpfcMboxq *mbox; 2370 2371 psli = &phba->sli; 2372 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2373 cmdiocb->context_un.rsp_iocb = rspiocb; 2374 2375 irsp = &(rspiocb->iocb); 2376 spin_lock_irq(shost->host_lock); 2377 ndlp->nlp_flag &= ~NLP_LOGO_SND; 2378 spin_unlock_irq(shost->host_lock); 2379 2380 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2381 "LOGO cmpl: status:x%x/x%x did:x%x", 2382 irsp->ulpStatus, irsp->un.ulpWord[4], 2383 ndlp->nlp_DID); 2384 /* LOGO completes to NPort <nlp_DID> */ 2385 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2386 "0105 LOGO completes to NPort x%x " 2387 "Data: x%x x%x x%x x%x\n", 2388 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 2389 irsp->ulpTimeout, vport->num_disc_nodes); 2390 /* Check to see if link went down during discovery */ 2391 if (lpfc_els_chk_latt(vport)) 2392 goto out; 2393 2394 if (ndlp->nlp_flag & NLP_TARGET_REMOVE) { 2395 /* NLP_EVT_DEVICE_RM should unregister the RPI 2396 * which should abort all outstanding IOs. 2397 */ 2398 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2399 NLP_EVT_DEVICE_RM); 2400 goto out; 2401 } 2402 2403 if (irsp->ulpStatus) { 2404 /* Check for retry */ 2405 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) 2406 /* ELS command is being retried */ 2407 goto out; 2408 /* LOGO failed */ 2409 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 2410 "2756 LOGO failure DID:%06X Status:x%x/x%x\n", 2411 ndlp->nlp_DID, irsp->ulpStatus, 2412 irsp->un.ulpWord[4]); 2413 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2414 if (lpfc_error_lost_link(irsp)) 2415 goto out; 2416 else 2417 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2418 NLP_EVT_CMPL_LOGO); 2419 } else 2420 /* Good status, call state machine. 2421 * This will unregister the rpi if needed. 2422 */ 2423 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2424 NLP_EVT_CMPL_LOGO); 2425 out: 2426 lpfc_els_free_iocb(phba, cmdiocb); 2427 /* If we are in pt2pt mode, we could rcv new S_ID on PLOGI */ 2428 if ((vport->fc_flag & FC_PT2PT) && 2429 !(vport->fc_flag & FC_PT2PT_PLOGI)) { 2430 phba->pport->fc_myDID = 0; 2431 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2432 if (mbox) { 2433 lpfc_config_link(phba, mbox); 2434 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 2435 mbox->vport = vport; 2436 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == 2437 MBX_NOT_FINISHED) { 2438 mempool_free(mbox, phba->mbox_mem_pool); 2439 } 2440 } 2441 } 2442 return; 2443 } 2444 2445 /** 2446 * lpfc_issue_els_logo - Issue a logo to an node on a vport 2447 * @vport: pointer to a virtual N_Port data structure. 2448 * @ndlp: pointer to a node-list data structure. 2449 * @retry: number of retries to the command IOCB. 2450 * 2451 * This routine constructs and issues an ELS Logout (LOGO) iocb command 2452 * to a remote node, referred by an @ndlp on a @vport. It constructs the 2453 * payload of the IOCB, properly sets up the @ndlp state, and invokes the 2454 * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command. 
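 *
 * Illustrative sketch only: the routine returns 0 both when a LOGO was
 * issued and when one is already pending (NLP_LOGO_SND), so repeated calls
 * are harmless. The delayed retry handler in this file uses the shape:
 *
 *   if (!lpfc_issue_els_logo(vport, ndlp, 0)) {
 *           ndlp->nlp_prev_state = ndlp->nlp_state;
 *           lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
 *   }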
2455 * 2456 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 2457 * will be incremented by 1 for holding the ndlp and the reference to ndlp 2458 * will be stored into the context1 field of the IOCB for the completion 2459 * callback function to the LOGO ELS command. 2460 * 2461 * Return code 2462 * 0 - successfully issued logo 2463 * 1 - failed to issue logo 2464 **/ 2465 int 2466 lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2467 uint8_t retry) 2468 { 2469 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2470 struct lpfc_hba *phba = vport->phba; 2471 IOCB_t *icmd; 2472 struct lpfc_iocbq *elsiocb; 2473 uint8_t *pcmd; 2474 uint16_t cmdsize; 2475 int rc; 2476 2477 spin_lock_irq(shost->host_lock); 2478 if (ndlp->nlp_flag & NLP_LOGO_SND) { 2479 spin_unlock_irq(shost->host_lock); 2480 return 0; 2481 } 2482 spin_unlock_irq(shost->host_lock); 2483 2484 cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name); 2485 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2486 ndlp->nlp_DID, ELS_CMD_LOGO); 2487 if (!elsiocb) 2488 return 1; 2489 2490 icmd = &elsiocb->iocb; 2491 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2492 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 2493 pcmd += sizeof(uint32_t); 2494 2495 /* Fill in LOGO payload */ 2496 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 2497 pcmd += sizeof(uint32_t); 2498 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 2499 2500 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2501 "Issue LOGO: did:x%x", 2502 ndlp->nlp_DID, 0, 0); 2503 2504 phba->fc_stat.elsXmitLOGO++; 2505 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo; 2506 spin_lock_irq(shost->host_lock); 2507 ndlp->nlp_flag |= NLP_LOGO_SND; 2508 spin_unlock_irq(shost->host_lock); 2509 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2510 2511 if (rc == IOCB_ERROR) { 2512 spin_lock_irq(shost->host_lock); 2513 ndlp->nlp_flag &= ~NLP_LOGO_SND; 2514 spin_unlock_irq(shost->host_lock); 2515 lpfc_els_free_iocb(phba, elsiocb); 2516 return 1; 2517 } 2518 return 0; 2519 } 2520 2521 /** 2522 * lpfc_cmpl_els_cmd - Completion callback function for generic els command 2523 * @phba: pointer to lpfc hba data structure. 2524 * @cmdiocb: pointer to lpfc command iocb data structure. 2525 * @rspiocb: pointer to lpfc response iocb data structure. 2526 * 2527 * This routine is a generic completion callback function for ELS commands. 2528 * Specifically, it is the callback function which does not need to perform 2529 * any command specific operations. It is currently used by the ELS command 2530 * issuing routines for the ELS State Change Request (SCR), 2531 * lpfc_issue_els_scr(), and the ELS Fibre Channel Address Resolution 2532 * Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). Other than 2533 * certain debug loggings, this callback function simply invokes the 2534 * lpfc_els_chk_latt() routine to check whether link went down during the 2535 * discovery process. 
2536 **/ 2537 static void 2538 lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2539 struct lpfc_iocbq *rspiocb) 2540 { 2541 struct lpfc_vport *vport = cmdiocb->vport; 2542 IOCB_t *irsp; 2543 2544 irsp = &rspiocb->iocb; 2545 2546 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2547 "ELS cmd cmpl: status:x%x/x%x did:x%x", 2548 irsp->ulpStatus, irsp->un.ulpWord[4], 2549 irsp->un.elsreq64.remoteID); 2550 /* ELS cmd tag <ulpIoTag> completes */ 2551 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2552 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n", 2553 irsp->ulpIoTag, irsp->ulpStatus, 2554 irsp->un.ulpWord[4], irsp->ulpTimeout); 2555 /* Check to see if link went down during discovery */ 2556 lpfc_els_chk_latt(vport); 2557 lpfc_els_free_iocb(phba, cmdiocb); 2558 return; 2559 } 2560 2561 /** 2562 * lpfc_issue_els_scr - Issue a scr to a node on a vport 2563 * @vport: pointer to a host virtual N_Port data structure. 2564 * @nportid: N_Port identifier to the remote node. 2565 * @retry: number of retries to the command IOCB. 2566 * 2567 * This routine issues a State Change Request (SCR) to a fabric node 2568 * on a @vport. The remote node @nportid is passed into the function. It 2569 * first searches the @vport node list to find the matching ndlp. If no such 2570 * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An 2571 * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb() 2572 * routine is invoked to send the SCR IOCB. 2573 * 2574 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 2575 * will be incremented by 1 for holding the ndlp and the reference to ndlp 2576 * will be stored into the context1 field of the IOCB for the completion 2577 * callback function to the SCR ELS command.
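 *
 * For illustration only: @nportid is normally the well-known Fabric
 * Controller address (SCR_DID in the driver headers), so registering for
 * RSCN events once the fabric login has completed is roughly a single
 * call; a nonzero return only means that no SCR was sent:
 *
 *   lpfc_issue_els_scr(vport, SCR_DID, 0);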
2578 * 2579 * Return code 2580 * 0 - Successfully issued scr command 2581 * 1 - Failed to issue scr command 2582 **/ 2583 int 2584 lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) 2585 { 2586 struct lpfc_hba *phba = vport->phba; 2587 IOCB_t *icmd; 2588 struct lpfc_iocbq *elsiocb; 2589 struct lpfc_sli *psli; 2590 uint8_t *pcmd; 2591 uint16_t cmdsize; 2592 struct lpfc_nodelist *ndlp; 2593 2594 psli = &phba->sli; 2595 cmdsize = (sizeof(uint32_t) + sizeof(SCR)); 2596 2597 ndlp = lpfc_findnode_did(vport, nportid); 2598 if (!ndlp) { 2599 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 2600 if (!ndlp) 2601 return 1; 2602 lpfc_nlp_init(vport, ndlp, nportid); 2603 lpfc_enqueue_node(vport, ndlp); 2604 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 2605 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 2606 if (!ndlp) 2607 return 1; 2608 } 2609 2610 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2611 ndlp->nlp_DID, ELS_CMD_SCR); 2612 2613 if (!elsiocb) { 2614 /* This will trigger the release of the node just 2615 * allocated 2616 */ 2617 lpfc_nlp_put(ndlp); 2618 return 1; 2619 } 2620 2621 icmd = &elsiocb->iocb; 2622 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2623 2624 *((uint32_t *) (pcmd)) = ELS_CMD_SCR; 2625 pcmd += sizeof(uint32_t); 2626 2627 /* For SCR, remainder of payload is SCR parameter page */ 2628 memset(pcmd, 0, sizeof(SCR)); 2629 ((SCR *) pcmd)->Function = SCR_FUNC_FULL; 2630 2631 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2632 "Issue SCR: did:x%x", 2633 ndlp->nlp_DID, 0, 0); 2634 2635 phba->fc_stat.elsXmitSCR++; 2636 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; 2637 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == 2638 IOCB_ERROR) { 2639 /* The additional lpfc_nlp_put will cause the following 2640 * lpfc_els_free_iocb routine to trigger the release of 2641 * the node. 2642 */ 2643 lpfc_nlp_put(ndlp); 2644 lpfc_els_free_iocb(phba, elsiocb); 2645 return 1; 2646 } 2647 /* This will cause the callback-function lpfc_cmpl_els_cmd to 2648 * trigger the release of the node. 2649 */ 2650 lpfc_nlp_put(ndlp); 2651 return 0; 2652 } 2653 2654 /** 2655 * lpfc_issue_els_farpr - Issue a farp to a node on a vport 2656 * @vport: pointer to a host virtual N_Port data structure. 2657 * @nportid: N_Port identifier to the remote node. 2658 * @retry: number of retries to the command IOCB. 2659 * 2660 * This routine issues a Fibre Channel Address Resolution Response 2661 * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid) 2662 * is passed into the function. It first searches the @vport node list to find 2663 * the matching ndlp. If no such ndlp is found, a new ndlp shall be created 2664 * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the 2665 * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command. 2666 * 2667 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 2668 * will be incremented by 1 for holding the ndlp and the reference to ndlp 2669 * will be stored into the context1 field of the IOCB for the completion 2670 * callback function to the FARPR ELS command.
2671 * 2672 * Return code 2673 * 0 - Successfully issued farpr command 2674 * 1 - Failed to issue farpr command 2675 **/ 2676 static int 2677 lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) 2678 { 2679 struct lpfc_hba *phba = vport->phba; 2680 IOCB_t *icmd; 2681 struct lpfc_iocbq *elsiocb; 2682 struct lpfc_sli *psli; 2683 FARP *fp; 2684 uint8_t *pcmd; 2685 uint32_t *lp; 2686 uint16_t cmdsize; 2687 struct lpfc_nodelist *ondlp; 2688 struct lpfc_nodelist *ndlp; 2689 2690 psli = &phba->sli; 2691 cmdsize = (sizeof(uint32_t) + sizeof(FARP)); 2692 2693 ndlp = lpfc_findnode_did(vport, nportid); 2694 if (!ndlp) { 2695 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 2696 if (!ndlp) 2697 return 1; 2698 lpfc_nlp_init(vport, ndlp, nportid); 2699 lpfc_enqueue_node(vport, ndlp); 2700 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 2701 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 2702 if (!ndlp) 2703 return 1; 2704 } 2705 2706 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2707 ndlp->nlp_DID, ELS_CMD_RNID); 2708 if (!elsiocb) { 2709 /* This will trigger the release of the node just 2710 * allocated 2711 */ 2712 lpfc_nlp_put(ndlp); 2713 return 1; 2714 } 2715 2716 icmd = &elsiocb->iocb; 2717 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2718 2719 *((uint32_t *) (pcmd)) = ELS_CMD_FARPR; 2720 pcmd += sizeof(uint32_t); 2721 2722 /* Fill in FARPR payload */ 2723 fp = (FARP *) (pcmd); 2724 memset(fp, 0, sizeof(FARP)); 2725 lp = (uint32_t *) pcmd; 2726 *lp++ = be32_to_cpu(nportid); 2727 *lp++ = be32_to_cpu(vport->fc_myDID); 2728 fp->Rflags = 0; 2729 fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE); 2730 2731 memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name)); 2732 memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 2733 ondlp = lpfc_findnode_did(vport, nportid); 2734 if (ondlp && NLP_CHK_NODE_ACT(ondlp)) { 2735 memcpy(&fp->OportName, &ondlp->nlp_portname, 2736 sizeof(struct lpfc_name)); 2737 memcpy(&fp->OnodeName, &ondlp->nlp_nodename, 2738 sizeof(struct lpfc_name)); 2739 } 2740 2741 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2742 "Issue FARPR: did:x%x", 2743 ndlp->nlp_DID, 0, 0); 2744 2745 phba->fc_stat.elsXmitFARPR++; 2746 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; 2747 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == 2748 IOCB_ERROR) { 2749 /* The additional lpfc_nlp_put will cause the following 2750 * lpfc_els_free_iocb routine to trigger the release of 2751 * the node. 2752 */ 2753 lpfc_nlp_put(ndlp); 2754 lpfc_els_free_iocb(phba, elsiocb); 2755 return 1; 2756 } 2757 /* This will cause the callback-function lpfc_cmpl_els_cmd to 2758 * trigger the release of the node. 2759 */ 2760 lpfc_nlp_put(ndlp); 2761 return 0; 2762 } 2763 2764 /** 2765 * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry 2766 * @vport: pointer to a host virtual N_Port data structure. 2767 * @nlp: pointer to a node-list data structure. 2768 * 2769 * This routine cancels the timer with a delayed IOCB-command retry for 2770 * a @vport's @ndlp. It stops the timer for the delayed function retrial and 2771 * removes the ELS retry event if it presents. In addition, if the 2772 * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB 2773 * commands are sent for the @vport's nodes that require issuing discovery 2774 * ADISC. 
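 *
 * A minimal usage sketch (the NLP_DELAY_TMO guard mirrors the check made
 * at the top of the routine itself, so calling it unconditionally is also
 * safe):
 *
 *   if (ndlp->nlp_flag & NLP_DELAY_TMO)
 *           lpfc_cancel_retry_delay_tmo(vport, ndlp);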
2775 **/ 2776 void 2777 lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) 2778 { 2779 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2780 struct lpfc_work_evt *evtp; 2781 2782 if (!(nlp->nlp_flag & NLP_DELAY_TMO)) 2783 return; 2784 spin_lock_irq(shost->host_lock); 2785 nlp->nlp_flag &= ~NLP_DELAY_TMO; 2786 spin_unlock_irq(shost->host_lock); 2787 del_timer_sync(&nlp->nlp_delayfunc); 2788 nlp->nlp_last_elscmd = 0; 2789 if (!list_empty(&nlp->els_retry_evt.evt_listp)) { 2790 list_del_init(&nlp->els_retry_evt.evt_listp); 2791 /* Decrement nlp reference count held for the delayed retry */ 2792 evtp = &nlp->els_retry_evt; 2793 lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1); 2794 } 2795 if (nlp->nlp_flag & NLP_NPR_2B_DISC) { 2796 spin_lock_irq(shost->host_lock); 2797 nlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2798 spin_unlock_irq(shost->host_lock); 2799 if (vport->num_disc_nodes) { 2800 if (vport->port_state < LPFC_VPORT_READY) { 2801 /* Check if there are more ADISCs to be sent */ 2802 lpfc_more_adisc(vport); 2803 } else { 2804 /* Check if there are more PLOGIs to be sent */ 2805 lpfc_more_plogi(vport); 2806 if (vport->num_disc_nodes == 0) { 2807 spin_lock_irq(shost->host_lock); 2808 vport->fc_flag &= ~FC_NDISC_ACTIVE; 2809 spin_unlock_irq(shost->host_lock); 2810 lpfc_can_disctmo(vport); 2811 lpfc_end_rscn(vport); 2812 } 2813 } 2814 } 2815 } 2816 return; 2817 } 2818 2819 /** 2820 * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer 2821 * @ptr: holder for the pointer to the timer function associated data (ndlp). 2822 * 2823 * This routine is invoked by the ndlp delayed-function timer to check 2824 * whether there is any pending ELS retry event(s) with the node. If not, it 2825 * simply returns. Otherwise, if there is at least one ELS delayed event, it 2826 * adds the delayed events to the HBA work list and invokes the 2827 * lpfc_worker_wake_up() routine to wake up worker thread to process the 2828 * event. Note that lpfc_nlp_get() is called before posting the event to 2829 * the work list to hold reference count of ndlp so that it guarantees the 2830 * reference to ndlp will still be available when the worker thread gets 2831 * to the event associated with the ndlp. 2832 **/ 2833 void 2834 lpfc_els_retry_delay(unsigned long ptr) 2835 { 2836 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr; 2837 struct lpfc_vport *vport = ndlp->vport; 2838 struct lpfc_hba *phba = vport->phba; 2839 unsigned long flags; 2840 struct lpfc_work_evt *evtp = &ndlp->els_retry_evt; 2841 2842 spin_lock_irqsave(&phba->hbalock, flags); 2843 if (!list_empty(&evtp->evt_listp)) { 2844 spin_unlock_irqrestore(&phba->hbalock, flags); 2845 return; 2846 } 2847 2848 /* We need to hold the node by incrementing the reference 2849 * count until the queued work is done 2850 */ 2851 evtp->evt_arg1 = lpfc_nlp_get(ndlp); 2852 if (evtp->evt_arg1) { 2853 evtp->evt = LPFC_EVT_ELS_RETRY; 2854 list_add_tail(&evtp->evt_listp, &phba->work_list); 2855 lpfc_worker_wake_up(phba); 2856 } 2857 spin_unlock_irqrestore(&phba->hbalock, flags); 2858 return; 2859 } 2860 2861 /** 2862 * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function 2863 * @ndlp: pointer to a node-list data structure. 2864 * 2865 * This routine is the worker-thread handler for processing the @ndlp delayed 2866 * event(s), posted by the lpfc_els_retry_delay() routine. 
It simply retrieves 2867 * the last ELS command from the associated ndlp and invokes the proper ELS 2868 * function according to the delayed ELS command to retry the command. 2869 **/ 2870 void 2871 lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) 2872 { 2873 struct lpfc_vport *vport = ndlp->vport; 2874 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2875 uint32_t cmd, did, retry; 2876 2877 spin_lock_irq(shost->host_lock); 2878 did = ndlp->nlp_DID; 2879 cmd = ndlp->nlp_last_elscmd; 2880 ndlp->nlp_last_elscmd = 0; 2881 2882 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { 2883 spin_unlock_irq(shost->host_lock); 2884 return; 2885 } 2886 2887 ndlp->nlp_flag &= ~NLP_DELAY_TMO; 2888 spin_unlock_irq(shost->host_lock); 2889 /* 2890 * If a discovery event readded nlp_delayfunc after timer 2891 * firing and before processing the timer, cancel the 2892 * nlp_delayfunc. 2893 */ 2894 del_timer_sync(&ndlp->nlp_delayfunc); 2895 retry = ndlp->nlp_retry; 2896 ndlp->nlp_retry = 0; 2897 2898 switch (cmd) { 2899 case ELS_CMD_FLOGI: 2900 lpfc_issue_els_flogi(vport, ndlp, retry); 2901 break; 2902 case ELS_CMD_PLOGI: 2903 if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) { 2904 ndlp->nlp_prev_state = ndlp->nlp_state; 2905 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 2906 } 2907 break; 2908 case ELS_CMD_ADISC: 2909 if (!lpfc_issue_els_adisc(vport, ndlp, retry)) { 2910 ndlp->nlp_prev_state = ndlp->nlp_state; 2911 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 2912 } 2913 break; 2914 case ELS_CMD_PRLI: 2915 if (!lpfc_issue_els_prli(vport, ndlp, retry)) { 2916 ndlp->nlp_prev_state = ndlp->nlp_state; 2917 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 2918 } 2919 break; 2920 case ELS_CMD_LOGO: 2921 if (!lpfc_issue_els_logo(vport, ndlp, retry)) { 2922 ndlp->nlp_prev_state = ndlp->nlp_state; 2923 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 2924 } 2925 break; 2926 case ELS_CMD_FDISC: 2927 if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)) 2928 lpfc_issue_els_fdisc(vport, ndlp, retry); 2929 break; 2930 } 2931 return; 2932 } 2933 2934 /** 2935 * lpfc_els_retry - Make retry decision on an els command iocb 2936 * @phba: pointer to lpfc hba data structure. 2937 * @cmdiocb: pointer to lpfc command iocb data structure. 2938 * @rspiocb: pointer to lpfc response iocb data structure. 2939 * 2940 * This routine makes a retry decision on an ELS command IOCB, which has 2941 * failed. The following ELS IOCBs use this function for retrying the command 2942 * when a previously issued command responded with an error status: FLOGI, PLOGI, 2943 * PRLI, ADISC, LOGO, and FDISC. Based on the ELS command type and the 2944 * returned error status, it makes the decision whether a retry shall be 2945 * issued for the command, and whether a retry shall be made immediately or 2946 * delayed. In the former case, the corresponding ELS command issuing-function 2947 * is called to retry the command. In the latter case, the ELS command shall 2948 * be posted to the ndlp delayed event and delayed function timer set to the 2949 * ndlp for the delayed command issuing.
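 *
 * As a concrete illustration of the policy implemented below (values are
 * taken from this routine and may differ between driver versions):
 *
 *   FLOGI  - retried indefinitely on non-loop topologies; no delay for the
 *            first 32 attempts, 1 second up to attempt 100, 5 seconds after
 *   FDISC  - retried once per second, up to cfg_devloss_tmo attempts
 *   PLOGI  - on LS_RJT "command in progress", retried once per second with
 *            a cap of 48 attempts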
2950 * 2951 * Return code 2952 * 0 - No retry of els command is made 2953 * 1 - Immediate or delayed retry of els command is made 2954 **/ 2955 static int 2956 lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2957 struct lpfc_iocbq *rspiocb) 2958 { 2959 struct lpfc_vport *vport = cmdiocb->vport; 2960 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2961 IOCB_t *irsp = &rspiocb->iocb; 2962 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 2963 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 2964 uint32_t *elscmd; 2965 struct ls_rjt stat; 2966 int retry = 0, maxretry = lpfc_max_els_tries, delay = 0; 2967 int logerr = 0; 2968 uint32_t cmd = 0; 2969 uint32_t did; 2970 2971 2972 /* Note: context2 may be 0 for internal driver abort 2973 * of delays ELS command. 2974 */ 2975 2976 if (pcmd && pcmd->virt) { 2977 elscmd = (uint32_t *) (pcmd->virt); 2978 cmd = *elscmd++; 2979 } 2980 2981 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) 2982 did = ndlp->nlp_DID; 2983 else { 2984 /* We should only hit this case for retrying PLOGI */ 2985 did = irsp->un.elsreq64.remoteID; 2986 ndlp = lpfc_findnode_did(vport, did); 2987 if ((!ndlp || !NLP_CHK_NODE_ACT(ndlp)) 2988 && (cmd != ELS_CMD_PLOGI)) 2989 return 1; 2990 } 2991 2992 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2993 "Retry ELS: wd7:x%x wd4:x%x did:x%x", 2994 *(((uint32_t *) irsp) + 7), irsp->un.ulpWord[4], ndlp->nlp_DID); 2995 2996 switch (irsp->ulpStatus) { 2997 case IOSTAT_FCP_RSP_ERROR: 2998 break; 2999 case IOSTAT_REMOTE_STOP: 3000 if (phba->sli_rev == LPFC_SLI_REV4) { 3001 /* This IO was aborted by the target, we don't 3002 * know the rxid and because we did not send the 3003 * ABTS we cannot generate and RRQ. 3004 */ 3005 lpfc_set_rrq_active(phba, ndlp, 3006 cmdiocb->sli4_lxritag, 0, 0); 3007 } 3008 break; 3009 case IOSTAT_LOCAL_REJECT: 3010 switch ((irsp->un.ulpWord[4] & 0xff)) { 3011 case IOERR_LOOP_OPEN_FAILURE: 3012 if (cmd == ELS_CMD_FLOGI) { 3013 if (PCI_DEVICE_ID_HORNET == 3014 phba->pcidev->device) { 3015 phba->fc_topology = LPFC_TOPOLOGY_LOOP; 3016 phba->pport->fc_myDID = 0; 3017 phba->alpa_map[0] = 0; 3018 phba->alpa_map[1] = 0; 3019 } 3020 } 3021 if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0) 3022 delay = 1000; 3023 retry = 1; 3024 break; 3025 3026 case IOERR_ILLEGAL_COMMAND: 3027 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 3028 "0124 Retry illegal cmd x%x " 3029 "retry:x%x delay:x%x\n", 3030 cmd, cmdiocb->retry, delay); 3031 retry = 1; 3032 /* All command's retry policy */ 3033 maxretry = 8; 3034 if (cmdiocb->retry > 2) 3035 delay = 1000; 3036 break; 3037 3038 case IOERR_NO_RESOURCES: 3039 logerr = 1; /* HBA out of resources */ 3040 retry = 1; 3041 if (cmdiocb->retry > 100) 3042 delay = 100; 3043 maxretry = 250; 3044 break; 3045 3046 case IOERR_ILLEGAL_FRAME: 3047 delay = 100; 3048 retry = 1; 3049 break; 3050 3051 case IOERR_SEQUENCE_TIMEOUT: 3052 case IOERR_INVALID_RPI: 3053 retry = 1; 3054 break; 3055 } 3056 break; 3057 3058 case IOSTAT_NPORT_RJT: 3059 case IOSTAT_FABRIC_RJT: 3060 if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) { 3061 retry = 1; 3062 break; 3063 } 3064 break; 3065 3066 case IOSTAT_NPORT_BSY: 3067 case IOSTAT_FABRIC_BSY: 3068 logerr = 1; /* Fabric / Remote NPort out of resources */ 3069 retry = 1; 3070 break; 3071 3072 case IOSTAT_LS_RJT: 3073 stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]); 3074 /* Added for Vendor specifc support 3075 * Just keep retrying for these Rsn / Exp codes 3076 */ 3077 switch (stat.un.b.lsRjtRsnCode) { 3078 case 
LSRJT_UNABLE_TPC: 3079 if (stat.un.b.lsRjtRsnCodeExp == 3080 LSEXP_CMD_IN_PROGRESS) { 3081 if (cmd == ELS_CMD_PLOGI) { 3082 delay = 1000; 3083 maxretry = 48; 3084 } 3085 retry = 1; 3086 break; 3087 } 3088 if (stat.un.b.lsRjtRsnCodeExp == 3089 LSEXP_CANT_GIVE_DATA) { 3090 if (cmd == ELS_CMD_PLOGI) { 3091 delay = 1000; 3092 maxretry = 48; 3093 } 3094 retry = 1; 3095 break; 3096 } 3097 if (cmd == ELS_CMD_PLOGI) { 3098 delay = 1000; 3099 maxretry = lpfc_max_els_tries + 1; 3100 retry = 1; 3101 break; 3102 } 3103 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 3104 (cmd == ELS_CMD_FDISC) && 3105 (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){ 3106 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 3107 "0125 FDISC Failed (x%x). " 3108 "Fabric out of resources\n", 3109 stat.un.lsRjtError); 3110 lpfc_vport_set_state(vport, 3111 FC_VPORT_NO_FABRIC_RSCS); 3112 } 3113 break; 3114 3115 case LSRJT_LOGICAL_BSY: 3116 if ((cmd == ELS_CMD_PLOGI) || 3117 (cmd == ELS_CMD_PRLI)) { 3118 delay = 1000; 3119 maxretry = 48; 3120 } else if (cmd == ELS_CMD_FDISC) { 3121 /* FDISC retry policy */ 3122 maxretry = 48; 3123 if (cmdiocb->retry >= 32) 3124 delay = 1000; 3125 } 3126 retry = 1; 3127 break; 3128 3129 case LSRJT_LOGICAL_ERR: 3130 /* There are some cases where switches return this 3131 * error when they are not ready and should be returning 3132 * Logical Busy. We should delay every time. 3133 */ 3134 if (cmd == ELS_CMD_FDISC && 3135 stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) { 3136 maxretry = 3; 3137 delay = 1000; 3138 retry = 1; 3139 break; 3140 } 3141 case LSRJT_PROTOCOL_ERR: 3142 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 3143 (cmd == ELS_CMD_FDISC) && 3144 ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) || 3145 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID)) 3146 ) { 3147 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 3148 "0122 FDISC Failed (x%x). 
" 3149 "Fabric Detected Bad WWN\n", 3150 stat.un.lsRjtError); 3151 lpfc_vport_set_state(vport, 3152 FC_VPORT_FABRIC_REJ_WWN); 3153 } 3154 break; 3155 } 3156 break; 3157 3158 case IOSTAT_INTERMED_RSP: 3159 case IOSTAT_BA_RJT: 3160 break; 3161 3162 default: 3163 break; 3164 } 3165 3166 if (did == FDMI_DID) 3167 retry = 1; 3168 3169 if ((cmd == ELS_CMD_FLOGI) && 3170 (phba->fc_topology != LPFC_TOPOLOGY_LOOP) && 3171 !lpfc_error_lost_link(irsp)) { 3172 /* FLOGI retry policy */ 3173 retry = 1; 3174 /* retry FLOGI forever */ 3175 maxretry = 0; 3176 if (cmdiocb->retry >= 100) 3177 delay = 5000; 3178 else if (cmdiocb->retry >= 32) 3179 delay = 1000; 3180 } else if ((cmd == ELS_CMD_FDISC) && !lpfc_error_lost_link(irsp)) { 3181 /* retry FDISCs every second up to devloss */ 3182 retry = 1; 3183 maxretry = vport->cfg_devloss_tmo; 3184 delay = 1000; 3185 } 3186 3187 cmdiocb->retry++; 3188 if (maxretry && (cmdiocb->retry >= maxretry)) { 3189 phba->fc_stat.elsRetryExceeded++; 3190 retry = 0; 3191 } 3192 3193 if ((vport->load_flag & FC_UNLOADING) != 0) 3194 retry = 0; 3195 3196 if (retry) { 3197 if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) { 3198 /* Stop retrying PLOGI and FDISC if in FCF discovery */ 3199 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 3200 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3201 "2849 Stop retry ELS command " 3202 "x%x to remote NPORT x%x, " 3203 "Data: x%x x%x\n", cmd, did, 3204 cmdiocb->retry, delay); 3205 return 0; 3206 } 3207 } 3208 3209 /* Retry ELS command <elsCmd> to remote NPORT <did> */ 3210 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3211 "0107 Retry ELS command x%x to remote " 3212 "NPORT x%x Data: x%x x%x\n", 3213 cmd, did, cmdiocb->retry, delay); 3214 3215 if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) && 3216 ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) || 3217 ((irsp->un.ulpWord[4] & 0xff) != IOERR_NO_RESOURCES))) { 3218 /* Don't reset timer for no resources */ 3219 3220 /* If discovery / RSCN timer is running, reset it */ 3221 if (timer_pending(&vport->fc_disctmo) || 3222 (vport->fc_flag & FC_RSCN_MODE)) 3223 lpfc_set_disctmo(vport); 3224 } 3225 3226 phba->fc_stat.elsXmitRetry++; 3227 if (ndlp && NLP_CHK_NODE_ACT(ndlp) && delay) { 3228 phba->fc_stat.elsDelayRetry++; 3229 ndlp->nlp_retry = cmdiocb->retry; 3230 3231 /* delay is specified in milliseconds */ 3232 mod_timer(&ndlp->nlp_delayfunc, 3233 jiffies + msecs_to_jiffies(delay)); 3234 spin_lock_irq(shost->host_lock); 3235 ndlp->nlp_flag |= NLP_DELAY_TMO; 3236 spin_unlock_irq(shost->host_lock); 3237 3238 ndlp->nlp_prev_state = ndlp->nlp_state; 3239 if (cmd == ELS_CMD_PRLI) 3240 lpfc_nlp_set_state(vport, ndlp, 3241 NLP_STE_REG_LOGIN_ISSUE); 3242 else 3243 lpfc_nlp_set_state(vport, ndlp, 3244 NLP_STE_NPR_NODE); 3245 ndlp->nlp_last_elscmd = cmd; 3246 3247 return 1; 3248 } 3249 switch (cmd) { 3250 case ELS_CMD_FLOGI: 3251 lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry); 3252 return 1; 3253 case ELS_CMD_FDISC: 3254 lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry); 3255 return 1; 3256 case ELS_CMD_PLOGI: 3257 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { 3258 ndlp->nlp_prev_state = ndlp->nlp_state; 3259 lpfc_nlp_set_state(vport, ndlp, 3260 NLP_STE_PLOGI_ISSUE); 3261 } 3262 lpfc_issue_els_plogi(vport, did, cmdiocb->retry); 3263 return 1; 3264 case ELS_CMD_ADISC: 3265 ndlp->nlp_prev_state = ndlp->nlp_state; 3266 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 3267 lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry); 3268 return 1; 3269 case ELS_CMD_PRLI: 3270 ndlp->nlp_prev_state = ndlp->nlp_state; 3271 
lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 3272 lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry); 3273 return 1; 3274 case ELS_CMD_LOGO: 3275 ndlp->nlp_prev_state = ndlp->nlp_state; 3276 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 3277 lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry); 3278 return 1; 3279 } 3280 } 3281 /* No retry ELS command <elsCmd> to remote NPORT <did> */ 3282 if (logerr) { 3283 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 3284 "0137 No retry ELS command x%x to remote " 3285 "NPORT x%x: Out of Resources: Error:x%x/%x\n", 3286 cmd, did, irsp->ulpStatus, 3287 irsp->un.ulpWord[4]); 3288 } 3289 else { 3290 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3291 "0108 No retry ELS command x%x to remote " 3292 "NPORT x%x Retried:%d Error:x%x/%x\n", 3293 cmd, did, cmdiocb->retry, irsp->ulpStatus, 3294 irsp->un.ulpWord[4]); 3295 } 3296 return 0; 3297 } 3298 3299 /** 3300 * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb 3301 * @phba: pointer to lpfc hba data structure. 3302 * @buf_ptr1: pointer to the lpfc DMA buffer data structure. 3303 * 3304 * This routine releases the lpfc DMA (Direct Memory Access) buffer(s) 3305 * associated with a command IOCB back to the lpfc DMA buffer pool. It first 3306 * checks to see whether there is a lpfc DMA buffer associated with the 3307 * response of the command IOCB. If so, it will be released before releasing 3308 * the lpfc DMA buffer associated with the IOCB itself. 3309 * 3310 * Return code 3311 * 0 - Successfully released lpfc DMA buffer (currently, always return 0) 3312 **/ 3313 static int 3314 lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1) 3315 { 3316 struct lpfc_dmabuf *buf_ptr; 3317 3318 /* Free the response before processing the command. */ 3319 if (!list_empty(&buf_ptr1->list)) { 3320 list_remove_head(&buf_ptr1->list, buf_ptr, 3321 struct lpfc_dmabuf, 3322 list); 3323 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 3324 kfree(buf_ptr); 3325 } 3326 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys); 3327 kfree(buf_ptr1); 3328 return 0; 3329 } 3330 3331 /** 3332 * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl 3333 * @phba: pointer to lpfc hba data structure. 3334 * @buf_ptr: pointer to the lpfc dma buffer data structure. 3335 * 3336 * This routine releases the lpfc Direct Memory Access (DMA) buffer 3337 * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer 3338 * pool. 3339 * 3340 * Return code 3341 * 0 - Successfully released lpfc DMA buffer (currently, always return 0) 3342 **/ 3343 static int 3344 lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr) 3345 { 3346 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 3347 kfree(buf_ptr); 3348 return 0; 3349 } 3350 3351 /** 3352 * lpfc_els_free_iocb - Free a command iocb and its associated resources 3353 * @phba: pointer to lpfc hba data structure. 3354 * @elsiocb: pointer to lpfc els command iocb data structure. 3355 * 3356 * This routine frees a command IOCB and its associated resources. The 3357 * command IOCB data structure contains the reference to various associated 3358 * resources, these fields must be set to NULL if the associated reference 3359 * not present: 3360 * context1 - reference to ndlp 3361 * context2 - reference to cmd 3362 * context2->next - reference to rsp 3363 * context3 - reference to bpl 3364 * 3365 * It first properly decrements the reference count held on ndlp for the 3366 * IOCB completion callback function. 
If LPFC_DELAY_MEM_FREE flag is not 3367 * set, it invokes the lpfc_els_free_data() routine to release the Direct 3368 * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it 3369 * adds the DMA buffer the @phba data structure for the delayed release. 3370 * If reference to the Buffer Pointer List (BPL) is present, the 3371 * lpfc_els_free_bpl() routine is invoked to release the DMA memory 3372 * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is 3373 * invoked to release the IOCB data structure back to @phba IOCBQ list. 3374 * 3375 * Return code 3376 * 0 - Success (currently, always return 0) 3377 **/ 3378 int 3379 lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb) 3380 { 3381 struct lpfc_dmabuf *buf_ptr, *buf_ptr1; 3382 struct lpfc_nodelist *ndlp; 3383 3384 ndlp = (struct lpfc_nodelist *)elsiocb->context1; 3385 if (ndlp) { 3386 if (ndlp->nlp_flag & NLP_DEFER_RM) { 3387 lpfc_nlp_put(ndlp); 3388 3389 /* If the ndlp is not being used by another discovery 3390 * thread, free it. 3391 */ 3392 if (!lpfc_nlp_not_used(ndlp)) { 3393 /* If ndlp is being used by another discovery 3394 * thread, just clear NLP_DEFER_RM 3395 */ 3396 ndlp->nlp_flag &= ~NLP_DEFER_RM; 3397 } 3398 } 3399 else 3400 lpfc_nlp_put(ndlp); 3401 elsiocb->context1 = NULL; 3402 } 3403 /* context2 = cmd, context2->next = rsp, context3 = bpl */ 3404 if (elsiocb->context2) { 3405 if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) { 3406 /* Firmware could still be in progress of DMAing 3407 * payload, so don't free data buffer till after 3408 * a hbeat. 3409 */ 3410 elsiocb->iocb_flag &= ~LPFC_DELAY_MEM_FREE; 3411 buf_ptr = elsiocb->context2; 3412 elsiocb->context2 = NULL; 3413 if (buf_ptr) { 3414 buf_ptr1 = NULL; 3415 spin_lock_irq(&phba->hbalock); 3416 if (!list_empty(&buf_ptr->list)) { 3417 list_remove_head(&buf_ptr->list, 3418 buf_ptr1, struct lpfc_dmabuf, 3419 list); 3420 INIT_LIST_HEAD(&buf_ptr1->list); 3421 list_add_tail(&buf_ptr1->list, 3422 &phba->elsbuf); 3423 phba->elsbuf_cnt++; 3424 } 3425 INIT_LIST_HEAD(&buf_ptr->list); 3426 list_add_tail(&buf_ptr->list, &phba->elsbuf); 3427 phba->elsbuf_cnt++; 3428 spin_unlock_irq(&phba->hbalock); 3429 } 3430 } else { 3431 buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2; 3432 lpfc_els_free_data(phba, buf_ptr1); 3433 } 3434 } 3435 3436 if (elsiocb->context3) { 3437 buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3; 3438 lpfc_els_free_bpl(phba, buf_ptr); 3439 } 3440 lpfc_sli_release_iocbq(phba, elsiocb); 3441 return 0; 3442 } 3443 3444 /** 3445 * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response 3446 * @phba: pointer to lpfc hba data structure. 3447 * @cmdiocb: pointer to lpfc command iocb data structure. 3448 * @rspiocb: pointer to lpfc response iocb data structure. 3449 * 3450 * This routine is the completion callback function to the Logout (LOGO) 3451 * Accept (ACC) Response ELS command. This routine is invoked to indicate 3452 * the completion of the LOGO process. It invokes the lpfc_nlp_not_used() to 3453 * release the ndlp if it has the last reference remaining (reference count 3454 * is 1). If succeeded (meaning ndlp released), it sets the IOCB context1 3455 * field to NULL to inform the following lpfc_els_free_iocb() routine no 3456 * ndlp reference count needs to be decremented. Otherwise, the ndlp 3457 * reference use-count shall be decremented by the lpfc_els_free_iocb() 3458 * routine. Finally, the lpfc_els_free_iocb() is invoked to release the 3459 * IOCB data structure. 
3460 **/ 3461 static void 3462 lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3463 struct lpfc_iocbq *rspiocb) 3464 { 3465 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 3466 struct lpfc_vport *vport = cmdiocb->vport; 3467 IOCB_t *irsp; 3468 3469 irsp = &rspiocb->iocb; 3470 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 3471 "ACC LOGO cmpl: status:x%x/x%x did:x%x", 3472 irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID); 3473 /* ACC to LOGO completes to NPort <nlp_DID> */ 3474 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3475 "0109 ACC to LOGO completes to NPort x%x " 3476 "Data: x%x x%x x%x\n", 3477 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 3478 ndlp->nlp_rpi); 3479 3480 if (ndlp->nlp_state == NLP_STE_NPR_NODE) { 3481 /* NPort Recovery mode or node is just allocated */ 3482 if (!lpfc_nlp_not_used(ndlp)) { 3483 /* If the ndlp is being used by another discovery 3484 * thread, just unregister the RPI. 3485 */ 3486 lpfc_unreg_rpi(vport, ndlp); 3487 } else { 3488 /* Indicate the node has already released, should 3489 * not reference to it from within lpfc_els_free_iocb. 3490 */ 3491 cmdiocb->context1 = NULL; 3492 } 3493 } 3494 3495 /* 3496 * The driver received a LOGO from the rport and has ACK'd it. 3497 * At this point, the driver is done so release the IOCB 3498 */ 3499 lpfc_els_free_iocb(phba, cmdiocb); 3500 3501 /* 3502 * Remove the ndlp reference if it's a fabric node that has 3503 * sent us an unsolicted LOGO. 3504 */ 3505 if (ndlp->nlp_type & NLP_FABRIC) 3506 lpfc_nlp_put(ndlp); 3507 3508 return; 3509 } 3510 3511 /** 3512 * lpfc_mbx_cmpl_dflt_rpi - Completion callbk func for unreg dflt rpi mbox cmd 3513 * @phba: pointer to lpfc hba data structure. 3514 * @pmb: pointer to the driver internal queue element for mailbox command. 3515 * 3516 * This routine is the completion callback function for unregister default 3517 * RPI (Remote Port Index) mailbox command to the @phba. It simply releases 3518 * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and 3519 * decrements the ndlp reference count held for this completion callback 3520 * function. After that, it invokes the lpfc_nlp_not_used() to check 3521 * whether there is only one reference left on the ndlp. If so, it will 3522 * perform one more decrement and trigger the release of the ndlp. 3523 **/ 3524 void 3525 lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 3526 { 3527 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 3528 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 3529 3530 pmb->context1 = NULL; 3531 pmb->context2 = NULL; 3532 3533 lpfc_mbuf_free(phba, mp->virt, mp->phys); 3534 kfree(mp); 3535 mempool_free(pmb, phba->mbox_mem_pool); 3536 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { 3537 lpfc_nlp_put(ndlp); 3538 /* This is the end of the default RPI cleanup logic for this 3539 * ndlp. If no other discovery threads are using this ndlp. 3540 * we should free all resources associated with it. 3541 */ 3542 lpfc_nlp_not_used(ndlp); 3543 } 3544 3545 return; 3546 } 3547 3548 /** 3549 * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd 3550 * @phba: pointer to lpfc hba data structure. 3551 * @cmdiocb: pointer to lpfc command iocb data structure. 3552 * @rspiocb: pointer to lpfc response iocb data structure. 3553 * 3554 * This routine is the completion callback function for ELS Response IOCB 3555 * command. 
In the normal case, this callback function just properly sets the
3556  * nlp_flag bitmap in the ndlp data structure. If the mbox command reference
3557  * field in the command IOCB is not NULL, the referred mailbox command will
3558  * be sent out, and then the lpfc_els_free_iocb() routine is invoked to release
3559  * the IOCB. Under error conditions, such as when an LS_RJT is returned or a
3560  * link down event occurred during the discovery, the lpfc_nlp_not_used()
3561  * routine shall be invoked to try to release the ndlp if no other threads
3562  * are currently referring to it.
3563  **/
3564 static void
3565 lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3566 		  struct lpfc_iocbq *rspiocb)
3567 {
3568 	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
3569 	struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
3570 	struct Scsi_Host *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
3571 	IOCB_t *irsp;
3572 	uint8_t *pcmd;
3573 	LPFC_MBOXQ_t *mbox = NULL;
3574 	struct lpfc_dmabuf *mp = NULL;
3575 	uint32_t ls_rjt = 0;
3576 
3577 	irsp = &rspiocb->iocb;
3578 
3579 	if (cmdiocb->context_un.mbox)
3580 		mbox = cmdiocb->context_un.mbox;
3581 
3582 	/* First determine if this is a LS_RJT cmpl. Note, this callback
3583 	 * function can have cmdiocb->context1 (ndlp) field set to NULL.
3584 	 */
3585 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
3586 	if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
3587 	    (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
3588 		/* A LS_RJT associated with Default RPI cleanup has its own
3589 		 * separate code path.
3590 		 */
3591 		if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI))
3592 			ls_rjt = 1;
3593 	}
3594 
3595 	/* Check to see if link went down during discovery */
3596 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || lpfc_els_chk_latt(vport)) {
3597 		if (mbox) {
3598 			mp = (struct lpfc_dmabuf *) mbox->context1;
3599 			if (mp) {
3600 				lpfc_mbuf_free(phba, mp->virt, mp->phys);
3601 				kfree(mp);
3602 			}
3603 			mempool_free(mbox, phba->mbox_mem_pool);
3604 		}
3605 		if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
3606 		    (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
3607 			if (lpfc_nlp_not_used(ndlp)) {
3608 				ndlp = NULL;
3609 				/* Indicate the node has already been released,
3610 				 * should not reference to it from within
3611 				 * the routine lpfc_els_free_iocb.
3612 				 */
3613 				cmdiocb->context1 = NULL;
3614 			}
3615 		goto out;
3616 	}
3617 
3618 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3619 		"ELS rsp cmpl: status:x%x/x%x did:x%x",
3620 		irsp->ulpStatus, irsp->un.ulpWord[4],
3621 		cmdiocb->iocb.un.elsreq64.remoteID);
3622 	/* ELS response tag <ulpIoTag> completes */
3623 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3624 			 "0110 ELS response tag x%x completes "
3625 			 "Data: x%x x%x x%x x%x x%x x%x x%x\n",
3626 			 cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
3627 			 rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout,
3628 			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3629 			 ndlp->nlp_rpi);
3630 	if (mbox) {
3631 		if ((rspiocb->iocb.ulpStatus == 0)
3632 		    && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
3633 			lpfc_unreg_rpi(vport, ndlp);
3634 			/* Increment reference count to ndlp to hold the
3635 			 * reference to ndlp for the callback function.
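			 * If the mailbox cannot be issued below, this
			 * reference is dropped again with lpfc_nlp_put().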
3636 */ 3637 mbox->context2 = lpfc_nlp_get(ndlp); 3638 mbox->vport = vport; 3639 if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) { 3640 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG; 3641 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 3642 } 3643 else { 3644 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; 3645 ndlp->nlp_prev_state = ndlp->nlp_state; 3646 lpfc_nlp_set_state(vport, ndlp, 3647 NLP_STE_REG_LOGIN_ISSUE); 3648 } 3649 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 3650 != MBX_NOT_FINISHED) 3651 goto out; 3652 else 3653 /* Decrement the ndlp reference count we 3654 * set for this failed mailbox command. 3655 */ 3656 lpfc_nlp_put(ndlp); 3657 3658 /* ELS rsp: Cannot issue reg_login for <NPortid> */ 3659 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 3660 "0138 ELS rsp: Cannot issue reg_login for x%x " 3661 "Data: x%x x%x x%x\n", 3662 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 3663 ndlp->nlp_rpi); 3664 3665 if (lpfc_nlp_not_used(ndlp)) { 3666 ndlp = NULL; 3667 /* Indicate node has already been released, 3668 * should not reference to it from within 3669 * the routine lpfc_els_free_iocb. 3670 */ 3671 cmdiocb->context1 = NULL; 3672 } 3673 } else { 3674 /* Do not drop node for lpfc_els_abort'ed ELS cmds */ 3675 if (!lpfc_error_lost_link(irsp) && 3676 ndlp->nlp_flag & NLP_ACC_REGLOGIN) { 3677 if (lpfc_nlp_not_used(ndlp)) { 3678 ndlp = NULL; 3679 /* Indicate node has already been 3680 * released, should not reference 3681 * to it from within the routine 3682 * lpfc_els_free_iocb. 3683 */ 3684 cmdiocb->context1 = NULL; 3685 } 3686 } 3687 } 3688 mp = (struct lpfc_dmabuf *) mbox->context1; 3689 if (mp) { 3690 lpfc_mbuf_free(phba, mp->virt, mp->phys); 3691 kfree(mp); 3692 } 3693 mempool_free(mbox, phba->mbox_mem_pool); 3694 } 3695 out: 3696 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { 3697 spin_lock_irq(shost->host_lock); 3698 ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI); 3699 spin_unlock_irq(shost->host_lock); 3700 3701 /* If the node is not being used by another discovery thread, 3702 * and we are sending a reject, we are done with it. 3703 * Release driver reference count here and free associated 3704 * resources. 3705 */ 3706 if (ls_rjt) 3707 if (lpfc_nlp_not_used(ndlp)) 3708 /* Indicate node has already been released, 3709 * should not reference to it from within 3710 * the routine lpfc_els_free_iocb. 3711 */ 3712 cmdiocb->context1 = NULL; 3713 } 3714 3715 lpfc_els_free_iocb(phba, cmdiocb); 3716 return; 3717 } 3718 3719 /** 3720 * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command 3721 * @vport: pointer to a host virtual N_Port data structure. 3722 * @flag: the els command code to be accepted. 3723 * @oldiocb: pointer to the original lpfc command iocb data structure. 3724 * @ndlp: pointer to a node-list data structure. 3725 * @mbox: pointer to the driver internal queue element for mailbox command. 3726 * 3727 * This routine prepares and issues an Accept (ACC) response IOCB 3728 * command. It uses the @flag to properly set up the IOCB field for the 3729 * specific ACC response command to be issued and invokes the 3730 * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a 3731 * @mbox pointer is passed in, it will be put into the context_un.mbox 3732 * field of the IOCB for the completion callback function to issue the 3733 * mailbox command to the HBA later when callback is invoked. 
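 *
 * As an illustration of the calling convention, the unsolicited FLOGI
 * handler later in this file answers a point-to-point FLOGI with a
 * PLOGI-format ACC carrying the local service parameters and no mailbox:
 *
 *	lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);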
3734 * 3735 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 3736 * will be incremented by 1 for holding the ndlp and the reference to ndlp 3737 * will be stored into the context1 field of the IOCB for the completion 3738 * callback function to the corresponding response ELS IOCB command. 3739 * 3740 * Return code 3741 * 0 - Successfully issued acc response 3742 * 1 - Failed to issue acc response 3743 **/ 3744 int 3745 lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, 3746 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 3747 LPFC_MBOXQ_t *mbox) 3748 { 3749 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3750 struct lpfc_hba *phba = vport->phba; 3751 IOCB_t *icmd; 3752 IOCB_t *oldcmd; 3753 struct lpfc_iocbq *elsiocb; 3754 struct lpfc_sli *psli; 3755 uint8_t *pcmd; 3756 uint16_t cmdsize; 3757 int rc; 3758 ELS_PKT *els_pkt_ptr; 3759 3760 psli = &phba->sli; 3761 oldcmd = &oldiocb->iocb; 3762 3763 switch (flag) { 3764 case ELS_CMD_ACC: 3765 cmdsize = sizeof(uint32_t); 3766 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 3767 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 3768 if (!elsiocb) { 3769 spin_lock_irq(shost->host_lock); 3770 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 3771 spin_unlock_irq(shost->host_lock); 3772 return 1; 3773 } 3774 3775 icmd = &elsiocb->iocb; 3776 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 3777 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 3778 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3779 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 3780 pcmd += sizeof(uint32_t); 3781 3782 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 3783 "Issue ACC: did:x%x flg:x%x", 3784 ndlp->nlp_DID, ndlp->nlp_flag, 0); 3785 break; 3786 case ELS_CMD_PLOGI: 3787 cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t)); 3788 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 3789 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 3790 if (!elsiocb) 3791 return 1; 3792 3793 icmd = &elsiocb->iocb; 3794 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 3795 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 3796 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3797 3798 if (mbox) 3799 elsiocb->context_un.mbox = mbox; 3800 3801 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 3802 pcmd += sizeof(uint32_t); 3803 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); 3804 3805 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 3806 "Issue ACC PLOGI: did:x%x flg:x%x", 3807 ndlp->nlp_DID, ndlp->nlp_flag, 0); 3808 break; 3809 case ELS_CMD_PRLO: 3810 cmdsize = sizeof(uint32_t) + sizeof(PRLO); 3811 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 3812 ndlp, ndlp->nlp_DID, ELS_CMD_PRLO); 3813 if (!elsiocb) 3814 return 1; 3815 3816 icmd = &elsiocb->iocb; 3817 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 3818 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 3819 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3820 3821 memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt, 3822 sizeof(uint32_t) + sizeof(PRLO)); 3823 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC; 3824 els_pkt_ptr = (ELS_PKT *) pcmd; 3825 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED; 3826 3827 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 3828 "Issue ACC PRLO: did:x%x flg:x%x", 3829 ndlp->nlp_DID, ndlp->nlp_flag, 0); 3830 break; 3831 default: 3832 return 1; 3833 } 3834 /* Xmit ELS ACC response tag <ulpIoTag> */ 3835 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3836 "0128 Xmit ELS 
ACC response tag x%x, XRI: x%x, "
3837 			 "DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x "
3838 			 "fc_flag x%x\n",
3839 			 elsiocb->iotag, elsiocb->iocb.ulpContext,
3840 			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3841 			 ndlp->nlp_rpi, vport->fc_flag);
3842 	if (ndlp->nlp_flag & NLP_LOGO_ACC) {
3843 		spin_lock_irq(shost->host_lock);
3844 		ndlp->nlp_flag &= ~NLP_LOGO_ACC;
3845 		spin_unlock_irq(shost->host_lock);
3846 		elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc;
3847 	} else {
3848 		elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3849 	}
3850 
3851 	phba->fc_stat.elsXmitACC++;
3852 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3853 	if (rc == IOCB_ERROR) {
3854 		lpfc_els_free_iocb(phba, elsiocb);
3855 		return 1;
3856 	}
3857 	return 0;
3858 }
3859 
3860 /**
3861  * lpfc_els_rsp_reject - Prepare and issue a rjt response iocb command
3862  * @vport: pointer to a virtual N_Port data structure.
3863  * @rejectError: the LS_RJT payload word (reason and explanation codes) to return.
3864  * @oldiocb: pointer to the original lpfc command iocb data structure.
3865  * @ndlp: pointer to a node-list data structure.
3866  * @mbox: pointer to the driver internal queue element for mailbox command.
3867  *
3868  * This routine prepares and issues a Reject (RJT) response IOCB
3869  * command. If a @mbox pointer is passed in, it will be put into the
3870  * context_un.mbox field of the IOCB for the completion callback function
3871  * to issue to the HBA later.
3872  *
3873  * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3874  * will be incremented by 1 for holding the ndlp and the reference to ndlp
3875  * will be stored into the context1 field of the IOCB for the completion
3876  * callback function to the reject response ELS IOCB command.
3877  *
3878  * Return code
3879  * 0 - Successfully issued reject response
3880  * 1 - Failed to issue reject response
3881  **/
3882 int
3883 lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
3884 		    struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
3885 		    LPFC_MBOXQ_t *mbox)
3886 {
3887 	struct lpfc_hba *phba = vport->phba;
3888 	IOCB_t *icmd;
3889 	IOCB_t *oldcmd;
3890 	struct lpfc_iocbq *elsiocb;
3891 	struct lpfc_sli *psli;
3892 	uint8_t *pcmd;
3893 	uint16_t cmdsize;
3894 	int rc;
3895 
3896 	psli = &phba->sli;
3897 	cmdsize = 2 * sizeof(uint32_t);
3898 	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
3899 				     ndlp->nlp_DID, ELS_CMD_LS_RJT);
3900 	if (!elsiocb)
3901 		return 1;
3902 
3903 	icmd = &elsiocb->iocb;
3904 	oldcmd = &oldiocb->iocb;
3905 	icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
3906 	icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
3907 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3908 
3909 	*((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
3910 	pcmd += sizeof(uint32_t);
3911 	*((uint32_t *) (pcmd)) = rejectError;
3912 
3913 	if (mbox)
3914 		elsiocb->context_un.mbox = mbox;
3915 
3916 	/* Xmit ELS RJT <err> response tag <ulpIoTag> */
3917 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3918 			 "0129 Xmit ELS RJT x%x response tag x%x "
3919 			 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
3920 			 "rpi x%x\n",
3921 			 rejectError, elsiocb->iotag,
3922 			 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
3923 			 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
3924 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3925 		"Issue LS_RJT: did:x%x flg:x%x err:x%x",
3926 		ndlp->nlp_DID, ndlp->nlp_flag, rejectError);
3927 
3928 	phba->fc_stat.elsXmitLSRJT++;
3929 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3930 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3931 
3932 	if (rc == IOCB_ERROR) {
3933
lpfc_els_free_iocb(phba, elsiocb); 3934 return 1; 3935 } 3936 return 0; 3937 } 3938 3939 /** 3940 * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd 3941 * @vport: pointer to a virtual N_Port data structure. 3942 * @oldiocb: pointer to the original lpfc command iocb data structure. 3943 * @ndlp: pointer to a node-list data structure. 3944 * 3945 * This routine prepares and issues an Accept (ACC) response to Address 3946 * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB 3947 * and invokes the lpfc_sli_issue_iocb() routine to send out the command. 3948 * 3949 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 3950 * will be incremented by 1 for holding the ndlp and the reference to ndlp 3951 * will be stored into the context1 field of the IOCB for the completion 3952 * callback function to the ADISC Accept response ELS IOCB command. 3953 * 3954 * Return code 3955 * 0 - Successfully issued acc adisc response 3956 * 1 - Failed to issue adisc acc response 3957 **/ 3958 int 3959 lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 3960 struct lpfc_nodelist *ndlp) 3961 { 3962 struct lpfc_hba *phba = vport->phba; 3963 ADISC *ap; 3964 IOCB_t *icmd, *oldcmd; 3965 struct lpfc_iocbq *elsiocb; 3966 uint8_t *pcmd; 3967 uint16_t cmdsize; 3968 int rc; 3969 3970 cmdsize = sizeof(uint32_t) + sizeof(ADISC); 3971 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 3972 ndlp->nlp_DID, ELS_CMD_ACC); 3973 if (!elsiocb) 3974 return 1; 3975 3976 icmd = &elsiocb->iocb; 3977 oldcmd = &oldiocb->iocb; 3978 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 3979 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 3980 3981 /* Xmit ADISC ACC response tag <ulpIoTag> */ 3982 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3983 "0130 Xmit ADISC ACC response iotag x%x xri: " 3984 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n", 3985 elsiocb->iotag, elsiocb->iocb.ulpContext, 3986 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 3987 ndlp->nlp_rpi); 3988 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3989 3990 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 3991 pcmd += sizeof(uint32_t); 3992 3993 ap = (ADISC *) (pcmd); 3994 ap->hardAL_PA = phba->fc_pref_ALPA; 3995 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 3996 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 3997 ap->DID = be32_to_cpu(vport->fc_myDID); 3998 3999 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4000 "Issue ACC ADISC: did:x%x flg:x%x", 4001 ndlp->nlp_DID, ndlp->nlp_flag, 0); 4002 4003 phba->fc_stat.elsXmitACC++; 4004 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 4005 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 4006 if (rc == IOCB_ERROR) { 4007 lpfc_els_free_iocb(phba, elsiocb); 4008 return 1; 4009 } 4010 return 0; 4011 } 4012 4013 /** 4014 * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd 4015 * @vport: pointer to a virtual N_Port data structure. 4016 * @oldiocb: pointer to the original lpfc command iocb data structure. 4017 * @ndlp: pointer to a node-list data structure. 4018 * 4019 * This routine prepares and issues an Accept (ACC) response to Process 4020 * Login (PRLI) ELS command. It simply prepares the payload of the IOCB 4021 * and invokes the lpfc_sli_issue_iocb() routine to send out the command. 
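 *
 * A caller that has just accepted a PRLI from a remote port would typically
 * respond along these lines (illustrative only; the actual call sites live
 * in the discovery state machine):
 *
 *	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);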
4022 * 4023 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 4024 * will be incremented by 1 for holding the ndlp and the reference to ndlp 4025 * will be stored into the context1 field of the IOCB for the completion 4026 * callback function to the PRLI Accept response ELS IOCB command. 4027 * 4028 * Return code 4029 * 0 - Successfully issued acc prli response 4030 * 1 - Failed to issue acc prli response 4031 **/ 4032 int 4033 lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 4034 struct lpfc_nodelist *ndlp) 4035 { 4036 struct lpfc_hba *phba = vport->phba; 4037 PRLI *npr; 4038 lpfc_vpd_t *vpd; 4039 IOCB_t *icmd; 4040 IOCB_t *oldcmd; 4041 struct lpfc_iocbq *elsiocb; 4042 struct lpfc_sli *psli; 4043 uint8_t *pcmd; 4044 uint16_t cmdsize; 4045 int rc; 4046 4047 psli = &phba->sli; 4048 4049 cmdsize = sizeof(uint32_t) + sizeof(PRLI); 4050 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 4051 ndlp->nlp_DID, (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK))); 4052 if (!elsiocb) 4053 return 1; 4054 4055 icmd = &elsiocb->iocb; 4056 oldcmd = &oldiocb->iocb; 4057 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 4058 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 4059 4060 /* Xmit PRLI ACC response tag <ulpIoTag> */ 4061 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4062 "0131 Xmit PRLI ACC response tag x%x xri x%x, " 4063 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 4064 elsiocb->iotag, elsiocb->iocb.ulpContext, 4065 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 4066 ndlp->nlp_rpi); 4067 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 4068 4069 *((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)); 4070 pcmd += sizeof(uint32_t); 4071 4072 /* For PRLI, remainder of payload is PRLI parameter page */ 4073 memset(pcmd, 0, sizeof(PRLI)); 4074 4075 npr = (PRLI *) pcmd; 4076 vpd = &phba->vpd; 4077 /* 4078 * If the remote port is a target and our firmware version is 3.20 or 4079 * later, set the following bits for FC-TAPE support. 4080 */ 4081 if ((ndlp->nlp_type & NLP_FCP_TARGET) && 4082 (vpd->rev.feaLevelHigh >= 0x02)) { 4083 npr->ConfmComplAllowed = 1; 4084 npr->Retry = 1; 4085 npr->TaskRetryIdReq = 1; 4086 } 4087 4088 npr->acceptRspCode = PRLI_REQ_EXECUTED; 4089 npr->estabImagePair = 1; 4090 npr->readXferRdyDis = 1; 4091 npr->ConfmComplAllowed = 1; 4092 4093 npr->prliType = PRLI_FCP_TYPE; 4094 npr->initiatorFunc = 1; 4095 4096 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4097 "Issue ACC PRLI: did:x%x flg:x%x", 4098 ndlp->nlp_DID, ndlp->nlp_flag, 0); 4099 4100 phba->fc_stat.elsXmitACC++; 4101 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 4102 4103 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 4104 if (rc == IOCB_ERROR) { 4105 lpfc_els_free_iocb(phba, elsiocb); 4106 return 1; 4107 } 4108 return 0; 4109 } 4110 4111 /** 4112 * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command 4113 * @vport: pointer to a virtual N_Port data structure. 4114 * @format: rnid command format. 4115 * @oldiocb: pointer to the original lpfc command iocb data structure. 4116 * @ndlp: pointer to a node-list data structure. 4117 * 4118 * This routine issues a Request Node Identification Data (RNID) Accept 4119 * (ACC) response. It constructs the RNID ACC response command according to 4120 * the proper @format and then calls the lpfc_sli_issue_iocb() routine to 4121 * issue the response. Note that this command does not need to hold the ndlp 4122 * reference count for the callback. 
So, the ndlp reference count taken by 4123 * the lpfc_prep_els_iocb() routine is put back and the context1 field of 4124 * IOCB is set to NULL to indicate to the lpfc_els_free_iocb() routine that 4125 * there is no ndlp reference available. 4126 * 4127 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 4128 * will be incremented by 1 for holding the ndlp and the reference to ndlp 4129 * will be stored into the context1 field of the IOCB for the completion 4130 * callback function. However, for the RNID Accept Response ELS command, 4131 * this is undone later by this routine after the IOCB is allocated. 4132 * 4133 * Return code 4134 * 0 - Successfully issued acc rnid response 4135 * 1 - Failed to issue acc rnid response 4136 **/ 4137 static int 4138 lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, 4139 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 4140 { 4141 struct lpfc_hba *phba = vport->phba; 4142 RNID *rn; 4143 IOCB_t *icmd, *oldcmd; 4144 struct lpfc_iocbq *elsiocb; 4145 struct lpfc_sli *psli; 4146 uint8_t *pcmd; 4147 uint16_t cmdsize; 4148 int rc; 4149 4150 psli = &phba->sli; 4151 cmdsize = sizeof(uint32_t) + sizeof(uint32_t) 4152 + (2 * sizeof(struct lpfc_name)); 4153 if (format) 4154 cmdsize += sizeof(RNID_TOP_DISC); 4155 4156 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 4157 ndlp->nlp_DID, ELS_CMD_ACC); 4158 if (!elsiocb) 4159 return 1; 4160 4161 icmd = &elsiocb->iocb; 4162 oldcmd = &oldiocb->iocb; 4163 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 4164 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 4165 4166 /* Xmit RNID ACC response tag <ulpIoTag> */ 4167 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4168 "0132 Xmit RNID ACC response tag x%x xri x%x\n", 4169 elsiocb->iotag, elsiocb->iocb.ulpContext); 4170 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 4171 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 4172 pcmd += sizeof(uint32_t); 4173 4174 memset(pcmd, 0, sizeof(RNID)); 4175 rn = (RNID *) (pcmd); 4176 rn->Format = format; 4177 rn->CommonLen = (2 * sizeof(struct lpfc_name)); 4178 memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 4179 memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 4180 switch (format) { 4181 case 0: 4182 rn->SpecificLen = 0; 4183 break; 4184 case RNID_TOPOLOGY_DISC: 4185 rn->SpecificLen = sizeof(RNID_TOP_DISC); 4186 memcpy(&rn->un.topologyDisc.portName, 4187 &vport->fc_portname, sizeof(struct lpfc_name)); 4188 rn->un.topologyDisc.unitType = RNID_HBA; 4189 rn->un.topologyDisc.physPort = 0; 4190 rn->un.topologyDisc.attachedNodes = 0; 4191 break; 4192 default: 4193 rn->CommonLen = 0; 4194 rn->SpecificLen = 0; 4195 break; 4196 } 4197 4198 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4199 "Issue ACC RNID: did:x%x flg:x%x", 4200 ndlp->nlp_DID, ndlp->nlp_flag, 0); 4201 4202 phba->fc_stat.elsXmitACC++; 4203 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 4204 4205 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 4206 if (rc == IOCB_ERROR) { 4207 lpfc_els_free_iocb(phba, elsiocb); 4208 return 1; 4209 } 4210 return 0; 4211 } 4212 4213 /** 4214 * lpfc_els_clear_rrq - Clear the rq that this rrq describes. 4215 * @vport: pointer to a virtual N_Port data structure. 4216 * @iocb: pointer to the lpfc command iocb data structure. 4217 * @ndlp: pointer to a node-list data structure. 
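 *
 * This routine parses the RRQ payload of the unsolicited RRQ, selects the
 * originator exchange (OX_ID) when the S_ID carried in the RRQ matches this
 * vport's own port ID and the responder exchange (RX_ID) otherwise, then
 * looks up the matching active RRQ for that exchange and clears it.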
4218 * 4219 * Return 4220 **/ 4221 static void 4222 lpfc_els_clear_rrq(struct lpfc_vport *vport, 4223 struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp) 4224 { 4225 struct lpfc_hba *phba = vport->phba; 4226 uint8_t *pcmd; 4227 struct RRQ *rrq; 4228 uint16_t rxid; 4229 uint16_t xri; 4230 struct lpfc_node_rrq *prrq; 4231 4232 4233 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) iocb->context2)->virt); 4234 pcmd += sizeof(uint32_t); 4235 rrq = (struct RRQ *)pcmd; 4236 rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg); 4237 rxid = bf_get(rrq_rxid, rrq); 4238 4239 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4240 "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x" 4241 " x%x x%x\n", 4242 be32_to_cpu(bf_get(rrq_did, rrq)), 4243 bf_get(rrq_oxid, rrq), 4244 rxid, 4245 iocb->iotag, iocb->iocb.ulpContext); 4246 4247 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4248 "Clear RRQ: did:x%x flg:x%x exchg:x%.08x", 4249 ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg); 4250 if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq))) 4251 xri = bf_get(rrq_oxid, rrq); 4252 else 4253 xri = rxid; 4254 prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID); 4255 if (prrq) 4256 lpfc_clr_rrq_active(phba, xri, prrq); 4257 return; 4258 } 4259 4260 /** 4261 * lpfc_els_rsp_echo_acc - Issue echo acc response 4262 * @vport: pointer to a virtual N_Port data structure. 4263 * @data: pointer to echo data to return in the accept. 4264 * @oldiocb: pointer to the original lpfc command iocb data structure. 4265 * @ndlp: pointer to a node-list data structure. 4266 * 4267 * Return code 4268 * 0 - Successfully issued acc echo response 4269 * 1 - Failed to issue acc echo response 4270 **/ 4271 static int 4272 lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data, 4273 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 4274 { 4275 struct lpfc_hba *phba = vport->phba; 4276 struct lpfc_iocbq *elsiocb; 4277 struct lpfc_sli *psli; 4278 uint8_t *pcmd; 4279 uint16_t cmdsize; 4280 int rc; 4281 4282 psli = &phba->sli; 4283 cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len; 4284 4285 /* The accumulated length can exceed the BPL_SIZE. For 4286 * now, use this as the limit 4287 */ 4288 if (cmdsize > LPFC_BPL_SIZE) 4289 cmdsize = LPFC_BPL_SIZE; 4290 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 4291 ndlp->nlp_DID, ELS_CMD_ACC); 4292 if (!elsiocb) 4293 return 1; 4294 4295 elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext; /* Xri / rx_id */ 4296 elsiocb->iocb.unsli3.rcvsli3.ox_id = oldiocb->iocb.unsli3.rcvsli3.ox_id; 4297 4298 /* Xmit ECHO ACC response tag <ulpIoTag> */ 4299 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4300 "2876 Xmit ECHO ACC response tag x%x xri x%x\n", 4301 elsiocb->iotag, elsiocb->iocb.ulpContext); 4302 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 4303 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 4304 pcmd += sizeof(uint32_t); 4305 memcpy(pcmd, data, cmdsize - sizeof(uint32_t)); 4306 4307 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4308 "Issue ACC ECHO: did:x%x flg:x%x", 4309 ndlp->nlp_DID, ndlp->nlp_flag, 0); 4310 4311 phba->fc_stat.elsXmitACC++; 4312 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 4313 4314 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 4315 if (rc == IOCB_ERROR) { 4316 lpfc_els_free_iocb(phba, elsiocb); 4317 return 1; 4318 } 4319 return 0; 4320 } 4321 4322 /** 4323 * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport 4324 * @vport: pointer to a host virtual N_Port data structure. 
4325  *
4326  * This routine issues Address Discover (ADISC) ELS commands to those
4327  * N_Ports of the @vport which are in node port recovery state and for which
4328  * ADISC has not yet been issued. Each time an ELS ADISC IOCB is issued by
4329  * invoking the lpfc_issue_els_adisc() routine, the per-@vport discovery
4330  * count (num_disc_nodes) shall be incremented. If num_disc_nodes reaches
4331  * the pre-configured threshold (cfg_discovery_threads), the FC_NLP_MORE bit
4332  * is set in the @vport fc_flag and issuing of the remaining ADISC
4333  * IOCBs stops, to be picked up later. On the other hand, if no ADISC IOCB
4334  * is issued after walking through all the ndlps of the @vport, the
4335  * FC_NLP_MORE bit is cleared in the @vport fc_flag, indicating there are
4336  * no more ADISCs that need to be sent.
4337  *
4338  * Return code
4339  * The number of N_Ports with adisc issued.
4340  **/
4341 int
4342 lpfc_els_disc_adisc(struct lpfc_vport *vport)
4343 {
4344 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4345 	struct lpfc_nodelist *ndlp, *next_ndlp;
4346 	int sentadisc = 0;
4347 
4348 	/* go thru NPR nodes and issue any remaining ELS ADISCs */
4349 	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
4350 		if (!NLP_CHK_NODE_ACT(ndlp))
4351 			continue;
4352 		if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
4353 		    (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
4354 		    (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) {
4355 			spin_lock_irq(shost->host_lock);
4356 			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
4357 			spin_unlock_irq(shost->host_lock);
4358 			ndlp->nlp_prev_state = ndlp->nlp_state;
4359 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
4360 			lpfc_issue_els_adisc(vport, ndlp, 0);
4361 			sentadisc++;
4362 			vport->num_disc_nodes++;
4363 			if (vport->num_disc_nodes >=
4364 			    vport->cfg_discovery_threads) {
4365 				spin_lock_irq(shost->host_lock);
4366 				vport->fc_flag |= FC_NLP_MORE;
4367 				spin_unlock_irq(shost->host_lock);
4368 				break;
4369 			}
4370 		}
4371 	}
4372 	if (sentadisc == 0) {
4373 		spin_lock_irq(shost->host_lock);
4374 		vport->fc_flag &= ~FC_NLP_MORE;
4375 		spin_unlock_irq(shost->host_lock);
4376 	}
4377 	return sentadisc;
4378 }
4379 
4380 /**
4381  * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc
4382  * @vport: pointer to a host virtual N_Port data structure.
4383  *
4384  * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports
4385  * of a @vport which are in node port recovery state. Each time an ELS
4386  * PLOGI IOCB is issued by invoking the lpfc_issue_els_plogi() routine,
4387  * the per-@vport discovery count (num_disc_nodes) shall be
4388  * incremented. If num_disc_nodes reaches the pre-configured threshold
4389  * (cfg_discovery_threads), the FC_NLP_MORE bit is set in the @vport fc_flag
4390  * and issuing of the remaining PLOGI IOCBs stops, to be picked
4391  * up later. On the other hand, if no PLOGI IOCB is issued after walking
4392  * through all the ndlps of the @vport, the FC_NLP_MORE bit is cleared in
4393  * the @vport fc_flag, indicating there are no more
4394  * PLOGIs that need to be sent.
4395  *
4396  * Return code
4397  * The number of N_Ports with plogi issued.
4398 **/ 4399 int 4400 lpfc_els_disc_plogi(struct lpfc_vport *vport) 4401 { 4402 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4403 struct lpfc_nodelist *ndlp, *next_ndlp; 4404 int sentplogi = 0; 4405 4406 /* go thru NPR nodes and issue any remaining ELS PLOGIs */ 4407 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 4408 if (!NLP_CHK_NODE_ACT(ndlp)) 4409 continue; 4410 if (ndlp->nlp_state == NLP_STE_NPR_NODE && 4411 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && 4412 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 && 4413 (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) { 4414 ndlp->nlp_prev_state = ndlp->nlp_state; 4415 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 4416 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 4417 sentplogi++; 4418 vport->num_disc_nodes++; 4419 if (vport->num_disc_nodes >= 4420 vport->cfg_discovery_threads) { 4421 spin_lock_irq(shost->host_lock); 4422 vport->fc_flag |= FC_NLP_MORE; 4423 spin_unlock_irq(shost->host_lock); 4424 break; 4425 } 4426 } 4427 } 4428 if (sentplogi) { 4429 lpfc_set_disctmo(vport); 4430 } 4431 else { 4432 spin_lock_irq(shost->host_lock); 4433 vport->fc_flag &= ~FC_NLP_MORE; 4434 spin_unlock_irq(shost->host_lock); 4435 } 4436 return sentplogi; 4437 } 4438 4439 /** 4440 * lpfc_els_flush_rscn - Clean up any rscn activities with a vport 4441 * @vport: pointer to a host virtual N_Port data structure. 4442 * 4443 * This routine cleans up any Registration State Change Notification 4444 * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the 4445 * @vport together with the host_lock is used to prevent multiple thread 4446 * trying to access the RSCN array on a same @vport at the same time. 4447 **/ 4448 void 4449 lpfc_els_flush_rscn(struct lpfc_vport *vport) 4450 { 4451 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4452 struct lpfc_hba *phba = vport->phba; 4453 int i; 4454 4455 spin_lock_irq(shost->host_lock); 4456 if (vport->fc_rscn_flush) { 4457 /* Another thread is walking fc_rscn_id_list on this vport */ 4458 spin_unlock_irq(shost->host_lock); 4459 return; 4460 } 4461 /* Indicate we are walking lpfc_els_flush_rscn on this vport */ 4462 vport->fc_rscn_flush = 1; 4463 spin_unlock_irq(shost->host_lock); 4464 4465 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 4466 lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]); 4467 vport->fc_rscn_id_list[i] = NULL; 4468 } 4469 spin_lock_irq(shost->host_lock); 4470 vport->fc_rscn_id_cnt = 0; 4471 vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY); 4472 spin_unlock_irq(shost->host_lock); 4473 lpfc_can_disctmo(vport); 4474 /* Indicate we are done walking this fc_rscn_id_list */ 4475 vport->fc_rscn_flush = 0; 4476 } 4477 4478 /** 4479 * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did 4480 * @vport: pointer to a host virtual N_Port data structure. 4481 * @did: remote destination port identifier. 4482 * 4483 * This routine checks whether there is any pending Registration State 4484 * Configuration Notification (RSCN) to a @did on @vport. 
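 *
 * The comparison honours the RSCN address format of each entry: a port
 * address must match on domain, area and ID; an area address matches on
 * domain and area; a domain address matches on domain only; and the fabric
 * address format matches any D_ID.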
4485 * 4486 * Return code 4487 * None zero - The @did matched with a pending rscn 4488 * 0 - not able to match @did with a pending rscn 4489 **/ 4490 int 4491 lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did) 4492 { 4493 D_ID ns_did; 4494 D_ID rscn_did; 4495 uint32_t *lp; 4496 uint32_t payload_len, i; 4497 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4498 4499 ns_did.un.word = did; 4500 4501 /* Never match fabric nodes for RSCNs */ 4502 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 4503 return 0; 4504 4505 /* If we are doing a FULL RSCN rediscovery, match everything */ 4506 if (vport->fc_flag & FC_RSCN_DISCOVERY) 4507 return did; 4508 4509 spin_lock_irq(shost->host_lock); 4510 if (vport->fc_rscn_flush) { 4511 /* Another thread is walking fc_rscn_id_list on this vport */ 4512 spin_unlock_irq(shost->host_lock); 4513 return 0; 4514 } 4515 /* Indicate we are walking fc_rscn_id_list on this vport */ 4516 vport->fc_rscn_flush = 1; 4517 spin_unlock_irq(shost->host_lock); 4518 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 4519 lp = vport->fc_rscn_id_list[i]->virt; 4520 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); 4521 payload_len -= sizeof(uint32_t); /* take off word 0 */ 4522 while (payload_len) { 4523 rscn_did.un.word = be32_to_cpu(*lp++); 4524 payload_len -= sizeof(uint32_t); 4525 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) { 4526 case RSCN_ADDRESS_FORMAT_PORT: 4527 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 4528 && (ns_did.un.b.area == rscn_did.un.b.area) 4529 && (ns_did.un.b.id == rscn_did.un.b.id)) 4530 goto return_did_out; 4531 break; 4532 case RSCN_ADDRESS_FORMAT_AREA: 4533 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 4534 && (ns_did.un.b.area == rscn_did.un.b.area)) 4535 goto return_did_out; 4536 break; 4537 case RSCN_ADDRESS_FORMAT_DOMAIN: 4538 if (ns_did.un.b.domain == rscn_did.un.b.domain) 4539 goto return_did_out; 4540 break; 4541 case RSCN_ADDRESS_FORMAT_FABRIC: 4542 goto return_did_out; 4543 } 4544 } 4545 } 4546 /* Indicate we are done with walking fc_rscn_id_list on this vport */ 4547 vport->fc_rscn_flush = 0; 4548 return 0; 4549 return_did_out: 4550 /* Indicate we are done with walking fc_rscn_id_list on this vport */ 4551 vport->fc_rscn_flush = 0; 4552 return did; 4553 } 4554 4555 /** 4556 * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn 4557 * @vport: pointer to a host virtual N_Port data structure. 4558 * 4559 * This routine sends recovery (NLP_EVT_DEVICE_RECOVERY) event to the 4560 * state machine for a @vport's nodes that are with pending RSCN (Registration 4561 * State Change Notification). 4562 * 4563 * Return code 4564 * 0 - Successful (currently alway return 0) 4565 **/ 4566 static int 4567 lpfc_rscn_recovery_check(struct lpfc_vport *vport) 4568 { 4569 struct lpfc_nodelist *ndlp = NULL; 4570 4571 /* Move all affected nodes by pending RSCNs to NPR state. */ 4572 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 4573 if (!NLP_CHK_NODE_ACT(ndlp) || 4574 (ndlp->nlp_state == NLP_STE_UNUSED_NODE) || 4575 !lpfc_rscn_payload_check(vport, ndlp->nlp_DID)) 4576 continue; 4577 lpfc_disc_state_machine(vport, ndlp, NULL, 4578 NLP_EVT_DEVICE_RECOVERY); 4579 lpfc_cancel_retry_delay_tmo(vport, ndlp); 4580 } 4581 return 0; 4582 } 4583 4584 /** 4585 * lpfc_send_rscn_event - Send an RSCN event to management application 4586 * @vport: pointer to a host virtual N_Port data structure. 4587 * @cmdiocb: pointer to lpfc command iocb data structure. 
4588  *
4589  * lpfc_send_rscn_event sends an RSCN netlink event to management
4590  * applications.
4591  */
4592 static void
4593 lpfc_send_rscn_event(struct lpfc_vport *vport,
4594 		     struct lpfc_iocbq *cmdiocb)
4595 {
4596 	struct lpfc_dmabuf *pcmd;
4597 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4598 	uint32_t *payload_ptr;
4599 	uint32_t payload_len;
4600 	struct lpfc_rscn_event_header *rscn_event_data;
4601 
4602 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
4603 	payload_ptr = (uint32_t *) pcmd->virt;
4604 	payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK);
4605 
4606 	rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) +
4607 		payload_len, GFP_KERNEL);
4608 	if (!rscn_event_data) {
4609 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4610 			"0147 Failed to allocate memory for RSCN event\n");
4611 		return;
4612 	}
4613 	rscn_event_data->event_type = FC_REG_RSCN_EVENT;
4614 	rscn_event_data->payload_length = payload_len;
4615 	memcpy(rscn_event_data->rscn_payload, payload_ptr,
4616 		payload_len);
4617 
4618 	fc_host_post_vendor_event(shost,
4619 		fc_get_event_number(),
4620 		sizeof(struct lpfc_els_event_header) + payload_len,
4621 		(char *)rscn_event_data,
4622 		LPFC_NL_VENDOR_ID);
4623 
4624 	kfree(rscn_event_data);
4625 }
4626 
4627 /**
4628  * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb
4629  * @vport: pointer to a host virtual N_Port data structure.
4630  * @cmdiocb: pointer to lpfc command iocb data structure.
4631  * @ndlp: pointer to a node-list data structure.
4632  *
4633  * This routine processes an unsolicited RSCN (Registration State Change
4634  * Notification) IOCB. First, the payload of the unsolicited RSCN is walked
4635  * and the fc_host_post_event() routine is invoked to notify the FC transport
4636  * layer. If the discovery state machine is about to begin discovery, it just
4637  * accepts the RSCN and the discovery process will satisfy it. If this RSCN
4638  * only contains N_Port IDs for other vports on this HBA, it just accepts the
4639  * RSCN and ignores processing it. If the state machine is in the recovery
4640  * state, the fc_rscn_id_list of this @vport is walked and the
4641  * lpfc_rscn_recovery_check() routine is invoked to send a recovery event to
4642  * all nodes that match the RSCN payload. Otherwise, the lpfc_els_handle_rscn()
4643  * routine is invoked to handle the RSCN event.
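 *
 * While an earlier RSCN is still being worked on (FC_RSCN_MODE or active
 * discovery), a newly received payload is deferred: it is appended to the
 * last saved buffer when the combined length still fits within
 * LPFC_BPL_SIZE, otherwise it is queued as a new fc_rscn_id_list entry; once
 * FC_MAX_HOLD_RSCN entries are held, the @vport falls back to full
 * rediscovery (FC_RSCN_DISCOVERY).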
4644 * 4645 * Return code 4646 * 0 - Just sent the acc response 4647 * 1 - Sent the acc response and waited for name server completion 4648 **/ 4649 static int 4650 lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 4651 struct lpfc_nodelist *ndlp) 4652 { 4653 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4654 struct lpfc_hba *phba = vport->phba; 4655 struct lpfc_dmabuf *pcmd; 4656 uint32_t *lp, *datap; 4657 IOCB_t *icmd; 4658 uint32_t payload_len, length, nportid, *cmd; 4659 int rscn_cnt; 4660 int rscn_id = 0, hba_id = 0; 4661 int i; 4662 4663 icmd = &cmdiocb->iocb; 4664 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 4665 lp = (uint32_t *) pcmd->virt; 4666 4667 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); 4668 payload_len -= sizeof(uint32_t); /* take off word 0 */ 4669 /* RSCN received */ 4670 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 4671 "0214 RSCN received Data: x%x x%x x%x x%x\n", 4672 vport->fc_flag, payload_len, *lp, 4673 vport->fc_rscn_id_cnt); 4674 4675 /* Send an RSCN event to the management application */ 4676 lpfc_send_rscn_event(vport, cmdiocb); 4677 4678 for (i = 0; i < payload_len/sizeof(uint32_t); i++) 4679 fc_host_post_event(shost, fc_get_event_number(), 4680 FCH_EVT_RSCN, lp[i]); 4681 4682 /* If we are about to begin discovery, just ACC the RSCN. 4683 * Discovery processing will satisfy it. 4684 */ 4685 if (vport->port_state <= LPFC_NS_QRY) { 4686 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 4687 "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x", 4688 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 4689 4690 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 4691 return 0; 4692 } 4693 4694 /* If this RSCN just contains NPortIDs for other vports on this HBA, 4695 * just ACC and ignore it. 4696 */ 4697 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 4698 !(vport->cfg_peer_port_login)) { 4699 i = payload_len; 4700 datap = lp; 4701 while (i > 0) { 4702 nportid = *datap++; 4703 nportid = ((be32_to_cpu(nportid)) & Mask_DID); 4704 i -= sizeof(uint32_t); 4705 rscn_id++; 4706 if (lpfc_find_vport_by_did(phba, nportid)) 4707 hba_id++; 4708 } 4709 if (rscn_id == hba_id) { 4710 /* ALL NPortIDs in RSCN are on HBA */ 4711 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 4712 "0219 Ignore RSCN " 4713 "Data: x%x x%x x%x x%x\n", 4714 vport->fc_flag, payload_len, 4715 *lp, vport->fc_rscn_id_cnt); 4716 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 4717 "RCV RSCN vport: did:x%x/ste:x%x flg:x%x", 4718 ndlp->nlp_DID, vport->port_state, 4719 ndlp->nlp_flag); 4720 4721 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, 4722 ndlp, NULL); 4723 return 0; 4724 } 4725 } 4726 4727 spin_lock_irq(shost->host_lock); 4728 if (vport->fc_rscn_flush) { 4729 /* Another thread is walking fc_rscn_id_list on this vport */ 4730 vport->fc_flag |= FC_RSCN_DISCOVERY; 4731 spin_unlock_irq(shost->host_lock); 4732 /* Send back ACC */ 4733 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 4734 return 0; 4735 } 4736 /* Indicate we are walking fc_rscn_id_list on this vport */ 4737 vport->fc_rscn_flush = 1; 4738 spin_unlock_irq(shost->host_lock); 4739 /* Get the array count after successfully have the token */ 4740 rscn_cnt = vport->fc_rscn_id_cnt; 4741 /* If we are already processing an RSCN, save the received 4742 * RSCN payload buffer, cmdiocb->context2 to process later. 
4743 */ 4744 if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) { 4745 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 4746 "RCV RSCN defer: did:x%x/ste:x%x flg:x%x", 4747 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 4748 4749 spin_lock_irq(shost->host_lock); 4750 vport->fc_flag |= FC_RSCN_DEFERRED; 4751 if ((rscn_cnt < FC_MAX_HOLD_RSCN) && 4752 !(vport->fc_flag & FC_RSCN_DISCOVERY)) { 4753 vport->fc_flag |= FC_RSCN_MODE; 4754 spin_unlock_irq(shost->host_lock); 4755 if (rscn_cnt) { 4756 cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt; 4757 length = be32_to_cpu(*cmd & ~ELS_CMD_MASK); 4758 } 4759 if ((rscn_cnt) && 4760 (payload_len + length <= LPFC_BPL_SIZE)) { 4761 *cmd &= ELS_CMD_MASK; 4762 *cmd |= cpu_to_be32(payload_len + length); 4763 memcpy(((uint8_t *)cmd) + length, lp, 4764 payload_len); 4765 } else { 4766 vport->fc_rscn_id_list[rscn_cnt] = pcmd; 4767 vport->fc_rscn_id_cnt++; 4768 /* If we zero, cmdiocb->context2, the calling 4769 * routine will not try to free it. 4770 */ 4771 cmdiocb->context2 = NULL; 4772 } 4773 /* Deferred RSCN */ 4774 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 4775 "0235 Deferred RSCN " 4776 "Data: x%x x%x x%x\n", 4777 vport->fc_rscn_id_cnt, vport->fc_flag, 4778 vport->port_state); 4779 } else { 4780 vport->fc_flag |= FC_RSCN_DISCOVERY; 4781 spin_unlock_irq(shost->host_lock); 4782 /* ReDiscovery RSCN */ 4783 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 4784 "0234 ReDiscovery RSCN " 4785 "Data: x%x x%x x%x\n", 4786 vport->fc_rscn_id_cnt, vport->fc_flag, 4787 vport->port_state); 4788 } 4789 /* Indicate we are done walking fc_rscn_id_list on this vport */ 4790 vport->fc_rscn_flush = 0; 4791 /* Send back ACC */ 4792 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 4793 /* send RECOVERY event for ALL nodes that match RSCN payload */ 4794 lpfc_rscn_recovery_check(vport); 4795 spin_lock_irq(shost->host_lock); 4796 vport->fc_flag &= ~FC_RSCN_DEFERRED; 4797 spin_unlock_irq(shost->host_lock); 4798 return 0; 4799 } 4800 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 4801 "RCV RSCN: did:x%x/ste:x%x flg:x%x", 4802 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 4803 4804 spin_lock_irq(shost->host_lock); 4805 vport->fc_flag |= FC_RSCN_MODE; 4806 spin_unlock_irq(shost->host_lock); 4807 vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd; 4808 /* Indicate we are done walking fc_rscn_id_list on this vport */ 4809 vport->fc_rscn_flush = 0; 4810 /* 4811 * If we zero, cmdiocb->context2, the calling routine will 4812 * not try to free it. 4813 */ 4814 cmdiocb->context2 = NULL; 4815 lpfc_set_disctmo(vport); 4816 /* Send back ACC */ 4817 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 4818 /* send RECOVERY event for ALL nodes that match RSCN payload */ 4819 lpfc_rscn_recovery_check(vport); 4820 return lpfc_els_handle_rscn(vport); 4821 } 4822 4823 /** 4824 * lpfc_els_handle_rscn - Handle rscn for a vport 4825 * @vport: pointer to a host virtual N_Port data structure. 4826 * 4827 * This routine handles the Registration State Configuration Notification 4828 * (RSCN) for a @vport. If login to NameServer does not exist, a new ndlp shall 4829 * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise, 4830 * if the ndlp to NameServer exists, a Common Transport (CT) command to the 4831 * NameServer shall be issued. If CT command to the NameServer fails to be 4832 * issued, the lpfc_els_flush_rscn() routine shall be invoked to clean up any 4833 * RSCN activities with the @vport. 
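 *
 * If the @vport is being unloaded (FC_UNLOADING), the RSCN is not processed
 * at all and any accumulated RSCN state is simply flushed.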
4834 * 4835 * Return code 4836 * 0 - Cleaned up rscn on the @vport 4837 * 1 - Wait for plogi to name server before proceed 4838 **/ 4839 int 4840 lpfc_els_handle_rscn(struct lpfc_vport *vport) 4841 { 4842 struct lpfc_nodelist *ndlp; 4843 struct lpfc_hba *phba = vport->phba; 4844 4845 /* Ignore RSCN if the port is being torn down. */ 4846 if (vport->load_flag & FC_UNLOADING) { 4847 lpfc_els_flush_rscn(vport); 4848 return 0; 4849 } 4850 4851 /* Start timer for RSCN processing */ 4852 lpfc_set_disctmo(vport); 4853 4854 /* RSCN processed */ 4855 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 4856 "0215 RSCN processed Data: x%x x%x x%x x%x\n", 4857 vport->fc_flag, 0, vport->fc_rscn_id_cnt, 4858 vport->port_state); 4859 4860 /* To process RSCN, first compare RSCN data with NameServer */ 4861 vport->fc_ns_retry = 0; 4862 vport->num_disc_nodes = 0; 4863 4864 ndlp = lpfc_findnode_did(vport, NameServer_DID); 4865 if (ndlp && NLP_CHK_NODE_ACT(ndlp) 4866 && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { 4867 /* Good ndlp, issue CT Request to NameServer */ 4868 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0) == 0) 4869 /* Wait for NameServer query cmpl before we can 4870 continue */ 4871 return 1; 4872 } else { 4873 /* If login to NameServer does not exist, issue one */ 4874 /* Good status, issue PLOGI to NameServer */ 4875 ndlp = lpfc_findnode_did(vport, NameServer_DID); 4876 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) 4877 /* Wait for NameServer login cmpl before we can 4878 continue */ 4879 return 1; 4880 4881 if (ndlp) { 4882 ndlp = lpfc_enable_node(vport, ndlp, 4883 NLP_STE_PLOGI_ISSUE); 4884 if (!ndlp) { 4885 lpfc_els_flush_rscn(vport); 4886 return 0; 4887 } 4888 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE; 4889 } else { 4890 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 4891 if (!ndlp) { 4892 lpfc_els_flush_rscn(vport); 4893 return 0; 4894 } 4895 lpfc_nlp_init(vport, ndlp, NameServer_DID); 4896 ndlp->nlp_prev_state = ndlp->nlp_state; 4897 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 4898 } 4899 ndlp->nlp_type |= NLP_FABRIC; 4900 lpfc_issue_els_plogi(vport, NameServer_DID, 0); 4901 /* Wait for NameServer login cmpl before we can 4902 * continue 4903 */ 4904 return 1; 4905 } 4906 4907 lpfc_els_flush_rscn(vport); 4908 return 0; 4909 } 4910 4911 /** 4912 * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb 4913 * @vport: pointer to a host virtual N_Port data structure. 4914 * @cmdiocb: pointer to lpfc command iocb data structure. 4915 * @ndlp: pointer to a node-list data structure. 4916 * 4917 * This routine processes Fabric Login (FLOGI) IOCB received as an ELS 4918 * unsolicited event. An unsolicited FLOGI can be received in a point-to- 4919 * point topology. As an unsolicited FLOGI should not be received in a loop 4920 * mode, any unsolicited FLOGI received in loop mode shall be ignored. The 4921 * lpfc_check_sparm() routine is invoked to check the parameters in the 4922 * unsolicited FLOGI. If parameters validation failed, the routine 4923 * lpfc_els_rsp_reject() shall be called with reject reason code set to 4924 * LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the 4925 * FLOGI shall be compared with the Port WWN of the @vport to determine who 4926 * will initiate PLOGI. The higher lexicographical value party shall has 4927 * higher priority (as the winning port) and will initiate PLOGI and 4928 * communicate Port_IDs (Addresses) for both nodes in PLOGI. 
The result 4929 * of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI 4930 * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI. 4931 * 4932 * Return code 4933 * 0 - Successfully processed the unsolicited flogi 4934 * 1 - Failed to process the unsolicited flogi 4935 **/ 4936 static int 4937 lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 4938 struct lpfc_nodelist *ndlp) 4939 { 4940 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4941 struct lpfc_hba *phba = vport->phba; 4942 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 4943 uint32_t *lp = (uint32_t *) pcmd->virt; 4944 IOCB_t *icmd = &cmdiocb->iocb; 4945 struct serv_parm *sp; 4946 LPFC_MBOXQ_t *mbox; 4947 struct ls_rjt stat; 4948 uint32_t cmd, did; 4949 int rc; 4950 4951 cmd = *lp++; 4952 sp = (struct serv_parm *) lp; 4953 4954 /* FLOGI received */ 4955 4956 lpfc_set_disctmo(vport); 4957 4958 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 4959 /* We should never receive a FLOGI in loop mode, ignore it */ 4960 did = icmd->un.elsreq64.remoteID; 4961 4962 /* An FLOGI ELS command <elsCmd> was received from DID <did> in 4963 Loop Mode */ 4964 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 4965 "0113 An FLOGI ELS command x%x was " 4966 "received from DID x%x in Loop Mode\n", 4967 cmd, did); 4968 return 1; 4969 } 4970 4971 if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1))) { 4972 /* For a FLOGI we accept, then if our portname is greater 4973 * then the remote portname we initiate Nport login. 4974 */ 4975 4976 rc = memcmp(&vport->fc_portname, &sp->portName, 4977 sizeof(struct lpfc_name)); 4978 4979 if (!rc) { 4980 if (phba->sli_rev < LPFC_SLI_REV4) { 4981 mbox = mempool_alloc(phba->mbox_mem_pool, 4982 GFP_KERNEL); 4983 if (!mbox) 4984 return 1; 4985 lpfc_linkdown(phba); 4986 lpfc_init_link(phba, mbox, 4987 phba->cfg_topology, 4988 phba->cfg_link_speed); 4989 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; 4990 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4991 mbox->vport = vport; 4992 rc = lpfc_sli_issue_mbox(phba, mbox, 4993 MBX_NOWAIT); 4994 lpfc_set_loopback_flag(phba); 4995 if (rc == MBX_NOT_FINISHED) 4996 mempool_free(mbox, phba->mbox_mem_pool); 4997 return 1; 4998 } else { 4999 /* abort the flogi coming back to ourselves 5000 * due to external loopback on the port. 5001 */ 5002 lpfc_els_abort_flogi(phba); 5003 return 0; 5004 } 5005 } else if (rc > 0) { /* greater than */ 5006 spin_lock_irq(shost->host_lock); 5007 vport->fc_flag |= FC_PT2PT_PLOGI; 5008 spin_unlock_irq(shost->host_lock); 5009 5010 /* If we have the high WWPN we can assign our own 5011 * myDID; otherwise, we have to WAIT for a PLOGI 5012 * from the remote NPort to find out what it 5013 * will be. 5014 */ 5015 vport->fc_myDID = PT2PT_LocalID; 5016 } 5017 5018 /* 5019 * The vport state should go to LPFC_FLOGI only 5020 * AFTER we issue a FLOGI, not receive one. 5021 */ 5022 spin_lock_irq(shost->host_lock); 5023 vport->fc_flag |= FC_PT2PT; 5024 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 5025 spin_unlock_irq(shost->host_lock); 5026 5027 /* 5028 * We temporarily set fc_myDID to make it look like we are 5029 * a Fabric. This is done just so we end up with the right 5030 * did / sid on the FLOGI ACC rsp. 
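	 *
	 * Put differently (illustrative only; Fabric_DID is the well-known
	 * fabric address used by this driver), the swap performed below is:
	 *
	 *	did = vport->fc_myDID;		save the assigned DID
	 *	vport->fc_myDID = Fabric_DID;	ACC is built with fabric S_ID
	 *	... build and send the FLOGI ACC ...
	 *	vport->fc_myDID = did;		restore once the ACC is queued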
5031 */ 5032 did = vport->fc_myDID; 5033 vport->fc_myDID = Fabric_DID; 5034 5035 } else { 5036 /* Reject this request because invalid parameters */ 5037 stat.un.b.lsRjtRsvd0 = 0; 5038 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 5039 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS; 5040 stat.un.b.vendorUnique = 0; 5041 5042 /* 5043 * We temporarily set fc_myDID to make it look like we are 5044 * a Fabric. This is done just so we end up with the right 5045 * did / sid on the FLOGI LS_RJT rsp. 5046 */ 5047 did = vport->fc_myDID; 5048 vport->fc_myDID = Fabric_DID; 5049 5050 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 5051 NULL); 5052 5053 /* Now lets put fc_myDID back to what its supposed to be */ 5054 vport->fc_myDID = did; 5055 5056 return 1; 5057 } 5058 5059 /* Send back ACC */ 5060 lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL); 5061 5062 /* Now lets put fc_myDID back to what its supposed to be */ 5063 vport->fc_myDID = did; 5064 5065 if (!(vport->fc_flag & FC_PT2PT_PLOGI)) { 5066 5067 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5068 if (!mbox) 5069 goto fail; 5070 5071 lpfc_config_link(phba, mbox); 5072 5073 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 5074 mbox->vport = vport; 5075 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 5076 if (rc == MBX_NOT_FINISHED) { 5077 mempool_free(mbox, phba->mbox_mem_pool); 5078 goto fail; 5079 } 5080 } 5081 5082 return 0; 5083 fail: 5084 return 1; 5085 } 5086 5087 /** 5088 * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb 5089 * @vport: pointer to a host virtual N_Port data structure. 5090 * @cmdiocb: pointer to lpfc command iocb data structure. 5091 * @ndlp: pointer to a node-list data structure. 5092 * 5093 * This routine processes Request Node Identification Data (RNID) IOCB 5094 * received as an ELS unsolicited event. Only when the RNID specified format 5095 * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data) 5096 * present, this routine will invoke the lpfc_els_rsp_rnid_acc() routine to 5097 * Accept (ACC) the RNID ELS command. All the other RNID formats are 5098 * rejected by invoking the lpfc_els_rsp_reject() routine. 5099 * 5100 * Return code 5101 * 0 - Successfully processed rnid iocb (currently always return 0) 5102 **/ 5103 static int 5104 lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5105 struct lpfc_nodelist *ndlp) 5106 { 5107 struct lpfc_dmabuf *pcmd; 5108 uint32_t *lp; 5109 IOCB_t *icmd; 5110 RNID *rn; 5111 struct ls_rjt stat; 5112 uint32_t cmd, did; 5113 5114 icmd = &cmdiocb->iocb; 5115 did = icmd->un.elsreq64.remoteID; 5116 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 5117 lp = (uint32_t *) pcmd->virt; 5118 5119 cmd = *lp++; 5120 rn = (RNID *) lp; 5121 5122 /* RNID received */ 5123 5124 switch (rn->Format) { 5125 case 0: 5126 case RNID_TOPOLOGY_DISC: 5127 /* Send back ACC */ 5128 lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp); 5129 break; 5130 default: 5131 /* Reject this request because format not supported */ 5132 stat.un.b.lsRjtRsvd0 = 0; 5133 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 5134 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 5135 stat.un.b.vendorUnique = 0; 5136 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 5137 NULL); 5138 } 5139 return 0; 5140 } 5141 5142 /** 5143 * lpfc_els_rcv_echo - Process an unsolicited echo iocb 5144 * @vport: pointer to a host virtual N_Port data structure. 5145 * @cmdiocb: pointer to lpfc command iocb data structure. 5146 * @ndlp: pointer to a node-list data structure. 
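 *
 * This routine processes an Echo (ECHO) IOCB received as an ELS
 * unsolicited event: it skips the leading ELS command word and echoes
 * the remaining payload back via lpfc_els_rsp_echo_acc(). A minimal
 * sketch of the body that follows:
 *
 *	pcmd = (uint8_t *)(((struct lpfc_dmabuf *)cmdiocb->context2)->virt);
 *	pcmd += sizeof(uint32_t);
 *	lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp);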
5147 * 5148 * Return code 5149 * 0 - Successfully processed echo iocb (currently always return 0) 5150 **/ 5151 static int 5152 lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5153 struct lpfc_nodelist *ndlp) 5154 { 5155 uint8_t *pcmd; 5156 5157 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt); 5158 5159 /* skip over first word of echo command to find echo data */ 5160 pcmd += sizeof(uint32_t); 5161 5162 lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp); 5163 return 0; 5164 } 5165 5166 /** 5167 * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb 5168 * @vport: pointer to a host virtual N_Port data structure. 5169 * @cmdiocb: pointer to lpfc command iocb data structure. 5170 * @ndlp: pointer to a node-list data structure. 5171 * 5172 * This routine processes a Link Incident Report Registration(LIRR) IOCB 5173 * received as an ELS unsolicited event. Currently, this function just invokes 5174 * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally. 5175 * 5176 * Return code 5177 * 0 - Successfully processed lirr iocb (currently always return 0) 5178 **/ 5179 static int 5180 lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5181 struct lpfc_nodelist *ndlp) 5182 { 5183 struct ls_rjt stat; 5184 5185 /* For now, unconditionally reject this command */ 5186 stat.un.b.lsRjtRsvd0 = 0; 5187 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 5188 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 5189 stat.un.b.vendorUnique = 0; 5190 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 5191 return 0; 5192 } 5193 5194 /** 5195 * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb 5196 * @vport: pointer to a host virtual N_Port data structure. 5197 * @cmdiocb: pointer to lpfc command iocb data structure. 5198 * @ndlp: pointer to a node-list data structure. 5199 * 5200 * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB 5201 * received as an ELS unsolicited event. A request to RRQ shall only 5202 * be accepted if the Originator Nx_Port N_Port_ID or the Responder 5203 * Nx_Port N_Port_ID of the target Exchange is the same as the 5204 * N_Port_ID of the Nx_Port that makes the request. If the RRQ is 5205 * not accepted, an LS_RJT with reason code "Unable to perform 5206 * command request" and reason code explanation "Invalid Originator 5207 * S_ID" shall be returned. For now, we just unconditionally accept 5208 * RRQ from the target. 5209 **/ 5210 static void 5211 lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5212 struct lpfc_nodelist *ndlp) 5213 { 5214 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 5215 if (vport->phba->sli_rev == LPFC_SLI_REV4) 5216 lpfc_els_clear_rrq(vport, cmdiocb, ndlp); 5217 } 5218 5219 /** 5220 * lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd 5221 * @phba: pointer to lpfc hba data structure. 5222 * @pmb: pointer to the driver internal queue element for mailbox command. 5223 * 5224 * This routine is the completion callback function for the MBX_READ_LNK_STAT 5225 * mailbox command. This callback function is to actually send the Accept 5226 * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It 5227 * collects the link statistics from the completion of the MBX_READ_LNK_STAT 5228 * mailbox command, constructs the RPS response with the link statistics 5229 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC 5230 * response to the RPS. 
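 *
 * The ox_id/rx_id of the original RLS exchange ride through the mailbox
 * packed into pmb->context1 (a sketch matching the pack done in
 * lpfc_els_rcv_rls() and the unpack done here):
 *
 *	context1 = (void *)((unsigned long)((ox_id << 16) | rx_id));
 *	rxid = (uint16_t)((unsigned long)(pmb->context1) & 0xffff);
 *	oxid = (uint16_t)(((unsigned long)(pmb->context1) >> 16) & 0xffff);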
5231 * 5232 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 5233 * will be incremented by 1 for holding the ndlp and the reference to ndlp 5234 * will be stored into the context1 field of the IOCB for the completion 5235 * callback function to the RPS Accept Response ELS IOCB command. 5236 * 5237 **/ 5238 static void 5239 lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 5240 { 5241 MAILBOX_t *mb; 5242 IOCB_t *icmd; 5243 struct RLS_RSP *rls_rsp; 5244 uint8_t *pcmd; 5245 struct lpfc_iocbq *elsiocb; 5246 struct lpfc_nodelist *ndlp; 5247 uint16_t oxid; 5248 uint16_t rxid; 5249 uint32_t cmdsize; 5250 5251 mb = &pmb->u.mb; 5252 5253 ndlp = (struct lpfc_nodelist *) pmb->context2; 5254 rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff); 5255 oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff); 5256 pmb->context1 = NULL; 5257 pmb->context2 = NULL; 5258 5259 if (mb->mbxStatus) { 5260 mempool_free(pmb, phba->mbox_mem_pool); 5261 return; 5262 } 5263 5264 cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t); 5265 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 5266 lpfc_max_els_tries, ndlp, 5267 ndlp->nlp_DID, ELS_CMD_ACC); 5268 5269 /* Decrement the ndlp reference count from previous mbox command */ 5270 lpfc_nlp_put(ndlp); 5271 5272 if (!elsiocb) { 5273 mempool_free(pmb, phba->mbox_mem_pool); 5274 return; 5275 } 5276 5277 icmd = &elsiocb->iocb; 5278 icmd->ulpContext = rxid; 5279 icmd->unsli3.rcvsli3.ox_id = oxid; 5280 5281 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5282 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5283 pcmd += sizeof(uint32_t); /* Skip past command */ 5284 rls_rsp = (struct RLS_RSP *)pcmd; 5285 5286 rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt); 5287 rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt); 5288 rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt); 5289 rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt); 5290 rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord); 5291 rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt); 5292 mempool_free(pmb, phba->mbox_mem_pool); 5293 /* Xmit ELS RLS ACC response tag <ulpIoTag> */ 5294 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 5295 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, " 5296 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 5297 elsiocb->iotag, elsiocb->iocb.ulpContext, 5298 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5299 ndlp->nlp_rpi); 5300 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5301 phba->fc_stat.elsXmitACC++; 5302 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR) 5303 lpfc_els_free_iocb(phba, elsiocb); 5304 } 5305 5306 /** 5307 * lpfc_els_rsp_rps_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd 5308 * @phba: pointer to lpfc hba data structure. 5309 * @pmb: pointer to the driver internal queue element for mailbox command. 5310 * 5311 * This routine is the completion callback function for the MBX_READ_LNK_STAT 5312 * mailbox command. This callback function is to actually send the Accept 5313 * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It 5314 * collects the link statistics from the completion of the MBX_READ_LNK_STAT 5315 * mailbox command, constructs the RPS response with the link statistics 5316 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC 5317 * response to the RPS. 
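 *
 * The RPS port status word is derived from the current topology and
 * fabric state (a sketch of the computation performed below):
 *
 *	status = (phba->fc_topology != LPFC_TOPOLOGY_LOOP) ? 0x10 : 0x8;
 *	if (phba->pport->fc_flag & FC_FABRIC)
 *		status |= 0x4;
 *	rps_rsp->portStatus = cpu_to_be16(status);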
5318 * 5319 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 5320 * will be incremented by 1 for holding the ndlp and the reference to ndlp 5321 * will be stored into the context1 field of the IOCB for the completion 5322 * callback function to the RPS Accept Response ELS IOCB command. 5323 * 5324 **/ 5325 static void 5326 lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 5327 { 5328 MAILBOX_t *mb; 5329 IOCB_t *icmd; 5330 RPS_RSP *rps_rsp; 5331 uint8_t *pcmd; 5332 struct lpfc_iocbq *elsiocb; 5333 struct lpfc_nodelist *ndlp; 5334 uint16_t status; 5335 uint16_t oxid; 5336 uint16_t rxid; 5337 uint32_t cmdsize; 5338 5339 mb = &pmb->u.mb; 5340 5341 ndlp = (struct lpfc_nodelist *) pmb->context2; 5342 rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff); 5343 oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff); 5344 pmb->context1 = NULL; 5345 pmb->context2 = NULL; 5346 5347 if (mb->mbxStatus) { 5348 mempool_free(pmb, phba->mbox_mem_pool); 5349 return; 5350 } 5351 5352 cmdsize = sizeof(RPS_RSP) + sizeof(uint32_t); 5353 mempool_free(pmb, phba->mbox_mem_pool); 5354 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 5355 lpfc_max_els_tries, ndlp, 5356 ndlp->nlp_DID, ELS_CMD_ACC); 5357 5358 /* Decrement the ndlp reference count from previous mbox command */ 5359 lpfc_nlp_put(ndlp); 5360 5361 if (!elsiocb) 5362 return; 5363 5364 icmd = &elsiocb->iocb; 5365 icmd->ulpContext = rxid; 5366 icmd->unsli3.rcvsli3.ox_id = oxid; 5367 5368 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5369 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5370 pcmd += sizeof(uint32_t); /* Skip past command */ 5371 rps_rsp = (RPS_RSP *)pcmd; 5372 5373 if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) 5374 status = 0x10; 5375 else 5376 status = 0x8; 5377 if (phba->pport->fc_flag & FC_FABRIC) 5378 status |= 0x4; 5379 5380 rps_rsp->rsvd1 = 0; 5381 rps_rsp->portStatus = cpu_to_be16(status); 5382 rps_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt); 5383 rps_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt); 5384 rps_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt); 5385 rps_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt); 5386 rps_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord); 5387 rps_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt); 5388 /* Xmit ELS RPS ACC response tag <ulpIoTag> */ 5389 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 5390 "0118 Xmit ELS RPS ACC response tag x%x xri x%x, " 5391 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 5392 elsiocb->iotag, elsiocb->iocb.ulpContext, 5393 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5394 ndlp->nlp_rpi); 5395 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5396 phba->fc_stat.elsXmitACC++; 5397 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR) 5398 lpfc_els_free_iocb(phba, elsiocb); 5399 return; 5400 } 5401 5402 /** 5403 * lpfc_els_rcv_rls - Process an unsolicited rls iocb 5404 * @vport: pointer to a host virtual N_Port data structure. 5405 * @cmdiocb: pointer to lpfc command iocb data structure. 5406 * @ndlp: pointer to a node-list data structure. 5407 * 5408 * This routine processes Read Port Status (RPL) IOCB received as an 5409 * ELS unsolicited event. It first checks the remote port state. If the 5410 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE 5411 * state, it invokes the lpfc_els_rsl_reject() routine to send the reject 5412 * response. 
Otherwise, it issue the MBX_READ_LNK_STAT mailbox command 5413 * for reading the HBA link statistics. It is for the callback function, 5414 * lpfc_els_rsp_rls_acc(), set to the MBX_READ_LNK_STAT mailbox command 5415 * to actually sending out RPL Accept (ACC) response. 5416 * 5417 * Return codes 5418 * 0 - Successfully processed rls iocb (currently always return 0) 5419 **/ 5420 static int 5421 lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5422 struct lpfc_nodelist *ndlp) 5423 { 5424 struct lpfc_hba *phba = vport->phba; 5425 LPFC_MBOXQ_t *mbox; 5426 struct lpfc_dmabuf *pcmd; 5427 struct ls_rjt stat; 5428 5429 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 5430 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) 5431 /* reject the unsolicited RPS request and done with it */ 5432 goto reject_out; 5433 5434 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 5435 5436 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC); 5437 if (mbox) { 5438 lpfc_read_lnk_stat(phba, mbox); 5439 mbox->context1 = (void *)((unsigned long) 5440 ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) | 5441 cmdiocb->iocb.ulpContext)); /* rx_id */ 5442 mbox->context2 = lpfc_nlp_get(ndlp); 5443 mbox->vport = vport; 5444 mbox->mbox_cmpl = lpfc_els_rsp_rls_acc; 5445 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 5446 != MBX_NOT_FINISHED) 5447 /* Mbox completion will send ELS Response */ 5448 return 0; 5449 /* Decrement reference count used for the failed mbox 5450 * command. 5451 */ 5452 lpfc_nlp_put(ndlp); 5453 mempool_free(mbox, phba->mbox_mem_pool); 5454 } 5455 reject_out: 5456 /* issue rejection response */ 5457 stat.un.b.lsRjtRsvd0 = 0; 5458 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 5459 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 5460 stat.un.b.vendorUnique = 0; 5461 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 5462 return 0; 5463 } 5464 5465 /** 5466 * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb 5467 * @vport: pointer to a host virtual N_Port data structure. 5468 * @cmdiocb: pointer to lpfc command iocb data structure. 5469 * @ndlp: pointer to a node-list data structure. 5470 * 5471 * This routine processes Read Timout Value (RTV) IOCB received as an 5472 * ELS unsolicited event. It first checks the remote port state. If the 5473 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE 5474 * state, it invokes the lpfc_els_rsl_reject() routine to send the reject 5475 * response. Otherwise, it sends the Accept(ACC) response to a Read Timeout 5476 * Value (RTV) unsolicited IOCB event. 5477 * 5478 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 5479 * will be incremented by 1 for holding the ndlp and the reference to ndlp 5480 * will be stored into the context1 field of the IOCB for the completion 5481 * callback function to the RPS Accept Response ELS IOCB command. 
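 *
 * The RTV ACC payload reports R_A_TOV in milliseconds, E_D_TOV, and a
 * qualifier word whose E_D_TOV-resolution bit reflects fc_edtovResol
 * (a sketch of the payload fill performed below):
 *
 *	rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000);
 *	rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov);
 *	bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0);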
5482 * 5483 * Return codes 5484 * 0 - Successfully processed rtv iocb (currently always return 0) 5485 **/ 5486 static int 5487 lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5488 struct lpfc_nodelist *ndlp) 5489 { 5490 struct lpfc_hba *phba = vport->phba; 5491 struct ls_rjt stat; 5492 struct RTV_RSP *rtv_rsp; 5493 uint8_t *pcmd; 5494 struct lpfc_iocbq *elsiocb; 5495 uint32_t cmdsize; 5496 5497 5498 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 5499 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) 5500 /* reject the unsolicited RPS request and done with it */ 5501 goto reject_out; 5502 5503 cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t); 5504 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 5505 lpfc_max_els_tries, ndlp, 5506 ndlp->nlp_DID, ELS_CMD_ACC); 5507 5508 if (!elsiocb) 5509 return 1; 5510 5511 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5512 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5513 pcmd += sizeof(uint32_t); /* Skip past command */ 5514 5515 /* use the command's xri in the response */ 5516 elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext; /* Xri / rx_id */ 5517 elsiocb->iocb.unsli3.rcvsli3.ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id; 5518 5519 rtv_rsp = (struct RTV_RSP *)pcmd; 5520 5521 /* populate RTV payload */ 5522 rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */ 5523 rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov); 5524 bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0); 5525 bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */ 5526 rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov); 5527 5528 /* Xmit ELS RLS ACC response tag <ulpIoTag> */ 5529 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 5530 "2875 Xmit ELS RTV ACC response tag x%x xri x%x, " 5531 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, " 5532 "Data: x%x x%x x%x\n", 5533 elsiocb->iotag, elsiocb->iocb.ulpContext, 5534 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5535 ndlp->nlp_rpi, 5536 rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov); 5537 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5538 phba->fc_stat.elsXmitACC++; 5539 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR) 5540 lpfc_els_free_iocb(phba, elsiocb); 5541 return 0; 5542 5543 reject_out: 5544 /* issue rejection response */ 5545 stat.un.b.lsRjtRsvd0 = 0; 5546 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 5547 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 5548 stat.un.b.vendorUnique = 0; 5549 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 5550 return 0; 5551 } 5552 5553 /* lpfc_els_rcv_rps - Process an unsolicited rps iocb 5554 * @vport: pointer to a host virtual N_Port data structure. 5555 * @cmdiocb: pointer to lpfc command iocb data structure. 5556 * @ndlp: pointer to a node-list data structure. 5557 * 5558 * This routine processes Read Port Status (RPS) IOCB received as an 5559 * ELS unsolicited event. It first checks the remote port state. If the 5560 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE 5561 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject 5562 * response. Otherwise, it issue the MBX_READ_LNK_STAT mailbox command 5563 * for reading the HBA link statistics. It is for the callback function, 5564 * lpfc_els_rsp_rps_acc(), set to the MBX_READ_LNK_STAT mailbox command 5565 * to actually sending out RPS Accept (ACC) response. 
5566 * 5567 * Return codes 5568 * 0 - Successfully processed rps iocb (currently always return 0) 5569 **/ 5570 static int 5571 lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5572 struct lpfc_nodelist *ndlp) 5573 { 5574 struct lpfc_hba *phba = vport->phba; 5575 uint32_t *lp; 5576 uint8_t flag; 5577 LPFC_MBOXQ_t *mbox; 5578 struct lpfc_dmabuf *pcmd; 5579 RPS *rps; 5580 struct ls_rjt stat; 5581 5582 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 5583 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) 5584 /* reject the unsolicited RPS request and done with it */ 5585 goto reject_out; 5586 5587 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 5588 lp = (uint32_t *) pcmd->virt; 5589 flag = (be32_to_cpu(*lp++) & 0xf); 5590 rps = (RPS *) lp; 5591 5592 if ((flag == 0) || 5593 ((flag == 1) && (be32_to_cpu(rps->un.portNum) == 0)) || 5594 ((flag == 2) && (memcmp(&rps->un.portName, &vport->fc_portname, 5595 sizeof(struct lpfc_name)) == 0))) { 5596 5597 printk("Fix me....\n"); 5598 dump_stack(); 5599 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC); 5600 if (mbox) { 5601 lpfc_read_lnk_stat(phba, mbox); 5602 mbox->context1 = (void *)((unsigned long) 5603 ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) | 5604 cmdiocb->iocb.ulpContext)); /* rx_id */ 5605 mbox->context2 = lpfc_nlp_get(ndlp); 5606 mbox->vport = vport; 5607 mbox->mbox_cmpl = lpfc_els_rsp_rps_acc; 5608 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 5609 != MBX_NOT_FINISHED) 5610 /* Mbox completion will send ELS Response */ 5611 return 0; 5612 /* Decrement reference count used for the failed mbox 5613 * command. 5614 */ 5615 lpfc_nlp_put(ndlp); 5616 mempool_free(mbox, phba->mbox_mem_pool); 5617 } 5618 } 5619 5620 reject_out: 5621 /* issue rejection response */ 5622 stat.un.b.lsRjtRsvd0 = 0; 5623 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 5624 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 5625 stat.un.b.vendorUnique = 0; 5626 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 5627 return 0; 5628 } 5629 5630 /* lpfc_issue_els_rrq - Process an unsolicited rps iocb 5631 * @vport: pointer to a host virtual N_Port data structure. 5632 * @ndlp: pointer to a node-list data structure. 5633 * @did: DID of the target. 5634 * @rrq: Pointer to the rrq struct. 5635 * 5636 * Build a ELS RRQ command and send it to the target. If the issue_iocb is 5637 * Successful the the completion handler will clear the RRQ. 5638 * 5639 * Return codes 5640 * 0 - Successfully sent rrq els iocb. 5641 * 1 - Failed to send rrq els iocb. 
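 *
 * The RRQ payload names the exchange being reinstated (a sketch of the
 * field setup done in this routine; the oxid is translated through the
 * SLI4 xri_ids table):
 *
 *	bf_set(rrq_oxid, els_rrq, phba->sli4_hba.xri_ids[rrq->xritag]);
 *	bf_set(rrq_rxid, els_rrq, rrq->rxid);
 *	bf_set(rrq_did, els_rrq, vport->fc_myDID);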
5642 **/ 5643 static int 5644 lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 5645 uint32_t did, struct lpfc_node_rrq *rrq) 5646 { 5647 struct lpfc_hba *phba = vport->phba; 5648 struct RRQ *els_rrq; 5649 IOCB_t *icmd; 5650 struct lpfc_iocbq *elsiocb; 5651 uint8_t *pcmd; 5652 uint16_t cmdsize; 5653 int ret; 5654 5655 5656 if (ndlp != rrq->ndlp) 5657 ndlp = rrq->ndlp; 5658 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) 5659 return 1; 5660 5661 /* If ndlp is not NULL, we will bump the reference count on it */ 5662 cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ)); 5663 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did, 5664 ELS_CMD_RRQ); 5665 if (!elsiocb) 5666 return 1; 5667 5668 icmd = &elsiocb->iocb; 5669 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5670 5671 /* For RRQ request, remainder of payload is Exchange IDs */ 5672 *((uint32_t *) (pcmd)) = ELS_CMD_RRQ; 5673 pcmd += sizeof(uint32_t); 5674 els_rrq = (struct RRQ *) pcmd; 5675 5676 bf_set(rrq_oxid, els_rrq, phba->sli4_hba.xri_ids[rrq->xritag]); 5677 bf_set(rrq_rxid, els_rrq, rrq->rxid); 5678 bf_set(rrq_did, els_rrq, vport->fc_myDID); 5679 els_rrq->rrq = cpu_to_be32(els_rrq->rrq); 5680 els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg); 5681 5682 5683 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 5684 "Issue RRQ: did:x%x", 5685 did, rrq->xritag, rrq->rxid); 5686 elsiocb->context_un.rrq = rrq; 5687 elsiocb->iocb_cmpl = lpfc_cmpl_els_rrq; 5688 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5689 5690 if (ret == IOCB_ERROR) { 5691 lpfc_els_free_iocb(phba, elsiocb); 5692 return 1; 5693 } 5694 return 0; 5695 } 5696 5697 /** 5698 * lpfc_send_rrq - Sends ELS RRQ if needed. 5699 * @phba: pointer to lpfc hba data structure. 5700 * @rrq: pointer to the active rrq. 5701 * 5702 * This routine will call the lpfc_issue_els_rrq if the rrq is 5703 * still active for the xri. If this function returns a failure then 5704 * the caller needs to clean up the RRQ by calling lpfc_clr_active_rrq. 5705 * 5706 * Returns 0 Success. 5707 * 1 Failure. 5708 **/ 5709 int 5710 lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq) 5711 { 5712 struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport, 5713 rrq->nlp_DID); 5714 if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag)) 5715 return lpfc_issue_els_rrq(rrq->vport, ndlp, 5716 rrq->nlp_DID, rrq); 5717 else 5718 return 1; 5719 } 5720 5721 /** 5722 * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command 5723 * @vport: pointer to a host virtual N_Port data structure. 5724 * @cmdsize: size of the ELS command. 5725 * @oldiocb: pointer to the original lpfc command iocb data structure. 5726 * @ndlp: pointer to a node-list data structure. 5727 * 5728 * This routine issuees an Accept (ACC) Read Port List (RPL) ELS command. 5729 * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL. 5730 * 5731 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 5732 * will be incremented by 1 for holding the ndlp and the reference to ndlp 5733 * will be stored into the context1 field of the IOCB for the completion 5734 * callback function to the RPL Accept Response ELS command. 
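 *
 * The ACC payload is the ELS_CMD_ACC word (carrying the payload length)
 * followed by a single RPL_RSP port block describing this port (a
 * sketch of the fill performed below):
 *
 *	rpl_rsp.listLen = be32_to_cpu(1);
 *	rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID);
 *	memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname,
 *	       sizeof(struct lpfc_name));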
5735 * 5736 * Return code 5737 * 0 - Successfully issued ACC RPL ELS command 5738 * 1 - Failed to issue ACC RPL ELS command 5739 **/ 5740 static int 5741 lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize, 5742 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 5743 { 5744 struct lpfc_hba *phba = vport->phba; 5745 IOCB_t *icmd, *oldcmd; 5746 RPL_RSP rpl_rsp; 5747 struct lpfc_iocbq *elsiocb; 5748 uint8_t *pcmd; 5749 5750 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 5751 ndlp->nlp_DID, ELS_CMD_ACC); 5752 5753 if (!elsiocb) 5754 return 1; 5755 5756 icmd = &elsiocb->iocb; 5757 oldcmd = &oldiocb->iocb; 5758 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5759 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 5760 5761 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5762 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5763 pcmd += sizeof(uint16_t); 5764 *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize); 5765 pcmd += sizeof(uint16_t); 5766 5767 /* Setup the RPL ACC payload */ 5768 rpl_rsp.listLen = be32_to_cpu(1); 5769 rpl_rsp.index = 0; 5770 rpl_rsp.port_num_blk.portNum = 0; 5771 rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID); 5772 memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname, 5773 sizeof(struct lpfc_name)); 5774 memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t)); 5775 /* Xmit ELS RPL ACC response tag <ulpIoTag> */ 5776 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5777 "0120 Xmit ELS RPL ACC response tag x%x " 5778 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " 5779 "rpi x%x\n", 5780 elsiocb->iotag, elsiocb->iocb.ulpContext, 5781 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5782 ndlp->nlp_rpi); 5783 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5784 phba->fc_stat.elsXmitACC++; 5785 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == 5786 IOCB_ERROR) { 5787 lpfc_els_free_iocb(phba, elsiocb); 5788 return 1; 5789 } 5790 return 0; 5791 } 5792 5793 /** 5794 * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb 5795 * @vport: pointer to a host virtual N_Port data structure. 5796 * @cmdiocb: pointer to lpfc command iocb data structure. 5797 * @ndlp: pointer to a node-list data structure. 5798 * 5799 * This routine processes Read Port List (RPL) IOCB received as an ELS 5800 * unsolicited event. It first checks the remote port state. If the remote 5801 * port is not in NLP_STE_UNMAPPED_NODE and NLP_STE_MAPPED_NODE states, it 5802 * invokes the lpfc_els_rsp_reject() routine to send reject response. 5803 * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine 5804 * to accept the RPL. 
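 *
 * The size of the ACC is bounded by the requester's maxsize field; this
 * driver reports only one port (a sketch of the sizing logic below):
 *
 *	maxsize = be32_to_cpu(rpl->maxsize);
 *	if (rpl->index == 0 &&
 *	    (maxsize == 0 || maxsize * sizeof(uint32_t) >= sizeof(RPL_RSP)))
 *		cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP);
 *	else
 *		cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t);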
5805 * 5806 * Return code 5807 * 0 - Successfully processed rpl iocb (currently always return 0) 5808 **/ 5809 static int 5810 lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5811 struct lpfc_nodelist *ndlp) 5812 { 5813 struct lpfc_dmabuf *pcmd; 5814 uint32_t *lp; 5815 uint32_t maxsize; 5816 uint16_t cmdsize; 5817 RPL *rpl; 5818 struct ls_rjt stat; 5819 5820 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 5821 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { 5822 /* issue rejection response */ 5823 stat.un.b.lsRjtRsvd0 = 0; 5824 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 5825 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 5826 stat.un.b.vendorUnique = 0; 5827 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 5828 NULL); 5829 /* rejected the unsolicited RPL request and done with it */ 5830 return 0; 5831 } 5832 5833 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 5834 lp = (uint32_t *) pcmd->virt; 5835 rpl = (RPL *) (lp + 1); 5836 maxsize = be32_to_cpu(rpl->maxsize); 5837 5838 /* We support only one port */ 5839 if ((rpl->index == 0) && 5840 ((maxsize == 0) || 5841 ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) { 5842 cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP); 5843 } else { 5844 cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t); 5845 } 5846 lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp); 5847 5848 return 0; 5849 } 5850 5851 /** 5852 * lpfc_els_rcv_farp - Process an unsolicited farp request els command 5853 * @vport: pointer to a virtual N_Port data structure. 5854 * @cmdiocb: pointer to lpfc command iocb data structure. 5855 * @ndlp: pointer to a node-list data structure. 5856 * 5857 * This routine processes Fibre Channel Address Resolution Protocol 5858 * (FARP) Request IOCB received as an ELS unsolicited event. Currently, 5859 * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such, 5860 * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the 5861 * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the 5862 * remote PortName is compared against the FC PortName stored in the @vport 5863 * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is 5864 * compared against the FC NodeName stored in the @vport data structure. 5865 * If any of these matches and the FARP_REQUEST_FARPR flag is set in the 5866 * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is 5867 * invoked to send out FARP Response to the remote node. Before sending the 5868 * FARP Response, however, the FARP_REQUEST_PLOGI flag is check in the FARP 5869 * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi() 5870 * routine is invoked to log into the remote port first. 
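 *
 * The match itself is a plain WWPN/WWNN compare against this vport (a
 * sketch of the checks below; "matched" is an illustrative local name,
 * the routine itself uses a counter):
 *
 *	if ((fp->Mflags & FARP_MATCH_PORT) &&
 *	    !memcmp(&fp->RportName, &vport->fc_portname,
 *		    sizeof(struct lpfc_name)))
 *		matched = 1;
 *	if ((fp->Mflags & FARP_MATCH_NODE) &&
 *	    !memcmp(&fp->RnodeName, &vport->fc_nodename,
 *		    sizeof(struct lpfc_name)))
 *		matched = 1;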
5871 * 5872 * Return code 5873 * 0 - Either the FARP Match Mode not supported or successfully processed 5874 **/ 5875 static int 5876 lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5877 struct lpfc_nodelist *ndlp) 5878 { 5879 struct lpfc_dmabuf *pcmd; 5880 uint32_t *lp; 5881 IOCB_t *icmd; 5882 FARP *fp; 5883 uint32_t cmd, cnt, did; 5884 5885 icmd = &cmdiocb->iocb; 5886 did = icmd->un.elsreq64.remoteID; 5887 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 5888 lp = (uint32_t *) pcmd->virt; 5889 5890 cmd = *lp++; 5891 fp = (FARP *) lp; 5892 /* FARP-REQ received from DID <did> */ 5893 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5894 "0601 FARP-REQ received from DID x%x\n", did); 5895 /* We will only support match on WWPN or WWNN */ 5896 if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) { 5897 return 0; 5898 } 5899 5900 cnt = 0; 5901 /* If this FARP command is searching for my portname */ 5902 if (fp->Mflags & FARP_MATCH_PORT) { 5903 if (memcmp(&fp->RportName, &vport->fc_portname, 5904 sizeof(struct lpfc_name)) == 0) 5905 cnt = 1; 5906 } 5907 5908 /* If this FARP command is searching for my nodename */ 5909 if (fp->Mflags & FARP_MATCH_NODE) { 5910 if (memcmp(&fp->RnodeName, &vport->fc_nodename, 5911 sizeof(struct lpfc_name)) == 0) 5912 cnt = 1; 5913 } 5914 5915 if (cnt) { 5916 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || 5917 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) { 5918 /* Log back into the node before sending the FARP. */ 5919 if (fp->Rflags & FARP_REQUEST_PLOGI) { 5920 ndlp->nlp_prev_state = ndlp->nlp_state; 5921 lpfc_nlp_set_state(vport, ndlp, 5922 NLP_STE_PLOGI_ISSUE); 5923 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 5924 } 5925 5926 /* Send a FARP response to that node */ 5927 if (fp->Rflags & FARP_REQUEST_FARPR) 5928 lpfc_issue_els_farpr(vport, did, 0); 5929 } 5930 } 5931 return 0; 5932 } 5933 5934 /** 5935 * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb 5936 * @vport: pointer to a host virtual N_Port data structure. 5937 * @cmdiocb: pointer to lpfc command iocb data structure. 5938 * @ndlp: pointer to a node-list data structure. 5939 * 5940 * This routine processes Fibre Channel Address Resolution Protocol 5941 * Response (FARPR) IOCB received as an ELS unsolicited event. It simply 5942 * invokes the lpfc_els_rsp_acc() routine to the remote node to accept 5943 * the FARP response request. 5944 * 5945 * Return code 5946 * 0 - Successfully processed FARPR IOCB (currently always return 0) 5947 **/ 5948 static int 5949 lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5950 struct lpfc_nodelist *ndlp) 5951 { 5952 struct lpfc_dmabuf *pcmd; 5953 uint32_t *lp; 5954 IOCB_t *icmd; 5955 uint32_t cmd, did; 5956 5957 icmd = &cmdiocb->iocb; 5958 did = icmd->un.elsreq64.remoteID; 5959 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 5960 lp = (uint32_t *) pcmd->virt; 5961 5962 cmd = *lp++; 5963 /* FARP-RSP received from DID <did> */ 5964 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5965 "0600 FARP-RSP received from DID x%x\n", did); 5966 /* ACCEPT the Farp resp request */ 5967 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 5968 5969 return 0; 5970 } 5971 5972 /** 5973 * lpfc_els_rcv_fan - Process an unsolicited fan iocb command 5974 * @vport: pointer to a host virtual N_Port data structure. 5975 * @cmdiocb: pointer to lpfc command iocb data structure. 5976 * @fan_ndlp: pointer to a node-list data structure. 
5977 * 5978 * This routine processes a Fabric Address Notification (FAN) IOCB 5979 * command received as an ELS unsolicited event. The FAN ELS command will 5980 * only be processed on a physical port (i.e., the @vport represents the 5981 * physical port). The fabric NodeName and PortName from the FAN IOCB are 5982 * compared against those in the phba data structure. If any of those is 5983 * different, the lpfc_initial_flogi() routine is invoked to initialize 5984 * Fabric Login (FLOGI) to the fabric to start the discover over. Otherwise, 5985 * if both of those are identical, the lpfc_issue_fabric_reglogin() routine 5986 * is invoked to register login to the fabric. 5987 * 5988 * Return code 5989 * 0 - Successfully processed fan iocb (currently always return 0). 5990 **/ 5991 static int 5992 lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5993 struct lpfc_nodelist *fan_ndlp) 5994 { 5995 struct lpfc_hba *phba = vport->phba; 5996 uint32_t *lp; 5997 FAN *fp; 5998 5999 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n"); 6000 lp = (uint32_t *)((struct lpfc_dmabuf *)cmdiocb->context2)->virt; 6001 fp = (FAN *) ++lp; 6002 /* FAN received; Fan does not have a reply sequence */ 6003 if ((vport == phba->pport) && 6004 (vport->port_state == LPFC_LOCAL_CFG_LINK)) { 6005 if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName, 6006 sizeof(struct lpfc_name))) || 6007 (memcmp(&phba->fc_fabparam.portName, &fp->FportName, 6008 sizeof(struct lpfc_name)))) { 6009 /* This port has switched fabrics. FLOGI is required */ 6010 lpfc_issue_init_vfi(vport); 6011 } else { 6012 /* FAN verified - skip FLOGI */ 6013 vport->fc_myDID = vport->fc_prevDID; 6014 if (phba->sli_rev < LPFC_SLI_REV4) 6015 lpfc_issue_fabric_reglogin(vport); 6016 else { 6017 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6018 "3138 Need register VFI: (x%x/%x)\n", 6019 vport->fc_prevDID, vport->fc_myDID); 6020 lpfc_issue_reg_vfi(vport); 6021 } 6022 } 6023 } 6024 return 0; 6025 } 6026 6027 /** 6028 * lpfc_els_timeout - Handler funciton to the els timer 6029 * @ptr: holder for the timer function associated data. 6030 * 6031 * This routine is invoked by the ELS timer after timeout. It posts the ELS 6032 * timer timeout event by setting the WORKER_ELS_TMO bit to the work port 6033 * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake 6034 * up the worker thread. It is for the worker thread to invoke the routine 6035 * lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO. 6036 **/ 6037 void 6038 lpfc_els_timeout(unsigned long ptr) 6039 { 6040 struct lpfc_vport *vport = (struct lpfc_vport *) ptr; 6041 struct lpfc_hba *phba = vport->phba; 6042 uint32_t tmo_posted; 6043 unsigned long iflag; 6044 6045 spin_lock_irqsave(&vport->work_port_lock, iflag); 6046 tmo_posted = vport->work_port_events & WORKER_ELS_TMO; 6047 if (!tmo_posted) 6048 vport->work_port_events |= WORKER_ELS_TMO; 6049 spin_unlock_irqrestore(&vport->work_port_lock, iflag); 6050 6051 if (!tmo_posted) 6052 lpfc_worker_wake_up(phba); 6053 return; 6054 } 6055 6056 6057 /** 6058 * lpfc_els_timeout_handler - Process an els timeout event 6059 * @vport: pointer to a virtual N_Port data structure. 6060 * 6061 * This routine is the actual handler function that processes an ELS timeout 6062 * event. It walks the ELS ring to get and abort all the IOCBs (except the 6063 * ABORT/CLOSE/FARP/FARPR/FDISC), which are associated with the @vport by 6064 * invoking the lpfc_sli_issue_abort_iotag() routine. 
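 *
 * Each pass works with a window of twice R_A_TOV; IOCBs whose
 * drvrTimeout has not yet expired are only aged, and just the expired
 * ones are queued for abort (a sketch of the per-IOCB aging below):
 *
 *	timeout = (uint32_t)(phba->fc_ratov << 1);
 *	if (piocb->drvrTimeout > 0) {
 *		if (piocb->drvrTimeout >= timeout)
 *			piocb->drvrTimeout -= timeout;
 *		else
 *			piocb->drvrTimeout = 0;
 *	}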
6065 **/ 6066 void 6067 lpfc_els_timeout_handler(struct lpfc_vport *vport) 6068 { 6069 struct lpfc_hba *phba = vport->phba; 6070 struct lpfc_sli_ring *pring; 6071 struct lpfc_iocbq *tmp_iocb, *piocb; 6072 IOCB_t *cmd = NULL; 6073 struct lpfc_dmabuf *pcmd; 6074 uint32_t els_command = 0; 6075 uint32_t timeout; 6076 uint32_t remote_ID = 0xffffffff; 6077 LIST_HEAD(txcmplq_completions); 6078 LIST_HEAD(abort_list); 6079 6080 6081 timeout = (uint32_t)(phba->fc_ratov << 1); 6082 6083 pring = &phba->sli.ring[LPFC_ELS_RING]; 6084 6085 spin_lock_irq(&phba->hbalock); 6086 list_splice_init(&pring->txcmplq, &txcmplq_completions); 6087 spin_unlock_irq(&phba->hbalock); 6088 6089 list_for_each_entry_safe(piocb, tmp_iocb, &txcmplq_completions, list) { 6090 cmd = &piocb->iocb; 6091 6092 if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 || 6093 piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || 6094 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) 6095 continue; 6096 6097 if (piocb->vport != vport) 6098 continue; 6099 6100 pcmd = (struct lpfc_dmabuf *) piocb->context2; 6101 if (pcmd) 6102 els_command = *(uint32_t *) (pcmd->virt); 6103 6104 if (els_command == ELS_CMD_FARP || 6105 els_command == ELS_CMD_FARPR || 6106 els_command == ELS_CMD_FDISC) 6107 continue; 6108 6109 if (piocb->drvrTimeout > 0) { 6110 if (piocb->drvrTimeout >= timeout) 6111 piocb->drvrTimeout -= timeout; 6112 else 6113 piocb->drvrTimeout = 0; 6114 continue; 6115 } 6116 6117 remote_ID = 0xffffffff; 6118 if (cmd->ulpCommand != CMD_GEN_REQUEST64_CR) 6119 remote_ID = cmd->un.elsreq64.remoteID; 6120 else { 6121 struct lpfc_nodelist *ndlp; 6122 ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext); 6123 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) 6124 remote_ID = ndlp->nlp_DID; 6125 } 6126 list_add_tail(&piocb->dlist, &abort_list); 6127 } 6128 spin_lock_irq(&phba->hbalock); 6129 list_splice(&txcmplq_completions, &pring->txcmplq); 6130 spin_unlock_irq(&phba->hbalock); 6131 6132 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { 6133 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 6134 "0127 ELS timeout Data: x%x x%x x%x " 6135 "x%x\n", els_command, 6136 remote_ID, cmd->ulpCommand, cmd->ulpIoTag); 6137 spin_lock_irq(&phba->hbalock); 6138 list_del_init(&piocb->dlist); 6139 lpfc_sli_issue_abort_iotag(phba, pring, piocb); 6140 spin_unlock_irq(&phba->hbalock); 6141 } 6142 6143 if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt) 6144 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout); 6145 } 6146 6147 /** 6148 * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport 6149 * @vport: pointer to a host virtual N_Port data structure. 6150 * 6151 * This routine is used to clean up all the outstanding ELS commands on a 6152 * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport() 6153 * routine. After that, it walks the ELS transmit queue to remove all the 6154 * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. For 6155 * the IOCBs with a non-NULL completion callback function, the callback 6156 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and 6157 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion 6158 * callback function, the IOCB will simply be released. 
Finally, it walks 6159 * the ELS transmit completion queue to issue an abort IOCB to any transmit 6160 * completion queue IOCB that is associated with the @vport and is not 6161 * an IOCB from libdfc (i.e., the management plane IOCBs that are not 6162 * part of the discovery state machine) out to HBA by invoking the 6163 * lpfc_sli_issue_abort_iotag() routine. Note that this function issues the 6164 * abort IOCB to any transmit completion queueed IOCB, it does not guarantee 6165 * the IOCBs are aborted when this function returns. 6166 **/ 6167 void 6168 lpfc_els_flush_cmd(struct lpfc_vport *vport) 6169 { 6170 LIST_HEAD(completions); 6171 struct lpfc_hba *phba = vport->phba; 6172 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 6173 struct lpfc_iocbq *tmp_iocb, *piocb; 6174 IOCB_t *cmd = NULL; 6175 6176 lpfc_fabric_abort_vport(vport); 6177 6178 spin_lock_irq(&phba->hbalock); 6179 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) { 6180 cmd = &piocb->iocb; 6181 6182 if (piocb->iocb_flag & LPFC_IO_LIBDFC) { 6183 continue; 6184 } 6185 6186 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */ 6187 if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN || 6188 cmd->ulpCommand == CMD_QUE_RING_BUF64_CN || 6189 cmd->ulpCommand == CMD_CLOSE_XRI_CN || 6190 cmd->ulpCommand == CMD_ABORT_XRI_CN) 6191 continue; 6192 6193 if (piocb->vport != vport) 6194 continue; 6195 6196 list_move_tail(&piocb->list, &completions); 6197 pring->txq_cnt--; 6198 } 6199 6200 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 6201 if (piocb->iocb_flag & LPFC_IO_LIBDFC) { 6202 continue; 6203 } 6204 6205 if (piocb->vport != vport) 6206 continue; 6207 6208 lpfc_sli_issue_abort_iotag(phba, pring, piocb); 6209 } 6210 spin_unlock_irq(&phba->hbalock); 6211 6212 /* Cancell all the IOCBs from the completions list */ 6213 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 6214 IOERR_SLI_ABORTED); 6215 6216 return; 6217 } 6218 6219 /** 6220 * lpfc_els_flush_all_cmd - Clean up all the outstanding els commands to a HBA 6221 * @phba: pointer to lpfc hba data structure. 6222 * 6223 * This routine is used to clean up all the outstanding ELS commands on a 6224 * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba() 6225 * routine. After that, it walks the ELS transmit queue to remove all the 6226 * IOCBs to the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For 6227 * the IOCBs with the completion callback function associated, the callback 6228 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and 6229 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without the completion 6230 * callback function associated, the IOCB will simply be released. Finally, 6231 * it walks the ELS transmit completion queue to issue an abort IOCB to any 6232 * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the 6233 * management plane IOCBs that are not part of the discovery state machine) 6234 * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine. 
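 *
 * The transmit-queue filter leaves management-plane (libdfc) and
 * ring-control IOCBs alone and flushes the rest (a sketch of the test
 * applied to each txq IOCB below):
 *
 *	if (piocb->iocb_flag & LPFC_IO_LIBDFC)
 *		continue;
 *	if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
 *	    cmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
 *	    cmd->ulpCommand == CMD_CLOSE_XRI_CN ||
 *	    cmd->ulpCommand == CMD_ABORT_XRI_CN)
 *		continue;
 *	list_move_tail(&piocb->list, &completions);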
6235 **/ 6236 void 6237 lpfc_els_flush_all_cmd(struct lpfc_hba *phba) 6238 { 6239 LIST_HEAD(completions); 6240 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 6241 struct lpfc_iocbq *tmp_iocb, *piocb; 6242 IOCB_t *cmd = NULL; 6243 6244 lpfc_fabric_abort_hba(phba); 6245 spin_lock_irq(&phba->hbalock); 6246 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) { 6247 cmd = &piocb->iocb; 6248 if (piocb->iocb_flag & LPFC_IO_LIBDFC) 6249 continue; 6250 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */ 6251 if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN || 6252 cmd->ulpCommand == CMD_QUE_RING_BUF64_CN || 6253 cmd->ulpCommand == CMD_CLOSE_XRI_CN || 6254 cmd->ulpCommand == CMD_ABORT_XRI_CN) 6255 continue; 6256 list_move_tail(&piocb->list, &completions); 6257 pring->txq_cnt--; 6258 } 6259 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 6260 if (piocb->iocb_flag & LPFC_IO_LIBDFC) 6261 continue; 6262 lpfc_sli_issue_abort_iotag(phba, pring, piocb); 6263 } 6264 spin_unlock_irq(&phba->hbalock); 6265 6266 /* Cancel all the IOCBs from the completions list */ 6267 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 6268 IOERR_SLI_ABORTED); 6269 6270 return; 6271 } 6272 6273 /** 6274 * lpfc_send_els_failure_event - Posts an ELS command failure event 6275 * @phba: Pointer to hba context object. 6276 * @cmdiocbp: Pointer to command iocb which reported error. 6277 * @rspiocbp: Pointer to response iocb which reported error. 6278 * 6279 * This function sends an event when there is an ELS command 6280 * failure. 6281 **/ 6282 void 6283 lpfc_send_els_failure_event(struct lpfc_hba *phba, 6284 struct lpfc_iocbq *cmdiocbp, 6285 struct lpfc_iocbq *rspiocbp) 6286 { 6287 struct lpfc_vport *vport = cmdiocbp->vport; 6288 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6289 struct lpfc_lsrjt_event lsrjt_event; 6290 struct lpfc_fabric_event_header fabric_event; 6291 struct ls_rjt stat; 6292 struct lpfc_nodelist *ndlp; 6293 uint32_t *pcmd; 6294 6295 ndlp = cmdiocbp->context1; 6296 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) 6297 return; 6298 6299 if (rspiocbp->iocb.ulpStatus == IOSTAT_LS_RJT) { 6300 lsrjt_event.header.event_type = FC_REG_ELS_EVENT; 6301 lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV; 6302 memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname, 6303 sizeof(struct lpfc_name)); 6304 memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename, 6305 sizeof(struct lpfc_name)); 6306 pcmd = (uint32_t *) (((struct lpfc_dmabuf *) 6307 cmdiocbp->context2)->virt); 6308 lsrjt_event.command = (pcmd != NULL) ? 
*pcmd : 0; 6309 stat.un.lsRjtError = be32_to_cpu(rspiocbp->iocb.un.ulpWord[4]); 6310 lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode; 6311 lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp; 6312 fc_host_post_vendor_event(shost, 6313 fc_get_event_number(), 6314 sizeof(lsrjt_event), 6315 (char *)&lsrjt_event, 6316 LPFC_NL_VENDOR_ID); 6317 return; 6318 } 6319 if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) || 6320 (rspiocbp->iocb.ulpStatus == IOSTAT_FABRIC_BSY)) { 6321 fabric_event.event_type = FC_REG_FABRIC_EVENT; 6322 if (rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) 6323 fabric_event.subcategory = LPFC_EVENT_PORT_BUSY; 6324 else 6325 fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY; 6326 memcpy(fabric_event.wwpn, &ndlp->nlp_portname, 6327 sizeof(struct lpfc_name)); 6328 memcpy(fabric_event.wwnn, &ndlp->nlp_nodename, 6329 sizeof(struct lpfc_name)); 6330 fc_host_post_vendor_event(shost, 6331 fc_get_event_number(), 6332 sizeof(fabric_event), 6333 (char *)&fabric_event, 6334 LPFC_NL_VENDOR_ID); 6335 return; 6336 } 6337 6338 } 6339 6340 /** 6341 * lpfc_send_els_event - Posts unsolicited els event 6342 * @vport: Pointer to vport object. 6343 * @ndlp: Pointer FC node object. 6344 * @cmd: ELS command code. 6345 * 6346 * This function posts an event when there is an incoming 6347 * unsolicited ELS command. 6348 **/ 6349 static void 6350 lpfc_send_els_event(struct lpfc_vport *vport, 6351 struct lpfc_nodelist *ndlp, 6352 uint32_t *payload) 6353 { 6354 struct lpfc_els_event_header *els_data = NULL; 6355 struct lpfc_logo_event *logo_data = NULL; 6356 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6357 6358 if (*payload == ELS_CMD_LOGO) { 6359 logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL); 6360 if (!logo_data) { 6361 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 6362 "0148 Failed to allocate memory " 6363 "for LOGO event\n"); 6364 return; 6365 } 6366 els_data = &logo_data->header; 6367 } else { 6368 els_data = kmalloc(sizeof(struct lpfc_els_event_header), 6369 GFP_KERNEL); 6370 if (!els_data) { 6371 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 6372 "0149 Failed to allocate memory " 6373 "for ELS event\n"); 6374 return; 6375 } 6376 } 6377 els_data->event_type = FC_REG_ELS_EVENT; 6378 switch (*payload) { 6379 case ELS_CMD_PLOGI: 6380 els_data->subcategory = LPFC_EVENT_PLOGI_RCV; 6381 break; 6382 case ELS_CMD_PRLO: 6383 els_data->subcategory = LPFC_EVENT_PRLO_RCV; 6384 break; 6385 case ELS_CMD_ADISC: 6386 els_data->subcategory = LPFC_EVENT_ADISC_RCV; 6387 break; 6388 case ELS_CMD_LOGO: 6389 els_data->subcategory = LPFC_EVENT_LOGO_RCV; 6390 /* Copy the WWPN in the LOGO payload */ 6391 memcpy(logo_data->logo_wwpn, &payload[2], 6392 sizeof(struct lpfc_name)); 6393 break; 6394 default: 6395 kfree(els_data); 6396 return; 6397 } 6398 memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name)); 6399 memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name)); 6400 if (*payload == ELS_CMD_LOGO) { 6401 fc_host_post_vendor_event(shost, 6402 fc_get_event_number(), 6403 sizeof(struct lpfc_logo_event), 6404 (char *)logo_data, 6405 LPFC_NL_VENDOR_ID); 6406 kfree(logo_data); 6407 } else { 6408 fc_host_post_vendor_event(shost, 6409 fc_get_event_number(), 6410 sizeof(struct lpfc_els_event_header), 6411 (char *)els_data, 6412 LPFC_NL_VENDOR_ID); 6413 kfree(els_data); 6414 } 6415 6416 return; 6417 } 6418 6419 6420 /** 6421 * lpfc_els_unsol_buffer - Process an unsolicited event data buffer 6422 * @phba: pointer to lpfc hba data structure. 6423 * @pring: pointer to a SLI ring. 
6424 * @vport: pointer to a host virtual N_Port data structure. 6425 * @elsiocb: pointer to lpfc els command iocb data structure. 6426 * 6427 * This routine is used for processing the IOCB associated with a unsolicited 6428 * event. It first determines whether there is an existing ndlp that matches 6429 * the DID from the unsolicited IOCB. If not, it will create a new one with 6430 * the DID from the unsolicited IOCB. The ELS command from the unsolicited 6431 * IOCB is then used to invoke the proper routine and to set up proper state 6432 * of the discovery state machine. 6433 **/ 6434 static void 6435 lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 6436 struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb) 6437 { 6438 struct Scsi_Host *shost; 6439 struct lpfc_nodelist *ndlp; 6440 struct ls_rjt stat; 6441 uint32_t *payload; 6442 uint32_t cmd, did, newnode, rjt_err = 0; 6443 IOCB_t *icmd = &elsiocb->iocb; 6444 6445 if (!vport || !(elsiocb->context2)) 6446 goto dropit; 6447 6448 newnode = 0; 6449 payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt; 6450 cmd = *payload; 6451 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0) 6452 lpfc_post_buffer(phba, pring, 1); 6453 6454 did = icmd->un.rcvels.remoteID; 6455 if (icmd->ulpStatus) { 6456 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6457 "RCV Unsol ELS: status:x%x/x%x did:x%x", 6458 icmd->ulpStatus, icmd->un.ulpWord[4], did); 6459 goto dropit; 6460 } 6461 6462 /* Check to see if link went down during discovery */ 6463 if (lpfc_els_chk_latt(vport)) 6464 goto dropit; 6465 6466 /* Ignore traffic received during vport shutdown. */ 6467 if (vport->load_flag & FC_UNLOADING) 6468 goto dropit; 6469 6470 /* If NPort discovery is delayed drop incoming ELS */ 6471 if ((vport->fc_flag & FC_DISC_DELAYED) && 6472 (cmd != ELS_CMD_PLOGI)) 6473 goto dropit; 6474 6475 ndlp = lpfc_findnode_did(vport, did); 6476 if (!ndlp) { 6477 /* Cannot find existing Fabric ndlp, so allocate a new one */ 6478 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 6479 if (!ndlp) 6480 goto dropit; 6481 6482 lpfc_nlp_init(vport, ndlp, did); 6483 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 6484 newnode = 1; 6485 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 6486 ndlp->nlp_type |= NLP_FABRIC; 6487 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 6488 ndlp = lpfc_enable_node(vport, ndlp, 6489 NLP_STE_UNUSED_NODE); 6490 if (!ndlp) 6491 goto dropit; 6492 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 6493 newnode = 1; 6494 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 6495 ndlp->nlp_type |= NLP_FABRIC; 6496 } else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 6497 /* This is similar to the new node path */ 6498 ndlp = lpfc_nlp_get(ndlp); 6499 if (!ndlp) 6500 goto dropit; 6501 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 6502 newnode = 1; 6503 } 6504 6505 phba->fc_stat.elsRcvFrame++; 6506 6507 elsiocb->context1 = lpfc_nlp_get(ndlp); 6508 elsiocb->vport = vport; 6509 6510 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) { 6511 cmd &= ELS_CMD_MASK; 6512 } 6513 /* ELS command <elsCmd> received from NPORT <did> */ 6514 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6515 "0112 ELS command x%x received from NPORT x%x " 6516 "Data: x%x\n", cmd, did, vport->port_state); 6517 switch (cmd) { 6518 case ELS_CMD_PLOGI: 6519 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6520 "RCV PLOGI: did:x%x/ste:x%x flg:x%x", 6521 did, vport->port_state, ndlp->nlp_flag); 6522 6523 phba->fc_stat.elsRcvPLOGI++; 6524 ndlp = lpfc_plogi_confirm_nport(phba, payload, 
ndlp); 6525 6526 lpfc_send_els_event(vport, ndlp, payload); 6527 6528 /* If Nport discovery is delayed, reject PLOGIs */ 6529 if (vport->fc_flag & FC_DISC_DELAYED) { 6530 rjt_err = LSRJT_UNABLE_TPC; 6531 break; 6532 } 6533 if (vport->port_state < LPFC_DISC_AUTH) { 6534 if (!(phba->pport->fc_flag & FC_PT2PT) || 6535 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { 6536 rjt_err = LSRJT_UNABLE_TPC; 6537 break; 6538 } 6539 /* We get here, and drop thru, if we are PT2PT with 6540 * another NPort and the other side has initiated 6541 * the PLOGI before responding to our FLOGI. 6542 */ 6543 } 6544 6545 shost = lpfc_shost_from_vport(vport); 6546 spin_lock_irq(shost->host_lock); 6547 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE; 6548 spin_unlock_irq(shost->host_lock); 6549 6550 lpfc_disc_state_machine(vport, ndlp, elsiocb, 6551 NLP_EVT_RCV_PLOGI); 6552 6553 break; 6554 case ELS_CMD_FLOGI: 6555 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6556 "RCV FLOGI: did:x%x/ste:x%x flg:x%x", 6557 did, vport->port_state, ndlp->nlp_flag); 6558 6559 phba->fc_stat.elsRcvFLOGI++; 6560 lpfc_els_rcv_flogi(vport, elsiocb, ndlp); 6561 if (newnode) 6562 lpfc_nlp_put(ndlp); 6563 break; 6564 case ELS_CMD_LOGO: 6565 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6566 "RCV LOGO: did:x%x/ste:x%x flg:x%x", 6567 did, vport->port_state, ndlp->nlp_flag); 6568 6569 phba->fc_stat.elsRcvLOGO++; 6570 lpfc_send_els_event(vport, ndlp, payload); 6571 if (vport->port_state < LPFC_DISC_AUTH) { 6572 rjt_err = LSRJT_UNABLE_TPC; 6573 break; 6574 } 6575 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO); 6576 break; 6577 case ELS_CMD_PRLO: 6578 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6579 "RCV PRLO: did:x%x/ste:x%x flg:x%x", 6580 did, vport->port_state, ndlp->nlp_flag); 6581 6582 phba->fc_stat.elsRcvPRLO++; 6583 lpfc_send_els_event(vport, ndlp, payload); 6584 if (vport->port_state < LPFC_DISC_AUTH) { 6585 rjt_err = LSRJT_UNABLE_TPC; 6586 break; 6587 } 6588 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO); 6589 break; 6590 case ELS_CMD_RSCN: 6591 phba->fc_stat.elsRcvRSCN++; 6592 lpfc_els_rcv_rscn(vport, elsiocb, ndlp); 6593 if (newnode) 6594 lpfc_nlp_put(ndlp); 6595 break; 6596 case ELS_CMD_ADISC: 6597 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6598 "RCV ADISC: did:x%x/ste:x%x flg:x%x", 6599 did, vport->port_state, ndlp->nlp_flag); 6600 6601 lpfc_send_els_event(vport, ndlp, payload); 6602 phba->fc_stat.elsRcvADISC++; 6603 if (vport->port_state < LPFC_DISC_AUTH) { 6604 rjt_err = LSRJT_UNABLE_TPC; 6605 break; 6606 } 6607 lpfc_disc_state_machine(vport, ndlp, elsiocb, 6608 NLP_EVT_RCV_ADISC); 6609 break; 6610 case ELS_CMD_PDISC: 6611 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6612 "RCV PDISC: did:x%x/ste:x%x flg:x%x", 6613 did, vport->port_state, ndlp->nlp_flag); 6614 6615 phba->fc_stat.elsRcvPDISC++; 6616 if (vport->port_state < LPFC_DISC_AUTH) { 6617 rjt_err = LSRJT_UNABLE_TPC; 6618 break; 6619 } 6620 lpfc_disc_state_machine(vport, ndlp, elsiocb, 6621 NLP_EVT_RCV_PDISC); 6622 break; 6623 case ELS_CMD_FARPR: 6624 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6625 "RCV FARPR: did:x%x/ste:x%x flg:x%x", 6626 did, vport->port_state, ndlp->nlp_flag); 6627 6628 phba->fc_stat.elsRcvFARPR++; 6629 lpfc_els_rcv_farpr(vport, elsiocb, ndlp); 6630 break; 6631 case ELS_CMD_FARP: 6632 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6633 "RCV FARP: did:x%x/ste:x%x flg:x%x", 6634 did, vport->port_state, ndlp->nlp_flag); 6635 6636 phba->fc_stat.elsRcvFARP++; 6637 
lpfc_els_rcv_farp(vport, elsiocb, ndlp); 6638 break; 6639 case ELS_CMD_FAN: 6640 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6641 "RCV FAN: did:x%x/ste:x%x flg:x%x", 6642 did, vport->port_state, ndlp->nlp_flag); 6643 6644 phba->fc_stat.elsRcvFAN++; 6645 lpfc_els_rcv_fan(vport, elsiocb, ndlp); 6646 break; 6647 case ELS_CMD_PRLI: 6648 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6649 "RCV PRLI: did:x%x/ste:x%x flg:x%x", 6650 did, vport->port_state, ndlp->nlp_flag); 6651 6652 phba->fc_stat.elsRcvPRLI++; 6653 if (vport->port_state < LPFC_DISC_AUTH) { 6654 rjt_err = LSRJT_UNABLE_TPC; 6655 break; 6656 } 6657 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI); 6658 break; 6659 case ELS_CMD_LIRR: 6660 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6661 "RCV LIRR: did:x%x/ste:x%x flg:x%x", 6662 did, vport->port_state, ndlp->nlp_flag); 6663 6664 phba->fc_stat.elsRcvLIRR++; 6665 lpfc_els_rcv_lirr(vport, elsiocb, ndlp); 6666 if (newnode) 6667 lpfc_nlp_put(ndlp); 6668 break; 6669 case ELS_CMD_RLS: 6670 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6671 "RCV RLS: did:x%x/ste:x%x flg:x%x", 6672 did, vport->port_state, ndlp->nlp_flag); 6673 6674 phba->fc_stat.elsRcvRLS++; 6675 lpfc_els_rcv_rls(vport, elsiocb, ndlp); 6676 if (newnode) 6677 lpfc_nlp_put(ndlp); 6678 break; 6679 case ELS_CMD_RPS: 6680 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6681 "RCV RPS: did:x%x/ste:x%x flg:x%x", 6682 did, vport->port_state, ndlp->nlp_flag); 6683 6684 phba->fc_stat.elsRcvRPS++; 6685 lpfc_els_rcv_rps(vport, elsiocb, ndlp); 6686 if (newnode) 6687 lpfc_nlp_put(ndlp); 6688 break; 6689 case ELS_CMD_RPL: 6690 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6691 "RCV RPL: did:x%x/ste:x%x flg:x%x", 6692 did, vport->port_state, ndlp->nlp_flag); 6693 6694 phba->fc_stat.elsRcvRPL++; 6695 lpfc_els_rcv_rpl(vport, elsiocb, ndlp); 6696 if (newnode) 6697 lpfc_nlp_put(ndlp); 6698 break; 6699 case ELS_CMD_RNID: 6700 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6701 "RCV RNID: did:x%x/ste:x%x flg:x%x", 6702 did, vport->port_state, ndlp->nlp_flag); 6703 6704 phba->fc_stat.elsRcvRNID++; 6705 lpfc_els_rcv_rnid(vport, elsiocb, ndlp); 6706 if (newnode) 6707 lpfc_nlp_put(ndlp); 6708 break; 6709 case ELS_CMD_RTV: 6710 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6711 "RCV RTV: did:x%x/ste:x%x flg:x%x", 6712 did, vport->port_state, ndlp->nlp_flag); 6713 phba->fc_stat.elsRcvRTV++; 6714 lpfc_els_rcv_rtv(vport, elsiocb, ndlp); 6715 if (newnode) 6716 lpfc_nlp_put(ndlp); 6717 break; 6718 case ELS_CMD_RRQ: 6719 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6720 "RCV RRQ: did:x%x/ste:x%x flg:x%x", 6721 did, vport->port_state, ndlp->nlp_flag); 6722 6723 phba->fc_stat.elsRcvRRQ++; 6724 lpfc_els_rcv_rrq(vport, elsiocb, ndlp); 6725 if (newnode) 6726 lpfc_nlp_put(ndlp); 6727 break; 6728 case ELS_CMD_ECHO: 6729 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6730 "RCV ECHO: did:x%x/ste:x%x flg:x%x", 6731 did, vport->port_state, ndlp->nlp_flag); 6732 6733 phba->fc_stat.elsRcvECHO++; 6734 lpfc_els_rcv_echo(vport, elsiocb, ndlp); 6735 if (newnode) 6736 lpfc_nlp_put(ndlp); 6737 break; 6738 default: 6739 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6740 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x", 6741 cmd, did, vport->port_state); 6742 6743 /* Unsupported ELS command, reject */ 6744 rjt_err = LSRJT_CMD_UNSUPPORTED; 6745 6746 /* Unknown ELS command <elsCmd> received from NPORT <did> */ 6747 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 6748 "0115 Unknown ELS command x%x 
" 6749 "received from NPORT x%x\n", cmd, did); 6750 if (newnode) 6751 lpfc_nlp_put(ndlp); 6752 break; 6753 } 6754 6755 /* check if need to LS_RJT received ELS cmd */ 6756 if (rjt_err) { 6757 memset(&stat, 0, sizeof(stat)); 6758 stat.un.b.lsRjtRsnCode = rjt_err; 6759 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; 6760 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp, 6761 NULL); 6762 } 6763 6764 lpfc_nlp_put(elsiocb->context1); 6765 elsiocb->context1 = NULL; 6766 return; 6767 6768 dropit: 6769 if (vport && !(vport->load_flag & FC_UNLOADING)) 6770 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 6771 "0111 Dropping received ELS cmd " 6772 "Data: x%x x%x x%x\n", 6773 icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout); 6774 phba->fc_stat.elsRcvDrop++; 6775 } 6776 6777 /** 6778 * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring 6779 * @phba: pointer to lpfc hba data structure. 6780 * @pring: pointer to a SLI ring. 6781 * @elsiocb: pointer to lpfc els iocb data structure. 6782 * 6783 * This routine is used to process an unsolicited event received from a SLI 6784 * (Service Level Interface) ring. The actual processing of the data buffer 6785 * associated with the unsolicited event is done by invoking the routine 6786 * lpfc_els_unsol_buffer() after properly set up the iocb buffer from the 6787 * SLI ring on which the unsolicited event was received. 6788 **/ 6789 void 6790 lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 6791 struct lpfc_iocbq *elsiocb) 6792 { 6793 struct lpfc_vport *vport = phba->pport; 6794 IOCB_t *icmd = &elsiocb->iocb; 6795 dma_addr_t paddr; 6796 struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2; 6797 struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3; 6798 6799 elsiocb->context1 = NULL; 6800 elsiocb->context2 = NULL; 6801 elsiocb->context3 = NULL; 6802 6803 if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) { 6804 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); 6805 } else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT && 6806 (icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING) { 6807 phba->fc_stat.NoRcvBuf++; 6808 /* Not enough posted buffers; Try posting more buffers */ 6809 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) 6810 lpfc_post_buffer(phba, pring, 0); 6811 return; 6812 } 6813 6814 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 6815 (icmd->ulpCommand == CMD_IOCB_RCV_ELS64_CX || 6816 icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 6817 if (icmd->unsli3.rcvsli3.vpi == 0xffff) 6818 vport = phba->pport; 6819 else 6820 vport = lpfc_find_vport_by_vpid(phba, 6821 icmd->unsli3.rcvsli3.vpi); 6822 } 6823 6824 /* If there are no BDEs associated 6825 * with this IOCB, there is nothing to do. 6826 */ 6827 if (icmd->ulpBdeCount == 0) 6828 return; 6829 6830 /* type of ELS cmd is first 32bit word 6831 * in packet 6832 */ 6833 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 6834 elsiocb->context2 = bdeBuf1; 6835 } else { 6836 paddr = getPaddr(icmd->un.cont64[0].addrHigh, 6837 icmd->un.cont64[0].addrLow); 6838 elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring, 6839 paddr); 6840 } 6841 6842 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb); 6843 /* 6844 * The different unsolicited event handlers would tell us 6845 * if they are done with "mp" by setting context2 to NULL. 
6846 */
6847 if (elsiocb->context2) {
6848 lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2);
6849 elsiocb->context2 = NULL;
6850 }
6851
6852 /* RCV_ELS64_CX provides for 2 BDEs - process 2nd if included */
6853 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) &&
6854 icmd->ulpBdeCount == 2) {
6855 elsiocb->context2 = bdeBuf2;
6856 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
6857 /* free mp if we are done with it */
6858 if (elsiocb->context2) {
6859 lpfc_in_buf_free(phba, elsiocb->context2);
6860 elsiocb->context2 = NULL;
6861 }
6862 }
6863 }
6864
6865 /**
6866 * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr
6867 * @phba: pointer to lpfc hba data structure.
6868 * @vport: pointer to a virtual N_Port data structure.
6869 *
6870 * This routine issues a Port Login (PLOGI) to the Name Server with
6871 * State Change Request (SCR) for a @vport. This routine will create an
6872 * ndlp for the Name Server associated with the @vport if such a node does
6873 * not already exist. The PLOGI to the Name Server is issued by invoking the
6874 * lpfc_issue_els_plogi() routine. If the Fabric-Device Management Interface
6875 * (FDMI) is configured for the @vport, an FDMI node will be created and
6876 * the PLOGI to FDMI is issued by invoking the lpfc_issue_els_plogi() routine.
6877 **/
6878 void
6879 lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
6880 {
6881 struct lpfc_nodelist *ndlp, *ndlp_fdmi;
6882 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6883
6884 /*
6885 * If the lpfc_delay_discovery parameter is set, the clean address
6886 * bit is cleared, and the FC fabric parameters have changed, delay FC NPort
6887 * discovery.
6888 */
6889 spin_lock_irq(shost->host_lock);
6890 if (vport->fc_flag & FC_DISC_DELAYED) {
6891 spin_unlock_irq(shost->host_lock);
6892 mod_timer(&vport->delayed_disc_tmo,
6893 jiffies + HZ * phba->fc_ratov);
6894 return;
6895 }
6896 spin_unlock_irq(shost->host_lock);
6897
6898 ndlp = lpfc_findnode_did(vport, NameServer_DID);
6899 if (!ndlp) {
6900 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
6901 if (!ndlp) {
6902 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
6903 lpfc_disc_start(vport);
6904 return;
6905 }
6906 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
6907 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6908 "0251 NameServer login: no memory\n");
6909 return;
6910 }
6911 lpfc_nlp_init(vport, ndlp, NameServer_DID);
6912 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
6913 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
6914 if (!ndlp) {
6915 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
6916 lpfc_disc_start(vport);
6917 return;
6918 }
6919 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
6920 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6921 "0348 NameServer login: node freed\n");
6922 return;
6923 }
6924 }
6925 ndlp->nlp_type |= NLP_FABRIC;
6926
6927 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
6928
6929 if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) {
6930 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
6931 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6932 "0252 Cannot issue NameServer login\n");
6933 return;
6934 }
6935
6936 if (vport->cfg_fdmi_on) {
6937 /* If this is the first time, allocate an ndlp and initialize
6938 * it. Otherwise, make sure the node is enabled and then do the
6939 * login.
6940 */ 6941 ndlp_fdmi = lpfc_findnode_did(vport, FDMI_DID); 6942 if (!ndlp_fdmi) { 6943 ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool, 6944 GFP_KERNEL); 6945 if (ndlp_fdmi) { 6946 lpfc_nlp_init(vport, ndlp_fdmi, FDMI_DID); 6947 ndlp_fdmi->nlp_type |= NLP_FABRIC; 6948 } else 6949 return; 6950 } 6951 if (!NLP_CHK_NODE_ACT(ndlp_fdmi)) 6952 ndlp_fdmi = lpfc_enable_node(vport, 6953 ndlp_fdmi, 6954 NLP_STE_NPR_NODE); 6955 6956 if (ndlp_fdmi) { 6957 lpfc_nlp_set_state(vport, ndlp_fdmi, 6958 NLP_STE_PLOGI_ISSUE); 6959 lpfc_issue_els_plogi(vport, ndlp_fdmi->nlp_DID, 0); 6960 } 6961 } 6962 } 6963 6964 /** 6965 * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport 6966 * @phba: pointer to lpfc hba data structure. 6967 * @pmb: pointer to the driver internal queue element for mailbox command. 6968 * 6969 * This routine is the completion callback function to register new vport 6970 * mailbox command. If the new vport mailbox command completes successfully, 6971 * the fabric registration login shall be performed on physical port (the 6972 * new vport created is actually a physical port, with VPI 0) or the port 6973 * login to Name Server for State Change Request (SCR) will be performed 6974 * on virtual port (real virtual port, with VPI greater than 0). 6975 **/ 6976 static void 6977 lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 6978 { 6979 struct lpfc_vport *vport = pmb->vport; 6980 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6981 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 6982 MAILBOX_t *mb = &pmb->u.mb; 6983 int rc; 6984 6985 spin_lock_irq(shost->host_lock); 6986 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 6987 spin_unlock_irq(shost->host_lock); 6988 6989 if (mb->mbxStatus) { 6990 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 6991 "0915 Register VPI failed : Status: x%x" 6992 " upd bit: x%x \n", mb->mbxStatus, 6993 mb->un.varRegVpi.upd); 6994 if (phba->sli_rev == LPFC_SLI_REV4 && 6995 mb->un.varRegVpi.upd) 6996 goto mbox_err_exit ; 6997 6998 switch (mb->mbxStatus) { 6999 case 0x11: /* unsupported feature */ 7000 case 0x9603: /* max_vpi exceeded */ 7001 case 0x9602: /* Link event since CLEAR_LA */ 7002 /* giving up on vport registration */ 7003 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 7004 spin_lock_irq(shost->host_lock); 7005 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 7006 spin_unlock_irq(shost->host_lock); 7007 lpfc_can_disctmo(vport); 7008 break; 7009 /* If reg_vpi fail with invalid VPI status, re-init VPI */ 7010 case 0x20: 7011 spin_lock_irq(shost->host_lock); 7012 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 7013 spin_unlock_irq(shost->host_lock); 7014 lpfc_init_vpi(phba, pmb, vport->vpi); 7015 pmb->vport = vport; 7016 pmb->mbox_cmpl = lpfc_init_vpi_cmpl; 7017 rc = lpfc_sli_issue_mbox(phba, pmb, 7018 MBX_NOWAIT); 7019 if (rc == MBX_NOT_FINISHED) { 7020 lpfc_printf_vlog(vport, 7021 KERN_ERR, LOG_MBOX, 7022 "2732 Failed to issue INIT_VPI" 7023 " mailbox command\n"); 7024 } else { 7025 lpfc_nlp_put(ndlp); 7026 return; 7027 } 7028 7029 default: 7030 /* Try to recover from this error */ 7031 if (phba->sli_rev == LPFC_SLI_REV4) 7032 lpfc_sli4_unreg_all_rpis(vport); 7033 lpfc_mbx_unreg_vpi(vport); 7034 spin_lock_irq(shost->host_lock); 7035 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 7036 spin_unlock_irq(shost->host_lock); 7037 if (vport->port_type == LPFC_PHYSICAL_PORT 7038 && !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) 7039 lpfc_issue_init_vfi(vport); 7040 else 7041 lpfc_initial_fdisc(vport); 7042 break; 7043 } 7044 } 
else { 7045 spin_lock_irq(shost->host_lock); 7046 vport->vpi_state |= LPFC_VPI_REGISTERED; 7047 spin_unlock_irq(shost->host_lock); 7048 if (vport == phba->pport) { 7049 if (phba->sli_rev < LPFC_SLI_REV4) 7050 lpfc_issue_fabric_reglogin(vport); 7051 else { 7052 /* 7053 * If the physical port is instantiated using 7054 * FDISC, do not start vport discovery. 7055 */ 7056 if (vport->port_state != LPFC_FDISC) 7057 lpfc_start_fdiscs(phba); 7058 lpfc_do_scr_ns_plogi(phba, vport); 7059 } 7060 } else 7061 lpfc_do_scr_ns_plogi(phba, vport); 7062 } 7063 mbox_err_exit: 7064 /* Now, we decrement the ndlp reference count held for this 7065 * callback function 7066 */ 7067 lpfc_nlp_put(ndlp); 7068 7069 mempool_free(pmb, phba->mbox_mem_pool); 7070 return; 7071 } 7072 7073 /** 7074 * lpfc_register_new_vport - Register a new vport with a HBA 7075 * @phba: pointer to lpfc hba data structure. 7076 * @vport: pointer to a host virtual N_Port data structure. 7077 * @ndlp: pointer to a node-list data structure. 7078 * 7079 * This routine registers the @vport as a new virtual port with a HBA. 7080 * It is done through a registering vpi mailbox command. 7081 **/ 7082 void 7083 lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport, 7084 struct lpfc_nodelist *ndlp) 7085 { 7086 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7087 LPFC_MBOXQ_t *mbox; 7088 7089 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7090 if (mbox) { 7091 lpfc_reg_vpi(vport, mbox); 7092 mbox->vport = vport; 7093 mbox->context2 = lpfc_nlp_get(ndlp); 7094 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport; 7095 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 7096 == MBX_NOT_FINISHED) { 7097 /* mailbox command not success, decrement ndlp 7098 * reference count for this command 7099 */ 7100 lpfc_nlp_put(ndlp); 7101 mempool_free(mbox, phba->mbox_mem_pool); 7102 7103 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 7104 "0253 Register VPI: Can't send mbox\n"); 7105 goto mbox_err_exit; 7106 } 7107 } else { 7108 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 7109 "0254 Register VPI: no memory\n"); 7110 goto mbox_err_exit; 7111 } 7112 return; 7113 7114 mbox_err_exit: 7115 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 7116 spin_lock_irq(shost->host_lock); 7117 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 7118 spin_unlock_irq(shost->host_lock); 7119 return; 7120 } 7121 7122 /** 7123 * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer 7124 * @phba: pointer to lpfc hba data structure. 7125 * 7126 * This routine cancels the retry delay timers to all the vports. 7127 **/ 7128 void 7129 lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba) 7130 { 7131 struct lpfc_vport **vports; 7132 struct lpfc_nodelist *ndlp; 7133 uint32_t link_state; 7134 int i; 7135 7136 /* Treat this failure as linkdown for all vports */ 7137 link_state = phba->link_state; 7138 lpfc_linkdown(phba); 7139 phba->link_state = link_state; 7140 7141 vports = lpfc_create_vport_work_array(phba); 7142 7143 if (vports) { 7144 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 7145 ndlp = lpfc_findnode_did(vports[i], Fabric_DID); 7146 if (ndlp) 7147 lpfc_cancel_retry_delay_tmo(vports[i], ndlp); 7148 lpfc_els_flush_cmd(vports[i]); 7149 } 7150 lpfc_destroy_vport_work_array(phba, vports); 7151 } 7152 } 7153 7154 /** 7155 * lpfc_retry_pport_discovery - Start timer to retry FLOGI. 7156 * @phba: pointer to lpfc hba data structure. 
7157 *
7158 * This routine aborts all pending discovery commands and
7159 * starts a timer to retry FLOGI for the physical port
7160 * discovery.
7161 **/
7162 void
7163 lpfc_retry_pport_discovery(struct lpfc_hba *phba)
7164 {
7165 struct lpfc_nodelist *ndlp;
7166 struct Scsi_Host *shost;
7167
7168 /* Cancel all the vports' retry delay timers */
7169 lpfc_cancel_all_vport_retry_delay_timer(phba);
7170
7171 /* If the fabric requires FLOGI, then re-instantiate the physical login */
7172 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
7173 if (!ndlp)
7174 return;
7175
7176 shost = lpfc_shost_from_vport(phba->pport);
7177 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
7178 spin_lock_irq(shost->host_lock);
7179 ndlp->nlp_flag |= NLP_DELAY_TMO;
7180 spin_unlock_irq(shost->host_lock);
7181 ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
7182 phba->pport->port_state = LPFC_FLOGI;
7183 return;
7184 }
7185
7186 /**
7187 * lpfc_fabric_login_reqd - Check if FLOGI is required.
7188 * @phba: pointer to lpfc hba data structure.
7189 * @cmdiocb: pointer to FDISC command iocb.
7190 * @rspiocb: pointer to FDISC response iocb.
7191 *
7192 * This routine checks if a FLOGI is required for FDISC
7193 * to succeed.
7194 **/
7195 static int
7196 lpfc_fabric_login_reqd(struct lpfc_hba *phba,
7197 struct lpfc_iocbq *cmdiocb,
7198 struct lpfc_iocbq *rspiocb)
7199 {
7200
7201 if ((rspiocb->iocb.ulpStatus != IOSTAT_FABRIC_RJT) ||
7202 (rspiocb->iocb.un.ulpWord[4] != RJT_LOGIN_REQUIRED))
7203 return 0;
7204 else
7205 return 1;
7206 }
7207
7208 /**
7209 * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command
7210 * @phba: pointer to lpfc hba data structure.
7211 * @cmdiocb: pointer to lpfc command iocb data structure.
7212 * @rspiocb: pointer to lpfc response iocb data structure.
7213 *
7214 * This routine is the completion callback function to a Fabric Discover
7215 * (FDISC) ELS command. Since all the FDISC ELS commands are issued
7216 * single threaded, each FDISC completion callback function will reset
7217 * the discovery timer for all vports such that the timers will not get
7218 * unnecessary timeouts. The function checks the FDISC IOCB status. If an
7219 * error is detected, the vport will be set to the FC_VPORT_FAILED state.
7220 * Otherwise, the vport will be set to the FC_VPORT_ACTIVE state. It then
7221 * checks whether the DID assigned to the vport has been changed with the
7222 * completion of the FDISC command. If so, both RPI (Remote Port Index) and
7223 * VPI (Virtual Port Index) are unregistered from the HBA, and then the
7224 * lpfc_register_new_vport() routine is invoked to register the new vport
7225 * with the HBA. Otherwise, the lpfc_do_scr_ns_plogi() routine is invoked
7226 * to issue a PLOGI to the Name Server for State Change Request (SCR).
7227 **/
7228 static void
7229 lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
7230 struct lpfc_iocbq *rspiocb)
7231 {
7232 struct lpfc_vport *vport = cmdiocb->vport;
7233 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7234 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
7235 struct lpfc_nodelist *np;
7236 struct lpfc_nodelist *next_np;
7237 IOCB_t *irsp = &rspiocb->iocb;
7238 struct lpfc_iocbq *piocb;
7239 struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
7240 struct serv_parm *sp;
7241 uint8_t fabric_param_changed;
7242
7243 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7244 "0123 FDISC completes. 
x%x/x%x prevDID: x%x\n", 7245 irsp->ulpStatus, irsp->un.ulpWord[4], 7246 vport->fc_prevDID); 7247 /* Since all FDISCs are being single threaded, we 7248 * must reset the discovery timer for ALL vports 7249 * waiting to send FDISC when one completes. 7250 */ 7251 list_for_each_entry(piocb, &phba->fabric_iocb_list, list) { 7252 lpfc_set_disctmo(piocb->vport); 7253 } 7254 7255 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 7256 "FDISC cmpl: status:x%x/x%x prevdid:x%x", 7257 irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID); 7258 7259 if (irsp->ulpStatus) { 7260 7261 if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) { 7262 lpfc_retry_pport_discovery(phba); 7263 goto out; 7264 } 7265 7266 /* Check for retry */ 7267 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) 7268 goto out; 7269 /* FDISC failed */ 7270 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 7271 "0126 FDISC failed. (x%x/x%x)\n", 7272 irsp->ulpStatus, irsp->un.ulpWord[4]); 7273 goto fdisc_failed; 7274 } 7275 spin_lock_irq(shost->host_lock); 7276 vport->fc_flag &= ~FC_VPORT_CVL_RCVD; 7277 vport->fc_flag &= ~FC_VPORT_LOGO_RCVD; 7278 vport->fc_flag |= FC_FABRIC; 7279 if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP) 7280 vport->fc_flag |= FC_PUBLIC_LOOP; 7281 spin_unlock_irq(shost->host_lock); 7282 7283 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID; 7284 lpfc_vport_set_state(vport, FC_VPORT_ACTIVE); 7285 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 7286 sp = prsp->virt + sizeof(uint32_t); 7287 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp); 7288 memcpy(&vport->fabric_portname, &sp->portName, 7289 sizeof(struct lpfc_name)); 7290 memcpy(&vport->fabric_nodename, &sp->nodeName, 7291 sizeof(struct lpfc_name)); 7292 if (fabric_param_changed && 7293 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 7294 /* If our NportID changed, we need to ensure all 7295 * remaining NPORTs get unreg_login'ed so we can 7296 * issue unreg_vpi. 7297 */ 7298 list_for_each_entry_safe(np, next_np, 7299 &vport->fc_nodes, nlp_listp) { 7300 if (!NLP_CHK_NODE_ACT(ndlp) || 7301 (np->nlp_state != NLP_STE_NPR_NODE) || 7302 !(np->nlp_flag & NLP_NPR_ADISC)) 7303 continue; 7304 spin_lock_irq(shost->host_lock); 7305 np->nlp_flag &= ~NLP_NPR_ADISC; 7306 spin_unlock_irq(shost->host_lock); 7307 lpfc_unreg_rpi(vport, np); 7308 } 7309 lpfc_cleanup_pending_mbox(vport); 7310 7311 if (phba->sli_rev == LPFC_SLI_REV4) 7312 lpfc_sli4_unreg_all_rpis(vport); 7313 7314 lpfc_mbx_unreg_vpi(vport); 7315 spin_lock_irq(shost->host_lock); 7316 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 7317 if (phba->sli_rev == LPFC_SLI_REV4) 7318 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 7319 else 7320 vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG; 7321 spin_unlock_irq(shost->host_lock); 7322 } else if ((phba->sli_rev == LPFC_SLI_REV4) && 7323 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 7324 /* 7325 * Driver needs to re-reg VPI in order for f/w 7326 * to update the MAC address. 
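* With FCoE fabric-provided MAC addressing (FPMA), the MAC is built from
* the FC-MAP and the N_Port ID, so a possibly changed DID requires a fresh
* REG_VPI for the firmware to update the MAC.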
7327 */ 7328 lpfc_register_new_vport(phba, vport, ndlp); 7329 goto out; 7330 } 7331 7332 if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI) 7333 lpfc_issue_init_vpi(vport); 7334 else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) 7335 lpfc_register_new_vport(phba, vport, ndlp); 7336 else 7337 lpfc_do_scr_ns_plogi(phba, vport); 7338 goto out; 7339 fdisc_failed: 7340 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 7341 /* Cancel discovery timer */ 7342 lpfc_can_disctmo(vport); 7343 lpfc_nlp_put(ndlp); 7344 out: 7345 lpfc_els_free_iocb(phba, cmdiocb); 7346 } 7347 7348 /** 7349 * lpfc_issue_els_fdisc - Issue a fdisc iocb command 7350 * @vport: pointer to a virtual N_Port data structure. 7351 * @ndlp: pointer to a node-list data structure. 7352 * @retry: number of retries to the command IOCB. 7353 * 7354 * This routine prepares and issues a Fabric Discover (FDISC) IOCB to 7355 * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb() 7356 * routine to issue the IOCB, which makes sure only one outstanding fabric 7357 * IOCB will be sent off HBA at any given time. 7358 * 7359 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 7360 * will be incremented by 1 for holding the ndlp and the reference to ndlp 7361 * will be stored into the context1 field of the IOCB for the completion 7362 * callback function to the FDISC ELS command. 7363 * 7364 * Return code 7365 * 0 - Successfully issued fdisc iocb command 7366 * 1 - Failed to issue fdisc iocb command 7367 **/ 7368 static int 7369 lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 7370 uint8_t retry) 7371 { 7372 struct lpfc_hba *phba = vport->phba; 7373 IOCB_t *icmd; 7374 struct lpfc_iocbq *elsiocb; 7375 struct serv_parm *sp; 7376 uint8_t *pcmd; 7377 uint16_t cmdsize; 7378 int did = ndlp->nlp_DID; 7379 int rc; 7380 7381 vport->port_state = LPFC_FDISC; 7382 vport->fc_myDID = 0; 7383 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 7384 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 7385 ELS_CMD_FDISC); 7386 if (!elsiocb) { 7387 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 7388 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 7389 "0255 Issue FDISC: no IOCB\n"); 7390 return 1; 7391 } 7392 7393 icmd = &elsiocb->iocb; 7394 icmd->un.elsreq64.myID = 0; 7395 icmd->un.elsreq64.fl = 1; 7396 7397 /* 7398 * SLI3 ports require a different context type value than SLI4. 7399 * Catch SLI3 ports here and override the prep. 
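* (ulpCt_h/ulpCt_l are the high and low bits of the IOCB context tag (CT)
* field; SLI3 firmware expects a different CT encoding for FDISC than the
* value set up by lpfc_prep_els_iocb(), hence the override below.)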
7400 */
7401 if (phba->sli_rev == LPFC_SLI_REV3) {
7402 icmd->ulpCt_h = 1;
7403 icmd->ulpCt_l = 0;
7404 }
7405
7406 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
7407 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC;
7408 pcmd += sizeof(uint32_t); /* CSP Word 1 */
7409 memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm));
7410 sp = (struct serv_parm *) pcmd;
7411 /* Setup CSPs accordingly for Fabric */
7412 sp->cmn.e_d_tov = 0;
7413 sp->cmn.w2.r_a_tov = 0;
7414 sp->cmn.virtual_fabric_support = 0;
7415 sp->cls1.classValid = 0;
7416 sp->cls2.seqDelivery = 1;
7417 sp->cls3.seqDelivery = 1;
7418
7419 pcmd += sizeof(uint32_t); /* CSP Word 2 */
7420 pcmd += sizeof(uint32_t); /* CSP Word 3 */
7421 pcmd += sizeof(uint32_t); /* CSP Word 4 */
7422 pcmd += sizeof(uint32_t); /* Port Name */
7423 memcpy(pcmd, &vport->fc_portname, 8);
7424 pcmd += sizeof(uint32_t); /* Node Name */
7425 pcmd += sizeof(uint32_t); /* Node Name */
7426 memcpy(pcmd, &vport->fc_nodename, 8);
7427
7428 lpfc_set_disctmo(vport);
7429
7430 phba->fc_stat.elsXmitFDISC++;
7431 elsiocb->iocb_cmpl = lpfc_cmpl_els_fdisc;
7432
7433 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
7434 "Issue FDISC: did:x%x",
7435 did, 0, 0);
7436
7437 rc = lpfc_issue_fabric_iocb(phba, elsiocb);
7438 if (rc == IOCB_ERROR) {
7439 lpfc_els_free_iocb(phba, elsiocb);
7440 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
7441 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
7442 "0256 Issue FDISC: Cannot send IOCB\n");
7443 return 1;
7444 }
7445 lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING);
7446 return 0;
7447 }
7448
7449 /**
7450 * lpfc_cmpl_els_npiv_logo - Completion function for a vport LOGO
7451 * @phba: pointer to lpfc hba data structure.
7452 * @cmdiocb: pointer to lpfc command iocb data structure.
7453 * @rspiocb: pointer to lpfc response iocb data structure.
7454 *
7455 * This routine is the completion callback function to the issuing of a LOGO
7456 * ELS command off a vport. It frees the command IOCB and then decrements the
7457 * reference count held on the ndlp for this completion function, indicating
7458 * that the reference to the ndlp is no longer needed. Note that the
7459 * lpfc_els_free_iocb() routine decrements the ndlp reference held for this
7460 * callback function and an additional explicit decrement of the ndlp reference
7461 * will trigger the actual release of the ndlp.
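* On a successful LOGO completion the FC_FABRIC flag is also cleared from
* the @vport.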
7462 **/ 7463 static void 7464 lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 7465 struct lpfc_iocbq *rspiocb) 7466 { 7467 struct lpfc_vport *vport = cmdiocb->vport; 7468 IOCB_t *irsp; 7469 struct lpfc_nodelist *ndlp; 7470 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7471 7472 ndlp = (struct lpfc_nodelist *)cmdiocb->context1; 7473 irsp = &rspiocb->iocb; 7474 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 7475 "LOGO npiv cmpl: status:x%x/x%x did:x%x", 7476 irsp->ulpStatus, irsp->un.ulpWord[4], irsp->un.rcvels.remoteID); 7477 7478 lpfc_els_free_iocb(phba, cmdiocb); 7479 vport->unreg_vpi_cmpl = VPORT_ERROR; 7480 7481 /* Trigger the release of the ndlp after logo */ 7482 lpfc_nlp_put(ndlp); 7483 7484 /* NPIV LOGO completes to NPort <nlp_DID> */ 7485 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7486 "2928 NPIV LOGO completes to NPort x%x " 7487 "Data: x%x x%x x%x x%x\n", 7488 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 7489 irsp->ulpTimeout, vport->num_disc_nodes); 7490 7491 if (irsp->ulpStatus == IOSTAT_SUCCESS) { 7492 spin_lock_irq(shost->host_lock); 7493 vport->fc_flag &= ~FC_FABRIC; 7494 spin_unlock_irq(shost->host_lock); 7495 } 7496 } 7497 7498 /** 7499 * lpfc_issue_els_npiv_logo - Issue a logo off a vport 7500 * @vport: pointer to a virtual N_Port data structure. 7501 * @ndlp: pointer to a node-list data structure. 7502 * 7503 * This routine issues a LOGO ELS command to an @ndlp off a @vport. 7504 * 7505 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 7506 * will be incremented by 1 for holding the ndlp and the reference to ndlp 7507 * will be stored into the context1 field of the IOCB for the completion 7508 * callback function to the LOGO ELS command. 7509 * 7510 * Return codes 7511 * 0 - Successfully issued logo off the @vport 7512 * 1 - Failed to issue logo off the @vport 7513 **/ 7514 int 7515 lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 7516 { 7517 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7518 struct lpfc_hba *phba = vport->phba; 7519 IOCB_t *icmd; 7520 struct lpfc_iocbq *elsiocb; 7521 uint8_t *pcmd; 7522 uint16_t cmdsize; 7523 7524 cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name); 7525 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID, 7526 ELS_CMD_LOGO); 7527 if (!elsiocb) 7528 return 1; 7529 7530 icmd = &elsiocb->iocb; 7531 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 7532 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 7533 pcmd += sizeof(uint32_t); 7534 7535 /* Fill in LOGO payload */ 7536 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 7537 pcmd += sizeof(uint32_t); 7538 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 7539 7540 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 7541 "Issue LOGO npiv did:x%x flg:x%x", 7542 ndlp->nlp_DID, ndlp->nlp_flag, 0); 7543 7544 elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo; 7545 spin_lock_irq(shost->host_lock); 7546 ndlp->nlp_flag |= NLP_LOGO_SND; 7547 spin_unlock_irq(shost->host_lock); 7548 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == 7549 IOCB_ERROR) { 7550 spin_lock_irq(shost->host_lock); 7551 ndlp->nlp_flag &= ~NLP_LOGO_SND; 7552 spin_unlock_irq(shost->host_lock); 7553 lpfc_els_free_iocb(phba, elsiocb); 7554 return 1; 7555 } 7556 return 0; 7557 } 7558 7559 /** 7560 * lpfc_fabric_block_timeout - Handler function to the fabric block timer 7561 * @ptr: holder for the timer function associated data. 
7562 * 7563 * This routine is invoked by the fabric iocb block timer after 7564 * timeout. It posts the fabric iocb block timeout event by setting the 7565 * WORKER_FABRIC_BLOCK_TMO bit to work port event bitmap and then invokes 7566 * lpfc_worker_wake_up() routine to wake up the worker thread. It is for 7567 * the worker thread to invoke the lpfc_unblock_fabric_iocbs() on the 7568 * posted event WORKER_FABRIC_BLOCK_TMO. 7569 **/ 7570 void 7571 lpfc_fabric_block_timeout(unsigned long ptr) 7572 { 7573 struct lpfc_hba *phba = (struct lpfc_hba *) ptr; 7574 unsigned long iflags; 7575 uint32_t tmo_posted; 7576 7577 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 7578 tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO; 7579 if (!tmo_posted) 7580 phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO; 7581 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); 7582 7583 if (!tmo_posted) 7584 lpfc_worker_wake_up(phba); 7585 return; 7586 } 7587 7588 /** 7589 * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list 7590 * @phba: pointer to lpfc hba data structure. 7591 * 7592 * This routine issues one fabric iocb from the driver internal list to 7593 * the HBA. It first checks whether it's ready to issue one fabric iocb to 7594 * the HBA (whether there is no outstanding fabric iocb). If so, it shall 7595 * remove one pending fabric iocb from the driver internal list and invokes 7596 * lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA. 7597 **/ 7598 static void 7599 lpfc_resume_fabric_iocbs(struct lpfc_hba *phba) 7600 { 7601 struct lpfc_iocbq *iocb; 7602 unsigned long iflags; 7603 int ret; 7604 IOCB_t *cmd; 7605 7606 repeat: 7607 iocb = NULL; 7608 spin_lock_irqsave(&phba->hbalock, iflags); 7609 /* Post any pending iocb to the SLI layer */ 7610 if (atomic_read(&phba->fabric_iocb_count) == 0) { 7611 list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb), 7612 list); 7613 if (iocb) 7614 /* Increment fabric iocb count to hold the position */ 7615 atomic_inc(&phba->fabric_iocb_count); 7616 } 7617 spin_unlock_irqrestore(&phba->hbalock, iflags); 7618 if (iocb) { 7619 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl; 7620 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb; 7621 iocb->iocb_flag |= LPFC_IO_FABRIC; 7622 7623 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD, 7624 "Fabric sched1: ste:x%x", 7625 iocb->vport->port_state, 0, 0); 7626 7627 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); 7628 7629 if (ret == IOCB_ERROR) { 7630 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl; 7631 iocb->fabric_iocb_cmpl = NULL; 7632 iocb->iocb_flag &= ~LPFC_IO_FABRIC; 7633 cmd = &iocb->iocb; 7634 cmd->ulpStatus = IOSTAT_LOCAL_REJECT; 7635 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED; 7636 iocb->iocb_cmpl(phba, iocb, iocb); 7637 7638 atomic_dec(&phba->fabric_iocb_count); 7639 goto repeat; 7640 } 7641 } 7642 7643 return; 7644 } 7645 7646 /** 7647 * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command 7648 * @phba: pointer to lpfc hba data structure. 7649 * 7650 * This routine unblocks the issuing fabric iocb command. The function 7651 * will clear the fabric iocb block bit and then invoke the routine 7652 * lpfc_resume_fabric_iocbs() to issue one of the pending fabric iocb 7653 * from the driver internal fabric iocb list. 
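* It runs in the worker thread when the WORKER_FABRIC_BLOCK_TMO event
* posted by lpfc_fabric_block_timeout() is handled.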
7654 **/
7655 void
7656 lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba)
7657 {
7658 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
7659
7660 lpfc_resume_fabric_iocbs(phba);
7661 return;
7662 }
7663
7664 /**
7665 * lpfc_block_fabric_iocbs - Block issuing fabric iocb command
7666 * @phba: pointer to lpfc hba data structure.
7667 *
7668 * This routine blocks the issuing of fabric iocbs for a specified amount of
7669 * time (currently 100 ms). This is done by setting the fabric iocb block bit
7670 * and setting up a timeout timer for 100 ms. When the block bit is set, no
7671 * more fabric iocbs will be issued out of the HBA.
7672 **/
7673 static void
7674 lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
7675 {
7676 int blocked;
7677
7678 blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
7679 /* Start a timer to unblock fabric iocbs after 100ms */
7680 if (!blocked)
7681 mod_timer(&phba->fabric_block_timer, jiffies + HZ/10 );
7682
7683 return;
7684 }
7685
7686 /**
7687 * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb
7688 * @phba: pointer to lpfc hba data structure.
7689 * @cmdiocb: pointer to lpfc command iocb data structure.
7690 * @rspiocb: pointer to lpfc response iocb data structure.
7691 *
7692 * This routine is the callback function that is put into the fabric iocb's
7693 * callback function pointer (iocb->iocb_cmpl). The original iocb's callback
7694 * function pointer has been stored in iocb->fabric_iocb_cmpl. This callback
7695 * function first restores and invokes the original iocb's callback function
7696 * and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next
7697 * fabric bound iocb from the driver internal fabric iocb list onto the wire.
7698 **/
7699 static void
7700 lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
7701 struct lpfc_iocbq *rspiocb)
7702 {
7703 struct ls_rjt stat;
7704
7705 if ((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC)
7706 BUG();
7707
7708 switch (rspiocb->iocb.ulpStatus) {
7709 case IOSTAT_NPORT_RJT:
7710 case IOSTAT_FABRIC_RJT:
7711 if (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
7712 lpfc_block_fabric_iocbs(phba);
7713 }
7714 break;
7715
7716 case IOSTAT_NPORT_BSY:
7717 case IOSTAT_FABRIC_BSY:
7718 lpfc_block_fabric_iocbs(phba);
7719 break;
7720
7721 case IOSTAT_LS_RJT:
7722 stat.un.lsRjtError =
7723 be32_to_cpu(rspiocb->iocb.un.ulpWord[4]);
7724 if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
7725 (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
7726 lpfc_block_fabric_iocbs(phba);
7727 break;
7728 }
7729
7730 if (atomic_read(&phba->fabric_iocb_count) == 0)
7731 BUG();
7732
7733 cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl;
7734 cmdiocb->fabric_iocb_cmpl = NULL;
7735 cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
7736 cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb);
7737
7738 atomic_dec(&phba->fabric_iocb_count);
7739 if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
7740 /* Post any pending iocbs to HBA */
7741 lpfc_resume_fabric_iocbs(phba);
7742 }
7743 }
7744
7745 /**
7746 * lpfc_issue_fabric_iocb - Issue a fabric iocb command
7747 * @phba: pointer to lpfc hba data structure.
7748 * @iocb: pointer to lpfc command iocb data structure.
7749 *
7750 * This routine is used as the top-level API for issuing a fabric iocb command
7751 * such as FLOGI and FDISC. To accommodate certain switch fabrics, this driver
7752 * function makes sure that only one fabric bound iocb will be outstanding at
7753 * any given time.
As such, this function will first check to see whether there
7754 * is already an outstanding fabric iocb on the wire. If so, it will put the
7755 * newly issued iocb onto the driver internal fabric iocb list, waiting to be
7756 * issued later. Otherwise, it will issue the iocb on the wire and update the
7757 * fabric iocb count to indicate that there is one fabric iocb on the wire.
7758 *
7759 * Note, this implementation can potentially send fabric IOCBs out of
7760 * order. The problem is that the construction of the "ready" boolean does
7761 * not include the condition that the internal fabric IOCB list is empty. As
7762 * such, it is possible that a fabric IOCB issued by this routine might "jump"
7763 * ahead of the fabric IOCBs already on the internal list.
7764 *
7765 * Return code
7766 * IOCB_SUCCESS - either fabric iocb put on the list or issued successfully
7767 * IOCB_ERROR - failed to issue fabric iocb
7768 **/
7769 static int
7770 lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
7771 {
7772 unsigned long iflags;
7773 int ready;
7774 int ret;
7775
7776 if (atomic_read(&phba->fabric_iocb_count) > 1)
7777 BUG();
7778
7779 spin_lock_irqsave(&phba->hbalock, iflags);
7780 ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
7781 !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
7782
7783 if (ready)
7784 /* Increment fabric iocb count to hold the position */
7785 atomic_inc(&phba->fabric_iocb_count);
7786 spin_unlock_irqrestore(&phba->hbalock, iflags);
7787 if (ready) {
7788 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
7789 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
7790 iocb->iocb_flag |= LPFC_IO_FABRIC;
7791
7792 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
7793 "Fabric sched2: ste:x%x",
7794 iocb->vport->port_state, 0, 0);
7795
7796 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
7797
7798 if (ret == IOCB_ERROR) {
7799 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
7800 iocb->fabric_iocb_cmpl = NULL;
7801 iocb->iocb_flag &= ~LPFC_IO_FABRIC;
7802 atomic_dec(&phba->fabric_iocb_count);
7803 }
7804 } else {
7805 spin_lock_irqsave(&phba->hbalock, iflags);
7806 list_add_tail(&iocb->list, &phba->fabric_iocb_list);
7807 spin_unlock_irqrestore(&phba->hbalock, iflags);
7808 ret = IOCB_SUCCESS;
7809 }
7810 return ret;
7811 }
7812
7813 /**
7814 * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list
7815 * @vport: pointer to a virtual N_Port data structure.
7816 *
7817 * This routine aborts all the IOCBs associated with a @vport from the
7818 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
7819 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
7820 * list, removes each IOCB associated with the @vport off the list, sets the
7821 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
7822 * associated with the IOCB.
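* The actual completion with IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED status
* is performed by lpfc_sli_cancel_iocbs().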
7823 **/
7824 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
7825 {
7826 LIST_HEAD(completions);
7827 struct lpfc_hba *phba = vport->phba;
7828 struct lpfc_iocbq *tmp_iocb, *piocb;
7829
7830 spin_lock_irq(&phba->hbalock);
7831 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
7832 list) {
7833
7834 if (piocb->vport != vport)
7835 continue;
7836
7837 list_move_tail(&piocb->list, &completions);
7838 }
7839 spin_unlock_irq(&phba->hbalock);
7840
7841 /* Cancel all the IOCBs from the completions list */
7842 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
7843 IOERR_SLI_ABORTED);
7844 }
7845
7846 /**
7847 * lpfc_fabric_abort_nport - Abort an ndlp's iocbs from driver fabric iocb list
7848 * @ndlp: pointer to a node-list data structure.
7849 *
7850 * This routine aborts all the IOCBs associated with an @ndlp from the
7851 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
7852 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
7853 * list, removes each IOCB associated with the @ndlp off the list, sets the
7854 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
7855 * associated with the IOCB.
7856 **/
7857 void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
7858 {
7859 LIST_HEAD(completions);
7860 struct lpfc_hba *phba = ndlp->phba;
7861 struct lpfc_iocbq *tmp_iocb, *piocb;
7862 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
7863
7864 spin_lock_irq(&phba->hbalock);
7865 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
7866 list) {
7867 if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) {
7868
7869 list_move_tail(&piocb->list, &completions);
7870 }
7871 }
7872 spin_unlock_irq(&phba->hbalock);
7873
7874 /* Cancel all the IOCBs from the completions list */
7875 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
7876 IOERR_SLI_ABORTED);
7877 }
7878
7879 /**
7880 * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list
7881 * @phba: pointer to lpfc hba data structure.
7882 *
7883 * This routine aborts all the IOCBs currently on the driver internal
7884 * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS
7885 * IOCB ring. This function takes the entire IOCB list off the fabric IOCB
7886 * list, removes IOCBs off the list, sets the status field to
7887 * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with
7888 * the IOCB.
7889 **/
7890 void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
7891 {
7892 LIST_HEAD(completions);
7893
7894 spin_lock_irq(&phba->hbalock);
7895 list_splice_init(&phba->fabric_iocb_list, &completions);
7896 spin_unlock_irq(&phba->hbalock);
7897
7898 /* Cancel all the IOCBs from the completions list */
7899 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
7900 IOERR_SLI_ABORTED);
7901 }
7902
7903 /**
7904 * lpfc_sli4_vport_delete_els_xri_aborted - Remove all ndlp references for vport
7905 * @vport: pointer to lpfc vport data structure.
7906 *
7907 * This routine is invoked by the vport cleanup for deletions and the cleanup
7908 * for an ndlp on removal.
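* It walks the aborted-ELS sglq list and clears any ndlp pointer that
* belongs to the @vport so a released node is not referenced again.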
7909 **/ 7910 void 7911 lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport) 7912 { 7913 struct lpfc_hba *phba = vport->phba; 7914 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 7915 unsigned long iflag = 0; 7916 7917 spin_lock_irqsave(&phba->hbalock, iflag); 7918 spin_lock(&phba->sli4_hba.abts_sgl_list_lock); 7919 list_for_each_entry_safe(sglq_entry, sglq_next, 7920 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { 7921 if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport) 7922 sglq_entry->ndlp = NULL; 7923 } 7924 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); 7925 spin_unlock_irqrestore(&phba->hbalock, iflag); 7926 return; 7927 } 7928 7929 /** 7930 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort 7931 * @phba: pointer to lpfc hba data structure. 7932 * @axri: pointer to the els xri abort wcqe structure. 7933 * 7934 * This routine is invoked by the worker thread to process a SLI4 slow-path 7935 * ELS aborted xri. 7936 **/ 7937 void 7938 lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba, 7939 struct sli4_wcqe_xri_aborted *axri) 7940 { 7941 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); 7942 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); 7943 uint16_t lxri = 0; 7944 7945 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 7946 unsigned long iflag = 0; 7947 struct lpfc_nodelist *ndlp; 7948 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 7949 7950 spin_lock_irqsave(&phba->hbalock, iflag); 7951 spin_lock(&phba->sli4_hba.abts_sgl_list_lock); 7952 list_for_each_entry_safe(sglq_entry, sglq_next, 7953 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { 7954 if (sglq_entry->sli4_xritag == xri) { 7955 list_del(&sglq_entry->list); 7956 ndlp = sglq_entry->ndlp; 7957 sglq_entry->ndlp = NULL; 7958 list_add_tail(&sglq_entry->list, 7959 &phba->sli4_hba.lpfc_sgl_list); 7960 sglq_entry->state = SGL_FREED; 7961 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); 7962 spin_unlock_irqrestore(&phba->hbalock, iflag); 7963 lpfc_set_rrq_active(phba, ndlp, 7964 sglq_entry->sli4_lxritag, 7965 rxid, 1); 7966 7967 /* Check if TXQ queue needs to be serviced */ 7968 if (pring->txq_cnt) 7969 lpfc_worker_wake_up(phba); 7970 return; 7971 } 7972 } 7973 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); 7974 lxri = lpfc_sli4_xri_inrange(phba, xri); 7975 if (lxri == NO_XRI) { 7976 spin_unlock_irqrestore(&phba->hbalock, iflag); 7977 return; 7978 } 7979 sglq_entry = __lpfc_get_active_sglq(phba, lxri); 7980 if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) { 7981 spin_unlock_irqrestore(&phba->hbalock, iflag); 7982 return; 7983 } 7984 sglq_entry->state = SGL_XRI_ABORTED; 7985 spin_unlock_irqrestore(&phba->hbalock, iflag); 7986 return; 7987 } 7988