/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
/* See Fibre Channel protocol T11 FC-LS for details */
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
			  struct lpfc_iocbq *);
static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
				  struct lpfc_iocbq *);
static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
				struct lpfc_nodelist *ndlp, uint8_t retry);
static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
				  struct lpfc_iocbq *iocb);

static int lpfc_max_els_tries = 3;

/**
 * lpfc_els_chk_latt - Check host link attention event for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine checks whether there is an outstanding host link
 * attention event during the discovery process with the @vport. It is done
 * by reading the HBA's Host Attention (HA) register. If there are any host
 * link attention events during this @vport's discovery process, the @vport
 * shall be marked as FC_ABORT_DISCOVERY, a host link attention clear shall
 * be issued if the link state is not already in host link cleared state,
 * and a return code shall indicate whether the host link attention event
 * had happened.
 *
 * Note that, if either the host link is in state LPFC_LINK_DOWN or the
 * @vport state is at least LPFC_VPORT_READY, the request for checking host
 * link attention events will be ignored and a return code shall indicate
 * that no host link attention event had happened.
 *
 * Return codes
 *   0 - no host link attention event happened
 *   1 - host link attention event happened
 **/
int
lpfc_els_chk_latt(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	uint32_t ha_copy;

	if (vport->port_state >= LPFC_VPORT_READY ||
	    phba->link_state == LPFC_LINK_DOWN ||
	    phba->sli_rev > LPFC_SLI_REV3)
		return 0;

	/* Read the HBA Host Attention Register */
	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		return 1;

	if (!(ha_copy & HA_LATT))
		return 0;

	/* Pending Link Event during Discovery */
	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
			 "0237 Pending Link Event during "
			 "Discovery: State x%x\n",
			 phba->pport->port_state);

	/* CLEAR_LA should re-enable link attention events and
	 * we should then immediately take a LATT event. The
	 * LATT processing should call lpfc_linkdown() which
	 * will cleanup any left over in-progress discovery
	 * events.
	 */
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_ABORT_DISCOVERY;
	spin_unlock_irq(shost->host_lock);

	if (phba->link_state != LPFC_CLEAR_LA)
		lpfc_issue_clear_la(phba, vport);

	return 1;
}

/**
 * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
 * @vport: pointer to a host virtual N_Port data structure.
 * @expectRsp: flag indicating whether response is expected.
 * @cmdSize: size of the ELS command.
 * @retry: number of retries to the command IOCB when it fails.
 * @ndlp: pointer to a node-list data structure.
 * @did: destination identifier.
 * @elscmd: the ELS command code.
 *
 * This routine allocates a lpfc-IOCB data structure from the driver
 * lpfc-IOCB free-list and prepares the IOCB with the parameters passed into
 * the routine, for the discovery state machine to issue Extended Link
 * Service (ELS) commands. It is a generic lpfc-IOCB allocation and
 * preparation routine used by all the discovery state machine routines;
 * the ELS command-specific fields are set up later by the individual
 * discovery state machine routines after calling this routine to allocate
 * and prepare a generic IOCB data structure. It fills in the Buffer
 * Descriptor Entries (BDEs) and allocates buffers for both the command
 * payload and the response payload (if expected). The reference count on
 * the ndlp is incremented by 1 and the reference to the ndlp is put into
 * context1 of the IOCB data structure for this IOCB to hold the ndlp
 * reference for the command's callback function to access later.
 *
 * Return code
 *   Pointer to the newly allocated/prepared els iocb data structure
 *   NULL - when els iocb data structure allocation/preparation failed
 **/
struct lpfc_iocbq *
lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
		   uint16_t cmdSize, uint8_t retry,
		   struct lpfc_nodelist *ndlp, uint32_t did,
		   uint32_t elscmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
	struct ulp_bde64 *bpl;
	IOCB_t *icmd;

	if (!lpfc_is_link_up(phba))
		return NULL;

	/* Allocate buffer for command iocb */
	elsiocb = lpfc_sli_get_iocbq(phba);

	if (elsiocb == NULL)
		return NULL;

	/*
	 * If this command is for fabric controller and HBA running
	 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
	 */
	if ((did == Fabric_DID) &&
	    (phba->hba_flag & HBA_FIP_SUPPORT) &&
	    ((elscmd == ELS_CMD_FLOGI) ||
	     (elscmd == ELS_CMD_FDISC) ||
	     (elscmd == ELS_CMD_LOGO)))
		switch (elscmd) {
		case ELS_CMD_FLOGI:
			elsiocb->iocb_flag |=
				((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
					& LPFC_FIP_ELS_ID_MASK);
			break;
		case ELS_CMD_FDISC:
			elsiocb->iocb_flag |=
				((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
					& LPFC_FIP_ELS_ID_MASK);
			break;
		case ELS_CMD_LOGO:
			elsiocb->iocb_flag |=
				((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
					& LPFC_FIP_ELS_ID_MASK);
			break;
		}
	else
		elsiocb->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;

	icmd = &elsiocb->iocb;

	/* fill in BDEs for command */
	/* Allocate buffer for command payload */
	pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (pcmd)
		pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
	if (!pcmd || !pcmd->virt)
		goto els_iocb_free_pcmb_exit;

	INIT_LIST_HEAD(&pcmd->list);

	/* Allocate buffer for response payload */
	if (expectRsp) {
		prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (prsp)
			prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
						     &prsp->phys);
		if (!prsp || !prsp->virt)
			goto els_iocb_free_prsp_exit;
		INIT_LIST_HEAD(&prsp->list);
	} else
		prsp = NULL;

	/* Allocate buffer for Buffer ptr list */
	pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (pbuflist)
		pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
						 &pbuflist->phys);
	if (!pbuflist || !pbuflist->virt)
		goto els_iocb_free_pbuf_exit;

	INIT_LIST_HEAD(&pbuflist->list);

	icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
	icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
	icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	icmd->un.elsreq64.remoteID = did;	/* DID */
	if (expectRsp) {
		icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
		icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
		icmd->ulpTimeout = phba->fc_ratov * 2;
	} else {
		icmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64);
		icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
	}
	icmd->ulpBdeCount = 1;
	icmd->ulpLe = 1;
	icmd->ulpClass = CLASS3;

	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
		icmd->un.elsreq64.myID = vport->fc_myDID;

		/* For ELS_REQUEST64_CR, use the VPI by default */
		icmd->ulpContext = phba->vpi_ids[vport->vpi];
		icmd->ulpCt_h = 0;
		/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
		if (elscmd == ELS_CMD_ECHO)
			icmd->ulpCt_l = 0; /* context = invalid RPI */
		else
			icmd->ulpCt_l = 1; /* context = VPI */
	}

	bpl = (struct ulp_bde64 *) pbuflist->virt;
	bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
	bpl->tus.f.bdeSize = cmdSize;
	bpl->tus.f.bdeFlags = 0;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

	if (expectRsp) {
		bpl++;
		bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
		bpl->tus.f.bdeSize = FCELSSIZE;
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
	}

	/* prevent preparing iocb with NULL ndlp reference */
	elsiocb->context1 = lpfc_nlp_get(ndlp);
	if (!elsiocb->context1)
		goto els_iocb_free_pbuf_exit;
	elsiocb->context2 = pcmd;
	elsiocb->context3 = pbuflist;
	elsiocb->retry = retry;
	elsiocb->vport = vport;
	elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;

	if (prsp) {
		list_add(&prsp->list, &pcmd->list);
	}
	if (expectRsp) {
		/* Xmit ELS command <elsCmd> to remote NPORT <did> */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "0116 Xmit ELS command x%x to remote "
				 "NPORT x%x I/O tag: x%x, port state: x%x\n",
				 elscmd, did, elsiocb->iotag,
				 vport->port_state);
	} else {
		/* Xmit ELS response <elsCmd> to remote NPORT <did> */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "0117 Xmit ELS response x%x to remote "
				 "NPORT x%x I/O tag: x%x, size: x%x\n",
				 elscmd, ndlp->nlp_DID, elsiocb->iotag,
				 cmdSize);
	}
	return elsiocb;

els_iocb_free_pbuf_exit:
	if (expectRsp)
		lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
	kfree(pbuflist);

els_iocb_free_prsp_exit:
	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
	kfree(prsp);

els_iocb_free_pcmb_exit:
	kfree(pcmd);
	lpfc_sli_release_iocbq(phba, elsiocb);
	return NULL;
}
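
/*
 * Illustrative caller sketch (not a separate function in this file):
 * discovery routines such as lpfc_issue_els_flogi() below typically pair
 * lpfc_prep_els_iocb() with a payload fill and a completion handler,
 * roughly:
 *
 *	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
 *				     ndlp->nlp_DID, ELS_CMD_FLOGI);
 *	if (!elsiocb)
 *		return 1;
 *	pcmd = ((struct lpfc_dmabuf *) elsiocb->context2)->virt;
 *	(write the ELS command code and payload at pcmd)
 *	elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;
 *	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
 *	if (rc == IOCB_ERROR)
 *		lpfc_els_free_iocb(phba, elsiocb);
 */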
/**
 * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a fabric registration login for a @vport. An
 * active ndlp node with Fabric_DID must already exist for this @vport.
 * The routine invokes two mailbox commands to carry out fabric registration
 * login through the HBA firmware: the first mailbox command requests the
 * HBA to perform link configuration for the @vport; and the second mailbox
 * command requests the HBA to perform the actual fabric registration login
 * with the @vport.
 *
 * Return code
 *   0 - successfully issued fabric registration login for @vport
 *   -ENXIO -- failed to issue fabric registration login for @vport
 **/
int
lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct serv_parm *sp;
	int rc;
	int err = 0;

	sp = &phba->fc_fabparam;
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		err = 1;
		goto fail;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		err = 2;
		goto fail;
	}

	vport->port_state = LPFC_FABRIC_CFG_LINK;
	lpfc_config_link(phba, mbox);
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->vport = vport;

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		err = 3;
		goto fail_free_mbox;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		err = 4;
		goto fail;
	}
	rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
			  ndlp->nlp_rpi);
	if (rc) {
		err = 5;
		goto fail_free_mbox;
	}

	mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
	mbox->vport = vport;
	/* increment the reference count on ndlp to hold reference
	 * for the callback routine.
	 */
	mbox->context2 = lpfc_nlp_get(ndlp);

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		err = 6;
		goto fail_issue_reg_login;
	}

	return 0;

fail_issue_reg_login:
	/* decrement the reference count on ndlp just incremented
	 * for the failed mbox command.
	 */
	lpfc_nlp_put(ndlp);
	mp = (struct lpfc_dmabuf *) mbox->context1;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
fail_free_mbox:
	mempool_free(mbox, phba->mbox_mem_pool);

fail:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
		"0249 Cannot issue Register Fabric login: Err %d\n", err);
	return -ENXIO;
}

/**
 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
 * the @vport. This mailbox command is necessary for SLI4 port only.
 *
 * Return code
 *   0 - successfully issued REG_VFI for @vport
 *   A failure code otherwise.
 **/
int
lpfc_issue_reg_vfi(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_nodelist *ndlp;
	struct serv_parm *sp;
	struct lpfc_dmabuf *dmabuf;
	int rc = 0;

	sp = &phba->fc_fabparam;
	/* move forward in case of SLI4 FC port loopback test */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    !(phba->link_flag & LS_LOOPBACK_MODE)) {
		ndlp = lpfc_findnode_did(vport, Fabric_DID);
		if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
			rc = -ENODEV;
			goto fail;
		}
	}

	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf) {
		rc = -ENOMEM;
		goto fail;
	}
	dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
	if (!dmabuf->virt) {
		rc = -ENOMEM;
		goto fail_free_dmabuf;
	}

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		rc = -ENOMEM;
		goto fail_free_coherent;
	}
	vport->port_state = LPFC_FABRIC_CFG_LINK;
	memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(vport->fc_sparam));
	lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
	mboxq->vport = vport;
	mboxq->context1 = dmabuf;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = -ENXIO;
		goto fail_free_mbox;
	}
	return 0;

fail_free_mbox:
	mempool_free(mboxq, phba->mbox_mem_pool);
fail_free_coherent:
	lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
fail_free_dmabuf:
	kfree(dmabuf);
fail:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
		"0289 Issue Register VFI failed: Err %d\n", rc);
	return rc;
}

/**
 * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for
 * the @vport. This mailbox command is necessary for SLI4 port only.
 *
 * Return code
 *   0 - successfully issued UNREG_VFI for @vport
 *   A failure code otherwise.
 **/
int
lpfc_issue_unreg_vfi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct Scsi_Host *shost;
	LPFC_MBOXQ_t *mboxq;
	int rc;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2556 UNREG_VFI mbox allocation failed "
				"HBA state x%x\n", phba->pport->port_state);
		return -ENOMEM;
	}

	lpfc_unreg_vfi(mboxq, vport);
	mboxq->vport = vport;
	mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl;

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2557 UNREG_VFI issue mbox failed rc x%x "
				"HBA state x%x\n",
				rc, phba->pport->port_state);
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EIO;
	}

	shost = lpfc_shost_from_vport(vport);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VFI_REGISTERED;
	spin_unlock_irq(shost->host_lock);
	return 0;
}
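
/*
 * The routines above all follow the driver's common mailbox pattern:
 * allocate an LPFC_MBOXQ_t from phba->mbox_mem_pool, build the command
 * (lpfc_config_link()/lpfc_reg_rpi()/lpfc_reg_vfi()/lpfc_unreg_vfi()),
 * set the completion handler and vport, then post it with MBX_NOWAIT.
 * A sketch of the skeleton, with most error handling omitted for brevity:
 *
 *	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	lpfc_unreg_vfi(mboxq, vport);
 *	mboxq->vport = vport;
 *	mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl;
 *	if (lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT) == MBX_NOT_FINISHED)
 *		mempool_free(mboxq, phba->mbox_mem_pool);
 */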
/**
 * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
 * @vport: pointer to a host virtual N_Port data structure.
 * @sp: pointer to service parameter data structure.
 *
 * This routine is called from FLOGI/FDISC completion handler functions.
 * lpfc_check_clean_addr_bit returns 1 when the FCID, Fabric portname or
 * Fabric nodename has changed in the completion service parameters;
 * otherwise it returns 0. This function also sets a flag in the vport data
 * structure to delay N_Port discovery after the FLOGI/FDISC completion if
 * the Clean Address bit in the FLOGI/FDISC response is cleared and the
 * FCID, Fabric portname or Fabric nodename has changed in the completion
 * service parameters.
 *
 * Return code
 *   0 - FCID, Fabric Nodename and Fabric portname are not changed.
 *   1 - FCID, Fabric Nodename or Fabric portname is changed.
 *
 **/
static uint8_t
lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
		struct serv_parm *sp)
{
	uint8_t fabric_param_changed = 0;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if ((vport->fc_prevDID != vport->fc_myDID) ||
		memcmp(&vport->fabric_portname, &sp->portName,
			sizeof(struct lpfc_name)) ||
		memcmp(&vport->fabric_nodename, &sp->nodeName,
			sizeof(struct lpfc_name)))
		fabric_param_changed = 1;

	/*
	 * Word 1 Bit 31 in common service parameter is overloaded.
	 * Word 1 Bit 31 in FLOGI request is multiple NPort request
	 * Word 1 Bit 31 in FLOGI response is clean address bit
	 *
	 * If fabric parameter is changed and clean address bit is
	 * cleared delay nport discovery if
	 * - vport->fc_prevDID != 0 (not initial discovery) OR
	 * - lpfc_delay_discovery module parameter is set.
	 */
	if (fabric_param_changed && !sp->cmn.clean_address_bit &&
	    (vport->fc_prevDID || lpfc_delay_discovery)) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_DISC_DELAYED;
		spin_unlock_irq(shost->host_lock);
	}

	return fabric_param_changed;
}
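
/*
 * Usage sketch: lpfc_cmpl_els_flogi_fabric() below consumes this helper
 * before it caches the new fabric names, roughly:
 *
 *	fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
 *	memcpy(&vport->fabric_portname, &sp->portName,
 *	       sizeof(struct lpfc_name));
 *	memcpy(&vport->fabric_nodename, &sp->nodeName,
 *	       sizeof(struct lpfc_name));
 *
 * so a changed FCID/portname/nodename is detected against the values saved
 * from the previous login, and FC_DISC_DELAYED may be set as a side effect.
 */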
/**
 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 * @irsp: pointer to the IOCB within the lpfc response IOCB.
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into a fabric
 * port in a fabric topology. It properly sets up the parameters to the @ndlp
 * from the IOCB response. It also checks the newly assigned N_Port ID to the
 * @vport against the previously assigned N_Port ID. If it is different from
 * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine
 * is invoked on all the remaining nodes with the @vport to unregister the
 * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin()
 * is invoked to register login to the fabric.
 *
 * Return code
 *   0 - Success (currently, always return 0)
 **/
static int
lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   struct serv_parm *sp, IOCB_t *irsp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_nodelist *np;
	struct lpfc_nodelist *next_np;
	uint8_t fabric_param_changed;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_FABRIC;
	spin_unlock_irq(shost->host_lock);

	phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
	if (sp->cmn.edtovResolution)	/* E_D_TOV ticks are in nanoseconds */
		phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;

	phba->fc_edtovResol = sp->cmn.edtovResolution;
	phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;

	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_PUBLIC_LOOP;
		spin_unlock_irq(shost->host_lock);
	}

	vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
	ndlp->nlp_class_sup = 0;
	if (sp->cls1.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS1;
	if (sp->cls2.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS2;
	if (sp->cls3.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS3;
	if (sp->cls4.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS4;
	ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
				sp->cmn.bbRcvSizeLsb;

	fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
	memcpy(&vport->fabric_portname, &sp->portName,
			sizeof(struct lpfc_name));
	memcpy(&vport->fabric_nodename, &sp->nodeName,
			sizeof(struct lpfc_name));
	memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));

	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
		if (sp->cmn.response_multiple_NPort) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_ELS | LOG_VPORT,
					 "1816 FLOGI NPIV supported, "
					 "response data 0x%x\n",
					 sp->cmn.response_multiple_NPort);
			spin_lock_irq(&phba->hbalock);
			phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
			spin_unlock_irq(&phba->hbalock);
		} else {
			/* Because we asked f/w for NPIV it still expects us
			 * to call reg_vnpid at least for the physical host
			 */
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_ELS | LOG_VPORT,
					 "1817 Fabric does not support NPIV "
					 "- configuring single port mode.\n");
			spin_lock_irq(&phba->hbalock);
			phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
			spin_unlock_irq(&phba->hbalock);
		}
	}

	if (fabric_param_changed &&
		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {

		/* If our NportID changed, we need to ensure all
		 * remaining NPORTs get unreg_login'ed.
		 */
		list_for_each_entry_safe(np, next_np,
					&vport->fc_nodes, nlp_listp) {
			if (!NLP_CHK_NODE_ACT(np))
				continue;
			if ((np->nlp_state != NLP_STE_NPR_NODE) ||
				   !(np->nlp_flag & NLP_NPR_ADISC))
				continue;
			spin_lock_irq(shost->host_lock);
			np->nlp_flag &= ~NLP_NPR_ADISC;
			spin_unlock_irq(shost->host_lock);
			lpfc_unreg_rpi(vport, np);
		}
		lpfc_cleanup_pending_mbox(vport);

		if (phba->sli_rev == LPFC_SLI_REV4) {
			lpfc_sli4_unreg_all_rpis(vport);
			lpfc_mbx_unreg_vpi(vport);
			spin_lock_irq(shost->host_lock);
			vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
			/*
			 * If VPI is unreged, driver needs to do INIT_VPI
			 * before re-registering
			 */
			vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
			spin_unlock_irq(shost->host_lock);
		}
	} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
		/*
		 * Driver needs to re-reg VPI in order for f/w
		 * to update the MAC address.
		 */
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		lpfc_register_new_vport(phba, vport, ndlp);
		return 0;
	}

	if (phba->sli_rev < LPFC_SLI_REV4) {
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
		    vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
			lpfc_register_new_vport(phba, vport, ndlp);
		else
			lpfc_issue_fabric_reglogin(vport);
	} else {
		ndlp->nlp_type |= NLP_FABRIC;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
			(vport->vpi_state & LPFC_VPI_REGISTERED)) {
			lpfc_start_fdiscs(phba);
			lpfc_do_scr_ns_plogi(phba, vport);
		} else if (vport->fc_flag & FC_VFI_REGISTERED)
			lpfc_issue_init_vpi(vport);
		else {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
					"3135 Need register VFI: (x%x/%x)\n",
					vport->fc_prevDID, vport->fc_myDID);
			lpfc_issue_reg_vfi(vport);
		}
	}
	return 0;
}

/**
 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into an N_Port
 * in a point-to-point topology. First, the @vport's N_Port Name is compared
 * with the received N_Port Name: if the @vport's N_Port Name is greater than
 * the received N_Port Name lexicographically, this node shall assign local
 * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and
 * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise,
 * this node shall just wait for the remote node to issue PLOGI and assign
 * N_Port IDs.
 *
 * Return code
 *   0 - Success
 *   -ENXIO - Fail
 **/
static int
lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  struct serv_parm *sp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
	spin_unlock_irq(shost->host_lock);

	phba->fc_edtov = FF_DEF_EDTOV;
	phba->fc_ratov = FF_DEF_RATOV;
	rc = memcmp(&vport->fc_portname, &sp->portName,
		    sizeof(vport->fc_portname));
	if (rc >= 0) {
		/* This side will initiate the PLOGI */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_PT2PT_PLOGI;
		spin_unlock_irq(shost->host_lock);

		/*
		 * N_Port ID cannot be 0, set ours to LocalID; the other
		 * side will be RemoteID.
		 */

		/* not equal */
		if (rc)
			vport->fc_myDID = PT2PT_LocalID;

		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!mbox)
			goto fail;

		lpfc_config_link(phba, mbox);

		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->vport = vport;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
			goto fail;
		}
		/* Decrement ndlp reference count indicating that ndlp can be
		 * safely released when other references to it are done.
		 */
		lpfc_nlp_put(ndlp);

		ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
		if (!ndlp) {
			/*
			 * Cannot find existing Fabric ndlp, so allocate a
			 * new one
			 */
			ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
			if (!ndlp)
				goto fail;
			lpfc_nlp_init(vport, ndlp, PT2PT_RemoteID);
		} else if (!NLP_CHK_NODE_ACT(ndlp)) {
			ndlp = lpfc_enable_node(vport, ndlp,
						NLP_STE_UNUSED_NODE);
			if (!ndlp)
				goto fail;
		}

		memcpy(&ndlp->nlp_portname, &sp->portName,
		       sizeof(struct lpfc_name));
		memcpy(&ndlp->nlp_nodename, &sp->nodeName,
		       sizeof(struct lpfc_name));
		/* Set state will put ndlp onto node list if not already done */
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
	} else
		/* This side will wait for the PLOGI, decrement ndlp reference
		 * count indicating that ndlp can be released when other
		 * references to it are done.
		 */
		lpfc_nlp_put(ndlp);

	/* If we are pt2pt with another NPort, force NPIV off! */
	phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_PT2PT;
	spin_unlock_irq(shost->host_lock);

	/* Start discovery - this should just do CLEAR_LA */
	lpfc_disc_start(vport);
	return 0;
fail:
	return -ENXIO;
}
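
/*
 * Point-to-point note (summary of the routine above): the local and remote
 * port names are compared with memcmp(); when this port's WWPN compares
 * greater than or equal to the peer's (rc >= 0) the local port initiates the
 * PLOGI, assigning itself PT2PT_LocalID (1) and the peer PT2PT_RemoteID (2)
 * when the names differ; otherwise it waits for the peer's PLOGI. Either
 * way, NPIV is forced off and FC_PT2PT is set before discovery starts.
 */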
/**
 * lpfc_cmpl_els_flogi - Completion callback function for flogi
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is the top-level completion callback function for issuing
 * a Fabric Login (FLOGI) command. If the response IOCB reported an error,
 * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If
 * a retry has been made (either immediately or delayed, with lpfc_els_retry()
 * returning 1), the command IOCB will be released and the function returns.
 * If the retry attempt has been given up (possibly after reaching the maximum
 * number of retries), one additional decrement of the ndlp reference shall be
 * made before going out after releasing the command IOCB. This will
 * actually release the remote node (Note, lpfc_els_free_iocb() will also
 * invoke one decrement of the ndlp reference count). If no error is reported
 * in the IOCB status, the command Port ID field is used to determine whether
 * this is a point-to-point topology or a fabric topology: if the Port ID
 * field is assigned, it is a fabric topology; otherwise, it is a
 * point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or
 * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the
 * specific topology completion conditions.
 **/
static void
lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		    struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	IOCB_t *irsp = &rspiocb->iocb;
	struct lpfc_nodelist *ndlp = cmdiocb->context1;
	struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
	struct serv_parm *sp;
	uint16_t fcf_index;
	int rc;

	/* Check to see if link went down during discovery */
	if (lpfc_els_chk_latt(vport)) {
		/* One additional decrement on node reference count to
		 * trigger the release of the node
		 */
		lpfc_nlp_put(ndlp);
		goto out;
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"FLOGI cmpl:      status:x%x/x%x state:x%x",
		irsp->ulpStatus, irsp->un.ulpWord[4],
		vport->port_state);

	if (irsp->ulpStatus) {
		/*
		 * In case of FIP mode, perform roundrobin FCF failover
		 * due to new FCF discovery
		 */
		if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
		    (phba->fcf.fcf_flag & FCF_DISCOVERY) &&
		    !((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
		    (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED))) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
					"2611 FLOGI failed on FCF (x%x), "
					"status:x%x/x%x, tmo:x%x, perform "
					"roundrobin FCF failover\n",
					phba->fcf.current_rec.fcf_indx,
					irsp->ulpStatus, irsp->un.ulpWord[4],
					irsp->ulpTimeout);
			lpfc_sli4_set_fcf_flogi_fail(phba,
					phba->fcf.current_rec.fcf_indx);
			fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
			rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
			if (rc)
				goto out;
		}

		/* FLOGI failure */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "2858 FLOGI failure Status:x%x/x%x TMO:x%x\n",
				 irsp->ulpStatus, irsp->un.ulpWord[4],
				 irsp->ulpTimeout);

		/* Check for retry */
		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
			goto out;

		/* FLOGI failure */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n",
				 irsp->ulpStatus, irsp->un.ulpWord[4],
				 irsp->ulpTimeout);

		/* FLOGI failed, so there is no fabric */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
		spin_unlock_irq(shost->host_lock);

		/* If private loop, then allow max outstanding els to be
		 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
		 * alpa map would take too long otherwise.
		 */
		if (phba->alpa_map[0] == 0)
			vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
		if ((phba->sli_rev == LPFC_SLI_REV4) &&
		    (!(vport->fc_flag & FC_VFI_REGISTERED) ||
		     (vport->fc_prevDID != vport->fc_myDID))) {
			if (vport->fc_flag & FC_VFI_REGISTERED)
				lpfc_sli4_unreg_all_rpis(vport);
			lpfc_issue_reg_vfi(vport);
			lpfc_nlp_put(ndlp);
			goto out;
		}
		goto flogifail;
	}
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
	vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
	spin_unlock_irq(shost->host_lock);

	/*
	 * The FLogI succeeded.  Sync the data for the CPU before
	 * accessing it.
	 */
	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);

	sp = prsp->virt + sizeof(uint32_t);

	/* FLOGI completes successfully */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0101 FLOGI completes successfully "
			 "Data: x%x x%x x%x x%x\n",
			 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
			 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution);

	if (vport->port_state == LPFC_FLOGI) {
		/*
		 * If Common Service Parameters indicate Nport
		 * we are point to point, if Fport we are Fabric.
		 */
		if (sp->cmn.fPort)
			rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
		else if (!(phba->hba_flag & HBA_FCOE_MODE))
			rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
		else {
			lpfc_printf_vlog(vport, KERN_ERR,
				LOG_FIP | LOG_ELS,
				"2831 FLOGI response with cleared Fabric "
				"bit fcf_index 0x%x "
				"Switch Name %02x%02x%02x%02x%02x%02x%02x%02x "
				"Fabric Name "
				"%02x%02x%02x%02x%02x%02x%02x%02x\n",
				phba->fcf.current_rec.fcf_indx,
				phba->fcf.current_rec.switch_name[0],
				phba->fcf.current_rec.switch_name[1],
				phba->fcf.current_rec.switch_name[2],
				phba->fcf.current_rec.switch_name[3],
				phba->fcf.current_rec.switch_name[4],
				phba->fcf.current_rec.switch_name[5],
				phba->fcf.current_rec.switch_name[6],
				phba->fcf.current_rec.switch_name[7],
				phba->fcf.current_rec.fabric_name[0],
				phba->fcf.current_rec.fabric_name[1],
				phba->fcf.current_rec.fabric_name[2],
				phba->fcf.current_rec.fabric_name[3],
				phba->fcf.current_rec.fabric_name[4],
				phba->fcf.current_rec.fabric_name[5],
				phba->fcf.current_rec.fabric_name[6],
				phba->fcf.current_rec.fabric_name[7]);
			lpfc_nlp_put(ndlp);
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
			spin_unlock_irq(&phba->hbalock);
			goto out;
		}
		if (!rc) {
			/* Mark the FCF discovery process done */
			if (phba->hba_flag & HBA_FIP_SUPPORT)
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
						LOG_ELS,
						"2769 FLOGI to FCF (x%x) "
						"completed successfully\n",
						phba->fcf.current_rec.fcf_indx);
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
			spin_unlock_irq(&phba->hbalock);
			goto out;
		}
	}

flogifail:
	lpfc_nlp_put(ndlp);

	if (!lpfc_error_lost_link(irsp)) {
		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(vport);

		/* Start discovery */
		lpfc_disc_start(vport);
	} else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
			((irsp->un.ulpWord[4] != IOERR_SLI_ABORTED) &&
			(irsp->un.ulpWord[4] != IOERR_SLI_DOWN))) &&
			(phba->link_state != LPFC_CLEAR_LA)) {
		/* If FLOGI failed enable link interrupt. */
		lpfc_issue_clear_la(phba, vport);
	}
out:
	lpfc_els_free_iocb(phba, cmdiocb);
}

/**
 * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @retry: number of retries to the command IOCB.
 *
 * This routine issues a Fabric Login (FLOGI) Request ELS command
 * for a @vport. The initiator service parameters are put into the payload
 * of the FLOGI Request IOCB and the top-level callback function pointer
 * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback
 * function field. The lpfc_issue_fabric_iocb routine is invoked to send
 * out the FLOGI ELS command with one outstanding fabric IOCB at a time.
 *
 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 * will be stored into the context1 field of the IOCB for the completion
 * callback function to the FLOGI ELS command.
 *
 * Return code
 *   0 - successfully issued flogi iocb for @vport
 *   1 - failed to issue flogi iocb for @vport
 **/
static int
lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		     uint8_t retry)
{
	struct lpfc_hba  *phba = vport->phba;
	struct serv_parm *sp;
	IOCB_t *icmd;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_sli_ring *pring;
	uint8_t *pcmd;
	uint16_t cmdsize;
	uint32_t tmo;
	int rc;

	pring = &phba->sli.ring[LPFC_ELS_RING];

	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
				     ndlp->nlp_DID, ELS_CMD_FLOGI);

	if (!elsiocb)
		return 1;

	icmd = &elsiocb->iocb;
	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);

	/* For FLOGI request, remainder of payload is service parameters */
	*((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
	pcmd += sizeof(uint32_t);
	memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
	sp = (struct serv_parm *) pcmd;

	/* Setup CSPs accordingly for Fabric */
	sp->cmn.e_d_tov = 0;
	sp->cmn.w2.r_a_tov = 0;
	sp->cmn.virtual_fabric_support = 0;
	sp->cls1.classValid = 0;
	sp->cls2.seqDelivery = 1;
	sp->cls3.seqDelivery = 1;
	if (sp->cmn.fcphLow < FC_PH3)
		sp->cmn.fcphLow = FC_PH3;
	if (sp->cmn.fcphHigh < FC_PH3)
		sp->cmn.fcphHigh = FC_PH3;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
		    LPFC_SLI_INTF_IF_TYPE_0) {
			elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
			elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
			/* FLOGI needs to be 3 for WQE FCFI */
			/* Set the fcfi to the fcfi we registered with */
			elsiocb->iocb.ulpContext = phba->fcf.fcfi;
		}
	} else {
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
			sp->cmn.request_multiple_Nport = 1;
			/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
			icmd->ulpCt_h = 1;
			icmd->ulpCt_l = 0;
		} else
			sp->cmn.request_multiple_Nport = 0;
	}

	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
		icmd->un.elsreq64.myID = 0;
		icmd->un.elsreq64.fl = 1;
	}

	tmo = phba->fc_ratov;
	phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
	lpfc_set_disctmo(vport);
	phba->fc_ratov = tmo;

	phba->fc_stat.elsXmitFLOGI++;
	elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Issue FLOGI:     opt:x%x",
		phba->sli3_options, 0, 0);

	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
	if (rc == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
	}
	return 0;
}

/**
 * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs
 * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq
 * list and issues an abort IOCB command on each outstanding IOCB that
 * contains an active Fabric_DID ndlp. Note that this function is to issue
 * the abort IOCB command on all the outstanding IOCBs, thus when this
 * function returns, it does not guarantee all the IOCBs are actually aborted.
 *
 * Return code
 *   0 - Successfully issued abort iocb on all outstanding flogis (Always 0)
 **/
int
lpfc_els_abort_flogi(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	struct lpfc_nodelist *ndlp;
	IOCB_t *icmd;

	/* Abort outstanding I/O on NPort <nlp_DID> */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"0201 Abort outstanding I/O on NPort x%x\n",
			Fabric_DID);

	pring = &phba->sli.ring[LPFC_ELS_RING];

	/*
	 * Check the txcmplq for an iocb that matches the nport the driver is
	 * searching for.
	 */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		icmd = &iocb->iocb;
		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
			ndlp = (struct lpfc_nodelist *)(iocb->context1);
			if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
			    (ndlp->nlp_DID == Fabric_DID))
				lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		}
	}
	spin_unlock_irq(&phba->hbalock);

	return 0;
}
/**
 * lpfc_initial_flogi - Issue an initial fabric login for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues an initial Fabric Login (FLOGI) for the @vport
 * specified. It first searches for the ndlp with the Fabric_DID (0xfffffe)
 * from the @vport's ndlp list. If no such ndlp is found, it will create an
 * ndlp and put it into the @vport's ndlp list. If an inactive ndlp is found
 * on the list, it will just be enabled and made active. The
 * lpfc_issue_els_flogi() routine is then invoked with the @vport and the
 * ndlp to perform the FLOGI for the @vport.
 *
 * Return code
 *   0 - failed to issue initial flogi for @vport
 *   1 - successfully issued initial flogi for @vport
 **/
int
lpfc_initial_flogi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp;

	vport->port_state = LPFC_FLOGI;
	lpfc_set_disctmo(vport);

	/* First look for the Fabric ndlp */
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return 0;
		lpfc_nlp_init(vport, ndlp, Fabric_DID);
		/* Set the node type */
		ndlp->nlp_type |= NLP_FABRIC;
		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		/* re-setup ndlp without removing from node list */
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
		if (!ndlp)
			return 0;
	}

	if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
		/* This decrement of reference count to node shall kick off
		 * the release of the node.
		 */
		lpfc_nlp_put(ndlp);
		return 0;
	}
	return 1;
}

/**
 * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues an initial Fabric Discover (FDISC) for the @vport
 * specified. It first searches for the ndlp with the Fabric_DID (0xfffffe)
 * from the @vport's ndlp list. If no such ndlp is found, it will create an
 * ndlp and put it into the @vport's ndlp list. If an inactive ndlp is found
 * on the list, it will just be enabled and made active. The
 * lpfc_issue_els_fdisc() routine is then invoked with the @vport and the
 * ndlp to perform the FDISC for the @vport.
 *
 * Return code
 *   0 - failed to issue initial fdisc for @vport
 *   1 - successfully issued initial fdisc for @vport
 **/
int
lpfc_initial_fdisc(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp;

	/* First look for the Fabric ndlp */
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return 0;
		lpfc_nlp_init(vport, ndlp, Fabric_DID);
		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		/* re-setup ndlp without removing from node list */
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
		if (!ndlp)
			return 0;
	}

	if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
		/* decrement node reference count to trigger the release of
		 * the node.
		 */
		lpfc_nlp_put(ndlp);
		return 0;
	}
	return 1;
}
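
/*
 * Note on the two entry points above: lpfc_initial_flogi() is the fabric
 * login path for the physical port, while lpfc_initial_fdisc() is the
 * corresponding path for NPIV virtual ports. Both resolve (or create) the
 * Fabric_DID ndlp first and differ only in whether lpfc_issue_els_flogi()
 * or lpfc_issue_els_fdisc() is invoked.
 */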
/**
 * lpfc_more_plogi - Check and issue remaining plogis for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine checks whether there are more remaining Port Logins
 * (PLOGI) to be issued for the @vport. If so, it will invoke the routine
 * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes
 * to issue ELS PLOGIs up to the configured discover threads with the
 * @vport (@vport->cfg_discovery_threads). The function also decrements
 * the @vport's num_disc_nodes by 1 if it is not already 0.
 **/
void
lpfc_more_plogi(struct lpfc_vport *vport)
{
	int sentplogi;

	if (vport->num_disc_nodes)
		vport->num_disc_nodes--;

	/* Continue discovery with <num_disc_nodes> PLOGIs to go */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0232 Continue discovery with %d PLOGIs to go "
			 "Data: x%x x%x x%x\n",
			 vport->num_disc_nodes, vport->fc_plogi_cnt,
			 vport->fc_flag, vport->port_state);
	/* Check to see if there are more PLOGIs to be sent */
	if (vport->fc_flag & FC_NLP_MORE)
		/* go thru NPR nodes and issue any remaining ELS PLOGIs */
		sentplogi = lpfc_els_disc_plogi(vport);

	return;
}

/**
 * lpfc_plogi_confirm_nport - Confirm plogi wwpn matches stored ndlp
 * @phba: pointer to lpfc hba data structure.
 * @prsp: pointer to response IOCB payload.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine checks and indicates whether the WWPN of an N_Port, retrieved
 * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_Port.
 * The following cases are considered N_Port confirmed:
 * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on vport list and matches
 * the WWPN of the N_Port logged into; 3) The @ndlp is not on vport list but
 * it does not have WWPN assigned either. If the WWPN is confirmed, the
 * pointer to the @ndlp will be returned. If the WWPN is not confirmed:
 * 1) if there is a node on vport list other than the @ndlp with the same
 * WWPN of the N_Port PLOGI logged into, the lpfc_unreg_rpi() will be invoked
 * on that node to release the RPI associated with the node; 2) if there is
 * no node found on vport list with the same WWPN of the N_Port PLOGI logged
 * into, a new node shall be allocated (or activated). In either case, the
 * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall
 * be released and the new_ndlp shall be put on to the vport node list and
 * its pointer returned as the confirmed node.
 *
 * Note that before the @ndlp got "released", the keepDID from the
 * not-matching or inactive "new_ndlp" on the vport node list is assigned to
 * the nlp_DID of the @ndlp. This is because the release of @ndlp is actually
 * to put it into an inactive state on the vport node list and the vport node
 * list management algorithm does not allow two nodes with the same DID.
 *
 * Return code
 *   pointer to the PLOGI N_Port @ndlp
 **/
static struct lpfc_nodelist *
lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
			 struct lpfc_nodelist *ndlp)
{
	struct lpfc_vport    *vport = ndlp->vport;
	struct lpfc_nodelist *new_ndlp;
	struct lpfc_rport_data *rdata;
	struct fc_rport *rport;
	struct serv_parm *sp;
	uint8_t  name[sizeof(struct lpfc_name)];
	uint32_t rc, keepDID = 0;
	int  put_node;
	int  put_rport;
	struct lpfc_node_rrqs rrq;

	/* Fabric nodes can have the same WWPN so we don't bother searching
	 * by WWPN.  Just return the ndlp that was given to us.
	 */
	if (ndlp->nlp_type & NLP_FABRIC)
		return ndlp;

	sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t));
	memset(name, 0, sizeof(struct lpfc_name));

	/* Now we find out if the NPort we are logging into, matches the WWPN
	 * we have for that ndlp. If not, we have some work to do.
	 */
	new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);

	if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp))
		return ndlp;
	memset(&rrq.xri_bitmap, 0, sizeof(new_ndlp->active_rrqs.xri_bitmap));

	if (!new_ndlp) {
		rc = memcmp(&ndlp->nlp_portname, name,
			    sizeof(struct lpfc_name));
		if (!rc)
			return ndlp;
		new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
		if (!new_ndlp)
			return ndlp;
		lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID);
	} else if (!NLP_CHK_NODE_ACT(new_ndlp)) {
		rc = memcmp(&ndlp->nlp_portname, name,
			    sizeof(struct lpfc_name));
		if (!rc)
			return ndlp;
		new_ndlp = lpfc_enable_node(vport, new_ndlp,
						NLP_STE_UNUSED_NODE);
		if (!new_ndlp)
			return ndlp;
		keepDID = new_ndlp->nlp_DID;
		if (phba->sli_rev == LPFC_SLI_REV4)
			memcpy(&rrq.xri_bitmap,
				&new_ndlp->active_rrqs.xri_bitmap,
				sizeof(new_ndlp->active_rrqs.xri_bitmap));
	} else {
		keepDID = new_ndlp->nlp_DID;
		if (phba->sli_rev == LPFC_SLI_REV4)
			memcpy(&rrq.xri_bitmap,
				&new_ndlp->active_rrqs.xri_bitmap,
				sizeof(new_ndlp->active_rrqs.xri_bitmap));
	}

	lpfc_unreg_rpi(vport, new_ndlp);
	new_ndlp->nlp_DID = ndlp->nlp_DID;
	new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
	if (phba->sli_rev == LPFC_SLI_REV4)
		memcpy(new_ndlp->active_rrqs.xri_bitmap,
			&ndlp->active_rrqs.xri_bitmap,
			sizeof(ndlp->active_rrqs.xri_bitmap));

	if (ndlp->nlp_flag & NLP_NPR_2B_DISC)
		new_ndlp->nlp_flag |= NLP_NPR_2B_DISC;
	ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;

	/* Set state will put new_ndlp on to node list if not already done */
	lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state);

	/* Move this back to NPR state */
	if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
		/* The new_ndlp is replacing ndlp totally, so we need
		 * to put ndlp on UNUSED list and try to free it.
		 */

		/* Fix up the rport accordingly */
		rport =  ndlp->rport;
		if (rport) {
			rdata = rport->dd_data;
			if (rdata->pnode == ndlp) {
				lpfc_nlp_put(ndlp);
				ndlp->rport = NULL;
				rdata->pnode = lpfc_nlp_get(new_ndlp);
				new_ndlp->rport = rport;
			}
			new_ndlp->nlp_type = ndlp->nlp_type;
		}
		/* We shall actually free the ndlp with both nlp_DID and
		 * nlp_portname fields equals 0 to avoid any ndlp on the
		 * nodelist never to be used.
		 */
		if (ndlp->nlp_DID == 0) {
			spin_lock_irq(&phba->ndlp_lock);
			NLP_SET_FREE_REQ(ndlp);
			spin_unlock_irq(&phba->ndlp_lock);
		}

		/* Two ndlps cannot have the same did on the nodelist */
		ndlp->nlp_DID = keepDID;
		if (phba->sli_rev == LPFC_SLI_REV4)
			memcpy(&ndlp->active_rrqs.xri_bitmap,
				&rrq.xri_bitmap,
				sizeof(ndlp->active_rrqs.xri_bitmap));
		lpfc_drop_node(vport, ndlp);
	} else {
		lpfc_unreg_rpi(vport, ndlp);
		/* Two ndlps cannot have the same did */
		ndlp->nlp_DID = keepDID;
		if (phba->sli_rev == LPFC_SLI_REV4)
			memcpy(&ndlp->active_rrqs.xri_bitmap,
				&rrq.xri_bitmap,
				sizeof(ndlp->active_rrqs.xri_bitmap));
		/* Since we are swapping the ndlp passed in with the new one
		 * and the did has already been swapped, copy over the
		 * state and names.
		 */
		memcpy(&new_ndlp->nlp_portname, &ndlp->nlp_portname,
			sizeof(struct lpfc_name));
		memcpy(&new_ndlp->nlp_nodename, &ndlp->nlp_nodename,
			sizeof(struct lpfc_name));
		new_ndlp->nlp_state = ndlp->nlp_state;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		/* Fix up the rport accordingly */
		rport = ndlp->rport;
		if (rport) {
			rdata = rport->dd_data;
			put_node = rdata->pnode != NULL;
			put_rport = ndlp->rport != NULL;
			rdata->pnode = NULL;
			ndlp->rport = NULL;
			if (put_node)
				lpfc_nlp_put(ndlp);
			if (put_rport)
				put_device(&rport->dev);
		}
	}
	return new_ndlp;
}

/**
 * lpfc_end_rscn - Check and handle more rscn for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine checks whether more Registration State Change
 * Notifications (RSCNs) came in while the discovery state machine was in
 * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be
 * invoked to handle the additional RSCNs for the @vport. Otherwise, the
 * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of
 * handling the RSCNs.
 **/
void
lpfc_end_rscn(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (vport->fc_flag & FC_RSCN_MODE) {
		/*
		 * Check to see if more RSCNs came in while we were
		 * processing this one.
		 */
		if (vport->fc_rscn_id_cnt ||
		    (vport->fc_flag & FC_RSCN_DISCOVERY) != 0)
			lpfc_els_handle_rscn(vport);
		else {
			spin_lock_irq(shost->host_lock);
			vport->fc_flag &= ~FC_RSCN_MODE;
			spin_unlock_irq(shost->host_lock);
		}
	}
}

/**
 * lpfc_cmpl_els_rrq - Completion handler for els RRQs.
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine will call the clear rrq function to free the rrq and
 * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not
 * exist then the clear_rrq is still called because the rrq needs to
 * be freed.
 **/

static void
lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		  struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	IOCB_t *irsp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_node_rrq *rrq;

	/* we pass cmdiocb to state machine which needs rspiocb as well */
	rrq = cmdiocb->context_un.rrq;
	cmdiocb->context_un.rsp_iocb = rspiocb;

	irsp = &rspiocb->iocb;
	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"RRQ cmpl:      status:x%x/x%x did:x%x",
		irsp->ulpStatus, irsp->un.ulpWord[4],
		irsp->un.elsreq64.remoteID);

	ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || ndlp != rrq->ndlp) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "2882 RRQ completes to NPort x%x "
				 "with no ndlp. Data: x%x x%x x%x\n",
Data: x%x x%x x%x\n", 1624 irsp->un.elsreq64.remoteID, 1625 irsp->ulpStatus, irsp->un.ulpWord[4], 1626 irsp->ulpIoTag); 1627 goto out; 1628 } 1629 1630 /* rrq completes to NPort <nlp_DID> */ 1631 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1632 "2880 RRQ completes to NPort x%x " 1633 "Data: x%x x%x x%x x%x x%x\n", 1634 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 1635 irsp->ulpTimeout, rrq->xritag, rrq->rxid); 1636 1637 if (irsp->ulpStatus) { 1638 /* Check for retry */ 1639 /* RRQ failed Don't print the vport to vport rjts */ 1640 if (irsp->ulpStatus != IOSTAT_LS_RJT || 1641 (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) && 1642 ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) || 1643 (phba)->pport->cfg_log_verbose & LOG_ELS) 1644 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 1645 "2881 RRQ failure DID:%06X Status:x%x/x%x\n", 1646 ndlp->nlp_DID, irsp->ulpStatus, 1647 irsp->un.ulpWord[4]); 1648 } 1649 out: 1650 if (rrq) 1651 lpfc_clr_rrq_active(phba, rrq->xritag, rrq); 1652 lpfc_els_free_iocb(phba, cmdiocb); 1653 return; 1654 } 1655 /** 1656 * lpfc_cmpl_els_plogi - Completion callback function for plogi 1657 * @phba: pointer to lpfc hba data structure. 1658 * @cmdiocb: pointer to lpfc command iocb data structure. 1659 * @rspiocb: pointer to lpfc response iocb data structure. 1660 * 1661 * This routine is the completion callback function for issuing the Port 1662 * Login (PLOGI) command. For PLOGI completion, there must be an active 1663 * ndlp on the vport node list that matches the remote node ID from the 1664 * PLOGI response IOCB. If such ndlp does not exist, the PLOGI is simply 1665 * ignored and command IOCB released. The PLOGI response IOCB status is 1666 * checked for error conditons. If there is error status reported, PLOGI 1667 * retry shall be attempted by invoking the lpfc_els_retry() routine. 1668 * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on 1669 * the ndlp and the NLP_EVT_CMPL_PLOGI state to the Discover State Machine 1670 * (DSM) is set for this PLOGI completion. Finally, it checks whether 1671 * there are additional N_Port nodes with the vport that need to perform 1672 * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue addition 1673 * PLOGIs. 1674 **/ 1675 static void 1676 lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1677 struct lpfc_iocbq *rspiocb) 1678 { 1679 struct lpfc_vport *vport = cmdiocb->vport; 1680 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1681 IOCB_t *irsp; 1682 struct lpfc_nodelist *ndlp; 1683 struct lpfc_dmabuf *prsp; 1684 int disc, rc, did, type; 1685 1686 /* we pass cmdiocb to state machine which needs rspiocb as well */ 1687 cmdiocb->context_un.rsp_iocb = rspiocb; 1688 1689 irsp = &rspiocb->iocb; 1690 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1691 "PLOGI cmpl: status:x%x/x%x did:x%x", 1692 irsp->ulpStatus, irsp->un.ulpWord[4], 1693 irsp->un.elsreq64.remoteID); 1694 1695 ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID); 1696 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { 1697 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 1698 "0136 PLOGI completes to NPort x%x " 1699 "with no ndlp. Data: x%x x%x x%x\n", 1700 irsp->un.elsreq64.remoteID, 1701 irsp->ulpStatus, irsp->un.ulpWord[4], 1702 irsp->ulpIoTag); 1703 goto out; 1704 } 1705 1706 /* Since ndlp can be freed in the disc state machine, note if this node 1707 * is being used during discovery. 
1708 */ 1709 spin_lock_irq(shost->host_lock); 1710 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 1711 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 1712 spin_unlock_irq(shost->host_lock); 1713 rc = 0; 1714 1715 /* PLOGI completes to NPort <nlp_DID> */ 1716 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1717 "0102 PLOGI completes to NPort x%x " 1718 "Data: x%x x%x x%x x%x x%x\n", 1719 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 1720 irsp->ulpTimeout, disc, vport->num_disc_nodes); 1721 /* Check to see if link went down during discovery */ 1722 if (lpfc_els_chk_latt(vport)) { 1723 spin_lock_irq(shost->host_lock); 1724 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 1725 spin_unlock_irq(shost->host_lock); 1726 goto out; 1727 } 1728 1729 /* ndlp could be freed in DSM, save these values now */ 1730 type = ndlp->nlp_type; 1731 did = ndlp->nlp_DID; 1732 1733 if (irsp->ulpStatus) { 1734 /* Check for retry */ 1735 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 1736 /* ELS command is being retried */ 1737 if (disc) { 1738 spin_lock_irq(shost->host_lock); 1739 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 1740 spin_unlock_irq(shost->host_lock); 1741 } 1742 goto out; 1743 } 1744 /* PLOGI failed Don't print the vport to vport rjts */ 1745 if (irsp->ulpStatus != IOSTAT_LS_RJT || 1746 (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) && 1747 ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) || 1748 (phba)->pport->cfg_log_verbose & LOG_ELS) 1749 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 1750 "2753 PLOGI failure DID:%06X Status:x%x/x%x\n", 1751 ndlp->nlp_DID, irsp->ulpStatus, 1752 irsp->un.ulpWord[4]); 1753 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 1754 if (lpfc_error_lost_link(irsp)) 1755 rc = NLP_STE_FREED_NODE; 1756 else 1757 rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1758 NLP_EVT_CMPL_PLOGI); 1759 } else { 1760 /* Good status, call state machine */ 1761 prsp = list_entry(((struct lpfc_dmabuf *) 1762 cmdiocb->context2)->list.next, 1763 struct lpfc_dmabuf, list); 1764 ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp); 1765 rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1766 NLP_EVT_CMPL_PLOGI); 1767 } 1768 1769 if (disc && vport->num_disc_nodes) { 1770 /* Check to see if there are more PLOGIs to be sent */ 1771 lpfc_more_plogi(vport); 1772 1773 if (vport->num_disc_nodes == 0) { 1774 spin_lock_irq(shost->host_lock); 1775 vport->fc_flag &= ~FC_NDISC_ACTIVE; 1776 spin_unlock_irq(shost->host_lock); 1777 1778 lpfc_can_disctmo(vport); 1779 lpfc_end_rscn(vport); 1780 } 1781 } 1782 1783 out: 1784 lpfc_els_free_iocb(phba, cmdiocb); 1785 return; 1786 } 1787 1788 /** 1789 * lpfc_issue_els_plogi - Issue an plogi iocb command for a vport 1790 * @vport: pointer to a host virtual N_Port data structure. 1791 * @did: destination port identifier. 1792 * @retry: number of retries to the command IOCB. 1793 * 1794 * This routine issues a Port Login (PLOGI) command to a remote N_Port 1795 * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port, 1796 * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list. 1797 * This routine constructs the proper feilds of the PLOGI IOCB and invokes 1798 * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command. 1799 * 1800 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 1801 * will be incremented by 1 for holding the ndlp and the reference to ndlp 1802 * will be stored into the context1 field of the IOCB for the completion 1803 * callback function to the PLOGI ELS command. 
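 *
 * As an illustrative sketch only (not code lifted from the driver), a
 * caller that already holds an active ndlp for the remote port issues the
 * PLOGI by DID and then transitions the node, much as the delayed-retry
 * handler in this file does:
 *
 *	if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) {
 *		ndlp->nlp_prev_state = ndlp->nlp_state;
 *		lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
 *	}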
1804 * 1805 * Return code 1806 * 0 - Successfully issued a plogi for @vport 1807 * 1 - failed to issue a plogi for @vport 1808 **/ 1809 int 1810 lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) 1811 { 1812 struct lpfc_hba *phba = vport->phba; 1813 struct serv_parm *sp; 1814 IOCB_t *icmd; 1815 struct lpfc_nodelist *ndlp; 1816 struct lpfc_iocbq *elsiocb; 1817 struct lpfc_sli *psli; 1818 uint8_t *pcmd; 1819 uint16_t cmdsize; 1820 int ret; 1821 1822 psli = &phba->sli; 1823 1824 ndlp = lpfc_findnode_did(vport, did); 1825 if (ndlp && !NLP_CHK_NODE_ACT(ndlp)) 1826 ndlp = NULL; 1827 1828 /* If ndlp is not NULL, we will bump the reference count on it */ 1829 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 1830 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 1831 ELS_CMD_PLOGI); 1832 if (!elsiocb) 1833 return 1; 1834 1835 icmd = &elsiocb->iocb; 1836 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 1837 1838 /* For PLOGI request, remainder of payload is service parameters */ 1839 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI; 1840 pcmd += sizeof(uint32_t); 1841 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); 1842 sp = (struct serv_parm *) pcmd; 1843 1844 /* 1845 * If we are a N-port connected to a Fabric, fix-up paramm's so logins 1846 * to device on remote loops work. 1847 */ 1848 if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP)) 1849 sp->cmn.altBbCredit = 1; 1850 1851 if (sp->cmn.fcphLow < FC_PH_4_3) 1852 sp->cmn.fcphLow = FC_PH_4_3; 1853 1854 if (sp->cmn.fcphHigh < FC_PH3) 1855 sp->cmn.fcphHigh = FC_PH3; 1856 1857 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1858 "Issue PLOGI: did:x%x", 1859 did, 0, 0); 1860 1861 phba->fc_stat.elsXmitPLOGI++; 1862 elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi; 1863 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 1864 1865 if (ret == IOCB_ERROR) { 1866 lpfc_els_free_iocb(phba, elsiocb); 1867 return 1; 1868 } 1869 return 0; 1870 } 1871 1872 /** 1873 * lpfc_cmpl_els_prli - Completion callback function for prli 1874 * @phba: pointer to lpfc hba data structure. 1875 * @cmdiocb: pointer to lpfc command iocb data structure. 1876 * @rspiocb: pointer to lpfc response iocb data structure. 1877 * 1878 * This routine is the completion callback function for a Process Login 1879 * (PRLI) ELS command. The PRLI response IOCB status is checked for error 1880 * status. If there is error status reported, PRLI retry shall be attempted 1881 * by invoking the lpfc_els_retry() routine. Otherwise, the state 1882 * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this 1883 * ndlp to mark the PRLI completion. 
1884 **/ 1885 static void 1886 lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1887 struct lpfc_iocbq *rspiocb) 1888 { 1889 struct lpfc_vport *vport = cmdiocb->vport; 1890 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1891 IOCB_t *irsp; 1892 struct lpfc_sli *psli; 1893 struct lpfc_nodelist *ndlp; 1894 1895 psli = &phba->sli; 1896 /* we pass cmdiocb to state machine which needs rspiocb as well */ 1897 cmdiocb->context_un.rsp_iocb = rspiocb; 1898 1899 irsp = &(rspiocb->iocb); 1900 ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 1901 spin_lock_irq(shost->host_lock); 1902 ndlp->nlp_flag &= ~NLP_PRLI_SND; 1903 spin_unlock_irq(shost->host_lock); 1904 1905 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1906 "PRLI cmpl: status:x%x/x%x did:x%x", 1907 irsp->ulpStatus, irsp->un.ulpWord[4], 1908 ndlp->nlp_DID); 1909 /* PRLI completes to NPort <nlp_DID> */ 1910 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1911 "0103 PRLI completes to NPort x%x " 1912 "Data: x%x x%x x%x x%x\n", 1913 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 1914 irsp->ulpTimeout, vport->num_disc_nodes); 1915 1916 vport->fc_prli_sent--; 1917 /* Check to see if link went down during discovery */ 1918 if (lpfc_els_chk_latt(vport)) 1919 goto out; 1920 1921 if (irsp->ulpStatus) { 1922 /* Check for retry */ 1923 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 1924 /* ELS command is being retried */ 1925 goto out; 1926 } 1927 /* PRLI failed */ 1928 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 1929 "2754 PRLI failure DID:%06X Status:x%x/x%x\n", 1930 ndlp->nlp_DID, irsp->ulpStatus, 1931 irsp->un.ulpWord[4]); 1932 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 1933 if (lpfc_error_lost_link(irsp)) 1934 goto out; 1935 else 1936 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1937 NLP_EVT_CMPL_PRLI); 1938 } else 1939 /* Good status, call state machine */ 1940 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1941 NLP_EVT_CMPL_PRLI); 1942 out: 1943 lpfc_els_free_iocb(phba, cmdiocb); 1944 return; 1945 } 1946 1947 /** 1948 * lpfc_issue_els_prli - Issue a prli iocb command for a vport 1949 * @vport: pointer to a host virtual N_Port data structure. 1950 * @ndlp: pointer to a node-list data structure. 1951 * @retry: number of retries to the command IOCB. 1952 * 1953 * This routine issues a Process Login (PRLI) ELS command for the 1954 * @vport. The PRLI service parameters are set up in the payload of the 1955 * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine 1956 * is put to the IOCB completion callback func field before invoking the 1957 * routine lpfc_sli_issue_iocb() to send out PRLI command. 1958 * 1959 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 1960 * will be incremented by 1 for holding the ndlp and the reference to ndlp 1961 * will be stored into the context1 field of the IOCB for the completion 1962 * callback function to the PRLI ELS command. 
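 *
 * A minimal calling sequence (illustrative only) mirrors the PLOGI case:
 * issue the PRLI against an existing, active ndlp and move it into
 * PRLI_ISSUE state when the issue succeeds:
 *
 *	if (!lpfc_issue_els_prli(vport, ndlp, 0)) {
 *		ndlp->nlp_prev_state = ndlp->nlp_state;
 *		lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
 *	}
 *
 * On success this routine also increments vport->fc_prli_sent, which the
 * PRLI completion handler decrements again.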
1963 * 1964 * Return code 1965 * 0 - successfully issued prli iocb command for @vport 1966 * 1 - failed to issue prli iocb command for @vport 1967 **/ 1968 int 1969 lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1970 uint8_t retry) 1971 { 1972 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1973 struct lpfc_hba *phba = vport->phba; 1974 PRLI *npr; 1975 IOCB_t *icmd; 1976 struct lpfc_iocbq *elsiocb; 1977 uint8_t *pcmd; 1978 uint16_t cmdsize; 1979 1980 cmdsize = (sizeof(uint32_t) + sizeof(PRLI)); 1981 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 1982 ndlp->nlp_DID, ELS_CMD_PRLI); 1983 if (!elsiocb) 1984 return 1; 1985 1986 icmd = &elsiocb->iocb; 1987 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 1988 1989 /* For PRLI request, remainder of payload is service parameters */ 1990 memset(pcmd, 0, (sizeof(PRLI) + sizeof(uint32_t))); 1991 *((uint32_t *) (pcmd)) = ELS_CMD_PRLI; 1992 pcmd += sizeof(uint32_t); 1993 1994 /* For PRLI, remainder of payload is PRLI parameter page */ 1995 npr = (PRLI *) pcmd; 1996 /* 1997 * If our firmware version is 3.20 or later, 1998 * set the following bits for FC-TAPE support. 1999 */ 2000 if (phba->vpd.rev.feaLevelHigh >= 0x02) { 2001 npr->ConfmComplAllowed = 1; 2002 npr->Retry = 1; 2003 npr->TaskRetryIdReq = 1; 2004 } 2005 npr->estabImagePair = 1; 2006 npr->readXferRdyDis = 1; 2007 2008 /* For FCP support */ 2009 npr->prliType = PRLI_FCP_TYPE; 2010 npr->initiatorFunc = 1; 2011 2012 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2013 "Issue PRLI: did:x%x", 2014 ndlp->nlp_DID, 0, 0); 2015 2016 phba->fc_stat.elsXmitPRLI++; 2017 elsiocb->iocb_cmpl = lpfc_cmpl_els_prli; 2018 spin_lock_irq(shost->host_lock); 2019 ndlp->nlp_flag |= NLP_PRLI_SND; 2020 spin_unlock_irq(shost->host_lock); 2021 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == 2022 IOCB_ERROR) { 2023 spin_lock_irq(shost->host_lock); 2024 ndlp->nlp_flag &= ~NLP_PRLI_SND; 2025 spin_unlock_irq(shost->host_lock); 2026 lpfc_els_free_iocb(phba, elsiocb); 2027 return 1; 2028 } 2029 vport->fc_prli_sent++; 2030 return 0; 2031 } 2032 2033 /** 2034 * lpfc_rscn_disc - Perform rscn discovery for a vport 2035 * @vport: pointer to a host virtual N_Port data structure. 2036 * 2037 * This routine performs Registration State Change Notification (RSCN) 2038 * discovery for a @vport. If the @vport's node port recovery count is not 2039 * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all 2040 * the nodes that need recovery. If none of the PLOGI were needed through 2041 * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be 2042 * invoked to check and handle possible more RSCN came in during the period 2043 * of processing the current ones. 2044 **/ 2045 static void 2046 lpfc_rscn_disc(struct lpfc_vport *vport) 2047 { 2048 lpfc_can_disctmo(vport); 2049 2050 /* RSCN discovery */ 2051 /* go thru NPR nodes and issue ELS PLOGIs */ 2052 if (vport->fc_npr_cnt) 2053 if (lpfc_els_disc_plogi(vport)) 2054 return; 2055 2056 lpfc_end_rscn(vport); 2057 } 2058 2059 /** 2060 * lpfc_adisc_done - Complete the adisc phase of discovery 2061 * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs. 2062 * 2063 * This function is called when the final ADISC is completed during discovery. 2064 * This function handles clearing link attention or issuing reg_vpi depending 2065 * on whether npiv is enabled. This function also kicks off the PLOGI phase of 2066 * discovery. 
2067 * This function is called with no locks held. 2068 **/ 2069 static void 2070 lpfc_adisc_done(struct lpfc_vport *vport) 2071 { 2072 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2073 struct lpfc_hba *phba = vport->phba; 2074 2075 /* 2076 * For NPIV, cmpl_reg_vpi will set port_state to READY, 2077 * and continue discovery. 2078 */ 2079 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 2080 !(vport->fc_flag & FC_RSCN_MODE) && 2081 (phba->sli_rev < LPFC_SLI_REV4)) { 2082 lpfc_issue_reg_vpi(phba, vport); 2083 return; 2084 } 2085 /* 2086 * For SLI2, we need to set port_state to READY 2087 * and continue discovery. 2088 */ 2089 if (vport->port_state < LPFC_VPORT_READY) { 2090 /* If we get here, there is nothing to ADISC */ 2091 if (vport->port_type == LPFC_PHYSICAL_PORT) 2092 lpfc_issue_clear_la(phba, vport); 2093 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) { 2094 vport->num_disc_nodes = 0; 2095 /* go thru NPR list, issue ELS PLOGIs */ 2096 if (vport->fc_npr_cnt) 2097 lpfc_els_disc_plogi(vport); 2098 if (!vport->num_disc_nodes) { 2099 spin_lock_irq(shost->host_lock); 2100 vport->fc_flag &= ~FC_NDISC_ACTIVE; 2101 spin_unlock_irq(shost->host_lock); 2102 lpfc_can_disctmo(vport); 2103 lpfc_end_rscn(vport); 2104 } 2105 } 2106 vport->port_state = LPFC_VPORT_READY; 2107 } else 2108 lpfc_rscn_disc(vport); 2109 } 2110 2111 /** 2112 * lpfc_more_adisc - Issue more adisc as needed 2113 * @vport: pointer to a host virtual N_Port data structure. 2114 * 2115 * This routine determines whether there are more ndlps on a @vport 2116 * node list need to have Address Discover (ADISC) issued. If so, it will 2117 * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's 2118 * remaining nodes which need to have ADISC sent. 2119 **/ 2120 void 2121 lpfc_more_adisc(struct lpfc_vport *vport) 2122 { 2123 int sentadisc; 2124 2125 if (vport->num_disc_nodes) 2126 vport->num_disc_nodes--; 2127 /* Continue discovery with <num_disc_nodes> ADISCs to go */ 2128 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2129 "0210 Continue discovery with %d ADISCs to go " 2130 "Data: x%x x%x x%x\n", 2131 vport->num_disc_nodes, vport->fc_adisc_cnt, 2132 vport->fc_flag, vport->port_state); 2133 /* Check to see if there are more ADISCs to be sent */ 2134 if (vport->fc_flag & FC_NLP_MORE) { 2135 lpfc_set_disctmo(vport); 2136 /* go thru NPR nodes and issue any remaining ELS ADISCs */ 2137 sentadisc = lpfc_els_disc_adisc(vport); 2138 } 2139 if (!vport->num_disc_nodes) 2140 lpfc_adisc_done(vport); 2141 return; 2142 } 2143 2144 /** 2145 * lpfc_cmpl_els_adisc - Completion callback function for adisc 2146 * @phba: pointer to lpfc hba data structure. 2147 * @cmdiocb: pointer to lpfc command iocb data structure. 2148 * @rspiocb: pointer to lpfc response iocb data structure. 2149 * 2150 * This routine is the completion function for issuing the Address Discover 2151 * (ADISC) command. It first checks to see whether link went down during 2152 * the discovery process. If so, the node will be marked as node port 2153 * recovery for issuing discover IOCB by the link attention handler and 2154 * exit. Otherwise, the response status is checked. If error was reported 2155 * in the response status, the ADISC command shall be retried by invoking 2156 * the lpfc_els_retry() routine. Otherwise, if no error was reported in 2157 * the response status, the state machine is invoked to set transition 2158 * with respect to NLP_EVT_CMPL_ADISC event. 
2159 **/ 2160 static void 2161 lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2162 struct lpfc_iocbq *rspiocb) 2163 { 2164 struct lpfc_vport *vport = cmdiocb->vport; 2165 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2166 IOCB_t *irsp; 2167 struct lpfc_nodelist *ndlp; 2168 int disc; 2169 2170 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2171 cmdiocb->context_un.rsp_iocb = rspiocb; 2172 2173 irsp = &(rspiocb->iocb); 2174 ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 2175 2176 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2177 "ADISC cmpl: status:x%x/x%x did:x%x", 2178 irsp->ulpStatus, irsp->un.ulpWord[4], 2179 ndlp->nlp_DID); 2180 2181 /* Since ndlp can be freed in the disc state machine, note if this node 2182 * is being used during discovery. 2183 */ 2184 spin_lock_irq(shost->host_lock); 2185 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 2186 ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC); 2187 spin_unlock_irq(shost->host_lock); 2188 /* ADISC completes to NPort <nlp_DID> */ 2189 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2190 "0104 ADISC completes to NPort x%x " 2191 "Data: x%x x%x x%x x%x x%x\n", 2192 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 2193 irsp->ulpTimeout, disc, vport->num_disc_nodes); 2194 /* Check to see if link went down during discovery */ 2195 if (lpfc_els_chk_latt(vport)) { 2196 spin_lock_irq(shost->host_lock); 2197 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2198 spin_unlock_irq(shost->host_lock); 2199 goto out; 2200 } 2201 2202 if (irsp->ulpStatus) { 2203 /* Check for retry */ 2204 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2205 /* ELS command is being retried */ 2206 if (disc) { 2207 spin_lock_irq(shost->host_lock); 2208 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2209 spin_unlock_irq(shost->host_lock); 2210 lpfc_set_disctmo(vport); 2211 } 2212 goto out; 2213 } 2214 /* ADISC failed */ 2215 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 2216 "2755 ADISC failure DID:%06X Status:x%x/x%x\n", 2217 ndlp->nlp_DID, irsp->ulpStatus, 2218 irsp->un.ulpWord[4]); 2219 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2220 if (!lpfc_error_lost_link(irsp)) 2221 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2222 NLP_EVT_CMPL_ADISC); 2223 } else 2224 /* Good status, call state machine */ 2225 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2226 NLP_EVT_CMPL_ADISC); 2227 2228 /* Check to see if there are more ADISCs to be sent */ 2229 if (disc && vport->num_disc_nodes) 2230 lpfc_more_adisc(vport); 2231 out: 2232 lpfc_els_free_iocb(phba, cmdiocb); 2233 return; 2234 } 2235 2236 /** 2237 * lpfc_issue_els_adisc - Issue an address discover iocb to an node on a vport 2238 * @vport: pointer to a virtual N_Port data structure. 2239 * @ndlp: pointer to a node-list data structure. 2240 * @retry: number of retries to the command IOCB. 2241 * 2242 * This routine issues an Address Discover (ADISC) for an @ndlp on a 2243 * @vport. It prepares the payload of the ADISC ELS command, updates the 2244 * and states of the ndlp, and invokes the lpfc_sli_issue_iocb() routine 2245 * to issue the ADISC ELS command. 2246 * 2247 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 2248 * will be incremented by 1 for holding the ndlp and the reference to ndlp 2249 * will be stored into the context1 field of the IOCB for the completion 2250 * callback function to the ADISC ELS command. 
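 *
 * This routine sets NLP_ADISC_SND on the ndlp while the ADISC is
 * outstanding and clears it again itself if the IOCB submission fails, so
 * a caller only needs to check the return code. A rough sketch
 * (illustrative only), following the immediate-retry path later in this
 * file, is:
 *
 *	ndlp->nlp_prev_state = ndlp->nlp_state;
 *	lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
 *	lpfc_issue_els_adisc(vport, ndlp, 0);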
2251 * 2252 * Return code 2253 * 0 - successfully issued adisc 2254 * 1 - failed to issue adisc 2255 **/ 2256 int 2257 lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2258 uint8_t retry) 2259 { 2260 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2261 struct lpfc_hba *phba = vport->phba; 2262 ADISC *ap; 2263 IOCB_t *icmd; 2264 struct lpfc_iocbq *elsiocb; 2265 uint8_t *pcmd; 2266 uint16_t cmdsize; 2267 2268 cmdsize = (sizeof(uint32_t) + sizeof(ADISC)); 2269 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2270 ndlp->nlp_DID, ELS_CMD_ADISC); 2271 if (!elsiocb) 2272 return 1; 2273 2274 icmd = &elsiocb->iocb; 2275 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2276 2277 /* For ADISC request, remainder of payload is service parameters */ 2278 *((uint32_t *) (pcmd)) = ELS_CMD_ADISC; 2279 pcmd += sizeof(uint32_t); 2280 2281 /* Fill in ADISC payload */ 2282 ap = (ADISC *) pcmd; 2283 ap->hardAL_PA = phba->fc_pref_ALPA; 2284 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 2285 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 2286 ap->DID = be32_to_cpu(vport->fc_myDID); 2287 2288 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2289 "Issue ADISC: did:x%x", 2290 ndlp->nlp_DID, 0, 0); 2291 2292 phba->fc_stat.elsXmitADISC++; 2293 elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc; 2294 spin_lock_irq(shost->host_lock); 2295 ndlp->nlp_flag |= NLP_ADISC_SND; 2296 spin_unlock_irq(shost->host_lock); 2297 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == 2298 IOCB_ERROR) { 2299 spin_lock_irq(shost->host_lock); 2300 ndlp->nlp_flag &= ~NLP_ADISC_SND; 2301 spin_unlock_irq(shost->host_lock); 2302 lpfc_els_free_iocb(phba, elsiocb); 2303 return 1; 2304 } 2305 return 0; 2306 } 2307 2308 /** 2309 * lpfc_cmpl_els_logo - Completion callback function for logo 2310 * @phba: pointer to lpfc hba data structure. 2311 * @cmdiocb: pointer to lpfc command iocb data structure. 2312 * @rspiocb: pointer to lpfc response iocb data structure. 2313 * 2314 * This routine is the completion function for issuing the ELS Logout (LOGO) 2315 * command. If no error status was reported from the LOGO response, the 2316 * state machine of the associated ndlp shall be invoked for transition with 2317 * respect to NLP_EVT_CMPL_LOGO event. Otherwise, if error status was reported, 2318 * the lpfc_els_retry() routine will be invoked to retry the LOGO command. 
2319 **/ 2320 static void 2321 lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2322 struct lpfc_iocbq *rspiocb) 2323 { 2324 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 2325 struct lpfc_vport *vport = ndlp->vport; 2326 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2327 IOCB_t *irsp; 2328 struct lpfc_sli *psli; 2329 struct lpfcMboxq *mbox; 2330 2331 psli = &phba->sli; 2332 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2333 cmdiocb->context_un.rsp_iocb = rspiocb; 2334 2335 irsp = &(rspiocb->iocb); 2336 spin_lock_irq(shost->host_lock); 2337 ndlp->nlp_flag &= ~NLP_LOGO_SND; 2338 spin_unlock_irq(shost->host_lock); 2339 2340 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2341 "LOGO cmpl: status:x%x/x%x did:x%x", 2342 irsp->ulpStatus, irsp->un.ulpWord[4], 2343 ndlp->nlp_DID); 2344 /* LOGO completes to NPort <nlp_DID> */ 2345 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2346 "0105 LOGO completes to NPort x%x " 2347 "Data: x%x x%x x%x x%x\n", 2348 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 2349 irsp->ulpTimeout, vport->num_disc_nodes); 2350 /* Check to see if link went down during discovery */ 2351 if (lpfc_els_chk_latt(vport)) 2352 goto out; 2353 2354 if (ndlp->nlp_flag & NLP_TARGET_REMOVE) { 2355 /* NLP_EVT_DEVICE_RM should unregister the RPI 2356 * which should abort all outstanding IOs. 2357 */ 2358 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2359 NLP_EVT_DEVICE_RM); 2360 goto out; 2361 } 2362 2363 if (irsp->ulpStatus) { 2364 /* Check for retry */ 2365 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) 2366 /* ELS command is being retried */ 2367 goto out; 2368 /* LOGO failed */ 2369 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 2370 "2756 LOGO failure DID:%06X Status:x%x/x%x\n", 2371 ndlp->nlp_DID, irsp->ulpStatus, 2372 irsp->un.ulpWord[4]); 2373 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2374 if (lpfc_error_lost_link(irsp)) 2375 goto out; 2376 else 2377 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2378 NLP_EVT_CMPL_LOGO); 2379 } else 2380 /* Good status, call state machine. 2381 * This will unregister the rpi if needed. 2382 */ 2383 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2384 NLP_EVT_CMPL_LOGO); 2385 out: 2386 lpfc_els_free_iocb(phba, cmdiocb); 2387 /* If we are in pt2pt mode, we could rcv new S_ID on PLOGI */ 2388 if ((vport->fc_flag & FC_PT2PT) && 2389 !(vport->fc_flag & FC_PT2PT_PLOGI)) { 2390 phba->pport->fc_myDID = 0; 2391 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2392 if (mbox) { 2393 lpfc_config_link(phba, mbox); 2394 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 2395 mbox->vport = vport; 2396 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == 2397 MBX_NOT_FINISHED) { 2398 mempool_free(mbox, phba->mbox_mem_pool); 2399 } 2400 } 2401 } 2402 return; 2403 } 2404 2405 /** 2406 * lpfc_issue_els_logo - Issue a logo to an node on a vport 2407 * @vport: pointer to a virtual N_Port data structure. 2408 * @ndlp: pointer to a node-list data structure. 2409 * @retry: number of retries to the command IOCB. 2410 * 2411 * This routine constructs and issues an ELS Logout (LOGO) iocb command 2412 * to a remote node, referred by an @ndlp on a @vport. It constructs the 2413 * payload of the IOCB, properly sets up the @ndlp state, and invokes the 2414 * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command. 
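 *
 * If a LOGO is already pending on the @ndlp (NLP_LOGO_SND set), this
 * routine returns 0 without sending a second LOGO, so callers need not
 * check the flag first. An illustrative call (the message number below is
 * a placeholder, not a real log ID) is simply:
 *
 *	if (lpfc_issue_els_logo(vport, ndlp, 0))
 *		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
 *				 "xxxx LOGO issue to x%x failed\n",
 *				 ndlp->nlp_DID);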
2415 * 2416 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 2417 * will be incremented by 1 for holding the ndlp and the reference to ndlp 2418 * will be stored into the context1 field of the IOCB for the completion 2419 * callback function to the LOGO ELS command. 2420 * 2421 * Return code 2422 * 0 - successfully issued logo 2423 * 1 - failed to issue logo 2424 **/ 2425 int 2426 lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2427 uint8_t retry) 2428 { 2429 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2430 struct lpfc_hba *phba = vport->phba; 2431 IOCB_t *icmd; 2432 struct lpfc_iocbq *elsiocb; 2433 uint8_t *pcmd; 2434 uint16_t cmdsize; 2435 int rc; 2436 2437 spin_lock_irq(shost->host_lock); 2438 if (ndlp->nlp_flag & NLP_LOGO_SND) { 2439 spin_unlock_irq(shost->host_lock); 2440 return 0; 2441 } 2442 spin_unlock_irq(shost->host_lock); 2443 2444 cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name); 2445 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2446 ndlp->nlp_DID, ELS_CMD_LOGO); 2447 if (!elsiocb) 2448 return 1; 2449 2450 icmd = &elsiocb->iocb; 2451 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2452 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 2453 pcmd += sizeof(uint32_t); 2454 2455 /* Fill in LOGO payload */ 2456 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 2457 pcmd += sizeof(uint32_t); 2458 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 2459 2460 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2461 "Issue LOGO: did:x%x", 2462 ndlp->nlp_DID, 0, 0); 2463 2464 phba->fc_stat.elsXmitLOGO++; 2465 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo; 2466 spin_lock_irq(shost->host_lock); 2467 ndlp->nlp_flag |= NLP_LOGO_SND; 2468 spin_unlock_irq(shost->host_lock); 2469 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2470 2471 if (rc == IOCB_ERROR) { 2472 spin_lock_irq(shost->host_lock); 2473 ndlp->nlp_flag &= ~NLP_LOGO_SND; 2474 spin_unlock_irq(shost->host_lock); 2475 lpfc_els_free_iocb(phba, elsiocb); 2476 return 1; 2477 } 2478 return 0; 2479 } 2480 2481 /** 2482 * lpfc_cmpl_els_cmd - Completion callback function for generic els command 2483 * @phba: pointer to lpfc hba data structure. 2484 * @cmdiocb: pointer to lpfc command iocb data structure. 2485 * @rspiocb: pointer to lpfc response iocb data structure. 2486 * 2487 * This routine is a generic completion callback function for ELS commands. 2488 * Specifically, it is the callback function which does not need to perform 2489 * any command specific operations. It is currently used by the ELS command 2490 * issuing routines for the ELS State Change Request (SCR), 2491 * lpfc_issue_els_scr(), and the ELS Fibre Channel Address Resolution 2492 * Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). Other than 2493 * certain debug loggings, this callback function simply invokes the 2494 * lpfc_els_chk_latt() routine to check whether link went down during the 2495 * discovery process. 
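 *
 * Issuing routines that need no command-specific completion handling (the
 * SCR and FARPR paths below, for example) opt into this handler simply by
 * assigning it before the IOCB is submitted, roughly (submit error
 * handling omitted here for brevity):
 *
 *	elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
 *	lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);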
2496 **/
2497 static void
2498 lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2499 		  struct lpfc_iocbq *rspiocb)
2500 {
2501 	struct lpfc_vport *vport = cmdiocb->vport;
2502 	IOCB_t *irsp;
2503
2504 	irsp = &rspiocb->iocb;
2505
2506 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2507 			      "ELS cmd cmpl: status:x%x/x%x did:x%x",
2508 			      irsp->ulpStatus, irsp->un.ulpWord[4],
2509 			      irsp->un.elsreq64.remoteID);
2510 	/* ELS cmd tag <ulpIoTag> completes */
2511 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2512 			 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n",
2513 			 irsp->ulpIoTag, irsp->ulpStatus,
2514 			 irsp->un.ulpWord[4], irsp->ulpTimeout);
2515 	/* Check to see if link went down during discovery */
2516 	lpfc_els_chk_latt(vport);
2517 	lpfc_els_free_iocb(phba, cmdiocb);
2518 	return;
2519 }
2520
2521 /**
2522 * lpfc_issue_els_scr - Issue a scr to a node on a vport
2523 * @vport: pointer to a host virtual N_Port data structure.
2524 * @nportid: N_Port identifier to the remote node.
2525 * @retry: number of retries to the command IOCB.
2526 *
2527 * This routine issues a State Change Request (SCR) to a fabric node
2528 * on a @vport. The remote node @nportid is passed into the function. It
2529 * first searches the @vport node list to find the matching ndlp. If no such
2530 * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An
2531 * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb()
2532 * routine is invoked to send the SCR IOCB.
2533 *
2534 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2535 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2536 * will be stored into the context1 field of the IOCB for the completion
2537 * callback function to the SCR ELS command.
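 *
 * SCR is normally directed at the Fabric Controller well-known address
 * 0xFFFFFD so that the fabric will deliver RSCNs to this port. A rough,
 * illustrative call (the literal DID stands in for whatever constant a
 * caller uses for that well-known address, and the message number is a
 * placeholder) is:
 *
 *	if (lpfc_issue_els_scr(vport, 0xFFFFFD, 0))
 *		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
 *				 "xxxx SCR issue failed\n");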
2538 * 2539 * Return code 2540 * 0 - Successfully issued scr command 2541 * 1 - Failed to issue scr command 2542 **/ 2543 int 2544 lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) 2545 { 2546 struct lpfc_hba *phba = vport->phba; 2547 IOCB_t *icmd; 2548 struct lpfc_iocbq *elsiocb; 2549 struct lpfc_sli *psli; 2550 uint8_t *pcmd; 2551 uint16_t cmdsize; 2552 struct lpfc_nodelist *ndlp; 2553 2554 psli = &phba->sli; 2555 cmdsize = (sizeof(uint32_t) + sizeof(SCR)); 2556 2557 ndlp = lpfc_findnode_did(vport, nportid); 2558 if (!ndlp) { 2559 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 2560 if (!ndlp) 2561 return 1; 2562 lpfc_nlp_init(vport, ndlp, nportid); 2563 lpfc_enqueue_node(vport, ndlp); 2564 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 2565 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 2566 if (!ndlp) 2567 return 1; 2568 } 2569 2570 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2571 ndlp->nlp_DID, ELS_CMD_SCR); 2572 2573 if (!elsiocb) { 2574 /* This will trigger the release of the node just 2575 * allocated 2576 */ 2577 lpfc_nlp_put(ndlp); 2578 return 1; 2579 } 2580 2581 icmd = &elsiocb->iocb; 2582 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2583 2584 *((uint32_t *) (pcmd)) = ELS_CMD_SCR; 2585 pcmd += sizeof(uint32_t); 2586 2587 /* For SCR, remainder of payload is SCR parameter page */ 2588 memset(pcmd, 0, sizeof(SCR)); 2589 ((SCR *) pcmd)->Function = SCR_FUNC_FULL; 2590 2591 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2592 "Issue SCR: did:x%x", 2593 ndlp->nlp_DID, 0, 0); 2594 2595 phba->fc_stat.elsXmitSCR++; 2596 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; 2597 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == 2598 IOCB_ERROR) { 2599 /* The additional lpfc_nlp_put will cause the following 2600 * lpfc_els_free_iocb routine to trigger the rlease of 2601 * the node. 2602 */ 2603 lpfc_nlp_put(ndlp); 2604 lpfc_els_free_iocb(phba, elsiocb); 2605 return 1; 2606 } 2607 /* This will cause the callback-function lpfc_cmpl_els_cmd to 2608 * trigger the release of node. 2609 */ 2610 lpfc_nlp_put(ndlp); 2611 return 0; 2612 } 2613 2614 /** 2615 * lpfc_issue_els_farpr - Issue a farp to an node on a vport 2616 * @vport: pointer to a host virtual N_Port data structure. 2617 * @nportid: N_Port identifier to the remote node. 2618 * @retry: number of retries to the command IOCB. 2619 * 2620 * This routine issues a Fibre Channel Address Resolution Response 2621 * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid) 2622 * is passed into the function. It first search the @vport node list to find 2623 * the matching ndlp. If no such ndlp is found, a new ndlp shall be created 2624 * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the 2625 * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command. 2626 * 2627 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 2628 * will be incremented by 1 for holding the ndlp and the reference to ndlp 2629 * will be stored into the context1 field of the IOCB for the completion 2630 * callback function to the PARPR ELS command. 
2631 * 2632 * Return code 2633 * 0 - Successfully issued farpr command 2634 * 1 - Failed to issue farpr command 2635 **/ 2636 static int 2637 lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) 2638 { 2639 struct lpfc_hba *phba = vport->phba; 2640 IOCB_t *icmd; 2641 struct lpfc_iocbq *elsiocb; 2642 struct lpfc_sli *psli; 2643 FARP *fp; 2644 uint8_t *pcmd; 2645 uint32_t *lp; 2646 uint16_t cmdsize; 2647 struct lpfc_nodelist *ondlp; 2648 struct lpfc_nodelist *ndlp; 2649 2650 psli = &phba->sli; 2651 cmdsize = (sizeof(uint32_t) + sizeof(FARP)); 2652 2653 ndlp = lpfc_findnode_did(vport, nportid); 2654 if (!ndlp) { 2655 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 2656 if (!ndlp) 2657 return 1; 2658 lpfc_nlp_init(vport, ndlp, nportid); 2659 lpfc_enqueue_node(vport, ndlp); 2660 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 2661 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 2662 if (!ndlp) 2663 return 1; 2664 } 2665 2666 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2667 ndlp->nlp_DID, ELS_CMD_RNID); 2668 if (!elsiocb) { 2669 /* This will trigger the release of the node just 2670 * allocated 2671 */ 2672 lpfc_nlp_put(ndlp); 2673 return 1; 2674 } 2675 2676 icmd = &elsiocb->iocb; 2677 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2678 2679 *((uint32_t *) (pcmd)) = ELS_CMD_FARPR; 2680 pcmd += sizeof(uint32_t); 2681 2682 /* Fill in FARPR payload */ 2683 fp = (FARP *) (pcmd); 2684 memset(fp, 0, sizeof(FARP)); 2685 lp = (uint32_t *) pcmd; 2686 *lp++ = be32_to_cpu(nportid); 2687 *lp++ = be32_to_cpu(vport->fc_myDID); 2688 fp->Rflags = 0; 2689 fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE); 2690 2691 memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name)); 2692 memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 2693 ondlp = lpfc_findnode_did(vport, nportid); 2694 if (ondlp && NLP_CHK_NODE_ACT(ondlp)) { 2695 memcpy(&fp->OportName, &ondlp->nlp_portname, 2696 sizeof(struct lpfc_name)); 2697 memcpy(&fp->OnodeName, &ondlp->nlp_nodename, 2698 sizeof(struct lpfc_name)); 2699 } 2700 2701 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2702 "Issue FARPR: did:x%x", 2703 ndlp->nlp_DID, 0, 0); 2704 2705 phba->fc_stat.elsXmitFARPR++; 2706 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; 2707 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == 2708 IOCB_ERROR) { 2709 /* The additional lpfc_nlp_put will cause the following 2710 * lpfc_els_free_iocb routine to trigger the release of 2711 * the node. 2712 */ 2713 lpfc_nlp_put(ndlp); 2714 lpfc_els_free_iocb(phba, elsiocb); 2715 return 1; 2716 } 2717 /* This will cause the callback-function lpfc_cmpl_els_cmd to 2718 * trigger the release of the node. 2719 */ 2720 lpfc_nlp_put(ndlp); 2721 return 0; 2722 } 2723 2724 /** 2725 * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry 2726 * @vport: pointer to a host virtual N_Port data structure. 2727 * @nlp: pointer to a node-list data structure. 2728 * 2729 * This routine cancels the timer with a delayed IOCB-command retry for 2730 * a @vport's @ndlp. It stops the timer for the delayed function retrial and 2731 * removes the ELS retry event if it presents. In addition, if the 2732 * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB 2733 * commands are sent for the @vport's nodes that require issuing discovery 2734 * ADISC. 
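 *
 * The routine returns immediately when NLP_DELAY_TMO is not set, so the
 * flag check below is only an optimization; a caller that is about to
 * reuse or tear down a node typically cancels any pending delayed retry
 * first, for example (illustrative only):
 *
 *	if (ndlp->nlp_flag & NLP_DELAY_TMO)
 *		lpfc_cancel_retry_delay_tmo(vport, ndlp);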
2735 **/ 2736 void 2737 lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) 2738 { 2739 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2740 struct lpfc_work_evt *evtp; 2741 2742 if (!(nlp->nlp_flag & NLP_DELAY_TMO)) 2743 return; 2744 spin_lock_irq(shost->host_lock); 2745 nlp->nlp_flag &= ~NLP_DELAY_TMO; 2746 spin_unlock_irq(shost->host_lock); 2747 del_timer_sync(&nlp->nlp_delayfunc); 2748 nlp->nlp_last_elscmd = 0; 2749 if (!list_empty(&nlp->els_retry_evt.evt_listp)) { 2750 list_del_init(&nlp->els_retry_evt.evt_listp); 2751 /* Decrement nlp reference count held for the delayed retry */ 2752 evtp = &nlp->els_retry_evt; 2753 lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1); 2754 } 2755 if (nlp->nlp_flag & NLP_NPR_2B_DISC) { 2756 spin_lock_irq(shost->host_lock); 2757 nlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2758 spin_unlock_irq(shost->host_lock); 2759 if (vport->num_disc_nodes) { 2760 if (vport->port_state < LPFC_VPORT_READY) { 2761 /* Check if there are more ADISCs to be sent */ 2762 lpfc_more_adisc(vport); 2763 } else { 2764 /* Check if there are more PLOGIs to be sent */ 2765 lpfc_more_plogi(vport); 2766 if (vport->num_disc_nodes == 0) { 2767 spin_lock_irq(shost->host_lock); 2768 vport->fc_flag &= ~FC_NDISC_ACTIVE; 2769 spin_unlock_irq(shost->host_lock); 2770 lpfc_can_disctmo(vport); 2771 lpfc_end_rscn(vport); 2772 } 2773 } 2774 } 2775 } 2776 return; 2777 } 2778 2779 /** 2780 * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer 2781 * @ptr: holder for the pointer to the timer function associated data (ndlp). 2782 * 2783 * This routine is invoked by the ndlp delayed-function timer to check 2784 * whether there is any pending ELS retry event(s) with the node. If not, it 2785 * simply returns. Otherwise, if there is at least one ELS delayed event, it 2786 * adds the delayed events to the HBA work list and invokes the 2787 * lpfc_worker_wake_up() routine to wake up worker thread to process the 2788 * event. Note that lpfc_nlp_get() is called before posting the event to 2789 * the work list to hold reference count of ndlp so that it guarantees the 2790 * reference to ndlp will still be available when the worker thread gets 2791 * to the event associated with the ndlp. 2792 **/ 2793 void 2794 lpfc_els_retry_delay(unsigned long ptr) 2795 { 2796 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr; 2797 struct lpfc_vport *vport = ndlp->vport; 2798 struct lpfc_hba *phba = vport->phba; 2799 unsigned long flags; 2800 struct lpfc_work_evt *evtp = &ndlp->els_retry_evt; 2801 2802 spin_lock_irqsave(&phba->hbalock, flags); 2803 if (!list_empty(&evtp->evt_listp)) { 2804 spin_unlock_irqrestore(&phba->hbalock, flags); 2805 return; 2806 } 2807 2808 /* We need to hold the node by incrementing the reference 2809 * count until the queued work is done 2810 */ 2811 evtp->evt_arg1 = lpfc_nlp_get(ndlp); 2812 if (evtp->evt_arg1) { 2813 evtp->evt = LPFC_EVT_ELS_RETRY; 2814 list_add_tail(&evtp->evt_listp, &phba->work_list); 2815 lpfc_worker_wake_up(phba); 2816 } 2817 spin_unlock_irqrestore(&phba->hbalock, flags); 2818 return; 2819 } 2820 2821 /** 2822 * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function 2823 * @ndlp: pointer to a node-list data structure. 2824 * 2825 * This routine is the worker-thread handler for processing the @ndlp delayed 2826 * event(s), posted by the lpfc_els_retry_delay() routine. 
It simply retrieves 2827 * the last ELS command from the associated ndlp and invokes the proper ELS 2828 * function according to the delayed ELS command to retry the command. 2829 **/ 2830 void 2831 lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) 2832 { 2833 struct lpfc_vport *vport = ndlp->vport; 2834 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2835 uint32_t cmd, did, retry; 2836 2837 spin_lock_irq(shost->host_lock); 2838 did = ndlp->nlp_DID; 2839 cmd = ndlp->nlp_last_elscmd; 2840 ndlp->nlp_last_elscmd = 0; 2841 2842 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { 2843 spin_unlock_irq(shost->host_lock); 2844 return; 2845 } 2846 2847 ndlp->nlp_flag &= ~NLP_DELAY_TMO; 2848 spin_unlock_irq(shost->host_lock); 2849 /* 2850 * If a discovery event readded nlp_delayfunc after timer 2851 * firing and before processing the timer, cancel the 2852 * nlp_delayfunc. 2853 */ 2854 del_timer_sync(&ndlp->nlp_delayfunc); 2855 retry = ndlp->nlp_retry; 2856 ndlp->nlp_retry = 0; 2857 2858 switch (cmd) { 2859 case ELS_CMD_FLOGI: 2860 lpfc_issue_els_flogi(vport, ndlp, retry); 2861 break; 2862 case ELS_CMD_PLOGI: 2863 if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) { 2864 ndlp->nlp_prev_state = ndlp->nlp_state; 2865 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 2866 } 2867 break; 2868 case ELS_CMD_ADISC: 2869 if (!lpfc_issue_els_adisc(vport, ndlp, retry)) { 2870 ndlp->nlp_prev_state = ndlp->nlp_state; 2871 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 2872 } 2873 break; 2874 case ELS_CMD_PRLI: 2875 if (!lpfc_issue_els_prli(vport, ndlp, retry)) { 2876 ndlp->nlp_prev_state = ndlp->nlp_state; 2877 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 2878 } 2879 break; 2880 case ELS_CMD_LOGO: 2881 if (!lpfc_issue_els_logo(vport, ndlp, retry)) { 2882 ndlp->nlp_prev_state = ndlp->nlp_state; 2883 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 2884 } 2885 break; 2886 case ELS_CMD_FDISC: 2887 if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)) 2888 lpfc_issue_els_fdisc(vport, ndlp, retry); 2889 break; 2890 } 2891 return; 2892 } 2893 2894 /** 2895 * lpfc_els_retry - Make retry decision on an els command iocb 2896 * @phba: pointer to lpfc hba data structure. 2897 * @cmdiocb: pointer to lpfc command iocb data structure. 2898 * @rspiocb: pointer to lpfc response iocb data structure. 2899 * 2900 * This routine makes a retry decision on an ELS command IOCB, which has 2901 * failed. The following ELS IOCBs use this function for retrying the command 2902 * when previously issued command responsed with error status: FLOGI, PLOGI, 2903 * PRLI, ADISC, LOGO, and FDISC. Based on the ELS command type and the 2904 * returned error status, it makes the decision whether a retry shall be 2905 * issued for the command, and whether a retry shall be made immediately or 2906 * delayed. In the former case, the corresponding ELS command issuing-function 2907 * is called to retry the command. In the later case, the ELS command shall 2908 * be posted to the ndlp delayed event and delayed function timer set to the 2909 * ndlp for the delayed command issusing. 
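 *
 * The usual pattern in the completion handlers above is to consult this
 * routine first and only feed the failure into the discovery state
 * machine when no retry will be made, for example (sketch of the PLOGI
 * case):
 *
 *	if (irsp->ulpStatus) {
 *		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
 *			goto out;
 *		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
 *					NLP_EVT_CMPL_PLOGI);
 *	}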
2910 * 2911 * Return code 2912 * 0 - No retry of els command is made 2913 * 1 - Immediate or delayed retry of els command is made 2914 **/ 2915 static int 2916 lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2917 struct lpfc_iocbq *rspiocb) 2918 { 2919 struct lpfc_vport *vport = cmdiocb->vport; 2920 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2921 IOCB_t *irsp = &rspiocb->iocb; 2922 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 2923 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 2924 uint32_t *elscmd; 2925 struct ls_rjt stat; 2926 int retry = 0, maxretry = lpfc_max_els_tries, delay = 0; 2927 int logerr = 0; 2928 uint32_t cmd = 0; 2929 uint32_t did; 2930 2931 2932 /* Note: context2 may be 0 for internal driver abort 2933 * of delays ELS command. 2934 */ 2935 2936 if (pcmd && pcmd->virt) { 2937 elscmd = (uint32_t *) (pcmd->virt); 2938 cmd = *elscmd++; 2939 } 2940 2941 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) 2942 did = ndlp->nlp_DID; 2943 else { 2944 /* We should only hit this case for retrying PLOGI */ 2945 did = irsp->un.elsreq64.remoteID; 2946 ndlp = lpfc_findnode_did(vport, did); 2947 if ((!ndlp || !NLP_CHK_NODE_ACT(ndlp)) 2948 && (cmd != ELS_CMD_PLOGI)) 2949 return 1; 2950 } 2951 2952 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2953 "Retry ELS: wd7:x%x wd4:x%x did:x%x", 2954 *(((uint32_t *) irsp) + 7), irsp->un.ulpWord[4], ndlp->nlp_DID); 2955 2956 switch (irsp->ulpStatus) { 2957 case IOSTAT_FCP_RSP_ERROR: 2958 break; 2959 case IOSTAT_REMOTE_STOP: 2960 if (phba->sli_rev == LPFC_SLI_REV4) { 2961 /* This IO was aborted by the target, we don't 2962 * know the rxid and because we did not send the 2963 * ABTS we cannot generate and RRQ. 2964 */ 2965 lpfc_set_rrq_active(phba, ndlp, 2966 cmdiocb->sli4_xritag, 0, 0); 2967 } 2968 break; 2969 case IOSTAT_LOCAL_REJECT: 2970 switch ((irsp->un.ulpWord[4] & 0xff)) { 2971 case IOERR_LOOP_OPEN_FAILURE: 2972 if (cmd == ELS_CMD_FLOGI) { 2973 if (PCI_DEVICE_ID_HORNET == 2974 phba->pcidev->device) { 2975 phba->fc_topology = LPFC_TOPOLOGY_LOOP; 2976 phba->pport->fc_myDID = 0; 2977 phba->alpa_map[0] = 0; 2978 phba->alpa_map[1] = 0; 2979 } 2980 } 2981 if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0) 2982 delay = 1000; 2983 retry = 1; 2984 break; 2985 2986 case IOERR_ILLEGAL_COMMAND: 2987 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 2988 "0124 Retry illegal cmd x%x " 2989 "retry:x%x delay:x%x\n", 2990 cmd, cmdiocb->retry, delay); 2991 retry = 1; 2992 /* All command's retry policy */ 2993 maxretry = 8; 2994 if (cmdiocb->retry > 2) 2995 delay = 1000; 2996 break; 2997 2998 case IOERR_NO_RESOURCES: 2999 logerr = 1; /* HBA out of resources */ 3000 retry = 1; 3001 if (cmdiocb->retry > 100) 3002 delay = 100; 3003 maxretry = 250; 3004 break; 3005 3006 case IOERR_ILLEGAL_FRAME: 3007 delay = 100; 3008 retry = 1; 3009 break; 3010 3011 case IOERR_SEQUENCE_TIMEOUT: 3012 case IOERR_INVALID_RPI: 3013 retry = 1; 3014 break; 3015 } 3016 break; 3017 3018 case IOSTAT_NPORT_RJT: 3019 case IOSTAT_FABRIC_RJT: 3020 if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) { 3021 retry = 1; 3022 break; 3023 } 3024 break; 3025 3026 case IOSTAT_NPORT_BSY: 3027 case IOSTAT_FABRIC_BSY: 3028 logerr = 1; /* Fabric / Remote NPort out of resources */ 3029 retry = 1; 3030 break; 3031 3032 case IOSTAT_LS_RJT: 3033 stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]); 3034 /* Added for Vendor specifc support 3035 * Just keep retrying for these Rsn / Exp codes 3036 */ 3037 switch (stat.un.b.lsRjtRsnCode) { 3038 case 
LSRJT_UNABLE_TPC: 3039 if (stat.un.b.lsRjtRsnCodeExp == 3040 LSEXP_CMD_IN_PROGRESS) { 3041 if (cmd == ELS_CMD_PLOGI) { 3042 delay = 1000; 3043 maxretry = 48; 3044 } 3045 retry = 1; 3046 break; 3047 } 3048 if (stat.un.b.lsRjtRsnCodeExp == 3049 LSEXP_CANT_GIVE_DATA) { 3050 if (cmd == ELS_CMD_PLOGI) { 3051 delay = 1000; 3052 maxretry = 48; 3053 } 3054 retry = 1; 3055 break; 3056 } 3057 if (cmd == ELS_CMD_PLOGI) { 3058 delay = 1000; 3059 maxretry = lpfc_max_els_tries + 1; 3060 retry = 1; 3061 break; 3062 } 3063 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 3064 (cmd == ELS_CMD_FDISC) && 3065 (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){ 3066 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 3067 "0125 FDISC Failed (x%x). " 3068 "Fabric out of resources\n", 3069 stat.un.lsRjtError); 3070 lpfc_vport_set_state(vport, 3071 FC_VPORT_NO_FABRIC_RSCS); 3072 } 3073 break; 3074 3075 case LSRJT_LOGICAL_BSY: 3076 if ((cmd == ELS_CMD_PLOGI) || 3077 (cmd == ELS_CMD_PRLI)) { 3078 delay = 1000; 3079 maxretry = 48; 3080 } else if (cmd == ELS_CMD_FDISC) { 3081 /* FDISC retry policy */ 3082 maxretry = 48; 3083 if (cmdiocb->retry >= 32) 3084 delay = 1000; 3085 } 3086 retry = 1; 3087 break; 3088 3089 case LSRJT_LOGICAL_ERR: 3090 /* There are some cases where switches return this 3091 * error when they are not ready and should be returning 3092 * Logical Busy. We should delay every time. 3093 */ 3094 if (cmd == ELS_CMD_FDISC && 3095 stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) { 3096 maxretry = 3; 3097 delay = 1000; 3098 retry = 1; 3099 break; 3100 } 3101 case LSRJT_PROTOCOL_ERR: 3102 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 3103 (cmd == ELS_CMD_FDISC) && 3104 ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) || 3105 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID)) 3106 ) { 3107 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 3108 "0122 FDISC Failed (x%x). 
" 3109 "Fabric Detected Bad WWN\n", 3110 stat.un.lsRjtError); 3111 lpfc_vport_set_state(vport, 3112 FC_VPORT_FABRIC_REJ_WWN); 3113 } 3114 break; 3115 } 3116 break; 3117 3118 case IOSTAT_INTERMED_RSP: 3119 case IOSTAT_BA_RJT: 3120 break; 3121 3122 default: 3123 break; 3124 } 3125 3126 if (did == FDMI_DID) 3127 retry = 1; 3128 3129 if ((cmd == ELS_CMD_FLOGI) && 3130 (phba->fc_topology != LPFC_TOPOLOGY_LOOP) && 3131 !lpfc_error_lost_link(irsp)) { 3132 /* FLOGI retry policy */ 3133 retry = 1; 3134 /* retry FLOGI forever */ 3135 maxretry = 0; 3136 if (cmdiocb->retry >= 100) 3137 delay = 5000; 3138 else if (cmdiocb->retry >= 32) 3139 delay = 1000; 3140 } else if ((cmd == ELS_CMD_FDISC) && !lpfc_error_lost_link(irsp)) { 3141 /* retry FDISCs every second up to devloss */ 3142 retry = 1; 3143 maxretry = vport->cfg_devloss_tmo; 3144 delay = 1000; 3145 } 3146 3147 cmdiocb->retry++; 3148 if (maxretry && (cmdiocb->retry >= maxretry)) { 3149 phba->fc_stat.elsRetryExceeded++; 3150 retry = 0; 3151 } 3152 3153 if ((vport->load_flag & FC_UNLOADING) != 0) 3154 retry = 0; 3155 3156 if (retry) { 3157 if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) { 3158 /* Stop retrying PLOGI and FDISC if in FCF discovery */ 3159 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 3160 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3161 "2849 Stop retry ELS command " 3162 "x%x to remote NPORT x%x, " 3163 "Data: x%x x%x\n", cmd, did, 3164 cmdiocb->retry, delay); 3165 return 0; 3166 } 3167 } 3168 3169 /* Retry ELS command <elsCmd> to remote NPORT <did> */ 3170 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3171 "0107 Retry ELS command x%x to remote " 3172 "NPORT x%x Data: x%x x%x\n", 3173 cmd, did, cmdiocb->retry, delay); 3174 3175 if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) && 3176 ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) || 3177 ((irsp->un.ulpWord[4] & 0xff) != IOERR_NO_RESOURCES))) { 3178 /* Don't reset timer for no resources */ 3179 3180 /* If discovery / RSCN timer is running, reset it */ 3181 if (timer_pending(&vport->fc_disctmo) || 3182 (vport->fc_flag & FC_RSCN_MODE)) 3183 lpfc_set_disctmo(vport); 3184 } 3185 3186 phba->fc_stat.elsXmitRetry++; 3187 if (ndlp && NLP_CHK_NODE_ACT(ndlp) && delay) { 3188 phba->fc_stat.elsDelayRetry++; 3189 ndlp->nlp_retry = cmdiocb->retry; 3190 3191 /* delay is specified in milliseconds */ 3192 mod_timer(&ndlp->nlp_delayfunc, 3193 jiffies + msecs_to_jiffies(delay)); 3194 spin_lock_irq(shost->host_lock); 3195 ndlp->nlp_flag |= NLP_DELAY_TMO; 3196 spin_unlock_irq(shost->host_lock); 3197 3198 ndlp->nlp_prev_state = ndlp->nlp_state; 3199 if (cmd == ELS_CMD_PRLI) 3200 lpfc_nlp_set_state(vport, ndlp, 3201 NLP_STE_REG_LOGIN_ISSUE); 3202 else 3203 lpfc_nlp_set_state(vport, ndlp, 3204 NLP_STE_NPR_NODE); 3205 ndlp->nlp_last_elscmd = cmd; 3206 3207 return 1; 3208 } 3209 switch (cmd) { 3210 case ELS_CMD_FLOGI: 3211 lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry); 3212 return 1; 3213 case ELS_CMD_FDISC: 3214 lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry); 3215 return 1; 3216 case ELS_CMD_PLOGI: 3217 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { 3218 ndlp->nlp_prev_state = ndlp->nlp_state; 3219 lpfc_nlp_set_state(vport, ndlp, 3220 NLP_STE_PLOGI_ISSUE); 3221 } 3222 lpfc_issue_els_plogi(vport, did, cmdiocb->retry); 3223 return 1; 3224 case ELS_CMD_ADISC: 3225 ndlp->nlp_prev_state = ndlp->nlp_state; 3226 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 3227 lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry); 3228 return 1; 3229 case ELS_CMD_PRLI: 3230 ndlp->nlp_prev_state = ndlp->nlp_state; 3231 
lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 3232 lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry); 3233 return 1; 3234 case ELS_CMD_LOGO: 3235 ndlp->nlp_prev_state = ndlp->nlp_state; 3236 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 3237 lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry); 3238 return 1; 3239 } 3240 } 3241 /* No retry ELS command <elsCmd> to remote NPORT <did> */ 3242 if (logerr) { 3243 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 3244 "0137 No retry ELS command x%x to remote " 3245 "NPORT x%x: Out of Resources: Error:x%x/%x\n", 3246 cmd, did, irsp->ulpStatus, 3247 irsp->un.ulpWord[4]); 3248 } 3249 else { 3250 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3251 "0108 No retry ELS command x%x to remote " 3252 "NPORT x%x Retried:%d Error:x%x/%x\n", 3253 cmd, did, cmdiocb->retry, irsp->ulpStatus, 3254 irsp->un.ulpWord[4]); 3255 } 3256 return 0; 3257 } 3258 3259 /** 3260 * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb 3261 * @phba: pointer to lpfc hba data structure. 3262 * @buf_ptr1: pointer to the lpfc DMA buffer data structure. 3263 * 3264 * This routine releases the lpfc DMA (Direct Memory Access) buffer(s) 3265 * associated with a command IOCB back to the lpfc DMA buffer pool. It first 3266 * checks to see whether there is a lpfc DMA buffer associated with the 3267 * response of the command IOCB. If so, it will be released before releasing 3268 * the lpfc DMA buffer associated with the IOCB itself. 3269 * 3270 * Return code 3271 * 0 - Successfully released lpfc DMA buffer (currently, always return 0) 3272 **/ 3273 static int 3274 lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1) 3275 { 3276 struct lpfc_dmabuf *buf_ptr; 3277 3278 /* Free the response before processing the command. */ 3279 if (!list_empty(&buf_ptr1->list)) { 3280 list_remove_head(&buf_ptr1->list, buf_ptr, 3281 struct lpfc_dmabuf, 3282 list); 3283 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 3284 kfree(buf_ptr); 3285 } 3286 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys); 3287 kfree(buf_ptr1); 3288 return 0; 3289 } 3290 3291 /** 3292 * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl 3293 * @phba: pointer to lpfc hba data structure. 3294 * @buf_ptr: pointer to the lpfc dma buffer data structure. 3295 * 3296 * This routine releases the lpfc Direct Memory Access (DMA) buffer 3297 * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer 3298 * pool. 3299 * 3300 * Return code 3301 * 0 - Successfully released lpfc DMA buffer (currently, always return 0) 3302 **/ 3303 static int 3304 lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr) 3305 { 3306 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 3307 kfree(buf_ptr); 3308 return 0; 3309 } 3310 3311 /** 3312 * lpfc_els_free_iocb - Free a command iocb and its associated resources 3313 * @phba: pointer to lpfc hba data structure. 3314 * @elsiocb: pointer to lpfc els command iocb data structure. 3315 * 3316 * This routine frees a command IOCB and its associated resources. The 3317 * command IOCB data structure contains the reference to various associated 3318 * resources, these fields must be set to NULL if the associated reference 3319 * not present: 3320 * context1 - reference to ndlp 3321 * context2 - reference to cmd 3322 * context2->next - reference to rsp 3323 * context3 - reference to bpl 3324 * 3325 * It first properly decrements the reference count held on ndlp for the 3326 * IOCB completion callback function. 
If LPFC_DELAY_MEM_FREE flag is not 3327 * set, it invokes the lpfc_els_free_data() routine to release the Direct 3328 * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it 3329 * adds the DMA buffer the @phba data structure for the delayed release. 3330 * If reference to the Buffer Pointer List (BPL) is present, the 3331 * lpfc_els_free_bpl() routine is invoked to release the DMA memory 3332 * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is 3333 * invoked to release the IOCB data structure back to @phba IOCBQ list. 3334 * 3335 * Return code 3336 * 0 - Success (currently, always return 0) 3337 **/ 3338 int 3339 lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb) 3340 { 3341 struct lpfc_dmabuf *buf_ptr, *buf_ptr1; 3342 struct lpfc_nodelist *ndlp; 3343 3344 ndlp = (struct lpfc_nodelist *)elsiocb->context1; 3345 if (ndlp) { 3346 if (ndlp->nlp_flag & NLP_DEFER_RM) { 3347 lpfc_nlp_put(ndlp); 3348 3349 /* If the ndlp is not being used by another discovery 3350 * thread, free it. 3351 */ 3352 if (!lpfc_nlp_not_used(ndlp)) { 3353 /* If ndlp is being used by another discovery 3354 * thread, just clear NLP_DEFER_RM 3355 */ 3356 ndlp->nlp_flag &= ~NLP_DEFER_RM; 3357 } 3358 } 3359 else 3360 lpfc_nlp_put(ndlp); 3361 elsiocb->context1 = NULL; 3362 } 3363 /* context2 = cmd, context2->next = rsp, context3 = bpl */ 3364 if (elsiocb->context2) { 3365 if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) { 3366 /* Firmware could still be in progress of DMAing 3367 * payload, so don't free data buffer till after 3368 * a hbeat. 3369 */ 3370 elsiocb->iocb_flag &= ~LPFC_DELAY_MEM_FREE; 3371 buf_ptr = elsiocb->context2; 3372 elsiocb->context2 = NULL; 3373 if (buf_ptr) { 3374 buf_ptr1 = NULL; 3375 spin_lock_irq(&phba->hbalock); 3376 if (!list_empty(&buf_ptr->list)) { 3377 list_remove_head(&buf_ptr->list, 3378 buf_ptr1, struct lpfc_dmabuf, 3379 list); 3380 INIT_LIST_HEAD(&buf_ptr1->list); 3381 list_add_tail(&buf_ptr1->list, 3382 &phba->elsbuf); 3383 phba->elsbuf_cnt++; 3384 } 3385 INIT_LIST_HEAD(&buf_ptr->list); 3386 list_add_tail(&buf_ptr->list, &phba->elsbuf); 3387 phba->elsbuf_cnt++; 3388 spin_unlock_irq(&phba->hbalock); 3389 } 3390 } else { 3391 buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2; 3392 lpfc_els_free_data(phba, buf_ptr1); 3393 } 3394 } 3395 3396 if (elsiocb->context3) { 3397 buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3; 3398 lpfc_els_free_bpl(phba, buf_ptr); 3399 } 3400 lpfc_sli_release_iocbq(phba, elsiocb); 3401 return 0; 3402 } 3403 3404 /** 3405 * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response 3406 * @phba: pointer to lpfc hba data structure. 3407 * @cmdiocb: pointer to lpfc command iocb data structure. 3408 * @rspiocb: pointer to lpfc response iocb data structure. 3409 * 3410 * This routine is the completion callback function to the Logout (LOGO) 3411 * Accept (ACC) Response ELS command. This routine is invoked to indicate 3412 * the completion of the LOGO process. It invokes the lpfc_nlp_not_used() to 3413 * release the ndlp if it has the last reference remaining (reference count 3414 * is 1). If succeeded (meaning ndlp released), it sets the IOCB context1 3415 * field to NULL to inform the following lpfc_els_free_iocb() routine no 3416 * ndlp reference count needs to be decremented. Otherwise, the ndlp 3417 * reference use-count shall be decremented by the lpfc_els_free_iocb() 3418 * routine. Finally, the lpfc_els_free_iocb() is invoked to release the 3419 * IOCB data structure. 
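 *
 * For illustration only: when lpfc_els_free_iocb() defers the payload
 * buffers (the LPFC_DELAY_MEM_FREE case) they are parked on phba->elsbuf
 * and reclaimed later elsewhere in the driver, roughly along these lines
 * (sketch only, with buf_ptr a struct lpfc_dmabuf pointer):
 *
 *	LIST_HEAD(done);
 *	spin_lock_irq(&phba->hbalock);
 *	list_splice_init(&phba->elsbuf, &done);
 *	phba->elsbuf_cnt = 0;
 *	spin_unlock_irq(&phba->hbalock);
 *	while (!list_empty(&done)) {
 *		list_remove_head(&done, buf_ptr, struct lpfc_dmabuf, list);
 *		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
 *		kfree(buf_ptr);
 *	}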
3420 **/ 3421 static void 3422 lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3423 struct lpfc_iocbq *rspiocb) 3424 { 3425 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 3426 struct lpfc_vport *vport = cmdiocb->vport; 3427 IOCB_t *irsp; 3428 3429 irsp = &rspiocb->iocb; 3430 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 3431 "ACC LOGO cmpl: status:x%x/x%x did:x%x", 3432 irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID); 3433 /* ACC to LOGO completes to NPort <nlp_DID> */ 3434 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3435 "0109 ACC to LOGO completes to NPort x%x " 3436 "Data: x%x x%x x%x\n", 3437 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 3438 ndlp->nlp_rpi); 3439 3440 if (ndlp->nlp_state == NLP_STE_NPR_NODE) { 3441 /* NPort Recovery mode or node is just allocated */ 3442 if (!lpfc_nlp_not_used(ndlp)) { 3443 /* If the ndlp is being used by another discovery 3444 * thread, just unregister the RPI. 3445 */ 3446 lpfc_unreg_rpi(vport, ndlp); 3447 } else { 3448 /* Indicate the node has already released, should 3449 * not reference to it from within lpfc_els_free_iocb. 3450 */ 3451 cmdiocb->context1 = NULL; 3452 } 3453 } 3454 3455 /* 3456 * The driver received a LOGO from the rport and has ACK'd it. 3457 * At this point, the driver is done so release the IOCB 3458 */ 3459 lpfc_els_free_iocb(phba, cmdiocb); 3460 3461 /* 3462 * Remove the ndlp reference if it's a fabric node that has 3463 * sent us an unsolicted LOGO. 3464 */ 3465 if (ndlp->nlp_type & NLP_FABRIC) 3466 lpfc_nlp_put(ndlp); 3467 3468 return; 3469 } 3470 3471 /** 3472 * lpfc_mbx_cmpl_dflt_rpi - Completion callbk func for unreg dflt rpi mbox cmd 3473 * @phba: pointer to lpfc hba data structure. 3474 * @pmb: pointer to the driver internal queue element for mailbox command. 3475 * 3476 * This routine is the completion callback function for unregister default 3477 * RPI (Remote Port Index) mailbox command to the @phba. It simply releases 3478 * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and 3479 * decrements the ndlp reference count held for this completion callback 3480 * function. After that, it invokes the lpfc_nlp_not_used() to check 3481 * whether there is only one reference left on the ndlp. If so, it will 3482 * perform one more decrement and trigger the release of the ndlp. 3483 **/ 3484 void 3485 lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 3486 { 3487 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 3488 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 3489 3490 pmb->context1 = NULL; 3491 pmb->context2 = NULL; 3492 3493 lpfc_mbuf_free(phba, mp->virt, mp->phys); 3494 kfree(mp); 3495 mempool_free(pmb, phba->mbox_mem_pool); 3496 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { 3497 lpfc_nlp_put(ndlp); 3498 /* This is the end of the default RPI cleanup logic for this 3499 * ndlp. If no other discovery threads are using this ndlp. 3500 * we should free all resources associated with it. 3501 */ 3502 lpfc_nlp_not_used(ndlp); 3503 } 3504 3505 return; 3506 } 3507 3508 /** 3509 * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd 3510 * @phba: pointer to lpfc hba data structure. 3511 * @cmdiocb: pointer to lpfc command iocb data structure. 3512 * @rspiocb: pointer to lpfc response iocb data structure. 3513 * 3514 * This routine is the completion callback function for ELS Response IOCB 3515 * command. 
In normal case, this callback function just properly sets the 3516 * nlp_flag bitmap in the ndlp data structure, if the mbox command reference 3517 * field in the command IOCB is not NULL, the referred mailbox command will 3518 * be send out, and then invokes the lpfc_els_free_iocb() routine to release 3519 * the IOCB. Under error conditions, such as when a LS_RJT is returned or a 3520 * link down event occurred during the discovery, the lpfc_nlp_not_used() 3521 * routine shall be invoked trying to release the ndlp if no other threads 3522 * are currently referring it. 3523 **/ 3524 static void 3525 lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3526 struct lpfc_iocbq *rspiocb) 3527 { 3528 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 3529 struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL; 3530 struct Scsi_Host *shost = vport ? lpfc_shost_from_vport(vport) : NULL; 3531 IOCB_t *irsp; 3532 uint8_t *pcmd; 3533 LPFC_MBOXQ_t *mbox = NULL; 3534 struct lpfc_dmabuf *mp = NULL; 3535 uint32_t ls_rjt = 0; 3536 3537 irsp = &rspiocb->iocb; 3538 3539 if (cmdiocb->context_un.mbox) 3540 mbox = cmdiocb->context_un.mbox; 3541 3542 /* First determine if this is a LS_RJT cmpl. Note, this callback 3543 * function can have cmdiocb->contest1 (ndlp) field set to NULL. 3544 */ 3545 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt); 3546 if (ndlp && NLP_CHK_NODE_ACT(ndlp) && 3547 (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) { 3548 /* A LS_RJT associated with Default RPI cleanup has its own 3549 * separate code path. 3550 */ 3551 if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI)) 3552 ls_rjt = 1; 3553 } 3554 3555 /* Check to see if link went down during discovery */ 3556 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || lpfc_els_chk_latt(vport)) { 3557 if (mbox) { 3558 mp = (struct lpfc_dmabuf *) mbox->context1; 3559 if (mp) { 3560 lpfc_mbuf_free(phba, mp->virt, mp->phys); 3561 kfree(mp); 3562 } 3563 mempool_free(mbox, phba->mbox_mem_pool); 3564 } 3565 if (ndlp && NLP_CHK_NODE_ACT(ndlp) && 3566 (ndlp->nlp_flag & NLP_RM_DFLT_RPI)) 3567 if (lpfc_nlp_not_used(ndlp)) { 3568 ndlp = NULL; 3569 /* Indicate the node has already released, 3570 * should not reference to it from within 3571 * the routine lpfc_els_free_iocb. 3572 */ 3573 cmdiocb->context1 = NULL; 3574 } 3575 goto out; 3576 } 3577 3578 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 3579 "ELS rsp cmpl: status:x%x/x%x did:x%x", 3580 irsp->ulpStatus, irsp->un.ulpWord[4], 3581 cmdiocb->iocb.un.elsreq64.remoteID); 3582 /* ELS response tag <ulpIoTag> completes */ 3583 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3584 "0110 ELS response tag x%x completes " 3585 "Data: x%x x%x x%x x%x x%x x%x x%x\n", 3586 cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus, 3587 rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout, 3588 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 3589 ndlp->nlp_rpi); 3590 if (mbox) { 3591 if ((rspiocb->iocb.ulpStatus == 0) 3592 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { 3593 lpfc_unreg_rpi(vport, ndlp); 3594 /* Increment reference count to ndlp to hold the 3595 * reference to ndlp for the callback function. 
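			 * The matching lpfc_nlp_put() is expected to be done
			 * by the mailbox completion handler once the mailbox
			 * has been issued, or by the error path just below
			 * when lpfc_sli_issue_mbox() returns
			 * MBX_NOT_FINISHED.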
3596 */ 3597 mbox->context2 = lpfc_nlp_get(ndlp); 3598 mbox->vport = vport; 3599 if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) { 3600 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG; 3601 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 3602 } 3603 else { 3604 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; 3605 ndlp->nlp_prev_state = ndlp->nlp_state; 3606 lpfc_nlp_set_state(vport, ndlp, 3607 NLP_STE_REG_LOGIN_ISSUE); 3608 } 3609 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 3610 != MBX_NOT_FINISHED) 3611 goto out; 3612 else 3613 /* Decrement the ndlp reference count we 3614 * set for this failed mailbox command. 3615 */ 3616 lpfc_nlp_put(ndlp); 3617 3618 /* ELS rsp: Cannot issue reg_login for <NPortid> */ 3619 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 3620 "0138 ELS rsp: Cannot issue reg_login for x%x " 3621 "Data: x%x x%x x%x\n", 3622 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 3623 ndlp->nlp_rpi); 3624 3625 if (lpfc_nlp_not_used(ndlp)) { 3626 ndlp = NULL; 3627 /* Indicate node has already been released, 3628 * should not reference to it from within 3629 * the routine lpfc_els_free_iocb. 3630 */ 3631 cmdiocb->context1 = NULL; 3632 } 3633 } else { 3634 /* Do not drop node for lpfc_els_abort'ed ELS cmds */ 3635 if (!lpfc_error_lost_link(irsp) && 3636 ndlp->nlp_flag & NLP_ACC_REGLOGIN) { 3637 if (lpfc_nlp_not_used(ndlp)) { 3638 ndlp = NULL; 3639 /* Indicate node has already been 3640 * released, should not reference 3641 * to it from within the routine 3642 * lpfc_els_free_iocb. 3643 */ 3644 cmdiocb->context1 = NULL; 3645 } 3646 } 3647 } 3648 mp = (struct lpfc_dmabuf *) mbox->context1; 3649 if (mp) { 3650 lpfc_mbuf_free(phba, mp->virt, mp->phys); 3651 kfree(mp); 3652 } 3653 mempool_free(mbox, phba->mbox_mem_pool); 3654 } 3655 out: 3656 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { 3657 spin_lock_irq(shost->host_lock); 3658 ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI); 3659 spin_unlock_irq(shost->host_lock); 3660 3661 /* If the node is not being used by another discovery thread, 3662 * and we are sending a reject, we are done with it. 3663 * Release driver reference count here and free associated 3664 * resources. 3665 */ 3666 if (ls_rjt) 3667 if (lpfc_nlp_not_used(ndlp)) 3668 /* Indicate node has already been released, 3669 * should not reference to it from within 3670 * the routine lpfc_els_free_iocb. 3671 */ 3672 cmdiocb->context1 = NULL; 3673 } 3674 3675 lpfc_els_free_iocb(phba, cmdiocb); 3676 return; 3677 } 3678 3679 /** 3680 * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command 3681 * @vport: pointer to a host virtual N_Port data structure. 3682 * @flag: the els command code to be accepted. 3683 * @oldiocb: pointer to the original lpfc command iocb data structure. 3684 * @ndlp: pointer to a node-list data structure. 3685 * @mbox: pointer to the driver internal queue element for mailbox command. 3686 * 3687 * This routine prepares and issues an Accept (ACC) response IOCB 3688 * command. It uses the @flag to properly set up the IOCB field for the 3689 * specific ACC response command to be issued and invokes the 3690 * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a 3691 * @mbox pointer is passed in, it will be put into the context_un.mbox 3692 * field of the IOCB for the completion callback function to issue the 3693 * mailbox command to the HBA later when callback is invoked. 
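 *
 * For illustration only (the surrounding caller is hypothetical): a caller
 * that has already built a mailbox command, for example a REG_LOGIN, and
 * wants it issued only after the ACC completes would pass it in rather
 * than issuing it directly:
 *
 *	if (lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox))
 *		mempool_free(mbox, phba->mbox_mem_pool);
 *
 * On failure this routine does not consume the mailbox, so the caller
 * stays responsible for releasing it.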
3694 * 3695 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 3696 * will be incremented by 1 for holding the ndlp and the reference to ndlp 3697 * will be stored into the context1 field of the IOCB for the completion 3698 * callback function to the corresponding response ELS IOCB command. 3699 * 3700 * Return code 3701 * 0 - Successfully issued acc response 3702 * 1 - Failed to issue acc response 3703 **/ 3704 int 3705 lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, 3706 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 3707 LPFC_MBOXQ_t *mbox) 3708 { 3709 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3710 struct lpfc_hba *phba = vport->phba; 3711 IOCB_t *icmd; 3712 IOCB_t *oldcmd; 3713 struct lpfc_iocbq *elsiocb; 3714 struct lpfc_sli *psli; 3715 uint8_t *pcmd; 3716 uint16_t cmdsize; 3717 int rc; 3718 ELS_PKT *els_pkt_ptr; 3719 3720 psli = &phba->sli; 3721 oldcmd = &oldiocb->iocb; 3722 3723 switch (flag) { 3724 case ELS_CMD_ACC: 3725 cmdsize = sizeof(uint32_t); 3726 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 3727 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 3728 if (!elsiocb) { 3729 spin_lock_irq(shost->host_lock); 3730 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 3731 spin_unlock_irq(shost->host_lock); 3732 return 1; 3733 } 3734 3735 icmd = &elsiocb->iocb; 3736 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 3737 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 3738 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3739 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 3740 pcmd += sizeof(uint32_t); 3741 3742 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 3743 "Issue ACC: did:x%x flg:x%x", 3744 ndlp->nlp_DID, ndlp->nlp_flag, 0); 3745 break; 3746 case ELS_CMD_PLOGI: 3747 cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t)); 3748 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 3749 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 3750 if (!elsiocb) 3751 return 1; 3752 3753 icmd = &elsiocb->iocb; 3754 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 3755 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 3756 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3757 3758 if (mbox) 3759 elsiocb->context_un.mbox = mbox; 3760 3761 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 3762 pcmd += sizeof(uint32_t); 3763 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); 3764 3765 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 3766 "Issue ACC PLOGI: did:x%x flg:x%x", 3767 ndlp->nlp_DID, ndlp->nlp_flag, 0); 3768 break; 3769 case ELS_CMD_PRLO: 3770 cmdsize = sizeof(uint32_t) + sizeof(PRLO); 3771 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 3772 ndlp, ndlp->nlp_DID, ELS_CMD_PRLO); 3773 if (!elsiocb) 3774 return 1; 3775 3776 icmd = &elsiocb->iocb; 3777 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 3778 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 3779 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3780 3781 memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt, 3782 sizeof(uint32_t) + sizeof(PRLO)); 3783 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC; 3784 els_pkt_ptr = (ELS_PKT *) pcmd; 3785 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED; 3786 3787 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 3788 "Issue ACC PRLO: did:x%x flg:x%x", 3789 ndlp->nlp_DID, ndlp->nlp_flag, 0); 3790 break; 3791 default: 3792 return 1; 3793 } 3794 /* Xmit ELS ACC response tag <ulpIoTag> */ 3795 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3796 "0128 Xmit ELS 
ACC response tag x%x, XRI: x%x, " 3797 "DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x\n", 3798 elsiocb->iotag, elsiocb->iocb.ulpContext, 3799 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 3800 ndlp->nlp_rpi); 3801 if (ndlp->nlp_flag & NLP_LOGO_ACC) { 3802 spin_lock_irq(shost->host_lock); 3803 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 3804 spin_unlock_irq(shost->host_lock); 3805 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc; 3806 } else { 3807 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 3808 } 3809 3810 phba->fc_stat.elsXmitACC++; 3811 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3812 if (rc == IOCB_ERROR) { 3813 lpfc_els_free_iocb(phba, elsiocb); 3814 return 1; 3815 } 3816 return 0; 3817 } 3818 3819 /** 3820 * lpfc_els_rsp_reject - Propare and issue a rjt response iocb command 3821 * @vport: pointer to a virtual N_Port data structure. 3822 * @rejectError: 3823 * @oldiocb: pointer to the original lpfc command iocb data structure. 3824 * @ndlp: pointer to a node-list data structure. 3825 * @mbox: pointer to the driver internal queue element for mailbox command. 3826 * 3827 * This routine prepares and issue an Reject (RJT) response IOCB 3828 * command. If a @mbox pointer is passed in, it will be put into the 3829 * context_un.mbox field of the IOCB for the completion callback function 3830 * to issue to the HBA later. 3831 * 3832 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 3833 * will be incremented by 1 for holding the ndlp and the reference to ndlp 3834 * will be stored into the context1 field of the IOCB for the completion 3835 * callback function to the reject response ELS IOCB command. 3836 * 3837 * Return code 3838 * 0 - Successfully issued reject response 3839 * 1 - Failed to issue reject response 3840 **/ 3841 int 3842 lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, 3843 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 3844 LPFC_MBOXQ_t *mbox) 3845 { 3846 struct lpfc_hba *phba = vport->phba; 3847 IOCB_t *icmd; 3848 IOCB_t *oldcmd; 3849 struct lpfc_iocbq *elsiocb; 3850 struct lpfc_sli *psli; 3851 uint8_t *pcmd; 3852 uint16_t cmdsize; 3853 int rc; 3854 3855 psli = &phba->sli; 3856 cmdsize = 2 * sizeof(uint32_t); 3857 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 3858 ndlp->nlp_DID, ELS_CMD_LS_RJT); 3859 if (!elsiocb) 3860 return 1; 3861 3862 icmd = &elsiocb->iocb; 3863 oldcmd = &oldiocb->iocb; 3864 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 3865 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 3866 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3867 3868 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; 3869 pcmd += sizeof(uint32_t); 3870 *((uint32_t *) (pcmd)) = rejectError; 3871 3872 if (mbox) 3873 elsiocb->context_un.mbox = mbox; 3874 3875 /* Xmit ELS RJT <err> response tag <ulpIoTag> */ 3876 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3877 "0129 Xmit ELS RJT x%x response tag x%x " 3878 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " 3879 "rpi x%x\n", 3880 rejectError, elsiocb->iotag, 3881 elsiocb->iocb.ulpContext, ndlp->nlp_DID, 3882 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); 3883 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 3884 "Issue LS_RJT: did:x%x flg:x%x err:x%x", 3885 ndlp->nlp_DID, ndlp->nlp_flag, rejectError); 3886 3887 phba->fc_stat.elsXmitLSRJT++; 3888 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 3889 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3890 3891 if (rc == IOCB_ERROR) { 3892 lpfc_els_free_iocb(phba, elsiocb); 3893 
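		/* The IOCB and the ndlp reference taken for it in
		 * lpfc_prep_els_iocb() were released above; a mailbox passed
		 * in by the caller has not been consumed at this point.
		 */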
return 1; 3894 } 3895 return 0; 3896 } 3897 3898 /** 3899 * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd 3900 * @vport: pointer to a virtual N_Port data structure. 3901 * @oldiocb: pointer to the original lpfc command iocb data structure. 3902 * @ndlp: pointer to a node-list data structure. 3903 * 3904 * This routine prepares and issues an Accept (ACC) response to Address 3905 * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB 3906 * and invokes the lpfc_sli_issue_iocb() routine to send out the command. 3907 * 3908 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 3909 * will be incremented by 1 for holding the ndlp and the reference to ndlp 3910 * will be stored into the context1 field of the IOCB for the completion 3911 * callback function to the ADISC Accept response ELS IOCB command. 3912 * 3913 * Return code 3914 * 0 - Successfully issued acc adisc response 3915 * 1 - Failed to issue adisc acc response 3916 **/ 3917 int 3918 lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 3919 struct lpfc_nodelist *ndlp) 3920 { 3921 struct lpfc_hba *phba = vport->phba; 3922 ADISC *ap; 3923 IOCB_t *icmd, *oldcmd; 3924 struct lpfc_iocbq *elsiocb; 3925 uint8_t *pcmd; 3926 uint16_t cmdsize; 3927 int rc; 3928 3929 cmdsize = sizeof(uint32_t) + sizeof(ADISC); 3930 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 3931 ndlp->nlp_DID, ELS_CMD_ACC); 3932 if (!elsiocb) 3933 return 1; 3934 3935 icmd = &elsiocb->iocb; 3936 oldcmd = &oldiocb->iocb; 3937 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 3938 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 3939 3940 /* Xmit ADISC ACC response tag <ulpIoTag> */ 3941 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3942 "0130 Xmit ADISC ACC response iotag x%x xri: " 3943 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n", 3944 elsiocb->iotag, elsiocb->iocb.ulpContext, 3945 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 3946 ndlp->nlp_rpi); 3947 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3948 3949 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 3950 pcmd += sizeof(uint32_t); 3951 3952 ap = (ADISC *) (pcmd); 3953 ap->hardAL_PA = phba->fc_pref_ALPA; 3954 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 3955 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 3956 ap->DID = be32_to_cpu(vport->fc_myDID); 3957 3958 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 3959 "Issue ACC ADISC: did:x%x flg:x%x", 3960 ndlp->nlp_DID, ndlp->nlp_flag, 0); 3961 3962 phba->fc_stat.elsXmitACC++; 3963 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 3964 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3965 if (rc == IOCB_ERROR) { 3966 lpfc_els_free_iocb(phba, elsiocb); 3967 return 1; 3968 } 3969 return 0; 3970 } 3971 3972 /** 3973 * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd 3974 * @vport: pointer to a virtual N_Port data structure. 3975 * @oldiocb: pointer to the original lpfc command iocb data structure. 3976 * @ndlp: pointer to a node-list data structure. 3977 * 3978 * This routine prepares and issues an Accept (ACC) response to Process 3979 * Login (PRLI) ELS command. It simply prepares the payload of the IOCB 3980 * and invokes the lpfc_sli_issue_iocb() routine to send out the command. 
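 *
 * The accept carries a single FCP service parameter page: the accept
 * response code is set to PRLI_REQ_EXECUTED, an image pair is
 * established, and the initiator function and READ XFER_RDY disabled
 * bits are advertised. When the remote port is an FCP target and the
 * adapter firmware reports feature level 3.20 or later, the FC-TAPE
 * bits (confirmed completion allowed, retry, and task retry
 * identification) are set as well.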
3981 * 3982 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 3983 * will be incremented by 1 for holding the ndlp and the reference to ndlp 3984 * will be stored into the context1 field of the IOCB for the completion 3985 * callback function to the PRLI Accept response ELS IOCB command. 3986 * 3987 * Return code 3988 * 0 - Successfully issued acc prli response 3989 * 1 - Failed to issue acc prli response 3990 **/ 3991 int 3992 lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 3993 struct lpfc_nodelist *ndlp) 3994 { 3995 struct lpfc_hba *phba = vport->phba; 3996 PRLI *npr; 3997 lpfc_vpd_t *vpd; 3998 IOCB_t *icmd; 3999 IOCB_t *oldcmd; 4000 struct lpfc_iocbq *elsiocb; 4001 struct lpfc_sli *psli; 4002 uint8_t *pcmd; 4003 uint16_t cmdsize; 4004 int rc; 4005 4006 psli = &phba->sli; 4007 4008 cmdsize = sizeof(uint32_t) + sizeof(PRLI); 4009 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 4010 ndlp->nlp_DID, (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK))); 4011 if (!elsiocb) 4012 return 1; 4013 4014 icmd = &elsiocb->iocb; 4015 oldcmd = &oldiocb->iocb; 4016 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 4017 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 4018 4019 /* Xmit PRLI ACC response tag <ulpIoTag> */ 4020 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4021 "0131 Xmit PRLI ACC response tag x%x xri x%x, " 4022 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 4023 elsiocb->iotag, elsiocb->iocb.ulpContext, 4024 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 4025 ndlp->nlp_rpi); 4026 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 4027 4028 *((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)); 4029 pcmd += sizeof(uint32_t); 4030 4031 /* For PRLI, remainder of payload is PRLI parameter page */ 4032 memset(pcmd, 0, sizeof(PRLI)); 4033 4034 npr = (PRLI *) pcmd; 4035 vpd = &phba->vpd; 4036 /* 4037 * If the remote port is a target and our firmware version is 3.20 or 4038 * later, set the following bits for FC-TAPE support. 4039 */ 4040 if ((ndlp->nlp_type & NLP_FCP_TARGET) && 4041 (vpd->rev.feaLevelHigh >= 0x02)) { 4042 npr->ConfmComplAllowed = 1; 4043 npr->Retry = 1; 4044 npr->TaskRetryIdReq = 1; 4045 } 4046 4047 npr->acceptRspCode = PRLI_REQ_EXECUTED; 4048 npr->estabImagePair = 1; 4049 npr->readXferRdyDis = 1; 4050 npr->ConfmComplAllowed = 1; 4051 4052 npr->prliType = PRLI_FCP_TYPE; 4053 npr->initiatorFunc = 1; 4054 4055 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4056 "Issue ACC PRLI: did:x%x flg:x%x", 4057 ndlp->nlp_DID, ndlp->nlp_flag, 0); 4058 4059 phba->fc_stat.elsXmitACC++; 4060 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 4061 4062 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 4063 if (rc == IOCB_ERROR) { 4064 lpfc_els_free_iocb(phba, elsiocb); 4065 return 1; 4066 } 4067 return 0; 4068 } 4069 4070 /** 4071 * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command 4072 * @vport: pointer to a virtual N_Port data structure. 4073 * @format: rnid command format. 4074 * @oldiocb: pointer to the original lpfc command iocb data structure. 4075 * @ndlp: pointer to a node-list data structure. 4076 * 4077 * This routine issues a Request Node Identification Data (RNID) Accept 4078 * (ACC) response. It constructs the RNID ACC response command according to 4079 * the proper @format and then calls the lpfc_sli_issue_iocb() routine to 4080 * issue the response. Note that this command does not need to hold the ndlp 4081 * reference count for the callback. 
So, the ndlp reference count taken by 4082 * the lpfc_prep_els_iocb() routine is put back and the context1 field of 4083 * IOCB is set to NULL to indicate to the lpfc_els_free_iocb() routine that 4084 * there is no ndlp reference available. 4085 * 4086 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 4087 * will be incremented by 1 for holding the ndlp and the reference to ndlp 4088 * will be stored into the context1 field of the IOCB for the completion 4089 * callback function. However, for the RNID Accept Response ELS command, 4090 * this is undone later by this routine after the IOCB is allocated. 4091 * 4092 * Return code 4093 * 0 - Successfully issued acc rnid response 4094 * 1 - Failed to issue acc rnid response 4095 **/ 4096 static int 4097 lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, 4098 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 4099 { 4100 struct lpfc_hba *phba = vport->phba; 4101 RNID *rn; 4102 IOCB_t *icmd, *oldcmd; 4103 struct lpfc_iocbq *elsiocb; 4104 struct lpfc_sli *psli; 4105 uint8_t *pcmd; 4106 uint16_t cmdsize; 4107 int rc; 4108 4109 psli = &phba->sli; 4110 cmdsize = sizeof(uint32_t) + sizeof(uint32_t) 4111 + (2 * sizeof(struct lpfc_name)); 4112 if (format) 4113 cmdsize += sizeof(RNID_TOP_DISC); 4114 4115 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 4116 ndlp->nlp_DID, ELS_CMD_ACC); 4117 if (!elsiocb) 4118 return 1; 4119 4120 icmd = &elsiocb->iocb; 4121 oldcmd = &oldiocb->iocb; 4122 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 4123 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 4124 4125 /* Xmit RNID ACC response tag <ulpIoTag> */ 4126 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4127 "0132 Xmit RNID ACC response tag x%x xri x%x\n", 4128 elsiocb->iotag, elsiocb->iocb.ulpContext); 4129 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 4130 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 4131 pcmd += sizeof(uint32_t); 4132 4133 memset(pcmd, 0, sizeof(RNID)); 4134 rn = (RNID *) (pcmd); 4135 rn->Format = format; 4136 rn->CommonLen = (2 * sizeof(struct lpfc_name)); 4137 memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 4138 memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 4139 switch (format) { 4140 case 0: 4141 rn->SpecificLen = 0; 4142 break; 4143 case RNID_TOPOLOGY_DISC: 4144 rn->SpecificLen = sizeof(RNID_TOP_DISC); 4145 memcpy(&rn->un.topologyDisc.portName, 4146 &vport->fc_portname, sizeof(struct lpfc_name)); 4147 rn->un.topologyDisc.unitType = RNID_HBA; 4148 rn->un.topologyDisc.physPort = 0; 4149 rn->un.topologyDisc.attachedNodes = 0; 4150 break; 4151 default: 4152 rn->CommonLen = 0; 4153 rn->SpecificLen = 0; 4154 break; 4155 } 4156 4157 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4158 "Issue ACC RNID: did:x%x flg:x%x", 4159 ndlp->nlp_DID, ndlp->nlp_flag, 0); 4160 4161 phba->fc_stat.elsXmitACC++; 4162 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 4163 4164 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 4165 if (rc == IOCB_ERROR) { 4166 lpfc_els_free_iocb(phba, elsiocb); 4167 return 1; 4168 } 4169 return 0; 4170 } 4171 4172 /** 4173 * lpfc_els_clear_rrq - Clear the rq that this rrq describes. 4174 * @vport: pointer to a virtual N_Port data structure. 4175 * @iocb: pointer to the lpfc command iocb data structure. 4176 * @ndlp: pointer to a node-list data structure. 
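 *
 * This routine handles a received Reinstate Recovery Qualifier (RRQ)
 * payload: it extracts the originator S_ID, OX_ID and RX_ID, and clears
 * the matching active RRQ held for @ndlp. When this port originated the
 * exchange (the RRQ S_ID equals the local fc_myDID) the lookup is done by
 * OX_ID, otherwise by RX_ID.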
4177 * 4178 * Return 4179 **/ 4180 static void 4181 lpfc_els_clear_rrq(struct lpfc_vport *vport, 4182 struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp) 4183 { 4184 struct lpfc_hba *phba = vport->phba; 4185 uint8_t *pcmd; 4186 struct RRQ *rrq; 4187 uint16_t rxid; 4188 uint16_t xri; 4189 struct lpfc_node_rrq *prrq; 4190 4191 4192 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) iocb->context2)->virt); 4193 pcmd += sizeof(uint32_t); 4194 rrq = (struct RRQ *)pcmd; 4195 rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg); 4196 rxid = bf_get(rrq_rxid, rrq); 4197 4198 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4199 "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x" 4200 " x%x x%x\n", 4201 be32_to_cpu(bf_get(rrq_did, rrq)), 4202 bf_get(rrq_oxid, rrq), 4203 rxid, 4204 iocb->iotag, iocb->iocb.ulpContext); 4205 4206 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4207 "Clear RRQ: did:x%x flg:x%x exchg:x%.08x", 4208 ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg); 4209 if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq))) 4210 xri = bf_get(rrq_oxid, rrq); 4211 else 4212 xri = rxid; 4213 prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID); 4214 if (prrq) 4215 lpfc_clr_rrq_active(phba, xri, prrq); 4216 return; 4217 } 4218 4219 /** 4220 * lpfc_els_rsp_echo_acc - Issue echo acc response 4221 * @vport: pointer to a virtual N_Port data structure. 4222 * @data: pointer to echo data to return in the accept. 4223 * @oldiocb: pointer to the original lpfc command iocb data structure. 4224 * @ndlp: pointer to a node-list data structure. 4225 * 4226 * Return code 4227 * 0 - Successfully issued acc echo response 4228 * 1 - Failed to issue acc echo response 4229 **/ 4230 static int 4231 lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data, 4232 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 4233 { 4234 struct lpfc_hba *phba = vport->phba; 4235 struct lpfc_iocbq *elsiocb; 4236 struct lpfc_sli *psli; 4237 uint8_t *pcmd; 4238 uint16_t cmdsize; 4239 int rc; 4240 4241 psli = &phba->sli; 4242 cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len; 4243 4244 /* The accumulated length can exceed the BPL_SIZE. For 4245 * now, use this as the limit 4246 */ 4247 if (cmdsize > LPFC_BPL_SIZE) 4248 cmdsize = LPFC_BPL_SIZE; 4249 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 4250 ndlp->nlp_DID, ELS_CMD_ACC); 4251 if (!elsiocb) 4252 return 1; 4253 4254 elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext; /* Xri / rx_id */ 4255 elsiocb->iocb.unsli3.rcvsli3.ox_id = oldiocb->iocb.unsli3.rcvsli3.ox_id; 4256 4257 /* Xmit ECHO ACC response tag <ulpIoTag> */ 4258 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4259 "2876 Xmit ECHO ACC response tag x%x xri x%x\n", 4260 elsiocb->iotag, elsiocb->iocb.ulpContext); 4261 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 4262 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 4263 pcmd += sizeof(uint32_t); 4264 memcpy(pcmd, data, cmdsize - sizeof(uint32_t)); 4265 4266 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4267 "Issue ACC ECHO: did:x%x flg:x%x", 4268 ndlp->nlp_DID, ndlp->nlp_flag, 0); 4269 4270 phba->fc_stat.elsXmitACC++; 4271 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 4272 4273 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 4274 if (rc == IOCB_ERROR) { 4275 lpfc_els_free_iocb(phba, elsiocb); 4276 return 1; 4277 } 4278 return 0; 4279 } 4280 4281 /** 4282 * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport 4283 * @vport: pointer to a host virtual N_Port data structure. 
 *
 * This routine issues Address Discover (ADISC) ELS commands to those
 * N_Ports of the @vport which are in node port recovery state and for
 * which ADISC has not yet been issued. Each time an ELS ADISC IOCB is
 * issued by invoking the lpfc_issue_els_adisc() routine, the per-@vport
 * discovery count (num_disc_nodes) is incremented. If num_disc_nodes
 * reaches the pre-configured threshold (cfg_discovery_threads), the
 * FC_NLP_MORE bit is set in the @vport fc_flag and issuing of the
 * remaining ADISC IOCBs is deferred to a later pass. On the other hand,
 * if all the ndlps of the @vport have been walked and no ADISC IOCB was
 * issued, the FC_NLP_MORE bit is cleared from the @vport fc_flag,
 * indicating that no more ADISCs need to be sent.
 *
 * Return code
 *   The number of N_Ports with adisc issued.
 **/
int
lpfc_els_disc_adisc(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp, *next_ndlp;
	int sentadisc = 0;

	/* go thru NPR nodes and issue any remaining ELS ADISCs */
	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
		    (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
		    (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) {
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
			spin_unlock_irq(shost->host_lock);
			ndlp->nlp_prev_state = ndlp->nlp_state;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
			lpfc_issue_els_adisc(vport, ndlp, 0);
			sentadisc++;
			vport->num_disc_nodes++;
			if (vport->num_disc_nodes >=
			    vport->cfg_discovery_threads) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag |= FC_NLP_MORE;
				spin_unlock_irq(shost->host_lock);
				break;
			}
		}
	}
	if (sentadisc == 0) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_NLP_MORE;
		spin_unlock_irq(shost->host_lock);
	}
	return sentadisc;
}

/**
 * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports
 * of the @vport which are in node port recovery state. Each time an ELS
 * PLOGI IOCB is issued by invoking the lpfc_issue_els_plogi() routine,
 * the per-@vport discovery count (num_disc_nodes) is incremented. If
 * num_disc_nodes reaches the pre-configured threshold
 * (cfg_discovery_threads), the FC_NLP_MORE bit is set in the @vport
 * fc_flag and issuing of the remaining PLOGI IOCBs is deferred to a
 * later pass. On the other hand, if all the ndlps of the @vport have
 * been walked and no PLOGI IOCB was issued, the FC_NLP_MORE bit is
 * cleared from the @vport fc_flag, indicating that no more PLOGIs need
 * to be sent.
 *
 * Return code
 *   The number of N_Ports with plogi issued.
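 *
 * For illustration only: once the outstanding discovery IOCBs drain, the
 * completion paths in the driver consult the FC_NLP_MORE marker to decide
 * whether another pass is needed, along the lines of
 *
 *	if (vport->fc_flag & FC_NLP_MORE)
 *		lpfc_els_disc_plogi(vport);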
4357 **/ 4358 int 4359 lpfc_els_disc_plogi(struct lpfc_vport *vport) 4360 { 4361 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4362 struct lpfc_nodelist *ndlp, *next_ndlp; 4363 int sentplogi = 0; 4364 4365 /* go thru NPR nodes and issue any remaining ELS PLOGIs */ 4366 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 4367 if (!NLP_CHK_NODE_ACT(ndlp)) 4368 continue; 4369 if (ndlp->nlp_state == NLP_STE_NPR_NODE && 4370 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && 4371 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 && 4372 (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) { 4373 ndlp->nlp_prev_state = ndlp->nlp_state; 4374 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 4375 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 4376 sentplogi++; 4377 vport->num_disc_nodes++; 4378 if (vport->num_disc_nodes >= 4379 vport->cfg_discovery_threads) { 4380 spin_lock_irq(shost->host_lock); 4381 vport->fc_flag |= FC_NLP_MORE; 4382 spin_unlock_irq(shost->host_lock); 4383 break; 4384 } 4385 } 4386 } 4387 if (sentplogi) { 4388 lpfc_set_disctmo(vport); 4389 } 4390 else { 4391 spin_lock_irq(shost->host_lock); 4392 vport->fc_flag &= ~FC_NLP_MORE; 4393 spin_unlock_irq(shost->host_lock); 4394 } 4395 return sentplogi; 4396 } 4397 4398 /** 4399 * lpfc_els_flush_rscn - Clean up any rscn activities with a vport 4400 * @vport: pointer to a host virtual N_Port data structure. 4401 * 4402 * This routine cleans up any Registration State Change Notification 4403 * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the 4404 * @vport together with the host_lock is used to prevent multiple thread 4405 * trying to access the RSCN array on a same @vport at the same time. 4406 **/ 4407 void 4408 lpfc_els_flush_rscn(struct lpfc_vport *vport) 4409 { 4410 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4411 struct lpfc_hba *phba = vport->phba; 4412 int i; 4413 4414 spin_lock_irq(shost->host_lock); 4415 if (vport->fc_rscn_flush) { 4416 /* Another thread is walking fc_rscn_id_list on this vport */ 4417 spin_unlock_irq(shost->host_lock); 4418 return; 4419 } 4420 /* Indicate we are walking lpfc_els_flush_rscn on this vport */ 4421 vport->fc_rscn_flush = 1; 4422 spin_unlock_irq(shost->host_lock); 4423 4424 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 4425 lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]); 4426 vport->fc_rscn_id_list[i] = NULL; 4427 } 4428 spin_lock_irq(shost->host_lock); 4429 vport->fc_rscn_id_cnt = 0; 4430 vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY); 4431 spin_unlock_irq(shost->host_lock); 4432 lpfc_can_disctmo(vport); 4433 /* Indicate we are done walking this fc_rscn_id_list */ 4434 vport->fc_rscn_flush = 0; 4435 } 4436 4437 /** 4438 * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did 4439 * @vport: pointer to a host virtual N_Port data structure. 4440 * @did: remote destination port identifier. 4441 * 4442 * This routine checks whether there is any pending Registration State 4443 * Configuration Notification (RSCN) to a @did on @vport. 
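 *
 * For illustration, with a pending area-format entry of 0x020A00 a @did
 * of 0x020A1F matches (domain 0x02 and area 0x0A agree) while 0x020B1F
 * does not; a domain-format entry of 0x020000 matches both, and a
 * fabric-format entry matches any @did.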
4444 * 4445 * Return code 4446 * None zero - The @did matched with a pending rscn 4447 * 0 - not able to match @did with a pending rscn 4448 **/ 4449 int 4450 lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did) 4451 { 4452 D_ID ns_did; 4453 D_ID rscn_did; 4454 uint32_t *lp; 4455 uint32_t payload_len, i; 4456 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4457 4458 ns_did.un.word = did; 4459 4460 /* Never match fabric nodes for RSCNs */ 4461 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 4462 return 0; 4463 4464 /* If we are doing a FULL RSCN rediscovery, match everything */ 4465 if (vport->fc_flag & FC_RSCN_DISCOVERY) 4466 return did; 4467 4468 spin_lock_irq(shost->host_lock); 4469 if (vport->fc_rscn_flush) { 4470 /* Another thread is walking fc_rscn_id_list on this vport */ 4471 spin_unlock_irq(shost->host_lock); 4472 return 0; 4473 } 4474 /* Indicate we are walking fc_rscn_id_list on this vport */ 4475 vport->fc_rscn_flush = 1; 4476 spin_unlock_irq(shost->host_lock); 4477 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 4478 lp = vport->fc_rscn_id_list[i]->virt; 4479 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); 4480 payload_len -= sizeof(uint32_t); /* take off word 0 */ 4481 while (payload_len) { 4482 rscn_did.un.word = be32_to_cpu(*lp++); 4483 payload_len -= sizeof(uint32_t); 4484 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) { 4485 case RSCN_ADDRESS_FORMAT_PORT: 4486 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 4487 && (ns_did.un.b.area == rscn_did.un.b.area) 4488 && (ns_did.un.b.id == rscn_did.un.b.id)) 4489 goto return_did_out; 4490 break; 4491 case RSCN_ADDRESS_FORMAT_AREA: 4492 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 4493 && (ns_did.un.b.area == rscn_did.un.b.area)) 4494 goto return_did_out; 4495 break; 4496 case RSCN_ADDRESS_FORMAT_DOMAIN: 4497 if (ns_did.un.b.domain == rscn_did.un.b.domain) 4498 goto return_did_out; 4499 break; 4500 case RSCN_ADDRESS_FORMAT_FABRIC: 4501 goto return_did_out; 4502 } 4503 } 4504 } 4505 /* Indicate we are done with walking fc_rscn_id_list on this vport */ 4506 vport->fc_rscn_flush = 0; 4507 return 0; 4508 return_did_out: 4509 /* Indicate we are done with walking fc_rscn_id_list on this vport */ 4510 vport->fc_rscn_flush = 0; 4511 return did; 4512 } 4513 4514 /** 4515 * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn 4516 * @vport: pointer to a host virtual N_Port data structure. 4517 * 4518 * This routine sends recovery (NLP_EVT_DEVICE_RECOVERY) event to the 4519 * state machine for a @vport's nodes that are with pending RSCN (Registration 4520 * State Change Notification). 4521 * 4522 * Return code 4523 * 0 - Successful (currently alway return 0) 4524 **/ 4525 static int 4526 lpfc_rscn_recovery_check(struct lpfc_vport *vport) 4527 { 4528 struct lpfc_nodelist *ndlp = NULL; 4529 4530 /* Move all affected nodes by pending RSCNs to NPR state. */ 4531 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 4532 if (!NLP_CHK_NODE_ACT(ndlp) || 4533 (ndlp->nlp_state == NLP_STE_UNUSED_NODE) || 4534 !lpfc_rscn_payload_check(vport, ndlp->nlp_DID)) 4535 continue; 4536 lpfc_disc_state_machine(vport, ndlp, NULL, 4537 NLP_EVT_DEVICE_RECOVERY); 4538 lpfc_cancel_retry_delay_tmo(vport, ndlp); 4539 } 4540 return 0; 4541 } 4542 4543 /** 4544 * lpfc_send_rscn_event - Send an RSCN event to management application 4545 * @vport: pointer to a host virtual N_Port data structure. 4546 * @cmdiocb: pointer to lpfc command iocb data structure. 
 *
 * lpfc_send_rscn_event sends an RSCN netlink event to management
 * applications.
 */
static void
lpfc_send_rscn_event(struct lpfc_vport *vport,
		struct lpfc_iocbq *cmdiocb)
{
	struct lpfc_dmabuf *pcmd;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	uint32_t *payload_ptr;
	uint32_t payload_len;
	struct lpfc_rscn_event_header *rscn_event_data;

	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	payload_ptr = (uint32_t *) pcmd->virt;
	payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK);

	rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) +
		payload_len, GFP_KERNEL);
	if (!rscn_event_data) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
			"0147 Failed to allocate memory for RSCN event\n");
		return;
	}
	rscn_event_data->event_type = FC_REG_RSCN_EVENT;
	rscn_event_data->payload_length = payload_len;
	memcpy(rscn_event_data->rscn_payload, payload_ptr,
		payload_len);

	fc_host_post_vendor_event(shost,
		fc_get_event_number(),
		sizeof(struct lpfc_els_event_header) + payload_len,
		(char *)rscn_event_data,
		LPFC_NL_VENDOR_ID);

	kfree(rscn_event_data);
}

/**
 * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb
 * @vport: pointer to a host virtual N_Port data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine processes an unsolicited RSCN (Registration State Change
 * Notification) IOCB. First, the payload of the unsolicited RSCN is walked
 * and the fc_host_post_event() routine is invoked to post each event to
 * the FC transport layer. If the discovery state machine is about to begin
 * discovery, the RSCN is simply accepted and the discovery process will
 * satisfy it. If this RSCN only contains N_Port IDs for other vports on
 * this HBA, it is accepted and otherwise ignored. If the state machine is
 * in the recovery state, the fc_rscn_id_list of this @vport is walked and
 * the lpfc_rscn_recovery_check() routine is invoked to send a recovery
 * event for all nodes that match the RSCN payload. Otherwise, the
 * lpfc_els_handle_rscn() routine is invoked to handle the RSCN event.
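 *
 * While a previous RSCN is still being worked (FC_RSCN_MODE or
 * FC_NDISC_ACTIVE set), a newly received payload is either merged into
 * the last fc_rscn_id_list entry when the combined length still fits in
 * LPFC_BPL_SIZE, or parked as a new entry, up to FC_MAX_HOLD_RSCN
 * entries; beyond that a full rediscovery is forced by setting
 * FC_RSCN_DISCOVERY.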
4603 * 4604 * Return code 4605 * 0 - Just sent the acc response 4606 * 1 - Sent the acc response and waited for name server completion 4607 **/ 4608 static int 4609 lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 4610 struct lpfc_nodelist *ndlp) 4611 { 4612 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4613 struct lpfc_hba *phba = vport->phba; 4614 struct lpfc_dmabuf *pcmd; 4615 uint32_t *lp, *datap; 4616 IOCB_t *icmd; 4617 uint32_t payload_len, length, nportid, *cmd; 4618 int rscn_cnt; 4619 int rscn_id = 0, hba_id = 0; 4620 int i; 4621 4622 icmd = &cmdiocb->iocb; 4623 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 4624 lp = (uint32_t *) pcmd->virt; 4625 4626 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); 4627 payload_len -= sizeof(uint32_t); /* take off word 0 */ 4628 /* RSCN received */ 4629 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 4630 "0214 RSCN received Data: x%x x%x x%x x%x\n", 4631 vport->fc_flag, payload_len, *lp, 4632 vport->fc_rscn_id_cnt); 4633 4634 /* Send an RSCN event to the management application */ 4635 lpfc_send_rscn_event(vport, cmdiocb); 4636 4637 for (i = 0; i < payload_len/sizeof(uint32_t); i++) 4638 fc_host_post_event(shost, fc_get_event_number(), 4639 FCH_EVT_RSCN, lp[i]); 4640 4641 /* If we are about to begin discovery, just ACC the RSCN. 4642 * Discovery processing will satisfy it. 4643 */ 4644 if (vport->port_state <= LPFC_NS_QRY) { 4645 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 4646 "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x", 4647 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 4648 4649 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 4650 return 0; 4651 } 4652 4653 /* If this RSCN just contains NPortIDs for other vports on this HBA, 4654 * just ACC and ignore it. 4655 */ 4656 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 4657 !(vport->cfg_peer_port_login)) { 4658 i = payload_len; 4659 datap = lp; 4660 while (i > 0) { 4661 nportid = *datap++; 4662 nportid = ((be32_to_cpu(nportid)) & Mask_DID); 4663 i -= sizeof(uint32_t); 4664 rscn_id++; 4665 if (lpfc_find_vport_by_did(phba, nportid)) 4666 hba_id++; 4667 } 4668 if (rscn_id == hba_id) { 4669 /* ALL NPortIDs in RSCN are on HBA */ 4670 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 4671 "0219 Ignore RSCN " 4672 "Data: x%x x%x x%x x%x\n", 4673 vport->fc_flag, payload_len, 4674 *lp, vport->fc_rscn_id_cnt); 4675 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 4676 "RCV RSCN vport: did:x%x/ste:x%x flg:x%x", 4677 ndlp->nlp_DID, vport->port_state, 4678 ndlp->nlp_flag); 4679 4680 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, 4681 ndlp, NULL); 4682 return 0; 4683 } 4684 } 4685 4686 spin_lock_irq(shost->host_lock); 4687 if (vport->fc_rscn_flush) { 4688 /* Another thread is walking fc_rscn_id_list on this vport */ 4689 vport->fc_flag |= FC_RSCN_DISCOVERY; 4690 spin_unlock_irq(shost->host_lock); 4691 /* Send back ACC */ 4692 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 4693 return 0; 4694 } 4695 /* Indicate we are walking fc_rscn_id_list on this vport */ 4696 vport->fc_rscn_flush = 1; 4697 spin_unlock_irq(shost->host_lock); 4698 /* Get the array count after successfully have the token */ 4699 rscn_cnt = vport->fc_rscn_id_cnt; 4700 /* If we are already processing an RSCN, save the received 4701 * RSCN payload buffer, cmdiocb->context2 to process later. 
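	 * When the payload is parked as a new list entry below, context2 is
	 * zeroed so the caller will not free the buffer; when it is merged
	 * into the previous entry instead, context2 is left set and the
	 * caller frees it as usual.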
4702 */ 4703 if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) { 4704 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 4705 "RCV RSCN defer: did:x%x/ste:x%x flg:x%x", 4706 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 4707 4708 spin_lock_irq(shost->host_lock); 4709 vport->fc_flag |= FC_RSCN_DEFERRED; 4710 if ((rscn_cnt < FC_MAX_HOLD_RSCN) && 4711 !(vport->fc_flag & FC_RSCN_DISCOVERY)) { 4712 vport->fc_flag |= FC_RSCN_MODE; 4713 spin_unlock_irq(shost->host_lock); 4714 if (rscn_cnt) { 4715 cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt; 4716 length = be32_to_cpu(*cmd & ~ELS_CMD_MASK); 4717 } 4718 if ((rscn_cnt) && 4719 (payload_len + length <= LPFC_BPL_SIZE)) { 4720 *cmd &= ELS_CMD_MASK; 4721 *cmd |= cpu_to_be32(payload_len + length); 4722 memcpy(((uint8_t *)cmd) + length, lp, 4723 payload_len); 4724 } else { 4725 vport->fc_rscn_id_list[rscn_cnt] = pcmd; 4726 vport->fc_rscn_id_cnt++; 4727 /* If we zero, cmdiocb->context2, the calling 4728 * routine will not try to free it. 4729 */ 4730 cmdiocb->context2 = NULL; 4731 } 4732 /* Deferred RSCN */ 4733 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 4734 "0235 Deferred RSCN " 4735 "Data: x%x x%x x%x\n", 4736 vport->fc_rscn_id_cnt, vport->fc_flag, 4737 vport->port_state); 4738 } else { 4739 vport->fc_flag |= FC_RSCN_DISCOVERY; 4740 spin_unlock_irq(shost->host_lock); 4741 /* ReDiscovery RSCN */ 4742 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 4743 "0234 ReDiscovery RSCN " 4744 "Data: x%x x%x x%x\n", 4745 vport->fc_rscn_id_cnt, vport->fc_flag, 4746 vport->port_state); 4747 } 4748 /* Indicate we are done walking fc_rscn_id_list on this vport */ 4749 vport->fc_rscn_flush = 0; 4750 /* Send back ACC */ 4751 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 4752 /* send RECOVERY event for ALL nodes that match RSCN payload */ 4753 lpfc_rscn_recovery_check(vport); 4754 spin_lock_irq(shost->host_lock); 4755 vport->fc_flag &= ~FC_RSCN_DEFERRED; 4756 spin_unlock_irq(shost->host_lock); 4757 return 0; 4758 } 4759 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 4760 "RCV RSCN: did:x%x/ste:x%x flg:x%x", 4761 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 4762 4763 spin_lock_irq(shost->host_lock); 4764 vport->fc_flag |= FC_RSCN_MODE; 4765 spin_unlock_irq(shost->host_lock); 4766 vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd; 4767 /* Indicate we are done walking fc_rscn_id_list on this vport */ 4768 vport->fc_rscn_flush = 0; 4769 /* 4770 * If we zero, cmdiocb->context2, the calling routine will 4771 * not try to free it. 4772 */ 4773 cmdiocb->context2 = NULL; 4774 lpfc_set_disctmo(vport); 4775 /* Send back ACC */ 4776 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 4777 /* send RECOVERY event for ALL nodes that match RSCN payload */ 4778 lpfc_rscn_recovery_check(vport); 4779 return lpfc_els_handle_rscn(vport); 4780 } 4781 4782 /** 4783 * lpfc_els_handle_rscn - Handle rscn for a vport 4784 * @vport: pointer to a host virtual N_Port data structure. 4785 * 4786 * This routine handles the Registration State Configuration Notification 4787 * (RSCN) for a @vport. If login to NameServer does not exist, a new ndlp shall 4788 * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise, 4789 * if the ndlp to NameServer exists, a Common Transport (CT) command to the 4790 * NameServer shall be issued. If CT command to the NameServer fails to be 4791 * issued, the lpfc_els_flush_rscn() routine shall be invoked to clean up any 4792 * RSCN activities with the @vport. 
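 *
 * For illustration only, the NameServer decision reduces to roughly
 *
 *	ndlp = lpfc_findnode_did(vport, NameServer_DID);
 *	if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
 *	    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
 *		rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0);
 *
 * with a PLOGI to the NameServer issued first whenever no usable login
 * exists yet.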
4793 * 4794 * Return code 4795 * 0 - Cleaned up rscn on the @vport 4796 * 1 - Wait for plogi to name server before proceed 4797 **/ 4798 int 4799 lpfc_els_handle_rscn(struct lpfc_vport *vport) 4800 { 4801 struct lpfc_nodelist *ndlp; 4802 struct lpfc_hba *phba = vport->phba; 4803 4804 /* Ignore RSCN if the port is being torn down. */ 4805 if (vport->load_flag & FC_UNLOADING) { 4806 lpfc_els_flush_rscn(vport); 4807 return 0; 4808 } 4809 4810 /* Start timer for RSCN processing */ 4811 lpfc_set_disctmo(vport); 4812 4813 /* RSCN processed */ 4814 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 4815 "0215 RSCN processed Data: x%x x%x x%x x%x\n", 4816 vport->fc_flag, 0, vport->fc_rscn_id_cnt, 4817 vport->port_state); 4818 4819 /* To process RSCN, first compare RSCN data with NameServer */ 4820 vport->fc_ns_retry = 0; 4821 vport->num_disc_nodes = 0; 4822 4823 ndlp = lpfc_findnode_did(vport, NameServer_DID); 4824 if (ndlp && NLP_CHK_NODE_ACT(ndlp) 4825 && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { 4826 /* Good ndlp, issue CT Request to NameServer */ 4827 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0) == 0) 4828 /* Wait for NameServer query cmpl before we can 4829 continue */ 4830 return 1; 4831 } else { 4832 /* If login to NameServer does not exist, issue one */ 4833 /* Good status, issue PLOGI to NameServer */ 4834 ndlp = lpfc_findnode_did(vport, NameServer_DID); 4835 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) 4836 /* Wait for NameServer login cmpl before we can 4837 continue */ 4838 return 1; 4839 4840 if (ndlp) { 4841 ndlp = lpfc_enable_node(vport, ndlp, 4842 NLP_STE_PLOGI_ISSUE); 4843 if (!ndlp) { 4844 lpfc_els_flush_rscn(vport); 4845 return 0; 4846 } 4847 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE; 4848 } else { 4849 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 4850 if (!ndlp) { 4851 lpfc_els_flush_rscn(vport); 4852 return 0; 4853 } 4854 lpfc_nlp_init(vport, ndlp, NameServer_DID); 4855 ndlp->nlp_prev_state = ndlp->nlp_state; 4856 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 4857 } 4858 ndlp->nlp_type |= NLP_FABRIC; 4859 lpfc_issue_els_plogi(vport, NameServer_DID, 0); 4860 /* Wait for NameServer login cmpl before we can 4861 * continue 4862 */ 4863 return 1; 4864 } 4865 4866 lpfc_els_flush_rscn(vport); 4867 return 0; 4868 } 4869 4870 /** 4871 * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb 4872 * @vport: pointer to a host virtual N_Port data structure. 4873 * @cmdiocb: pointer to lpfc command iocb data structure. 4874 * @ndlp: pointer to a node-list data structure. 4875 * 4876 * This routine processes Fabric Login (FLOGI) IOCB received as an ELS 4877 * unsolicited event. An unsolicited FLOGI can be received in a point-to- 4878 * point topology. As an unsolicited FLOGI should not be received in a loop 4879 * mode, any unsolicited FLOGI received in loop mode shall be ignored. The 4880 * lpfc_check_sparm() routine is invoked to check the parameters in the 4881 * unsolicited FLOGI. If parameters validation failed, the routine 4882 * lpfc_els_rsp_reject() shall be called with reject reason code set to 4883 * LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the 4884 * FLOGI shall be compared with the Port WWN of the @vport to determine who 4885 * will initiate PLOGI. The higher lexicographical value party shall has 4886 * higher priority (as the winning port) and will initiate PLOGI and 4887 * communicate Port_IDs (Addresses) for both nodes in PLOGI. 
The result 4888 * of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI 4889 * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI. 4890 * 4891 * Return code 4892 * 0 - Successfully processed the unsolicited flogi 4893 * 1 - Failed to process the unsolicited flogi 4894 **/ 4895 static int 4896 lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 4897 struct lpfc_nodelist *ndlp) 4898 { 4899 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4900 struct lpfc_hba *phba = vport->phba; 4901 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 4902 uint32_t *lp = (uint32_t *) pcmd->virt; 4903 IOCB_t *icmd = &cmdiocb->iocb; 4904 struct serv_parm *sp; 4905 LPFC_MBOXQ_t *mbox; 4906 struct ls_rjt stat; 4907 uint32_t cmd, did; 4908 int rc; 4909 4910 cmd = *lp++; 4911 sp = (struct serv_parm *) lp; 4912 4913 /* FLOGI received */ 4914 4915 lpfc_set_disctmo(vport); 4916 4917 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 4918 /* We should never receive a FLOGI in loop mode, ignore it */ 4919 did = icmd->un.elsreq64.remoteID; 4920 4921 /* An FLOGI ELS command <elsCmd> was received from DID <did> in 4922 Loop Mode */ 4923 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 4924 "0113 An FLOGI ELS command x%x was " 4925 "received from DID x%x in Loop Mode\n", 4926 cmd, did); 4927 return 1; 4928 } 4929 4930 did = Fabric_DID; 4931 4932 if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1))) { 4933 /* For a FLOGI we accept, then if our portname is greater 4934 * then the remote portname we initiate Nport login. 4935 */ 4936 4937 rc = memcmp(&vport->fc_portname, &sp->portName, 4938 sizeof(struct lpfc_name)); 4939 4940 if (!rc) { 4941 if (phba->sli_rev < LPFC_SLI_REV4) { 4942 mbox = mempool_alloc(phba->mbox_mem_pool, 4943 GFP_KERNEL); 4944 if (!mbox) 4945 return 1; 4946 lpfc_linkdown(phba); 4947 lpfc_init_link(phba, mbox, 4948 phba->cfg_topology, 4949 phba->cfg_link_speed); 4950 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; 4951 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4952 mbox->vport = vport; 4953 rc = lpfc_sli_issue_mbox(phba, mbox, 4954 MBX_NOWAIT); 4955 lpfc_set_loopback_flag(phba); 4956 if (rc == MBX_NOT_FINISHED) 4957 mempool_free(mbox, phba->mbox_mem_pool); 4958 return 1; 4959 } else { 4960 /* abort the flogi coming back to ourselves 4961 * due to external loopback on the port. 4962 */ 4963 lpfc_els_abort_flogi(phba); 4964 return 0; 4965 } 4966 } else if (rc > 0) { /* greater than */ 4967 spin_lock_irq(shost->host_lock); 4968 vport->fc_flag |= FC_PT2PT_PLOGI; 4969 spin_unlock_irq(shost->host_lock); 4970 } 4971 spin_lock_irq(shost->host_lock); 4972 vport->fc_flag |= FC_PT2PT; 4973 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 4974 spin_unlock_irq(shost->host_lock); 4975 } else { 4976 /* Reject this request because invalid parameters */ 4977 stat.un.b.lsRjtRsvd0 = 0; 4978 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 4979 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS; 4980 stat.un.b.vendorUnique = 0; 4981 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 4982 NULL); 4983 return 1; 4984 } 4985 4986 /* Send back ACC */ 4987 lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL); 4988 4989 return 0; 4990 } 4991 4992 /** 4993 * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb 4994 * @vport: pointer to a host virtual N_Port data structure. 4995 * @cmdiocb: pointer to lpfc command iocb data structure. 4996 * @ndlp: pointer to a node-list data structure. 
4997 * 4998 * This routine processes Request Node Identification Data (RNID) IOCB 4999 * received as an ELS unsolicited event. Only when the RNID specified format 5000 * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data) 5001 * present, this routine will invoke the lpfc_els_rsp_rnid_acc() routine to 5002 * Accept (ACC) the RNID ELS command. All the other RNID formats are 5003 * rejected by invoking the lpfc_els_rsp_reject() routine. 5004 * 5005 * Return code 5006 * 0 - Successfully processed rnid iocb (currently always return 0) 5007 **/ 5008 static int 5009 lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5010 struct lpfc_nodelist *ndlp) 5011 { 5012 struct lpfc_dmabuf *pcmd; 5013 uint32_t *lp; 5014 IOCB_t *icmd; 5015 RNID *rn; 5016 struct ls_rjt stat; 5017 uint32_t cmd, did; 5018 5019 icmd = &cmdiocb->iocb; 5020 did = icmd->un.elsreq64.remoteID; 5021 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 5022 lp = (uint32_t *) pcmd->virt; 5023 5024 cmd = *lp++; 5025 rn = (RNID *) lp; 5026 5027 /* RNID received */ 5028 5029 switch (rn->Format) { 5030 case 0: 5031 case RNID_TOPOLOGY_DISC: 5032 /* Send back ACC */ 5033 lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp); 5034 break; 5035 default: 5036 /* Reject this request because format not supported */ 5037 stat.un.b.lsRjtRsvd0 = 0; 5038 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 5039 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 5040 stat.un.b.vendorUnique = 0; 5041 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 5042 NULL); 5043 } 5044 return 0; 5045 } 5046 5047 /** 5048 * lpfc_els_rcv_echo - Process an unsolicited echo iocb 5049 * @vport: pointer to a host virtual N_Port data structure. 5050 * @cmdiocb: pointer to lpfc command iocb data structure. 5051 * @ndlp: pointer to a node-list data structure. 5052 * 5053 * Return code 5054 * 0 - Successfully processed echo iocb (currently always return 0) 5055 **/ 5056 static int 5057 lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5058 struct lpfc_nodelist *ndlp) 5059 { 5060 uint8_t *pcmd; 5061 5062 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt); 5063 5064 /* skip over first word of echo command to find echo data */ 5065 pcmd += sizeof(uint32_t); 5066 5067 lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp); 5068 return 0; 5069 } 5070 5071 /** 5072 * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb 5073 * @vport: pointer to a host virtual N_Port data structure. 5074 * @cmdiocb: pointer to lpfc command iocb data structure. 5075 * @ndlp: pointer to a node-list data structure. 5076 * 5077 * This routine processes a Link Incident Report Registration(LIRR) IOCB 5078 * received as an ELS unsolicited event. Currently, this function just invokes 5079 * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally. 
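 *
 * For reference, the reject built here uses reason code LSRJT_UNABLE_TPC
 * with explanation LSEXP_CANT_GIVE_DATA; the same reason/explanation pair
 * is used by the RNID reject above and by the RLS, RTV, RPS and RPL
 * handlers below, so one LS_RJT shape covers all of these "cannot service
 * this request" cases.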
5080  *
5081  * Return code
5082  *   0 - Successfully processed lirr iocb (currently always returns 0)
5083  **/
5084 static int
5085 lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5086                   struct lpfc_nodelist *ndlp)
5087 {
5088     struct ls_rjt stat;
5089 
5090     /* For now, unconditionally reject this command */
5091     stat.un.b.lsRjtRsvd0 = 0;
5092     stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
5093     stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
5094     stat.un.b.vendorUnique = 0;
5095     lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
5096     return 0;
5097 }
5098 
5099 /**
5100  * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb
5101  * @vport: pointer to a host virtual N_Port data structure.
5102  * @cmdiocb: pointer to lpfc command iocb data structure.
5103  * @ndlp: pointer to a node-list data structure.
5104  *
5105  * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB
5106  * received as an ELS unsolicited event. A request to RRQ shall only
5107  * be accepted if the Originator Nx_Port N_Port_ID or the Responder
5108  * Nx_Port N_Port_ID of the target Exchange is the same as the
5109  * N_Port_ID of the Nx_Port that makes the request. If the RRQ is
5110  * not accepted, an LS_RJT with reason code "Unable to perform
5111  * command request" and reason code explanation "Invalid Originator
5112  * S_ID" shall be returned. For now, we just unconditionally accept
5113  * RRQ from the target.
5114  **/
5115 static void
5116 lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5117                  struct lpfc_nodelist *ndlp)
5118 {
5119     lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
5120     if (vport->phba->sli_rev == LPFC_SLI_REV4)
5121         lpfc_els_clear_rrq(vport, cmdiocb, ndlp);
5122 }
5123 
5124 /**
5125  * lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
5126  * @phba: pointer to lpfc hba data structure.
5127  * @pmb: pointer to the driver internal queue element for mailbox command.
5128  *
5129  * This routine is the completion callback function for the MBX_READ_LNK_STAT
5130  * mailbox command. This callback function actually sends the Accept
5131  * (ACC) response to a Read Link Status (RLS) unsolicited IOCB event. It
5132  * collects the link statistics from the completion of the MBX_READ_LNK_STAT
5133  * mailbox command, constructs the RLS response with the link statistics
5134  * collected, and then invokes the lpfc_sli_issue_iocb() routine to send the
5135  * ACC response to the RLS.
5136  *
5137  * Note that, in the lpfc_prep_els_iocb() routine, the reference count of ndlp
5138  * will be incremented by 1 for holding the ndlp and the reference to ndlp
5139  * will be stored into the context1 field of the IOCB for the completion
5140  * callback function to the RLS Accept Response ELS IOCB command.
5141 * 5142 **/ 5143 static void 5144 lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 5145 { 5146 MAILBOX_t *mb; 5147 IOCB_t *icmd; 5148 struct RLS_RSP *rls_rsp; 5149 uint8_t *pcmd; 5150 struct lpfc_iocbq *elsiocb; 5151 struct lpfc_nodelist *ndlp; 5152 uint16_t oxid; 5153 uint16_t rxid; 5154 uint32_t cmdsize; 5155 5156 mb = &pmb->u.mb; 5157 5158 ndlp = (struct lpfc_nodelist *) pmb->context2; 5159 rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff); 5160 oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff); 5161 pmb->context1 = NULL; 5162 pmb->context2 = NULL; 5163 5164 if (mb->mbxStatus) { 5165 mempool_free(pmb, phba->mbox_mem_pool); 5166 return; 5167 } 5168 5169 cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t); 5170 mempool_free(pmb, phba->mbox_mem_pool); 5171 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 5172 lpfc_max_els_tries, ndlp, 5173 ndlp->nlp_DID, ELS_CMD_ACC); 5174 5175 /* Decrement the ndlp reference count from previous mbox command */ 5176 lpfc_nlp_put(ndlp); 5177 5178 if (!elsiocb) 5179 return; 5180 5181 icmd = &elsiocb->iocb; 5182 icmd->ulpContext = rxid; 5183 icmd->unsli3.rcvsli3.ox_id = oxid; 5184 5185 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5186 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5187 pcmd += sizeof(uint32_t); /* Skip past command */ 5188 rls_rsp = (struct RLS_RSP *)pcmd; 5189 5190 rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt); 5191 rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt); 5192 rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt); 5193 rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt); 5194 rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord); 5195 rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt); 5196 5197 /* Xmit ELS RLS ACC response tag <ulpIoTag> */ 5198 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 5199 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, " 5200 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 5201 elsiocb->iotag, elsiocb->iocb.ulpContext, 5202 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5203 ndlp->nlp_rpi); 5204 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5205 phba->fc_stat.elsXmitACC++; 5206 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR) 5207 lpfc_els_free_iocb(phba, elsiocb); 5208 } 5209 5210 /** 5211 * lpfc_els_rsp_rps_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd 5212 * @phba: pointer to lpfc hba data structure. 5213 * @pmb: pointer to the driver internal queue element for mailbox command. 5214 * 5215 * This routine is the completion callback function for the MBX_READ_LNK_STAT 5216 * mailbox command. This callback function is to actually send the Accept 5217 * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It 5218 * collects the link statistics from the completion of the MBX_READ_LNK_STAT 5219 * mailbox command, constructs the RPS response with the link statistics 5220 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC 5221 * response to the RPS. 5222 * 5223 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 5224 * will be incremented by 1 for holding the ndlp and the reference to ndlp 5225 * will be stored into the context1 field of the IOCB for the completion 5226 * callback function to the RPS Accept Response ELS IOCB command. 
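 *
 * For reference, the RPS port status word built below is a small bit mask:
 * 0x10 when the port is not in loop topology, 0x8 when it is, with 0x4
 * OR'ed in when the port is attached to a fabric (FC_FABRIC set on the
 * physical port).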
5227  *
5228  **/
5229 static void
5230 lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5231 {
5232     MAILBOX_t *mb;
5233     IOCB_t *icmd;
5234     RPS_RSP *rps_rsp;
5235     uint8_t *pcmd;
5236     struct lpfc_iocbq *elsiocb;
5237     struct lpfc_nodelist *ndlp;
5238     uint16_t status;
5239     uint16_t oxid;
5240     uint16_t rxid;
5241     uint32_t cmdsize;
5242 
5243     mb = &pmb->u.mb;
5244 
5245     ndlp = (struct lpfc_nodelist *) pmb->context2;
5246     rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff);
5247     oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff);
5248     pmb->context1 = NULL;
5249     pmb->context2 = NULL;
5250 
5251     if (mb->mbxStatus) {
5252         mempool_free(pmb, phba->mbox_mem_pool);
5253         return;
5254     }
5255 
5256     cmdsize = sizeof(RPS_RSP) + sizeof(uint32_t);
5257     mempool_free(pmb, phba->mbox_mem_pool);
5258     elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
5259                                  lpfc_max_els_tries, ndlp,
5260                                  ndlp->nlp_DID, ELS_CMD_ACC);
5261 
5262     /* Decrement the ndlp reference count from previous mbox command */
5263     lpfc_nlp_put(ndlp);
5264 
5265     if (!elsiocb)
5266         return;
5267 
5268     icmd = &elsiocb->iocb;
5269     icmd->ulpContext = rxid;
5270     icmd->unsli3.rcvsli3.ox_id = oxid;
5271 
5272     pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5273     *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
5274     pcmd += sizeof(uint32_t); /* Skip past command */
5275     rps_rsp = (RPS_RSP *)pcmd;
5276 
5277     if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
5278         status = 0x10;
5279     else
5280         status = 0x8;
5281     if (phba->pport->fc_flag & FC_FABRIC)
5282         status |= 0x4;
5283 
5284     rps_rsp->rsvd1 = 0;
5285     rps_rsp->portStatus = cpu_to_be16(status);
5286     rps_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
5287     rps_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
5288     rps_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
5289     rps_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
5290     rps_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
5291     rps_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
5292     /* Xmit ELS RPS ACC response tag <ulpIoTag> */
5293     lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
5294                      "0118 Xmit ELS RPS ACC response tag x%x xri x%x, "
5295                      "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
5296                      elsiocb->iotag, elsiocb->iocb.ulpContext,
5297                      ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5298                      ndlp->nlp_rpi);
5299     elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5300     phba->fc_stat.elsXmitACC++;
5301     if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
5302         lpfc_els_free_iocb(phba, elsiocb);
5303     return;
5304 }
5305 
5306 /**
5307  * lpfc_els_rcv_rls - Process an unsolicited rls iocb
5308  * @vport: pointer to a host virtual N_Port data structure.
5309  * @cmdiocb: pointer to lpfc command iocb data structure.
5310  * @ndlp: pointer to a node-list data structure.
5311  *
5312  * This routine processes a Read Link Status (RLS) IOCB received as an
5313  * ELS unsolicited event. It first checks the remote port state. If the
5314  * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
5315  * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
5316  * response. Otherwise, it issues the MBX_READ_LNK_STAT mailbox command
5317  * for reading the HBA link statistics. The callback function,
5318  * lpfc_els_rsp_rls_acc(), set on the MBX_READ_LNK_STAT mailbox command,
5319  * then actually sends out the RLS Accept (ACC) response.
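 *
 * Because the mailbox job carries no ELS exchange context of its own, the
 * handler below packs both exchange IDs into the single context1 pointer,
 * roughly:
 *
 *	mbox->context1 = (void *)(unsigned long)((ox_id << 16) | rx_id);
 *
 * and lpfc_els_rsp_rls_acc() recovers them again with the matching shift
 * and 0xffff masks before addressing the ACC.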
5320  *
5321  * Return codes
5322  *   0 - Successfully processed rls iocb (currently always returns 0)
5323  **/
5324 static int
5325 lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5326                  struct lpfc_nodelist *ndlp)
5327 {
5328     struct lpfc_hba *phba = vport->phba;
5329     LPFC_MBOXQ_t *mbox;
5330     struct lpfc_dmabuf *pcmd;
5331     struct ls_rjt stat;
5332 
5333     if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
5334         (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
5335         /* reject the unsolicited RLS request and be done with it */
5336         goto reject_out;
5337 
5338     pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
5339 
5340     mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
5341     if (mbox) {
5342         lpfc_read_lnk_stat(phba, mbox);
5343         mbox->context1 = (void *)((unsigned long)
5344             ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
5345             cmdiocb->iocb.ulpContext)); /* rx_id */
5346         mbox->context2 = lpfc_nlp_get(ndlp);
5347         mbox->vport = vport;
5348         mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
5349         if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
5350             != MBX_NOT_FINISHED)
5351             /* Mbox completion will send ELS Response */
5352             return 0;
5353         /* Decrement reference count used for the failed mbox
5354          * command.
5355          */
5356         lpfc_nlp_put(ndlp);
5357         mempool_free(mbox, phba->mbox_mem_pool);
5358     }
5359 reject_out:
5360     /* issue rejection response */
5361     stat.un.b.lsRjtRsvd0 = 0;
5362     stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
5363     stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
5364     stat.un.b.vendorUnique = 0;
5365     lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
5366     return 0;
5367 }
5368 
5369 /**
5370  * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb
5371  * @vport: pointer to a host virtual N_Port data structure.
5372  * @cmdiocb: pointer to lpfc command iocb data structure.
5373  * @ndlp: pointer to a node-list data structure.
5374  *
5375  * This routine processes a Read Timeout Value (RTV) IOCB received as an
5376  * ELS unsolicited event. It first checks the remote port state. If the
5377  * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
5378  * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
5379  * response. Otherwise, it sends the Accept (ACC) response to the Read Timeout
5380  * Value (RTV) unsolicited IOCB event.
5381  *
5382  * Note that, in the lpfc_prep_els_iocb() routine, the reference count of ndlp
5383  * will be incremented by 1 for holding the ndlp and the reference to ndlp
5384  * will be stored into the context1 field of the IOCB for the completion
5385  * callback function to the RTV Accept Response ELS IOCB command.
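 *
 * For reference, the RTV ACC payload built below reports R_A_TOV converted
 * to milliseconds (phba->fc_ratov * 1000), E_D_TOV as kept by the driver
 * (phba->fc_edtov), and a qualifier word whose E_D_TOV resolution bit is
 * taken from phba->fc_edtovResol while the R_T_TOV bit is left clear.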
5386  *
5387  * Return codes
5388  *   0 - Successfully processed rtv iocb (currently always returns 0)
5389  **/
5390 static int
5391 lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5392                  struct lpfc_nodelist *ndlp)
5393 {
5394     struct lpfc_hba *phba = vport->phba;
5395     struct ls_rjt stat;
5396     struct RTV_RSP *rtv_rsp;
5397     uint8_t *pcmd;
5398     struct lpfc_iocbq *elsiocb;
5399     uint32_t cmdsize;
5400 
5401 
5402     if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
5403         (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
5404         /* reject the unsolicited RTV request and be done with it */
5405         goto reject_out;
5406 
5407     cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t);
5408     elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
5409                                  lpfc_max_els_tries, ndlp,
5410                                  ndlp->nlp_DID, ELS_CMD_ACC);
5411 
5412     if (!elsiocb)
5413         return 1;
5414 
5415     pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5416     *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
5417     pcmd += sizeof(uint32_t); /* Skip past command */
5418 
5419     /* use the command's xri in the response */
5420     elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext;  /* Xri / rx_id */
5421     elsiocb->iocb.unsli3.rcvsli3.ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id;
5422 
5423     rtv_rsp = (struct RTV_RSP *)pcmd;
5424 
5425     /* populate RTV payload */
5426     rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */
5427     rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov);
5428     bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0);
5429     bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */
5430     rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov);
5431 
5432     /* Xmit ELS RTV ACC response tag <ulpIoTag> */
5433     lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
5434                      "2875 Xmit ELS RTV ACC response tag x%x xri x%x, "
5435                      "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, "
5436                      "Data: x%x x%x x%x\n",
5437                      elsiocb->iotag, elsiocb->iocb.ulpContext,
5438                      ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5439                      ndlp->nlp_rpi,
5440                      rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov);
5441     elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5442     phba->fc_stat.elsXmitACC++;
5443     if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
5444         lpfc_els_free_iocb(phba, elsiocb);
5445     return 0;
5446 
5447 reject_out:
5448     /* issue rejection response */
5449     stat.un.b.lsRjtRsvd0 = 0;
5450     stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
5451     stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
5452     stat.un.b.vendorUnique = 0;
5453     lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
5454     return 0;
5455 }
5456 
5457 /* lpfc_els_rcv_rps - Process an unsolicited rps iocb
5458  * @vport: pointer to a host virtual N_Port data structure.
5459  * @cmdiocb: pointer to lpfc command iocb data structure.
5460  * @ndlp: pointer to a node-list data structure.
5461  *
5462  * This routine processes a Read Port Status (RPS) IOCB received as an
5463  * ELS unsolicited event. It first checks the remote port state. If the
5464  * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
5465  * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
5466  * response. Otherwise, it issues the MBX_READ_LNK_STAT mailbox command
5467  * for reading the HBA link statistics. The callback function,
5468  * lpfc_els_rsp_rps_acc(), set on the MBX_READ_LNK_STAT mailbox command,
5469  * then actually sends out the RPS Accept (ACC) response.
5470  *
5471  * Return codes
5472  *   0 - Successfully processed rps iocb (currently always returns 0)
5473  **/
5474 static int
5475 lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5476                  struct lpfc_nodelist *ndlp)
5477 {
5478     struct lpfc_hba *phba = vport->phba;
5479     uint32_t *lp;
5480     uint8_t flag;
5481     LPFC_MBOXQ_t *mbox;
5482     struct lpfc_dmabuf *pcmd;
5483     RPS *rps;
5484     struct ls_rjt stat;
5485 
5486     if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
5487         (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
5488         /* reject the unsolicited RPS request and be done with it */
5489         goto reject_out;
5490 
5491     pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
5492     lp = (uint32_t *) pcmd->virt;
5493     flag = (be32_to_cpu(*lp++) & 0xf);
5494     rps = (RPS *) lp;
5495 
5496     if ((flag == 0) ||
5497         ((flag == 1) && (be32_to_cpu(rps->un.portNum) == 0)) ||
5498         ((flag == 2) && (memcmp(&rps->un.portName, &vport->fc_portname,
5499                                 sizeof(struct lpfc_name)) == 0))) {
5500 
5501         printk("Fix me....\n");
5502         dump_stack();
5503         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
5504         if (mbox) {
5505             lpfc_read_lnk_stat(phba, mbox);
5506             mbox->context1 = (void *)((unsigned long)
5507                 ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
5508                 cmdiocb->iocb.ulpContext)); /* rx_id */
5509             mbox->context2 = lpfc_nlp_get(ndlp);
5510             mbox->vport = vport;
5511             mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;
5512             if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
5513                 != MBX_NOT_FINISHED)
5514                 /* Mbox completion will send ELS Response */
5515                 return 0;
5516             /* Decrement reference count used for the failed mbox
5517              * command.
5518              */
5519             lpfc_nlp_put(ndlp);
5520             mempool_free(mbox, phba->mbox_mem_pool);
5521         }
5522     }
5523 
5524 reject_out:
5525     /* issue rejection response */
5526     stat.un.b.lsRjtRsvd0 = 0;
5527     stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
5528     stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
5529     stat.un.b.vendorUnique = 0;
5530     lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
5531     return 0;
5532 }
5533 
5534 /* lpfc_issue_els_rrq - Issue an ELS RRQ (Reinstate Recovery Qualifier) command
5535  * @vport: pointer to a host virtual N_Port data structure.
5536  * @ndlp: pointer to a node-list data structure.
5537  * @did: DID of the target.
5538  * @rrq: Pointer to the rrq struct.
5539  *
5540  * Build an ELS RRQ command and send it to the target. If the iocb is
5541  * issued successfully, the completion handler will clear the RRQ.
5542  *
5543  * Return codes
5544  *   0 - Successfully sent rrq els iocb.
5545  *   1 - Failed to send rrq els iocb.
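 *
 * For reference, the request payload is the ELS_CMD_RRQ command word
 * followed by a struct RRQ whose originator S_ID, OX_ID and RX_ID come
 * from vport->fc_myDID, rrq->xritag and rrq->rxid via the bf_set()
 * accessors, with the words then swapped to wire order by cpu_to_be32().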
5546 **/ 5547 static int 5548 lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 5549 uint32_t did, struct lpfc_node_rrq *rrq) 5550 { 5551 struct lpfc_hba *phba = vport->phba; 5552 struct RRQ *els_rrq; 5553 IOCB_t *icmd; 5554 struct lpfc_iocbq *elsiocb; 5555 uint8_t *pcmd; 5556 uint16_t cmdsize; 5557 int ret; 5558 5559 5560 if (ndlp != rrq->ndlp) 5561 ndlp = rrq->ndlp; 5562 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) 5563 return 1; 5564 5565 /* If ndlp is not NULL, we will bump the reference count on it */ 5566 cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ)); 5567 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did, 5568 ELS_CMD_RRQ); 5569 if (!elsiocb) 5570 return 1; 5571 5572 icmd = &elsiocb->iocb; 5573 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5574 5575 /* For RRQ request, remainder of payload is Exchange IDs */ 5576 *((uint32_t *) (pcmd)) = ELS_CMD_RRQ; 5577 pcmd += sizeof(uint32_t); 5578 els_rrq = (struct RRQ *) pcmd; 5579 5580 bf_set(rrq_oxid, els_rrq, rrq->xritag); 5581 bf_set(rrq_rxid, els_rrq, rrq->rxid); 5582 bf_set(rrq_did, els_rrq, vport->fc_myDID); 5583 els_rrq->rrq = cpu_to_be32(els_rrq->rrq); 5584 els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg); 5585 5586 5587 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 5588 "Issue RRQ: did:x%x", 5589 did, rrq->xritag, rrq->rxid); 5590 elsiocb->context_un.rrq = rrq; 5591 elsiocb->iocb_cmpl = lpfc_cmpl_els_rrq; 5592 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5593 5594 if (ret == IOCB_ERROR) { 5595 lpfc_els_free_iocb(phba, elsiocb); 5596 return 1; 5597 } 5598 return 0; 5599 } 5600 5601 /** 5602 * lpfc_send_rrq - Sends ELS RRQ if needed. 5603 * @phba: pointer to lpfc hba data structure. 5604 * @rrq: pointer to the active rrq. 5605 * 5606 * This routine will call the lpfc_issue_els_rrq if the rrq is 5607 * still active for the xri. If this function returns a failure then 5608 * the caller needs to clean up the RRQ by calling lpfc_clr_active_rrq. 5609 * 5610 * Returns 0 Success. 5611 * 1 Failure. 5612 **/ 5613 int 5614 lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq) 5615 { 5616 struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport, 5617 rrq->nlp_DID); 5618 if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag)) 5619 return lpfc_issue_els_rrq(rrq->vport, ndlp, 5620 rrq->nlp_DID, rrq); 5621 else 5622 return 1; 5623 } 5624 5625 /** 5626 * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command 5627 * @vport: pointer to a host virtual N_Port data structure. 5628 * @cmdsize: size of the ELS command. 5629 * @oldiocb: pointer to the original lpfc command iocb data structure. 5630 * @ndlp: pointer to a node-list data structure. 5631 * 5632 * This routine issuees an Accept (ACC) Read Port List (RPL) ELS command. 5633 * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL. 5634 * 5635 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 5636 * will be incremented by 1 for holding the ndlp and the reference to ndlp 5637 * will be stored into the context1 field of the IOCB for the completion 5638 * callback function to the RPL Accept Response ELS command. 
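 *
 * For reference, the first word of the ACC payload is assembled in two
 * steps below: the 32-bit ELS_CMD_ACC command is stored, then the payload
 * pointer is advanced by only sizeof(uint16_t) so that the 16-bit RPL
 * payload length written next lands in the second half of that same word,
 * after which the RPL_RSP port-list block is copied in.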
5639 * 5640 * Return code 5641 * 0 - Successfully issued ACC RPL ELS command 5642 * 1 - Failed to issue ACC RPL ELS command 5643 **/ 5644 static int 5645 lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize, 5646 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 5647 { 5648 struct lpfc_hba *phba = vport->phba; 5649 IOCB_t *icmd, *oldcmd; 5650 RPL_RSP rpl_rsp; 5651 struct lpfc_iocbq *elsiocb; 5652 uint8_t *pcmd; 5653 5654 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 5655 ndlp->nlp_DID, ELS_CMD_ACC); 5656 5657 if (!elsiocb) 5658 return 1; 5659 5660 icmd = &elsiocb->iocb; 5661 oldcmd = &oldiocb->iocb; 5662 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5663 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 5664 5665 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5666 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5667 pcmd += sizeof(uint16_t); 5668 *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize); 5669 pcmd += sizeof(uint16_t); 5670 5671 /* Setup the RPL ACC payload */ 5672 rpl_rsp.listLen = be32_to_cpu(1); 5673 rpl_rsp.index = 0; 5674 rpl_rsp.port_num_blk.portNum = 0; 5675 rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID); 5676 memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname, 5677 sizeof(struct lpfc_name)); 5678 memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t)); 5679 /* Xmit ELS RPL ACC response tag <ulpIoTag> */ 5680 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5681 "0120 Xmit ELS RPL ACC response tag x%x " 5682 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " 5683 "rpi x%x\n", 5684 elsiocb->iotag, elsiocb->iocb.ulpContext, 5685 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5686 ndlp->nlp_rpi); 5687 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5688 phba->fc_stat.elsXmitACC++; 5689 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == 5690 IOCB_ERROR) { 5691 lpfc_els_free_iocb(phba, elsiocb); 5692 return 1; 5693 } 5694 return 0; 5695 } 5696 5697 /** 5698 * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb 5699 * @vport: pointer to a host virtual N_Port data structure. 5700 * @cmdiocb: pointer to lpfc command iocb data structure. 5701 * @ndlp: pointer to a node-list data structure. 5702 * 5703 * This routine processes Read Port List (RPL) IOCB received as an ELS 5704 * unsolicited event. It first checks the remote port state. If the remote 5705 * port is not in NLP_STE_UNMAPPED_NODE and NLP_STE_MAPPED_NODE states, it 5706 * invokes the lpfc_els_rsp_reject() routine to send reject response. 5707 * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine 5708 * to accept the RPL. 
5709 * 5710 * Return code 5711 * 0 - Successfully processed rpl iocb (currently always return 0) 5712 **/ 5713 static int 5714 lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5715 struct lpfc_nodelist *ndlp) 5716 { 5717 struct lpfc_dmabuf *pcmd; 5718 uint32_t *lp; 5719 uint32_t maxsize; 5720 uint16_t cmdsize; 5721 RPL *rpl; 5722 struct ls_rjt stat; 5723 5724 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 5725 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { 5726 /* issue rejection response */ 5727 stat.un.b.lsRjtRsvd0 = 0; 5728 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 5729 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 5730 stat.un.b.vendorUnique = 0; 5731 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 5732 NULL); 5733 /* rejected the unsolicited RPL request and done with it */ 5734 return 0; 5735 } 5736 5737 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 5738 lp = (uint32_t *) pcmd->virt; 5739 rpl = (RPL *) (lp + 1); 5740 maxsize = be32_to_cpu(rpl->maxsize); 5741 5742 /* We support only one port */ 5743 if ((rpl->index == 0) && 5744 ((maxsize == 0) || 5745 ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) { 5746 cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP); 5747 } else { 5748 cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t); 5749 } 5750 lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp); 5751 5752 return 0; 5753 } 5754 5755 /** 5756 * lpfc_els_rcv_farp - Process an unsolicited farp request els command 5757 * @vport: pointer to a virtual N_Port data structure. 5758 * @cmdiocb: pointer to lpfc command iocb data structure. 5759 * @ndlp: pointer to a node-list data structure. 5760 * 5761 * This routine processes Fibre Channel Address Resolution Protocol 5762 * (FARP) Request IOCB received as an ELS unsolicited event. Currently, 5763 * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such, 5764 * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the 5765 * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the 5766 * remote PortName is compared against the FC PortName stored in the @vport 5767 * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is 5768 * compared against the FC NodeName stored in the @vport data structure. 5769 * If any of these matches and the FARP_REQUEST_FARPR flag is set in the 5770 * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is 5771 * invoked to send out FARP Response to the remote node. Before sending the 5772 * FARP Response, however, the FARP_REQUEST_PLOGI flag is check in the FARP 5773 * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi() 5774 * routine is invoked to log into the remote port first. 
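 *
 * In outline, a match is declared when FARP_MATCH_PORT is set and the
 * request's RportName equals vport->fc_portname, or when FARP_MATCH_NODE
 * is set and RnodeName equals vport->fc_nodename; only then, and only for
 * nodes in the UNMAPPED or MAPPED state, are the FARP_REQUEST_PLOGI and
 * FARP_REQUEST_FARPR response flags acted upon.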
5775 * 5776 * Return code 5777 * 0 - Either the FARP Match Mode not supported or successfully processed 5778 **/ 5779 static int 5780 lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5781 struct lpfc_nodelist *ndlp) 5782 { 5783 struct lpfc_dmabuf *pcmd; 5784 uint32_t *lp; 5785 IOCB_t *icmd; 5786 FARP *fp; 5787 uint32_t cmd, cnt, did; 5788 5789 icmd = &cmdiocb->iocb; 5790 did = icmd->un.elsreq64.remoteID; 5791 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 5792 lp = (uint32_t *) pcmd->virt; 5793 5794 cmd = *lp++; 5795 fp = (FARP *) lp; 5796 /* FARP-REQ received from DID <did> */ 5797 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5798 "0601 FARP-REQ received from DID x%x\n", did); 5799 /* We will only support match on WWPN or WWNN */ 5800 if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) { 5801 return 0; 5802 } 5803 5804 cnt = 0; 5805 /* If this FARP command is searching for my portname */ 5806 if (fp->Mflags & FARP_MATCH_PORT) { 5807 if (memcmp(&fp->RportName, &vport->fc_portname, 5808 sizeof(struct lpfc_name)) == 0) 5809 cnt = 1; 5810 } 5811 5812 /* If this FARP command is searching for my nodename */ 5813 if (fp->Mflags & FARP_MATCH_NODE) { 5814 if (memcmp(&fp->RnodeName, &vport->fc_nodename, 5815 sizeof(struct lpfc_name)) == 0) 5816 cnt = 1; 5817 } 5818 5819 if (cnt) { 5820 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || 5821 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) { 5822 /* Log back into the node before sending the FARP. */ 5823 if (fp->Rflags & FARP_REQUEST_PLOGI) { 5824 ndlp->nlp_prev_state = ndlp->nlp_state; 5825 lpfc_nlp_set_state(vport, ndlp, 5826 NLP_STE_PLOGI_ISSUE); 5827 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 5828 } 5829 5830 /* Send a FARP response to that node */ 5831 if (fp->Rflags & FARP_REQUEST_FARPR) 5832 lpfc_issue_els_farpr(vport, did, 0); 5833 } 5834 } 5835 return 0; 5836 } 5837 5838 /** 5839 * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb 5840 * @vport: pointer to a host virtual N_Port data structure. 5841 * @cmdiocb: pointer to lpfc command iocb data structure. 5842 * @ndlp: pointer to a node-list data structure. 5843 * 5844 * This routine processes Fibre Channel Address Resolution Protocol 5845 * Response (FARPR) IOCB received as an ELS unsolicited event. It simply 5846 * invokes the lpfc_els_rsp_acc() routine to the remote node to accept 5847 * the FARP response request. 5848 * 5849 * Return code 5850 * 0 - Successfully processed FARPR IOCB (currently always return 0) 5851 **/ 5852 static int 5853 lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5854 struct lpfc_nodelist *ndlp) 5855 { 5856 struct lpfc_dmabuf *pcmd; 5857 uint32_t *lp; 5858 IOCB_t *icmd; 5859 uint32_t cmd, did; 5860 5861 icmd = &cmdiocb->iocb; 5862 did = icmd->un.elsreq64.remoteID; 5863 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 5864 lp = (uint32_t *) pcmd->virt; 5865 5866 cmd = *lp++; 5867 /* FARP-RSP received from DID <did> */ 5868 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5869 "0600 FARP-RSP received from DID x%x\n", did); 5870 /* ACCEPT the Farp resp request */ 5871 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 5872 5873 return 0; 5874 } 5875 5876 /** 5877 * lpfc_els_rcv_fan - Process an unsolicited fan iocb command 5878 * @vport: pointer to a host virtual N_Port data structure. 5879 * @cmdiocb: pointer to lpfc command iocb data structure. 5880 * @fan_ndlp: pointer to a node-list data structure. 
5881 * 5882 * This routine processes a Fabric Address Notification (FAN) IOCB 5883 * command received as an ELS unsolicited event. The FAN ELS command will 5884 * only be processed on a physical port (i.e., the @vport represents the 5885 * physical port). The fabric NodeName and PortName from the FAN IOCB are 5886 * compared against those in the phba data structure. If any of those is 5887 * different, the lpfc_initial_flogi() routine is invoked to initialize 5888 * Fabric Login (FLOGI) to the fabric to start the discover over. Otherwise, 5889 * if both of those are identical, the lpfc_issue_fabric_reglogin() routine 5890 * is invoked to register login to the fabric. 5891 * 5892 * Return code 5893 * 0 - Successfully processed fan iocb (currently always return 0). 5894 **/ 5895 static int 5896 lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5897 struct lpfc_nodelist *fan_ndlp) 5898 { 5899 struct lpfc_hba *phba = vport->phba; 5900 uint32_t *lp; 5901 FAN *fp; 5902 5903 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n"); 5904 lp = (uint32_t *)((struct lpfc_dmabuf *)cmdiocb->context2)->virt; 5905 fp = (FAN *) ++lp; 5906 /* FAN received; Fan does not have a reply sequence */ 5907 if ((vport == phba->pport) && 5908 (vport->port_state == LPFC_LOCAL_CFG_LINK)) { 5909 if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName, 5910 sizeof(struct lpfc_name))) || 5911 (memcmp(&phba->fc_fabparam.portName, &fp->FportName, 5912 sizeof(struct lpfc_name)))) { 5913 /* This port has switched fabrics. FLOGI is required */ 5914 lpfc_issue_init_vfi(vport); 5915 } else { 5916 /* FAN verified - skip FLOGI */ 5917 vport->fc_myDID = vport->fc_prevDID; 5918 if (phba->sli_rev < LPFC_SLI_REV4) 5919 lpfc_issue_fabric_reglogin(vport); 5920 else { 5921 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5922 "3138 Need register VFI: (x%x/%x)\n", 5923 vport->fc_prevDID, vport->fc_myDID); 5924 lpfc_issue_reg_vfi(vport); 5925 } 5926 } 5927 } 5928 return 0; 5929 } 5930 5931 /** 5932 * lpfc_els_timeout - Handler funciton to the els timer 5933 * @ptr: holder for the timer function associated data. 5934 * 5935 * This routine is invoked by the ELS timer after timeout. It posts the ELS 5936 * timer timeout event by setting the WORKER_ELS_TMO bit to the work port 5937 * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake 5938 * up the worker thread. It is for the worker thread to invoke the routine 5939 * lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO. 5940 **/ 5941 void 5942 lpfc_els_timeout(unsigned long ptr) 5943 { 5944 struct lpfc_vport *vport = (struct lpfc_vport *) ptr; 5945 struct lpfc_hba *phba = vport->phba; 5946 uint32_t tmo_posted; 5947 unsigned long iflag; 5948 5949 spin_lock_irqsave(&vport->work_port_lock, iflag); 5950 tmo_posted = vport->work_port_events & WORKER_ELS_TMO; 5951 if (!tmo_posted) 5952 vport->work_port_events |= WORKER_ELS_TMO; 5953 spin_unlock_irqrestore(&vport->work_port_lock, iflag); 5954 5955 if (!tmo_posted) 5956 lpfc_worker_wake_up(phba); 5957 return; 5958 } 5959 5960 5961 /** 5962 * lpfc_els_timeout_handler - Process an els timeout event 5963 * @vport: pointer to a virtual N_Port data structure. 5964 * 5965 * This routine is the actual handler function that processes an ELS timeout 5966 * event. It walks the ELS ring to get and abort all the IOCBs (except the 5967 * ABORT/CLOSE/FARP/FARPR/FDISC), which are associated with the @vport by 5968 * invoking the lpfc_sli_issue_abort_iotag() routine. 
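 *
 * The timeout window used below is twice R_A_TOV (phba->fc_ratov << 1);
 * IOCBs whose drvrTimeout has not yet reached zero are only aged by that
 * amount and skipped on this pass, and the timer is re-armed for another
 * window while the ELS txcmplq is still non-empty.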
5969 **/ 5970 void 5971 lpfc_els_timeout_handler(struct lpfc_vport *vport) 5972 { 5973 struct lpfc_hba *phba = vport->phba; 5974 struct lpfc_sli_ring *pring; 5975 struct lpfc_iocbq *tmp_iocb, *piocb; 5976 IOCB_t *cmd = NULL; 5977 struct lpfc_dmabuf *pcmd; 5978 uint32_t els_command = 0; 5979 uint32_t timeout; 5980 uint32_t remote_ID = 0xffffffff; 5981 LIST_HEAD(txcmplq_completions); 5982 LIST_HEAD(abort_list); 5983 5984 5985 timeout = (uint32_t)(phba->fc_ratov << 1); 5986 5987 pring = &phba->sli.ring[LPFC_ELS_RING]; 5988 5989 spin_lock_irq(&phba->hbalock); 5990 list_splice_init(&pring->txcmplq, &txcmplq_completions); 5991 spin_unlock_irq(&phba->hbalock); 5992 5993 list_for_each_entry_safe(piocb, tmp_iocb, &txcmplq_completions, list) { 5994 cmd = &piocb->iocb; 5995 5996 if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 || 5997 piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || 5998 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) 5999 continue; 6000 6001 if (piocb->vport != vport) 6002 continue; 6003 6004 pcmd = (struct lpfc_dmabuf *) piocb->context2; 6005 if (pcmd) 6006 els_command = *(uint32_t *) (pcmd->virt); 6007 6008 if (els_command == ELS_CMD_FARP || 6009 els_command == ELS_CMD_FARPR || 6010 els_command == ELS_CMD_FDISC) 6011 continue; 6012 6013 if (piocb->drvrTimeout > 0) { 6014 if (piocb->drvrTimeout >= timeout) 6015 piocb->drvrTimeout -= timeout; 6016 else 6017 piocb->drvrTimeout = 0; 6018 continue; 6019 } 6020 6021 remote_ID = 0xffffffff; 6022 if (cmd->ulpCommand != CMD_GEN_REQUEST64_CR) 6023 remote_ID = cmd->un.elsreq64.remoteID; 6024 else { 6025 struct lpfc_nodelist *ndlp; 6026 ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext); 6027 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) 6028 remote_ID = ndlp->nlp_DID; 6029 } 6030 list_add_tail(&piocb->dlist, &abort_list); 6031 } 6032 spin_lock_irq(&phba->hbalock); 6033 list_splice(&txcmplq_completions, &pring->txcmplq); 6034 spin_unlock_irq(&phba->hbalock); 6035 6036 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { 6037 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 6038 "0127 ELS timeout Data: x%x x%x x%x " 6039 "x%x\n", els_command, 6040 remote_ID, cmd->ulpCommand, cmd->ulpIoTag); 6041 spin_lock_irq(&phba->hbalock); 6042 list_del_init(&piocb->dlist); 6043 lpfc_sli_issue_abort_iotag(phba, pring, piocb); 6044 spin_unlock_irq(&phba->hbalock); 6045 } 6046 6047 if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt) 6048 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout); 6049 } 6050 6051 /** 6052 * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport 6053 * @vport: pointer to a host virtual N_Port data structure. 6054 * 6055 * This routine is used to clean up all the outstanding ELS commands on a 6056 * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport() 6057 * routine. After that, it walks the ELS transmit queue to remove all the 6058 * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. For 6059 * the IOCBs with a non-NULL completion callback function, the callback 6060 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and 6061 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion 6062 * callback function, the IOCB will simply be released. 
Finally, it walks 6063 * the ELS transmit completion queue to issue an abort IOCB to any transmit 6064 * completion queue IOCB that is associated with the @vport and is not 6065 * an IOCB from libdfc (i.e., the management plane IOCBs that are not 6066 * part of the discovery state machine) out to HBA by invoking the 6067 * lpfc_sli_issue_abort_iotag() routine. Note that this function issues the 6068 * abort IOCB to any transmit completion queueed IOCB, it does not guarantee 6069 * the IOCBs are aborted when this function returns. 6070 **/ 6071 void 6072 lpfc_els_flush_cmd(struct lpfc_vport *vport) 6073 { 6074 LIST_HEAD(completions); 6075 struct lpfc_hba *phba = vport->phba; 6076 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 6077 struct lpfc_iocbq *tmp_iocb, *piocb; 6078 IOCB_t *cmd = NULL; 6079 6080 lpfc_fabric_abort_vport(vport); 6081 6082 spin_lock_irq(&phba->hbalock); 6083 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) { 6084 cmd = &piocb->iocb; 6085 6086 if (piocb->iocb_flag & LPFC_IO_LIBDFC) { 6087 continue; 6088 } 6089 6090 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */ 6091 if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN || 6092 cmd->ulpCommand == CMD_QUE_RING_BUF64_CN || 6093 cmd->ulpCommand == CMD_CLOSE_XRI_CN || 6094 cmd->ulpCommand == CMD_ABORT_XRI_CN) 6095 continue; 6096 6097 if (piocb->vport != vport) 6098 continue; 6099 6100 list_move_tail(&piocb->list, &completions); 6101 pring->txq_cnt--; 6102 } 6103 6104 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 6105 if (piocb->iocb_flag & LPFC_IO_LIBDFC) { 6106 continue; 6107 } 6108 6109 if (piocb->vport != vport) 6110 continue; 6111 6112 lpfc_sli_issue_abort_iotag(phba, pring, piocb); 6113 } 6114 spin_unlock_irq(&phba->hbalock); 6115 6116 /* Cancell all the IOCBs from the completions list */ 6117 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 6118 IOERR_SLI_ABORTED); 6119 6120 return; 6121 } 6122 6123 /** 6124 * lpfc_els_flush_all_cmd - Clean up all the outstanding els commands to a HBA 6125 * @phba: pointer to lpfc hba data structure. 6126 * 6127 * This routine is used to clean up all the outstanding ELS commands on a 6128 * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba() 6129 * routine. After that, it walks the ELS transmit queue to remove all the 6130 * IOCBs to the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For 6131 * the IOCBs with the completion callback function associated, the callback 6132 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and 6133 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without the completion 6134 * callback function associated, the IOCB will simply be released. Finally, 6135 * it walks the ELS transmit completion queue to issue an abort IOCB to any 6136 * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the 6137 * management plane IOCBs that are not part of the discovery state machine) 6138 * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine. 
6139 **/ 6140 void 6141 lpfc_els_flush_all_cmd(struct lpfc_hba *phba) 6142 { 6143 LIST_HEAD(completions); 6144 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 6145 struct lpfc_iocbq *tmp_iocb, *piocb; 6146 IOCB_t *cmd = NULL; 6147 6148 lpfc_fabric_abort_hba(phba); 6149 spin_lock_irq(&phba->hbalock); 6150 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) { 6151 cmd = &piocb->iocb; 6152 if (piocb->iocb_flag & LPFC_IO_LIBDFC) 6153 continue; 6154 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */ 6155 if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN || 6156 cmd->ulpCommand == CMD_QUE_RING_BUF64_CN || 6157 cmd->ulpCommand == CMD_CLOSE_XRI_CN || 6158 cmd->ulpCommand == CMD_ABORT_XRI_CN) 6159 continue; 6160 list_move_tail(&piocb->list, &completions); 6161 pring->txq_cnt--; 6162 } 6163 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 6164 if (piocb->iocb_flag & LPFC_IO_LIBDFC) 6165 continue; 6166 lpfc_sli_issue_abort_iotag(phba, pring, piocb); 6167 } 6168 spin_unlock_irq(&phba->hbalock); 6169 6170 /* Cancel all the IOCBs from the completions list */ 6171 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 6172 IOERR_SLI_ABORTED); 6173 6174 return; 6175 } 6176 6177 /** 6178 * lpfc_send_els_failure_event - Posts an ELS command failure event 6179 * @phba: Pointer to hba context object. 6180 * @cmdiocbp: Pointer to command iocb which reported error. 6181 * @rspiocbp: Pointer to response iocb which reported error. 6182 * 6183 * This function sends an event when there is an ELS command 6184 * failure. 6185 **/ 6186 void 6187 lpfc_send_els_failure_event(struct lpfc_hba *phba, 6188 struct lpfc_iocbq *cmdiocbp, 6189 struct lpfc_iocbq *rspiocbp) 6190 { 6191 struct lpfc_vport *vport = cmdiocbp->vport; 6192 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6193 struct lpfc_lsrjt_event lsrjt_event; 6194 struct lpfc_fabric_event_header fabric_event; 6195 struct ls_rjt stat; 6196 struct lpfc_nodelist *ndlp; 6197 uint32_t *pcmd; 6198 6199 ndlp = cmdiocbp->context1; 6200 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) 6201 return; 6202 6203 if (rspiocbp->iocb.ulpStatus == IOSTAT_LS_RJT) { 6204 lsrjt_event.header.event_type = FC_REG_ELS_EVENT; 6205 lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV; 6206 memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname, 6207 sizeof(struct lpfc_name)); 6208 memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename, 6209 sizeof(struct lpfc_name)); 6210 pcmd = (uint32_t *) (((struct lpfc_dmabuf *) 6211 cmdiocbp->context2)->virt); 6212 lsrjt_event.command = (pcmd != NULL) ? 
*pcmd : 0; 6213 stat.un.lsRjtError = be32_to_cpu(rspiocbp->iocb.un.ulpWord[4]); 6214 lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode; 6215 lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp; 6216 fc_host_post_vendor_event(shost, 6217 fc_get_event_number(), 6218 sizeof(lsrjt_event), 6219 (char *)&lsrjt_event, 6220 LPFC_NL_VENDOR_ID); 6221 return; 6222 } 6223 if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) || 6224 (rspiocbp->iocb.ulpStatus == IOSTAT_FABRIC_BSY)) { 6225 fabric_event.event_type = FC_REG_FABRIC_EVENT; 6226 if (rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) 6227 fabric_event.subcategory = LPFC_EVENT_PORT_BUSY; 6228 else 6229 fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY; 6230 memcpy(fabric_event.wwpn, &ndlp->nlp_portname, 6231 sizeof(struct lpfc_name)); 6232 memcpy(fabric_event.wwnn, &ndlp->nlp_nodename, 6233 sizeof(struct lpfc_name)); 6234 fc_host_post_vendor_event(shost, 6235 fc_get_event_number(), 6236 sizeof(fabric_event), 6237 (char *)&fabric_event, 6238 LPFC_NL_VENDOR_ID); 6239 return; 6240 } 6241 6242 } 6243 6244 /** 6245 * lpfc_send_els_event - Posts unsolicited els event 6246 * @vport: Pointer to vport object. 6247 * @ndlp: Pointer FC node object. 6248 * @cmd: ELS command code. 6249 * 6250 * This function posts an event when there is an incoming 6251 * unsolicited ELS command. 6252 **/ 6253 static void 6254 lpfc_send_els_event(struct lpfc_vport *vport, 6255 struct lpfc_nodelist *ndlp, 6256 uint32_t *payload) 6257 { 6258 struct lpfc_els_event_header *els_data = NULL; 6259 struct lpfc_logo_event *logo_data = NULL; 6260 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6261 6262 if (*payload == ELS_CMD_LOGO) { 6263 logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL); 6264 if (!logo_data) { 6265 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 6266 "0148 Failed to allocate memory " 6267 "for LOGO event\n"); 6268 return; 6269 } 6270 els_data = &logo_data->header; 6271 } else { 6272 els_data = kmalloc(sizeof(struct lpfc_els_event_header), 6273 GFP_KERNEL); 6274 if (!els_data) { 6275 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 6276 "0149 Failed to allocate memory " 6277 "for ELS event\n"); 6278 return; 6279 } 6280 } 6281 els_data->event_type = FC_REG_ELS_EVENT; 6282 switch (*payload) { 6283 case ELS_CMD_PLOGI: 6284 els_data->subcategory = LPFC_EVENT_PLOGI_RCV; 6285 break; 6286 case ELS_CMD_PRLO: 6287 els_data->subcategory = LPFC_EVENT_PRLO_RCV; 6288 break; 6289 case ELS_CMD_ADISC: 6290 els_data->subcategory = LPFC_EVENT_ADISC_RCV; 6291 break; 6292 case ELS_CMD_LOGO: 6293 els_data->subcategory = LPFC_EVENT_LOGO_RCV; 6294 /* Copy the WWPN in the LOGO payload */ 6295 memcpy(logo_data->logo_wwpn, &payload[2], 6296 sizeof(struct lpfc_name)); 6297 break; 6298 default: 6299 kfree(els_data); 6300 return; 6301 } 6302 memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name)); 6303 memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name)); 6304 if (*payload == ELS_CMD_LOGO) { 6305 fc_host_post_vendor_event(shost, 6306 fc_get_event_number(), 6307 sizeof(struct lpfc_logo_event), 6308 (char *)logo_data, 6309 LPFC_NL_VENDOR_ID); 6310 kfree(logo_data); 6311 } else { 6312 fc_host_post_vendor_event(shost, 6313 fc_get_event_number(), 6314 sizeof(struct lpfc_els_event_header), 6315 (char *)els_data, 6316 LPFC_NL_VENDOR_ID); 6317 kfree(els_data); 6318 } 6319 6320 return; 6321 } 6322 6323 6324 /** 6325 * lpfc_els_unsol_buffer - Process an unsolicited event data buffer 6326 * @phba: pointer to lpfc hba data structure. 6327 * @pring: pointer to a SLI ring. 
6328 * @vport: pointer to a host virtual N_Port data structure. 6329 * @elsiocb: pointer to lpfc els command iocb data structure. 6330 * 6331 * This routine is used for processing the IOCB associated with a unsolicited 6332 * event. It first determines whether there is an existing ndlp that matches 6333 * the DID from the unsolicited IOCB. If not, it will create a new one with 6334 * the DID from the unsolicited IOCB. The ELS command from the unsolicited 6335 * IOCB is then used to invoke the proper routine and to set up proper state 6336 * of the discovery state machine. 6337 **/ 6338 static void 6339 lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 6340 struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb) 6341 { 6342 struct Scsi_Host *shost; 6343 struct lpfc_nodelist *ndlp; 6344 struct ls_rjt stat; 6345 uint32_t *payload; 6346 uint32_t cmd, did, newnode, rjt_err = 0; 6347 IOCB_t *icmd = &elsiocb->iocb; 6348 6349 if (!vport || !(elsiocb->context2)) 6350 goto dropit; 6351 6352 newnode = 0; 6353 payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt; 6354 cmd = *payload; 6355 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0) 6356 lpfc_post_buffer(phba, pring, 1); 6357 6358 did = icmd->un.rcvels.remoteID; 6359 if (icmd->ulpStatus) { 6360 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6361 "RCV Unsol ELS: status:x%x/x%x did:x%x", 6362 icmd->ulpStatus, icmd->un.ulpWord[4], did); 6363 goto dropit; 6364 } 6365 6366 /* Check to see if link went down during discovery */ 6367 if (lpfc_els_chk_latt(vport)) 6368 goto dropit; 6369 6370 /* Ignore traffic received during vport shutdown. */ 6371 if (vport->load_flag & FC_UNLOADING) 6372 goto dropit; 6373 6374 /* If NPort discovery is delayed drop incoming ELS */ 6375 if ((vport->fc_flag & FC_DISC_DELAYED) && 6376 (cmd != ELS_CMD_PLOGI)) 6377 goto dropit; 6378 6379 ndlp = lpfc_findnode_did(vport, did); 6380 if (!ndlp) { 6381 /* Cannot find existing Fabric ndlp, so allocate a new one */ 6382 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 6383 if (!ndlp) 6384 goto dropit; 6385 6386 lpfc_nlp_init(vport, ndlp, did); 6387 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 6388 newnode = 1; 6389 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 6390 ndlp->nlp_type |= NLP_FABRIC; 6391 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 6392 ndlp = lpfc_enable_node(vport, ndlp, 6393 NLP_STE_UNUSED_NODE); 6394 if (!ndlp) 6395 goto dropit; 6396 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 6397 newnode = 1; 6398 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 6399 ndlp->nlp_type |= NLP_FABRIC; 6400 } else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 6401 /* This is similar to the new node path */ 6402 ndlp = lpfc_nlp_get(ndlp); 6403 if (!ndlp) 6404 goto dropit; 6405 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 6406 newnode = 1; 6407 } 6408 6409 phba->fc_stat.elsRcvFrame++; 6410 6411 elsiocb->context1 = lpfc_nlp_get(ndlp); 6412 elsiocb->vport = vport; 6413 6414 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) { 6415 cmd &= ELS_CMD_MASK; 6416 } 6417 /* ELS command <elsCmd> received from NPORT <did> */ 6418 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6419 "0112 ELS command x%x received from NPORT x%x " 6420 "Data: x%x\n", cmd, did, vport->port_state); 6421 switch (cmd) { 6422 case ELS_CMD_PLOGI: 6423 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6424 "RCV PLOGI: did:x%x/ste:x%x flg:x%x", 6425 did, vport->port_state, ndlp->nlp_flag); 6426 6427 phba->fc_stat.elsRcvPLOGI++; 6428 ndlp = lpfc_plogi_confirm_nport(phba, payload, 
ndlp); 6429 6430 lpfc_send_els_event(vport, ndlp, payload); 6431 6432 /* If Nport discovery is delayed, reject PLOGIs */ 6433 if (vport->fc_flag & FC_DISC_DELAYED) { 6434 rjt_err = LSRJT_UNABLE_TPC; 6435 break; 6436 } 6437 if (vport->port_state < LPFC_DISC_AUTH) { 6438 if (!(phba->pport->fc_flag & FC_PT2PT) || 6439 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { 6440 rjt_err = LSRJT_UNABLE_TPC; 6441 break; 6442 } 6443 /* We get here, and drop thru, if we are PT2PT with 6444 * another NPort and the other side has initiated 6445 * the PLOGI before responding to our FLOGI. 6446 */ 6447 } 6448 6449 shost = lpfc_shost_from_vport(vport); 6450 spin_lock_irq(shost->host_lock); 6451 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE; 6452 spin_unlock_irq(shost->host_lock); 6453 6454 lpfc_disc_state_machine(vport, ndlp, elsiocb, 6455 NLP_EVT_RCV_PLOGI); 6456 6457 break; 6458 case ELS_CMD_FLOGI: 6459 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6460 "RCV FLOGI: did:x%x/ste:x%x flg:x%x", 6461 did, vport->port_state, ndlp->nlp_flag); 6462 6463 phba->fc_stat.elsRcvFLOGI++; 6464 lpfc_els_rcv_flogi(vport, elsiocb, ndlp); 6465 if (newnode) 6466 lpfc_nlp_put(ndlp); 6467 break; 6468 case ELS_CMD_LOGO: 6469 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6470 "RCV LOGO: did:x%x/ste:x%x flg:x%x", 6471 did, vport->port_state, ndlp->nlp_flag); 6472 6473 phba->fc_stat.elsRcvLOGO++; 6474 lpfc_send_els_event(vport, ndlp, payload); 6475 if (vport->port_state < LPFC_DISC_AUTH) { 6476 rjt_err = LSRJT_UNABLE_TPC; 6477 break; 6478 } 6479 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO); 6480 break; 6481 case ELS_CMD_PRLO: 6482 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6483 "RCV PRLO: did:x%x/ste:x%x flg:x%x", 6484 did, vport->port_state, ndlp->nlp_flag); 6485 6486 phba->fc_stat.elsRcvPRLO++; 6487 lpfc_send_els_event(vport, ndlp, payload); 6488 if (vport->port_state < LPFC_DISC_AUTH) { 6489 rjt_err = LSRJT_UNABLE_TPC; 6490 break; 6491 } 6492 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO); 6493 break; 6494 case ELS_CMD_RSCN: 6495 phba->fc_stat.elsRcvRSCN++; 6496 lpfc_els_rcv_rscn(vport, elsiocb, ndlp); 6497 if (newnode) 6498 lpfc_nlp_put(ndlp); 6499 break; 6500 case ELS_CMD_ADISC: 6501 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6502 "RCV ADISC: did:x%x/ste:x%x flg:x%x", 6503 did, vport->port_state, ndlp->nlp_flag); 6504 6505 lpfc_send_els_event(vport, ndlp, payload); 6506 phba->fc_stat.elsRcvADISC++; 6507 if (vport->port_state < LPFC_DISC_AUTH) { 6508 rjt_err = LSRJT_UNABLE_TPC; 6509 break; 6510 } 6511 lpfc_disc_state_machine(vport, ndlp, elsiocb, 6512 NLP_EVT_RCV_ADISC); 6513 break; 6514 case ELS_CMD_PDISC: 6515 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6516 "RCV PDISC: did:x%x/ste:x%x flg:x%x", 6517 did, vport->port_state, ndlp->nlp_flag); 6518 6519 phba->fc_stat.elsRcvPDISC++; 6520 if (vport->port_state < LPFC_DISC_AUTH) { 6521 rjt_err = LSRJT_UNABLE_TPC; 6522 break; 6523 } 6524 lpfc_disc_state_machine(vport, ndlp, elsiocb, 6525 NLP_EVT_RCV_PDISC); 6526 break; 6527 case ELS_CMD_FARPR: 6528 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6529 "RCV FARPR: did:x%x/ste:x%x flg:x%x", 6530 did, vport->port_state, ndlp->nlp_flag); 6531 6532 phba->fc_stat.elsRcvFARPR++; 6533 lpfc_els_rcv_farpr(vport, elsiocb, ndlp); 6534 break; 6535 case ELS_CMD_FARP: 6536 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6537 "RCV FARP: did:x%x/ste:x%x flg:x%x", 6538 did, vport->port_state, ndlp->nlp_flag); 6539 6540 phba->fc_stat.elsRcvFARP++; 6541 
lpfc_els_rcv_farp(vport, elsiocb, ndlp); 6542 break; 6543 case ELS_CMD_FAN: 6544 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6545 "RCV FAN: did:x%x/ste:x%x flg:x%x", 6546 did, vport->port_state, ndlp->nlp_flag); 6547 6548 phba->fc_stat.elsRcvFAN++; 6549 lpfc_els_rcv_fan(vport, elsiocb, ndlp); 6550 break; 6551 case ELS_CMD_PRLI: 6552 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6553 "RCV PRLI: did:x%x/ste:x%x flg:x%x", 6554 did, vport->port_state, ndlp->nlp_flag); 6555 6556 phba->fc_stat.elsRcvPRLI++; 6557 if (vport->port_state < LPFC_DISC_AUTH) { 6558 rjt_err = LSRJT_UNABLE_TPC; 6559 break; 6560 } 6561 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI); 6562 break; 6563 case ELS_CMD_LIRR: 6564 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6565 "RCV LIRR: did:x%x/ste:x%x flg:x%x", 6566 did, vport->port_state, ndlp->nlp_flag); 6567 6568 phba->fc_stat.elsRcvLIRR++; 6569 lpfc_els_rcv_lirr(vport, elsiocb, ndlp); 6570 if (newnode) 6571 lpfc_nlp_put(ndlp); 6572 break; 6573 case ELS_CMD_RLS: 6574 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6575 "RCV RLS: did:x%x/ste:x%x flg:x%x", 6576 did, vport->port_state, ndlp->nlp_flag); 6577 6578 phba->fc_stat.elsRcvRLS++; 6579 lpfc_els_rcv_rls(vport, elsiocb, ndlp); 6580 if (newnode) 6581 lpfc_nlp_put(ndlp); 6582 break; 6583 case ELS_CMD_RPS: 6584 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6585 "RCV RPS: did:x%x/ste:x%x flg:x%x", 6586 did, vport->port_state, ndlp->nlp_flag); 6587 6588 phba->fc_stat.elsRcvRPS++; 6589 lpfc_els_rcv_rps(vport, elsiocb, ndlp); 6590 if (newnode) 6591 lpfc_nlp_put(ndlp); 6592 break; 6593 case ELS_CMD_RPL: 6594 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6595 "RCV RPL: did:x%x/ste:x%x flg:x%x", 6596 did, vport->port_state, ndlp->nlp_flag); 6597 6598 phba->fc_stat.elsRcvRPL++; 6599 lpfc_els_rcv_rpl(vport, elsiocb, ndlp); 6600 if (newnode) 6601 lpfc_nlp_put(ndlp); 6602 break; 6603 case ELS_CMD_RNID: 6604 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6605 "RCV RNID: did:x%x/ste:x%x flg:x%x", 6606 did, vport->port_state, ndlp->nlp_flag); 6607 6608 phba->fc_stat.elsRcvRNID++; 6609 lpfc_els_rcv_rnid(vport, elsiocb, ndlp); 6610 if (newnode) 6611 lpfc_nlp_put(ndlp); 6612 break; 6613 case ELS_CMD_RTV: 6614 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6615 "RCV RTV: did:x%x/ste:x%x flg:x%x", 6616 did, vport->port_state, ndlp->nlp_flag); 6617 phba->fc_stat.elsRcvRTV++; 6618 lpfc_els_rcv_rtv(vport, elsiocb, ndlp); 6619 if (newnode) 6620 lpfc_nlp_put(ndlp); 6621 break; 6622 case ELS_CMD_RRQ: 6623 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6624 "RCV RRQ: did:x%x/ste:x%x flg:x%x", 6625 did, vport->port_state, ndlp->nlp_flag); 6626 6627 phba->fc_stat.elsRcvRRQ++; 6628 lpfc_els_rcv_rrq(vport, elsiocb, ndlp); 6629 if (newnode) 6630 lpfc_nlp_put(ndlp); 6631 break; 6632 case ELS_CMD_ECHO: 6633 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6634 "RCV ECHO: did:x%x/ste:x%x flg:x%x", 6635 did, vport->port_state, ndlp->nlp_flag); 6636 6637 phba->fc_stat.elsRcvECHO++; 6638 lpfc_els_rcv_echo(vport, elsiocb, ndlp); 6639 if (newnode) 6640 lpfc_nlp_put(ndlp); 6641 break; 6642 default: 6643 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6644 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x", 6645 cmd, did, vport->port_state); 6646 6647 /* Unsupported ELS command, reject */ 6648 rjt_err = LSRJT_CMD_UNSUPPORTED; 6649 6650 /* Unknown ELS command <elsCmd> received from NPORT <did> */ 6651 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 6652 "0115 Unknown ELS command x%x 
" 6653 "received from NPORT x%x\n", cmd, did); 6654 if (newnode) 6655 lpfc_nlp_put(ndlp); 6656 break; 6657 } 6658 6659 /* check if need to LS_RJT received ELS cmd */ 6660 if (rjt_err) { 6661 memset(&stat, 0, sizeof(stat)); 6662 stat.un.b.lsRjtRsnCode = rjt_err; 6663 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; 6664 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp, 6665 NULL); 6666 } 6667 6668 lpfc_nlp_put(elsiocb->context1); 6669 elsiocb->context1 = NULL; 6670 return; 6671 6672 dropit: 6673 if (vport && !(vport->load_flag & FC_UNLOADING)) 6674 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 6675 "0111 Dropping received ELS cmd " 6676 "Data: x%x x%x x%x\n", 6677 icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout); 6678 phba->fc_stat.elsRcvDrop++; 6679 } 6680 6681 /** 6682 * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring 6683 * @phba: pointer to lpfc hba data structure. 6684 * @pring: pointer to a SLI ring. 6685 * @elsiocb: pointer to lpfc els iocb data structure. 6686 * 6687 * This routine is used to process an unsolicited event received from a SLI 6688 * (Service Level Interface) ring. The actual processing of the data buffer 6689 * associated with the unsolicited event is done by invoking the routine 6690 * lpfc_els_unsol_buffer() after properly set up the iocb buffer from the 6691 * SLI ring on which the unsolicited event was received. 6692 **/ 6693 void 6694 lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 6695 struct lpfc_iocbq *elsiocb) 6696 { 6697 struct lpfc_vport *vport = phba->pport; 6698 IOCB_t *icmd = &elsiocb->iocb; 6699 dma_addr_t paddr; 6700 struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2; 6701 struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3; 6702 6703 elsiocb->context1 = NULL; 6704 elsiocb->context2 = NULL; 6705 elsiocb->context3 = NULL; 6706 6707 if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) { 6708 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); 6709 } else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT && 6710 (icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING) { 6711 phba->fc_stat.NoRcvBuf++; 6712 /* Not enough posted buffers; Try posting more buffers */ 6713 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) 6714 lpfc_post_buffer(phba, pring, 0); 6715 return; 6716 } 6717 6718 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 6719 (icmd->ulpCommand == CMD_IOCB_RCV_ELS64_CX || 6720 icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 6721 if (icmd->unsli3.rcvsli3.vpi == 0xffff) 6722 vport = phba->pport; 6723 else 6724 vport = lpfc_find_vport_by_vpid(phba, 6725 icmd->unsli3.rcvsli3.vpi); 6726 } 6727 6728 /* If there are no BDEs associated 6729 * with this IOCB, there is nothing to do. 6730 */ 6731 if (icmd->ulpBdeCount == 0) 6732 return; 6733 6734 /* type of ELS cmd is first 32bit word 6735 * in packet 6736 */ 6737 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 6738 elsiocb->context2 = bdeBuf1; 6739 } else { 6740 paddr = getPaddr(icmd->un.cont64[0].addrHigh, 6741 icmd->un.cont64[0].addrLow); 6742 elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring, 6743 paddr); 6744 } 6745 6746 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb); 6747 /* 6748 * The different unsolicited event handlers would tell us 6749 * if they are done with "mp" by setting context2 to NULL. 
6750 */ 6751 if (elsiocb->context2) { 6752 lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2); 6753 elsiocb->context2 = NULL; 6754 } 6755 6756 /* RCV_ELS64_CX provide for 2 BDEs - process 2nd if included */ 6757 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) && 6758 icmd->ulpBdeCount == 2) { 6759 elsiocb->context2 = bdeBuf2; 6760 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb); 6761 /* free mp if we are done with it */ 6762 if (elsiocb->context2) { 6763 lpfc_in_buf_free(phba, elsiocb->context2); 6764 elsiocb->context2 = NULL; 6765 } 6766 } 6767 } 6768 6769 /** 6770 * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr 6771 * @phba: pointer to lpfc hba data structure. 6772 * @vport: pointer to a virtual N_Port data structure. 6773 * 6774 * This routine issues a Port Login (PLOGI) to the Name Server with 6775 * State Change Request (SCR) for a @vport. This routine will create an 6776 * ndlp for the Name Server associated to the @vport if such node does 6777 * not already exist. The PLOGI to Name Server is issued by invoking the 6778 * lpfc_issue_els_plogi() routine. If Fabric-Device Management Interface 6779 * (FDMI) is configured to the @vport, a FDMI node will be created and 6780 * the PLOGI to FDMI is issued by invoking lpfc_issue_els_plogi() routine. 6781 **/ 6782 void 6783 lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) 6784 { 6785 struct lpfc_nodelist *ndlp, *ndlp_fdmi; 6786 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6787 6788 /* 6789 * If lpfc_delay_discovery parameter is set and the clean address 6790 * bit is cleared and fc fabric parameters chenged, delay FC NPort 6791 * discovery. 6792 */ 6793 spin_lock_irq(shost->host_lock); 6794 if (vport->fc_flag & FC_DISC_DELAYED) { 6795 spin_unlock_irq(shost->host_lock); 6796 mod_timer(&vport->delayed_disc_tmo, 6797 jiffies + HZ * phba->fc_ratov); 6798 return; 6799 } 6800 spin_unlock_irq(shost->host_lock); 6801 6802 ndlp = lpfc_findnode_did(vport, NameServer_DID); 6803 if (!ndlp) { 6804 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 6805 if (!ndlp) { 6806 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 6807 lpfc_disc_start(vport); 6808 return; 6809 } 6810 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 6811 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 6812 "0251 NameServer login: no memory\n"); 6813 return; 6814 } 6815 lpfc_nlp_init(vport, ndlp, NameServer_DID); 6816 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 6817 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 6818 if (!ndlp) { 6819 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 6820 lpfc_disc_start(vport); 6821 return; 6822 } 6823 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 6824 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 6825 "0348 NameServer login: node freed\n"); 6826 return; 6827 } 6828 } 6829 ndlp->nlp_type |= NLP_FABRIC; 6830 6831 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 6832 6833 if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) { 6834 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 6835 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 6836 "0252 Cannot issue NameServer login\n"); 6837 return; 6838 } 6839 6840 if (vport->cfg_fdmi_on) { 6841 /* If this is the first time, allocate an ndlp and initialize 6842 * it. Otherwise, make sure the node is enabled and then do the 6843 * login. 
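 * A failure to PLOGI the FDMI ndlp is not treated as fatal for the
 * vport here; the return value of lpfc_issue_els_plogi() is not checked.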
6844 */ 6845 ndlp_fdmi = lpfc_findnode_did(vport, FDMI_DID); 6846 if (!ndlp_fdmi) { 6847 ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool, 6848 GFP_KERNEL); 6849 if (ndlp_fdmi) { 6850 lpfc_nlp_init(vport, ndlp_fdmi, FDMI_DID); 6851 ndlp_fdmi->nlp_type |= NLP_FABRIC; 6852 } else 6853 return; 6854 } 6855 if (!NLP_CHK_NODE_ACT(ndlp_fdmi)) 6856 ndlp_fdmi = lpfc_enable_node(vport, 6857 ndlp_fdmi, 6858 NLP_STE_NPR_NODE); 6859 6860 if (ndlp_fdmi) { 6861 lpfc_nlp_set_state(vport, ndlp_fdmi, 6862 NLP_STE_PLOGI_ISSUE); 6863 lpfc_issue_els_plogi(vport, ndlp_fdmi->nlp_DID, 0); 6864 } 6865 } 6866 } 6867 6868 /** 6869 * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport 6870 * @phba: pointer to lpfc hba data structure. 6871 * @pmb: pointer to the driver internal queue element for mailbox command. 6872 * 6873 * This routine is the completion callback function to register new vport 6874 * mailbox command. If the new vport mailbox command completes successfully, 6875 * the fabric registration login shall be performed on physical port (the 6876 * new vport created is actually a physical port, with VPI 0) or the port 6877 * login to Name Server for State Change Request (SCR) will be performed 6878 * on virtual port (real virtual port, with VPI greater than 0). 6879 **/ 6880 static void 6881 lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 6882 { 6883 struct lpfc_vport *vport = pmb->vport; 6884 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6885 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 6886 MAILBOX_t *mb = &pmb->u.mb; 6887 int rc; 6888 6889 spin_lock_irq(shost->host_lock); 6890 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 6891 spin_unlock_irq(shost->host_lock); 6892 6893 if (mb->mbxStatus) { 6894 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 6895 "0915 Register VPI failed : Status: x%x" 6896 " upd bit: x%x \n", mb->mbxStatus, 6897 mb->un.varRegVpi.upd); 6898 if (phba->sli_rev == LPFC_SLI_REV4 && 6899 mb->un.varRegVpi.upd) 6900 goto mbox_err_exit ; 6901 6902 switch (mb->mbxStatus) { 6903 case 0x11: /* unsupported feature */ 6904 case 0x9603: /* max_vpi exceeded */ 6905 case 0x9602: /* Link event since CLEAR_LA */ 6906 /* giving up on vport registration */ 6907 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 6908 spin_lock_irq(shost->host_lock); 6909 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 6910 spin_unlock_irq(shost->host_lock); 6911 lpfc_can_disctmo(vport); 6912 break; 6913 /* If reg_vpi fail with invalid VPI status, re-init VPI */ 6914 case 0x20: 6915 spin_lock_irq(shost->host_lock); 6916 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 6917 spin_unlock_irq(shost->host_lock); 6918 lpfc_init_vpi(phba, pmb, vport->vpi); 6919 pmb->vport = vport; 6920 pmb->mbox_cmpl = lpfc_init_vpi_cmpl; 6921 rc = lpfc_sli_issue_mbox(phba, pmb, 6922 MBX_NOWAIT); 6923 if (rc == MBX_NOT_FINISHED) { 6924 lpfc_printf_vlog(vport, 6925 KERN_ERR, LOG_MBOX, 6926 "2732 Failed to issue INIT_VPI" 6927 " mailbox command\n"); 6928 } else { 6929 lpfc_nlp_put(ndlp); 6930 return; 6931 } 6932 6933 default: 6934 /* Try to recover from this error */ 6935 if (phba->sli_rev == LPFC_SLI_REV4) 6936 lpfc_sli4_unreg_all_rpis(vport); 6937 lpfc_mbx_unreg_vpi(vport); 6938 spin_lock_irq(shost->host_lock); 6939 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 6940 spin_unlock_irq(shost->host_lock); 6941 if (vport->port_type == LPFC_PHYSICAL_PORT 6942 && !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) 6943 lpfc_issue_init_vfi(vport); 6944 else 6945 lpfc_initial_fdisc(vport); 6946 break; 6947 } 6948 } 
else { 6949 spin_lock_irq(shost->host_lock); 6950 vport->vpi_state |= LPFC_VPI_REGISTERED; 6951 spin_unlock_irq(shost->host_lock); 6952 if (vport == phba->pport) { 6953 if (phba->sli_rev < LPFC_SLI_REV4) 6954 lpfc_issue_fabric_reglogin(vport); 6955 else { 6956 /* 6957 * If the physical port is instantiated using 6958 * FDISC, do not start vport discovery. 6959 */ 6960 if (vport->port_state != LPFC_FDISC) 6961 lpfc_start_fdiscs(phba); 6962 lpfc_do_scr_ns_plogi(phba, vport); 6963 } 6964 } else 6965 lpfc_do_scr_ns_plogi(phba, vport); 6966 } 6967 mbox_err_exit: 6968 /* Now, we decrement the ndlp reference count held for this 6969 * callback function 6970 */ 6971 lpfc_nlp_put(ndlp); 6972 6973 mempool_free(pmb, phba->mbox_mem_pool); 6974 return; 6975 } 6976 6977 /** 6978 * lpfc_register_new_vport - Register a new vport with a HBA 6979 * @phba: pointer to lpfc hba data structure. 6980 * @vport: pointer to a host virtual N_Port data structure. 6981 * @ndlp: pointer to a node-list data structure. 6982 * 6983 * This routine registers the @vport as a new virtual port with a HBA. 6984 * It is done through a registering vpi mailbox command. 6985 **/ 6986 void 6987 lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport, 6988 struct lpfc_nodelist *ndlp) 6989 { 6990 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6991 LPFC_MBOXQ_t *mbox; 6992 6993 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6994 if (mbox) { 6995 lpfc_reg_vpi(vport, mbox); 6996 mbox->vport = vport; 6997 mbox->context2 = lpfc_nlp_get(ndlp); 6998 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport; 6999 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 7000 == MBX_NOT_FINISHED) { 7001 /* mailbox command not success, decrement ndlp 7002 * reference count for this command 7003 */ 7004 lpfc_nlp_put(ndlp); 7005 mempool_free(mbox, phba->mbox_mem_pool); 7006 7007 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 7008 "0253 Register VPI: Can't send mbox\n"); 7009 goto mbox_err_exit; 7010 } 7011 } else { 7012 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 7013 "0254 Register VPI: no memory\n"); 7014 goto mbox_err_exit; 7015 } 7016 return; 7017 7018 mbox_err_exit: 7019 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 7020 spin_lock_irq(shost->host_lock); 7021 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 7022 spin_unlock_irq(shost->host_lock); 7023 return; 7024 } 7025 7026 /** 7027 * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer 7028 * @phba: pointer to lpfc hba data structure. 7029 * 7030 * This routine cancels the retry delay timers to all the vports. 7031 **/ 7032 void 7033 lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba) 7034 { 7035 struct lpfc_vport **vports; 7036 struct lpfc_nodelist *ndlp; 7037 uint32_t link_state; 7038 int i; 7039 7040 /* Treat this failure as linkdown for all vports */ 7041 link_state = phba->link_state; 7042 lpfc_linkdown(phba); 7043 phba->link_state = link_state; 7044 7045 vports = lpfc_create_vport_work_array(phba); 7046 7047 if (vports) { 7048 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 7049 ndlp = lpfc_findnode_did(vports[i], Fabric_DID); 7050 if (ndlp) 7051 lpfc_cancel_retry_delay_tmo(vports[i], ndlp); 7052 lpfc_els_flush_cmd(vports[i]); 7053 } 7054 lpfc_destroy_vport_work_array(phba, vports); 7055 } 7056 } 7057 7058 /** 7059 * lpfc_retry_pport_discovery - Start timer to retry FLOGI. 7060 * @phba: pointer to lpfc hba data structure. 
7061 *
7062 * This routine aborts all pending discovery commands and
7063 * starts a timer to retry FLOGI for the physical port
7064 * discovery.
7065 **/
7066 void
7067 lpfc_retry_pport_discovery(struct lpfc_hba *phba)
7068 {
7069 struct lpfc_nodelist *ndlp;
7070 struct Scsi_Host *shost;
7071
7072 /* Cancel all the vports' retry delay timers */
7073 lpfc_cancel_all_vport_retry_delay_timer(phba);
7074
7075 /* If the fabric requires FLOGI, then re-instantiate the physical login */
7076 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
7077 if (!ndlp)
7078 return;
7079
7080 shost = lpfc_shost_from_vport(phba->pport);
7081 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
7082 spin_lock_irq(shost->host_lock);
7083 ndlp->nlp_flag |= NLP_DELAY_TMO;
7084 spin_unlock_irq(shost->host_lock);
7085 ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
7086 phba->pport->port_state = LPFC_FLOGI;
7087 return;
7088 }
7089
7090 /**
7091 * lpfc_fabric_login_reqd - Check if FLOGI required.
7092 * @phba: pointer to lpfc hba data structure.
7093 * @cmdiocb: pointer to FDISC command iocb.
7094 * @rspiocb: pointer to FDISC response iocb.
7095 *
7096 * This routine checks if a FLOGI is required for FDISC
7097 * to succeed.
7098 **/
7099 static int
7100 lpfc_fabric_login_reqd(struct lpfc_hba *phba,
7101 struct lpfc_iocbq *cmdiocb,
7102 struct lpfc_iocbq *rspiocb)
7103 {
7104
7105 if ((rspiocb->iocb.ulpStatus != IOSTAT_FABRIC_RJT) ||
7106 (rspiocb->iocb.un.ulpWord[4] != RJT_LOGIN_REQUIRED))
7107 return 0;
7108 else
7109 return 1;
7110 }
7111
7112 /**
7113 * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command
7114 * @phba: pointer to lpfc hba data structure.
7115 * @cmdiocb: pointer to lpfc command iocb data structure.
7116 * @rspiocb: pointer to lpfc response iocb data structure.
7117 *
7118 * This routine is the completion callback function for a Fabric Discover
7119 * (FDISC) ELS command. Since all the FDISC ELS commands are issued
7120 * single threaded, each FDISC completion callback function will reset
7121 * the discovery timer for all vports so that the timers do not
7122 * unnecessarily time out. The function checks the FDISC IOCB status. If an
7123 * error is detected, the vport will be set to the FC_VPORT_FAILED state.
7124 * Otherwise, the vport will be set to the FC_VPORT_ACTIVE state. It then
7125 * checks whether the DID assigned to the vport has been changed with the
7126 * completion of the FDISC command. If so, both RPI (Remote Port Index) and
7127 * VPI (Virtual Port Index) are unregistered from the HBA, and then the
7128 * lpfc_register_new_vport() routine is invoked to register the new vport
7129 * with the HBA. Otherwise, the lpfc_do_scr_ns_plogi() routine is invoked to
7130 * issue a PLOGI to the Name Server for State Change Request (SCR).
7131 **/
7132 static void
7133 lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
7134 struct lpfc_iocbq *rspiocb)
7135 {
7136 struct lpfc_vport *vport = cmdiocb->vport;
7137 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7138 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
7139 struct lpfc_nodelist *np;
7140 struct lpfc_nodelist *next_np;
7141 IOCB_t *irsp = &rspiocb->iocb;
7142 struct lpfc_iocbq *piocb;
7143 struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
7144 struct serv_parm *sp;
7145 uint8_t fabric_param_changed;
7146
7147 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7148 "0123 FDISC completes. x%x/x%x prevDID: x%x\n",
7149 irsp->ulpStatus, irsp->un.ulpWord[4],
7150 vport->fc_prevDID);
7151 /* Since all FDISCs are being single threaded, we
7152 * must reset the discovery timer for ALL vports
7153 * waiting to send FDISC when one completes.
7154 */
7155 list_for_each_entry(piocb, &phba->fabric_iocb_list, list) {
7156 lpfc_set_disctmo(piocb->vport);
7157 }
7158
7159 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
7160 "FDISC cmpl: status:x%x/x%x prevdid:x%x",
7161 irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID);
7162
7163 if (irsp->ulpStatus) {
7164
7165 if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) {
7166 lpfc_retry_pport_discovery(phba);
7167 goto out;
7168 }
7169
7170 /* Check for retry */
7171 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
7172 goto out;
7173 /* FDISC failed */
7174 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
7175 "0126 FDISC failed. (x%x/x%x)\n",
7176 irsp->ulpStatus, irsp->un.ulpWord[4]);
7177 goto fdisc_failed;
7178 }
7179 spin_lock_irq(shost->host_lock);
7180 vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
7181 vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
7182 vport->fc_flag |= FC_FABRIC;
7183 if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP)
7184 vport->fc_flag |= FC_PUBLIC_LOOP;
7185 spin_unlock_irq(shost->host_lock);
7186
7187 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
7188 lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
7189 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
7190 sp = prsp->virt + sizeof(uint32_t);
7191 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
7192 memcpy(&vport->fabric_portname, &sp->portName,
7193 sizeof(struct lpfc_name));
7194 memcpy(&vport->fabric_nodename, &sp->nodeName,
7195 sizeof(struct lpfc_name));
7196 if (fabric_param_changed &&
7197 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
7198 /* If our NportID changed, we need to ensure all
7199 * remaining NPORTs get unreg_login'ed so we can
7200 * issue unreg_vpi.
7201 */
7202 list_for_each_entry_safe(np, next_np,
7203 &vport->fc_nodes, nlp_listp) {
7204 if (!NLP_CHK_NODE_ACT(np) ||
7205 (np->nlp_state != NLP_STE_NPR_NODE) ||
7206 !(np->nlp_flag & NLP_NPR_ADISC))
7207 continue;
7208 spin_lock_irq(shost->host_lock);
7209 np->nlp_flag &= ~NLP_NPR_ADISC;
7210 spin_unlock_irq(shost->host_lock);
7211 lpfc_unreg_rpi(vport, np);
7212 }
7213 lpfc_cleanup_pending_mbox(vport);
7214
7215 if (phba->sli_rev == LPFC_SLI_REV4)
7216 lpfc_sli4_unreg_all_rpis(vport);
7217
7218 lpfc_mbx_unreg_vpi(vport);
7219 spin_lock_irq(shost->host_lock);
7220 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
7221 if (phba->sli_rev == LPFC_SLI_REV4)
7222 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
7223 else
7224 vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG;
7225 spin_unlock_irq(shost->host_lock);
7226 } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
7227 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
7228 /*
7229 * Driver needs to re-reg VPI in order for f/w
7230 * to update the MAC address.
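 * This path is only taken on SLI4 ports when the fabric parameters did
 * not change and FC_VPORT_NEEDS_REG_VPI is not set.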
7231 */ 7232 lpfc_register_new_vport(phba, vport, ndlp); 7233 goto out; 7234 } 7235 7236 if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI) 7237 lpfc_issue_init_vpi(vport); 7238 else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) 7239 lpfc_register_new_vport(phba, vport, ndlp); 7240 else 7241 lpfc_do_scr_ns_plogi(phba, vport); 7242 goto out; 7243 fdisc_failed: 7244 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 7245 /* Cancel discovery timer */ 7246 lpfc_can_disctmo(vport); 7247 lpfc_nlp_put(ndlp); 7248 out: 7249 lpfc_els_free_iocb(phba, cmdiocb); 7250 } 7251 7252 /** 7253 * lpfc_issue_els_fdisc - Issue a fdisc iocb command 7254 * @vport: pointer to a virtual N_Port data structure. 7255 * @ndlp: pointer to a node-list data structure. 7256 * @retry: number of retries to the command IOCB. 7257 * 7258 * This routine prepares and issues a Fabric Discover (FDISC) IOCB to 7259 * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb() 7260 * routine to issue the IOCB, which makes sure only one outstanding fabric 7261 * IOCB will be sent off HBA at any given time. 7262 * 7263 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 7264 * will be incremented by 1 for holding the ndlp and the reference to ndlp 7265 * will be stored into the context1 field of the IOCB for the completion 7266 * callback function to the FDISC ELS command. 7267 * 7268 * Return code 7269 * 0 - Successfully issued fdisc iocb command 7270 * 1 - Failed to issue fdisc iocb command 7271 **/ 7272 static int 7273 lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 7274 uint8_t retry) 7275 { 7276 struct lpfc_hba *phba = vport->phba; 7277 IOCB_t *icmd; 7278 struct lpfc_iocbq *elsiocb; 7279 struct serv_parm *sp; 7280 uint8_t *pcmd; 7281 uint16_t cmdsize; 7282 int did = ndlp->nlp_DID; 7283 int rc; 7284 7285 vport->port_state = LPFC_FDISC; 7286 vport->fc_myDID = 0; 7287 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 7288 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 7289 ELS_CMD_FDISC); 7290 if (!elsiocb) { 7291 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 7292 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 7293 "0255 Issue FDISC: no IOCB\n"); 7294 return 1; 7295 } 7296 7297 icmd = &elsiocb->iocb; 7298 icmd->un.elsreq64.myID = 0; 7299 icmd->un.elsreq64.fl = 1; 7300 7301 /* 7302 * SLI3 ports require a different context type value than SLI4. 7303 * Catch SLI3 ports here and override the prep. 
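 * The ulpCt_h/ulpCt_l assignments below override the context tag type
 * that lpfc_prep_els_iocb() set up, using the encoding SLI3 firmware
 * expects for an FDISC.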
7304 */
7305 if (phba->sli_rev == LPFC_SLI_REV3) {
7306 icmd->ulpCt_h = 1;
7307 icmd->ulpCt_l = 0;
7308 }
7309
7310 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
7311 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC;
7312 pcmd += sizeof(uint32_t); /* CSP Word 1 */
7313 memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm));
7314 sp = (struct serv_parm *) pcmd;
7315 /* Setup CSPs accordingly for Fabric */
7316 sp->cmn.e_d_tov = 0;
7317 sp->cmn.w2.r_a_tov = 0;
7318 sp->cmn.virtual_fabric_support = 0;
7319 sp->cls1.classValid = 0;
7320 sp->cls2.seqDelivery = 1;
7321 sp->cls3.seqDelivery = 1;
7322
7323 pcmd += sizeof(uint32_t); /* CSP Word 2 */
7324 pcmd += sizeof(uint32_t); /* CSP Word 3 */
7325 pcmd += sizeof(uint32_t); /* CSP Word 4 */
7326 pcmd += sizeof(uint32_t); /* Port Name */
7327 memcpy(pcmd, &vport->fc_portname, 8);
7328 pcmd += sizeof(uint32_t); /* Node Name */
7329 pcmd += sizeof(uint32_t); /* Node Name */
7330 memcpy(pcmd, &vport->fc_nodename, 8);
7331
7332 lpfc_set_disctmo(vport);
7333
7334 phba->fc_stat.elsXmitFDISC++;
7335 elsiocb->iocb_cmpl = lpfc_cmpl_els_fdisc;
7336
7337 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
7338 "Issue FDISC: did:x%x",
7339 did, 0, 0);
7340
7341 rc = lpfc_issue_fabric_iocb(phba, elsiocb);
7342 if (rc == IOCB_ERROR) {
7343 lpfc_els_free_iocb(phba, elsiocb);
7344 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
7345 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
7346 "0256 Issue FDISC: Cannot send IOCB\n");
7347 return 1;
7348 }
7349 lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING);
7350 return 0;
7351 }
7352
7353 /**
7354 * lpfc_cmpl_els_npiv_logo - Completion function for vport logo
7355 * @phba: pointer to lpfc hba data structure.
7356 * @cmdiocb: pointer to lpfc command iocb data structure.
7357 * @rspiocb: pointer to lpfc response iocb data structure.
7358 *
7359 * This routine is the completion callback function to the issuing of a LOGO
7360 * ELS command off a vport. It frees the command IOCB and then decrements the
7361 * reference count held on the ndlp for this completion function, indicating
7362 * that the reference to the ndlp is no longer needed. Note that the
7363 * lpfc_els_free_iocb() routine decrements the ndlp reference held for this
7364 * callback function and an additional explicit ndlp reference decrement
7365 * will trigger the actual release of the ndlp.
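 *
 * On successful completion (IOSTAT_SUCCESS) the FC_FABRIC flag is also
 * cleared on the vport; unreg_vpi_cmpl is set to VPORT_ERROR in all cases.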
7366 **/ 7367 static void 7368 lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 7369 struct lpfc_iocbq *rspiocb) 7370 { 7371 struct lpfc_vport *vport = cmdiocb->vport; 7372 IOCB_t *irsp; 7373 struct lpfc_nodelist *ndlp; 7374 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7375 7376 ndlp = (struct lpfc_nodelist *)cmdiocb->context1; 7377 irsp = &rspiocb->iocb; 7378 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 7379 "LOGO npiv cmpl: status:x%x/x%x did:x%x", 7380 irsp->ulpStatus, irsp->un.ulpWord[4], irsp->un.rcvels.remoteID); 7381 7382 lpfc_els_free_iocb(phba, cmdiocb); 7383 vport->unreg_vpi_cmpl = VPORT_ERROR; 7384 7385 /* Trigger the release of the ndlp after logo */ 7386 lpfc_nlp_put(ndlp); 7387 7388 /* NPIV LOGO completes to NPort <nlp_DID> */ 7389 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7390 "2928 NPIV LOGO completes to NPort x%x " 7391 "Data: x%x x%x x%x x%x\n", 7392 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 7393 irsp->ulpTimeout, vport->num_disc_nodes); 7394 7395 if (irsp->ulpStatus == IOSTAT_SUCCESS) { 7396 spin_lock_irq(shost->host_lock); 7397 vport->fc_flag &= ~FC_FABRIC; 7398 spin_unlock_irq(shost->host_lock); 7399 } 7400 } 7401 7402 /** 7403 * lpfc_issue_els_npiv_logo - Issue a logo off a vport 7404 * @vport: pointer to a virtual N_Port data structure. 7405 * @ndlp: pointer to a node-list data structure. 7406 * 7407 * This routine issues a LOGO ELS command to an @ndlp off a @vport. 7408 * 7409 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 7410 * will be incremented by 1 for holding the ndlp and the reference to ndlp 7411 * will be stored into the context1 field of the IOCB for the completion 7412 * callback function to the LOGO ELS command. 7413 * 7414 * Return codes 7415 * 0 - Successfully issued logo off the @vport 7416 * 1 - Failed to issue logo off the @vport 7417 **/ 7418 int 7419 lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 7420 { 7421 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7422 struct lpfc_hba *phba = vport->phba; 7423 IOCB_t *icmd; 7424 struct lpfc_iocbq *elsiocb; 7425 uint8_t *pcmd; 7426 uint16_t cmdsize; 7427 7428 cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name); 7429 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID, 7430 ELS_CMD_LOGO); 7431 if (!elsiocb) 7432 return 1; 7433 7434 icmd = &elsiocb->iocb; 7435 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 7436 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 7437 pcmd += sizeof(uint32_t); 7438 7439 /* Fill in LOGO payload */ 7440 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 7441 pcmd += sizeof(uint32_t); 7442 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 7443 7444 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 7445 "Issue LOGO npiv did:x%x flg:x%x", 7446 ndlp->nlp_DID, ndlp->nlp_flag, 0); 7447 7448 elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo; 7449 spin_lock_irq(shost->host_lock); 7450 ndlp->nlp_flag |= NLP_LOGO_SND; 7451 spin_unlock_irq(shost->host_lock); 7452 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == 7453 IOCB_ERROR) { 7454 spin_lock_irq(shost->host_lock); 7455 ndlp->nlp_flag &= ~NLP_LOGO_SND; 7456 spin_unlock_irq(shost->host_lock); 7457 lpfc_els_free_iocb(phba, elsiocb); 7458 return 1; 7459 } 7460 return 0; 7461 } 7462 7463 /** 7464 * lpfc_fabric_block_timeout - Handler function to the fabric block timer 7465 * @ptr: holder for the timer function associated data. 
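 *       The value carries the lpfc_hba pointer, cast back below.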
7466 * 7467 * This routine is invoked by the fabric iocb block timer after 7468 * timeout. It posts the fabric iocb block timeout event by setting the 7469 * WORKER_FABRIC_BLOCK_TMO bit to work port event bitmap and then invokes 7470 * lpfc_worker_wake_up() routine to wake up the worker thread. It is for 7471 * the worker thread to invoke the lpfc_unblock_fabric_iocbs() on the 7472 * posted event WORKER_FABRIC_BLOCK_TMO. 7473 **/ 7474 void 7475 lpfc_fabric_block_timeout(unsigned long ptr) 7476 { 7477 struct lpfc_hba *phba = (struct lpfc_hba *) ptr; 7478 unsigned long iflags; 7479 uint32_t tmo_posted; 7480 7481 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 7482 tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO; 7483 if (!tmo_posted) 7484 phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO; 7485 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); 7486 7487 if (!tmo_posted) 7488 lpfc_worker_wake_up(phba); 7489 return; 7490 } 7491 7492 /** 7493 * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list 7494 * @phba: pointer to lpfc hba data structure. 7495 * 7496 * This routine issues one fabric iocb from the driver internal list to 7497 * the HBA. It first checks whether it's ready to issue one fabric iocb to 7498 * the HBA (whether there is no outstanding fabric iocb). If so, it shall 7499 * remove one pending fabric iocb from the driver internal list and invokes 7500 * lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA. 7501 **/ 7502 static void 7503 lpfc_resume_fabric_iocbs(struct lpfc_hba *phba) 7504 { 7505 struct lpfc_iocbq *iocb; 7506 unsigned long iflags; 7507 int ret; 7508 IOCB_t *cmd; 7509 7510 repeat: 7511 iocb = NULL; 7512 spin_lock_irqsave(&phba->hbalock, iflags); 7513 /* Post any pending iocb to the SLI layer */ 7514 if (atomic_read(&phba->fabric_iocb_count) == 0) { 7515 list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb), 7516 list); 7517 if (iocb) 7518 /* Increment fabric iocb count to hold the position */ 7519 atomic_inc(&phba->fabric_iocb_count); 7520 } 7521 spin_unlock_irqrestore(&phba->hbalock, iflags); 7522 if (iocb) { 7523 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl; 7524 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb; 7525 iocb->iocb_flag |= LPFC_IO_FABRIC; 7526 7527 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD, 7528 "Fabric sched1: ste:x%x", 7529 iocb->vport->port_state, 0, 0); 7530 7531 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); 7532 7533 if (ret == IOCB_ERROR) { 7534 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl; 7535 iocb->fabric_iocb_cmpl = NULL; 7536 iocb->iocb_flag &= ~LPFC_IO_FABRIC; 7537 cmd = &iocb->iocb; 7538 cmd->ulpStatus = IOSTAT_LOCAL_REJECT; 7539 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED; 7540 iocb->iocb_cmpl(phba, iocb, iocb); 7541 7542 atomic_dec(&phba->fabric_iocb_count); 7543 goto repeat; 7544 } 7545 } 7546 7547 return; 7548 } 7549 7550 /** 7551 * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command 7552 * @phba: pointer to lpfc hba data structure. 7553 * 7554 * This routine unblocks the issuing fabric iocb command. The function 7555 * will clear the fabric iocb block bit and then invoke the routine 7556 * lpfc_resume_fabric_iocbs() to issue one of the pending fabric iocb 7557 * from the driver internal fabric iocb list. 
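 *
 * It is typically invoked from the worker thread once the
 * WORKER_FABRIC_BLOCK_TMO event posted by lpfc_fabric_block_timeout()
 * has been handled, for example:
 *
 *	lpfc_unblock_fabric_iocbs(phba);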
7558 **/ 7559 void 7560 lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba) 7561 { 7562 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 7563 7564 lpfc_resume_fabric_iocbs(phba); 7565 return; 7566 } 7567 7568 /** 7569 * lpfc_block_fabric_iocbs - Block issuing fabric iocb command 7570 * @phba: pointer to lpfc hba data structure. 7571 * 7572 * This routine blocks the issuing fabric iocb for a specified amount of 7573 * time (currently 100 ms). This is done by set the fabric iocb block bit 7574 * and set up a timeout timer for 100ms. When the block bit is set, no more 7575 * fabric iocb will be issued out of the HBA. 7576 **/ 7577 static void 7578 lpfc_block_fabric_iocbs(struct lpfc_hba *phba) 7579 { 7580 int blocked; 7581 7582 blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 7583 /* Start a timer to unblock fabric iocbs after 100ms */ 7584 if (!blocked) 7585 mod_timer(&phba->fabric_block_timer, jiffies + HZ/10 ); 7586 7587 return; 7588 } 7589 7590 /** 7591 * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb 7592 * @phba: pointer to lpfc hba data structure. 7593 * @cmdiocb: pointer to lpfc command iocb data structure. 7594 * @rspiocb: pointer to lpfc response iocb data structure. 7595 * 7596 * This routine is the callback function that is put to the fabric iocb's 7597 * callback function pointer (iocb->iocb_cmpl). The original iocb's callback 7598 * function pointer has been stored in iocb->fabric_iocb_cmpl. This callback 7599 * function first restores and invokes the original iocb's callback function 7600 * and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next 7601 * fabric bound iocb from the driver internal fabric iocb list onto the wire. 7602 **/ 7603 static void 7604 lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 7605 struct lpfc_iocbq *rspiocb) 7606 { 7607 struct ls_rjt stat; 7608 7609 if ((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC) 7610 BUG(); 7611 7612 switch (rspiocb->iocb.ulpStatus) { 7613 case IOSTAT_NPORT_RJT: 7614 case IOSTAT_FABRIC_RJT: 7615 if (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) { 7616 lpfc_block_fabric_iocbs(phba); 7617 } 7618 break; 7619 7620 case IOSTAT_NPORT_BSY: 7621 case IOSTAT_FABRIC_BSY: 7622 lpfc_block_fabric_iocbs(phba); 7623 break; 7624 7625 case IOSTAT_LS_RJT: 7626 stat.un.lsRjtError = 7627 be32_to_cpu(rspiocb->iocb.un.ulpWord[4]); 7628 if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) || 7629 (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY)) 7630 lpfc_block_fabric_iocbs(phba); 7631 break; 7632 } 7633 7634 if (atomic_read(&phba->fabric_iocb_count) == 0) 7635 BUG(); 7636 7637 cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl; 7638 cmdiocb->fabric_iocb_cmpl = NULL; 7639 cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC; 7640 cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb); 7641 7642 atomic_dec(&phba->fabric_iocb_count); 7643 if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) { 7644 /* Post any pending iocbs to HBA */ 7645 lpfc_resume_fabric_iocbs(phba); 7646 } 7647 } 7648 7649 /** 7650 * lpfc_issue_fabric_iocb - Issue a fabric iocb command 7651 * @phba: pointer to lpfc hba data structure. 7652 * @iocb: pointer to lpfc command iocb data structure. 7653 * 7654 * This routine is used as the top-level API for issuing a fabric iocb command 7655 * such as FLOGI and FDISC. To accommodate certain switch fabric, this driver 7656 * function makes sure that only one fabric bound iocb will be outstanding at 7657 * any given time. 
As such, this function will first check to see whether there
7658 * is already an outstanding fabric iocb on the wire. If so, it will put the
7659 * newly issued iocb onto the driver internal fabric iocb list, waiting to be
7660 * issued later. Otherwise, it will issue the iocb on the wire and update the
7661 * fabric iocb count to indicate that there is one fabric iocb on the wire.
7662 *
7663 * Note that this implementation can potentially send fabric IOCBs out of
7664 * order. The problem is that the construction of the "ready" boolean does
7665 * not include the condition that the internal fabric IOCB list is empty. As
7666 * such, it is possible that a fabric IOCB issued by this routine might "jump"
7667 * ahead of the fabric IOCBs already on the internal list.
7668 *
7669 * Return code
7670 * IOCB_SUCCESS - either fabric iocb put on the list or issued successfully
7671 * IOCB_ERROR - failed to issue fabric iocb
7672 **/
7673 static int
7674 lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
7675 {
7676 unsigned long iflags;
7677 int ready;
7678 int ret;
7679
7680 if (atomic_read(&phba->fabric_iocb_count) > 1)
7681 BUG();
7682
7683 spin_lock_irqsave(&phba->hbalock, iflags);
7684 ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
7685 !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
7686
7687 if (ready)
7688 /* Increment fabric iocb count to hold the position */
7689 atomic_inc(&phba->fabric_iocb_count);
7690 spin_unlock_irqrestore(&phba->hbalock, iflags);
7691 if (ready) {
7692 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
7693 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
7694 iocb->iocb_flag |= LPFC_IO_FABRIC;
7695
7696 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
7697 "Fabric sched2: ste:x%x",
7698 iocb->vport->port_state, 0, 0);
7699
7700 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
7701
7702 if (ret == IOCB_ERROR) {
7703 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
7704 iocb->fabric_iocb_cmpl = NULL;
7705 iocb->iocb_flag &= ~LPFC_IO_FABRIC;
7706 atomic_dec(&phba->fabric_iocb_count);
7707 }
7708 } else {
7709 spin_lock_irqsave(&phba->hbalock, iflags);
7710 list_add_tail(&iocb->list, &phba->fabric_iocb_list);
7711 spin_unlock_irqrestore(&phba->hbalock, iflags);
7712 ret = IOCB_SUCCESS;
7713 }
7714 return ret;
7715 }
7716
7717 /**
7718 * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list
7719 * @vport: pointer to a virtual N_Port data structure.
7720 *
7721 * This routine aborts all the IOCBs associated with a @vport from the
7722 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
7723 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
7724 * list, removes each IOCB associated with the @vport off the list, sets the
7725 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
7726 * associated with the IOCB.
7727 **/
7728 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
7729 {
7730 LIST_HEAD(completions);
7731 struct lpfc_hba *phba = vport->phba;
7732 struct lpfc_iocbq *tmp_iocb, *piocb;
7733
7734 spin_lock_irq(&phba->hbalock);
7735 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
7736 list) {
7737
7738 if (piocb->vport != vport)
7739 continue;
7740
7741 list_move_tail(&piocb->list, &completions);
7742 }
7743 spin_unlock_irq(&phba->hbalock);
7744
7745 /* Cancel all the IOCBs from the completions list */
7746 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
7747 IOERR_SLI_ABORTED);
7748 }
7749
7750 /**
7751 * lpfc_fabric_abort_nport - Abort an ndlp's iocbs from driver fabric iocb list
7752 * @ndlp: pointer to a node-list data structure.
7753 *
7754 * This routine aborts all the IOCBs associated with an @ndlp from the
7755 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
7756 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
7757 * list, removes each IOCB associated with the @ndlp off the list, sets the
7758 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
7759 * associated with the IOCB.
7760 **/
7761 void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
7762 {
7763 LIST_HEAD(completions);
7764 struct lpfc_hba *phba = ndlp->phba;
7765 struct lpfc_iocbq *tmp_iocb, *piocb;
7766 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
7767
7768 spin_lock_irq(&phba->hbalock);
7769 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
7770 list) {
7771 if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) {
7772
7773 list_move_tail(&piocb->list, &completions);
7774 }
7775 }
7776 spin_unlock_irq(&phba->hbalock);
7777
7778 /* Cancel all the IOCBs from the completions list */
7779 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
7780 IOERR_SLI_ABORTED);
7781 }
7782
7783 /**
7784 * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list
7785 * @phba: pointer to lpfc hba data structure.
7786 *
7787 * This routine aborts all the IOCBs currently on the driver internal
7788 * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS
7789 * IOCB ring. This function takes the entire IOCB list off the fabric IOCB
7790 * list, removes IOCBs off the list, sets the status field to
7791 * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with
7792 * the IOCB.
7793 **/
7794 void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
7795 {
7796 LIST_HEAD(completions);
7797
7798 spin_lock_irq(&phba->hbalock);
7799 list_splice_init(&phba->fabric_iocb_list, &completions);
7800 spin_unlock_irq(&phba->hbalock);
7801
7802 /* Cancel all the IOCBs from the completions list */
7803 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
7804 IOERR_SLI_ABORTED);
7805 }
7806
7807 /**
7808 * lpfc_sli4_vport_delete_els_xri_aborted - Remove all ndlp references for vport
7809 * @vport: pointer to lpfc vport data structure.
7810 *
7811 * This routine is invoked by the vport cleanup for deletions and the cleanup
7812 * for an ndlp on removal.
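 * Only the ndlp back-pointers on the aborted ELS sgl list are cleared
 * here; the sglq entries themselves stay on that list until
 * lpfc_sli4_els_xri_aborted() processes the corresponding abort events.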
7813 **/ 7814 void 7815 lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport) 7816 { 7817 struct lpfc_hba *phba = vport->phba; 7818 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 7819 unsigned long iflag = 0; 7820 7821 spin_lock_irqsave(&phba->hbalock, iflag); 7822 spin_lock(&phba->sli4_hba.abts_sgl_list_lock); 7823 list_for_each_entry_safe(sglq_entry, sglq_next, 7824 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { 7825 if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport) 7826 sglq_entry->ndlp = NULL; 7827 } 7828 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); 7829 spin_unlock_irqrestore(&phba->hbalock, iflag); 7830 return; 7831 } 7832 7833 /** 7834 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort 7835 * @phba: pointer to lpfc hba data structure. 7836 * @axri: pointer to the els xri abort wcqe structure. 7837 * 7838 * This routine is invoked by the worker thread to process a SLI4 slow-path 7839 * ELS aborted xri. 7840 **/ 7841 void 7842 lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba, 7843 struct sli4_wcqe_xri_aborted *axri) 7844 { 7845 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); 7846 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); 7847 uint16_t lxri = 0; 7848 7849 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 7850 unsigned long iflag = 0; 7851 struct lpfc_nodelist *ndlp; 7852 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 7853 7854 spin_lock_irqsave(&phba->hbalock, iflag); 7855 spin_lock(&phba->sli4_hba.abts_sgl_list_lock); 7856 list_for_each_entry_safe(sglq_entry, sglq_next, 7857 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { 7858 if (sglq_entry->sli4_xritag == xri) { 7859 list_del(&sglq_entry->list); 7860 ndlp = sglq_entry->ndlp; 7861 sglq_entry->ndlp = NULL; 7862 list_add_tail(&sglq_entry->list, 7863 &phba->sli4_hba.lpfc_sgl_list); 7864 sglq_entry->state = SGL_FREED; 7865 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); 7866 spin_unlock_irqrestore(&phba->hbalock, iflag); 7867 lpfc_set_rrq_active(phba, ndlp, xri, rxid, 1); 7868 7869 /* Check if TXQ queue needs to be serviced */ 7870 if (pring->txq_cnt) 7871 lpfc_worker_wake_up(phba); 7872 return; 7873 } 7874 } 7875 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); 7876 lxri = lpfc_sli4_xri_inrange(phba, xri); 7877 if (lxri == NO_XRI) { 7878 spin_unlock_irqrestore(&phba->hbalock, iflag); 7879 return; 7880 } 7881 sglq_entry = __lpfc_get_active_sglq(phba, lxri); 7882 if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) { 7883 spin_unlock_irqrestore(&phba->hbalock, iflag); 7884 return; 7885 } 7886 sglq_entry->state = SGL_XRI_ABORTED; 7887 spin_unlock_irqrestore(&phba->hbalock, iflag); 7888 return; 7889 } 7890