1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2004-2011 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 8 * * 9 * This program is free software; you can redistribute it and/or * 10 * modify it under the terms of version 2 of the GNU General * 11 * Public License as published by the Free Software Foundation. * 12 * This program is distributed in the hope that it will be useful. * 13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 17 * TO BE LEGALLY INVALID. See the GNU General Public License for * 18 * more details, a copy of which can be found in the file COPYING * 19 * included with this package. * 20 *******************************************************************/ 21 /* See Fibre Channel protocol T11 FC-LS for details */ 22 #include <linux/blkdev.h> 23 #include <linux/pci.h> 24 #include <linux/slab.h> 25 #include <linux/interrupt.h> 26 27 #include <scsi/scsi.h> 28 #include <scsi/scsi_device.h> 29 #include <scsi/scsi_host.h> 30 #include <scsi/scsi_transport_fc.h> 31 32 #include "lpfc_hw4.h" 33 #include "lpfc_hw.h" 34 #include "lpfc_sli.h" 35 #include "lpfc_sli4.h" 36 #include "lpfc_nl.h" 37 #include "lpfc_disc.h" 38 #include "lpfc_scsi.h" 39 #include "lpfc.h" 40 #include "lpfc_logmsg.h" 41 #include "lpfc_crtn.h" 42 #include "lpfc_vport.h" 43 #include "lpfc_debugfs.h" 44 45 static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *, 46 struct lpfc_iocbq *); 47 static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *, 48 struct lpfc_iocbq *); 49 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport); 50 static int lpfc_issue_els_fdisc(struct lpfc_vport *vport, 51 struct lpfc_nodelist *ndlp, uint8_t retry); 52 static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba, 53 struct lpfc_iocbq *iocb); 54 55 static int lpfc_max_els_tries = 3; 56 57 /** 58 * lpfc_els_chk_latt - Check host link attention event for a vport 59 * @vport: pointer to a host virtual N_Port data structure. 60 * 61 * This routine checks whether there is an outstanding host link 62 * attention event during the discovery process with the @vport. It is done 63 * by reading the HBA's Host Attention (HA) register. If there is any host 64 * link attention events during this @vport's discovery process, the @vport 65 * shall be marked as FC_ABORT_DISCOVERY, a host link attention clear shall 66 * be issued if the link state is not already in host link cleared state, 67 * and a return code shall indicate whether the host link attention event 68 * had happened. 69 * 70 * Note that, if either the host link is in state LPFC_LINK_DOWN or @vport 71 * state in LPFC_VPORT_READY, the request for checking host link attention 72 * event will be ignored and a return code shall indicate no host link 73 * attention event had happened. 74 * 75 * Return codes 76 * 0 - no host link attention event happened 77 * 1 - host link attention event happened 78 **/ 79 int 80 lpfc_els_chk_latt(struct lpfc_vport *vport) 81 { 82 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 83 struct lpfc_hba *phba = vport->phba; 84 uint32_t ha_copy; 85 86 if (vport->port_state >= LPFC_VPORT_READY || 87 phba->link_state == LPFC_LINK_DOWN || 88 phba->sli_rev > LPFC_SLI_REV3) 89 return 0; 90 91 /* Read the HBA Host Attention Register */ 92 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 93 return 1; 94 95 if (!(ha_copy & HA_LATT)) 96 return 0; 97 98 /* Pending Link Event during Discovery */ 99 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 100 "0237 Pending Link Event during " 101 "Discovery: State x%x\n", 102 phba->pport->port_state); 103 104 /* CLEAR_LA should re-enable link attention events and 105 * we should then immediately take a LATT event. The 106 * LATT processing should call lpfc_linkdown() which 107 * will cleanup any left over in-progress discovery 108 * events. 109 */ 110 spin_lock_irq(shost->host_lock); 111 vport->fc_flag |= FC_ABORT_DISCOVERY; 112 spin_unlock_irq(shost->host_lock); 113 114 if (phba->link_state != LPFC_CLEAR_LA) 115 lpfc_issue_clear_la(phba, vport); 116 117 return 1; 118 } 119 120 /** 121 * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure 122 * @vport: pointer to a host virtual N_Port data structure. 123 * @expectRsp: flag indicating whether response is expected. 124 * @cmdSize: size of the ELS command. 125 * @retry: number of retries to the command IOCB when it fails. 126 * @ndlp: pointer to a node-list data structure. 127 * @did: destination identifier. 128 * @elscmd: the ELS command code. 129 * 130 * This routine is used for allocating a lpfc-IOCB data structure from 131 * the driver lpfc-IOCB free-list and prepare the IOCB with the parameters 132 * passed into the routine for discovery state machine to issue an Extended 133 * Link Service (ELS) commands. It is a generic lpfc-IOCB allocation 134 * and preparation routine that is used by all the discovery state machine 135 * routines and the ELS command-specific fields will be later set up by 136 * the individual discovery machine routines after calling this routine 137 * allocating and preparing a generic IOCB data structure. It fills in the 138 * Buffer Descriptor Entries (BDEs), allocates buffers for both command 139 * payload and response payload (if expected). The reference count on the 140 * ndlp is incremented by 1 and the reference to the ndlp is put into 141 * context1 of the IOCB data structure for this IOCB to hold the ndlp 142 * reference for the command's callback function to access later. 143 * 144 * Return code 145 * Pointer to the newly allocated/prepared els iocb data structure 146 * NULL - when els iocb data structure allocation/preparation failed 147 **/ 148 struct lpfc_iocbq * 149 lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, 150 uint16_t cmdSize, uint8_t retry, 151 struct lpfc_nodelist *ndlp, uint32_t did, 152 uint32_t elscmd) 153 { 154 struct lpfc_hba *phba = vport->phba; 155 struct lpfc_iocbq *elsiocb; 156 struct lpfc_dmabuf *pcmd, *prsp, *pbuflist; 157 struct ulp_bde64 *bpl; 158 IOCB_t *icmd; 159 160 161 if (!lpfc_is_link_up(phba)) 162 return NULL; 163 164 /* Allocate buffer for command iocb */ 165 elsiocb = lpfc_sli_get_iocbq(phba); 166 167 if (elsiocb == NULL) 168 return NULL; 169 170 /* 171 * If this command is for fabric controller and HBA running 172 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames. 173 */ 174 if ((did == Fabric_DID) && 175 (phba->hba_flag & HBA_FIP_SUPPORT) && 176 ((elscmd == ELS_CMD_FLOGI) || 177 (elscmd == ELS_CMD_FDISC) || 178 (elscmd == ELS_CMD_LOGO))) 179 switch (elscmd) { 180 case ELS_CMD_FLOGI: 181 elsiocb->iocb_flag |= 182 ((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT) 183 & LPFC_FIP_ELS_ID_MASK); 184 break; 185 case ELS_CMD_FDISC: 186 elsiocb->iocb_flag |= 187 ((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT) 188 & LPFC_FIP_ELS_ID_MASK); 189 break; 190 case ELS_CMD_LOGO: 191 elsiocb->iocb_flag |= 192 ((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT) 193 & LPFC_FIP_ELS_ID_MASK); 194 break; 195 } 196 else 197 elsiocb->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK; 198 199 icmd = &elsiocb->iocb; 200 201 /* fill in BDEs for command */ 202 /* Allocate buffer for command payload */ 203 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 204 if (pcmd) 205 pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys); 206 if (!pcmd || !pcmd->virt) 207 goto els_iocb_free_pcmb_exit; 208 209 INIT_LIST_HEAD(&pcmd->list); 210 211 /* Allocate buffer for response payload */ 212 if (expectRsp) { 213 prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 214 if (prsp) 215 prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 216 &prsp->phys); 217 if (!prsp || !prsp->virt) 218 goto els_iocb_free_prsp_exit; 219 INIT_LIST_HEAD(&prsp->list); 220 } else 221 prsp = NULL; 222 223 /* Allocate buffer for Buffer ptr list */ 224 pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 225 if (pbuflist) 226 pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 227 &pbuflist->phys); 228 if (!pbuflist || !pbuflist->virt) 229 goto els_iocb_free_pbuf_exit; 230 231 INIT_LIST_HEAD(&pbuflist->list); 232 233 icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys); 234 icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys); 235 icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; 236 icmd->un.elsreq64.remoteID = did; /* DID */ 237 if (expectRsp) { 238 icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64)); 239 icmd->ulpCommand = CMD_ELS_REQUEST64_CR; 240 icmd->ulpTimeout = phba->fc_ratov * 2; 241 } else { 242 icmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64); 243 icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX; 244 } 245 icmd->ulpBdeCount = 1; 246 icmd->ulpLe = 1; 247 icmd->ulpClass = CLASS3; 248 249 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { 250 icmd->un.elsreq64.myID = vport->fc_myDID; 251 252 /* For ELS_REQUEST64_CR, use the VPI by default */ 253 icmd->ulpContext = phba->vpi_ids[vport->vpi]; 254 icmd->ulpCt_h = 0; 255 /* The CT field must be 0=INVALID_RPI for the ECHO cmd */ 256 if (elscmd == ELS_CMD_ECHO) 257 icmd->ulpCt_l = 0; /* context = invalid RPI */ 258 else 259 icmd->ulpCt_l = 1; /* context = VPI */ 260 } 261 262 bpl = (struct ulp_bde64 *) pbuflist->virt; 263 bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys)); 264 bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys)); 265 bpl->tus.f.bdeSize = cmdSize; 266 bpl->tus.f.bdeFlags = 0; 267 bpl->tus.w = le32_to_cpu(bpl->tus.w); 268 269 if (expectRsp) { 270 bpl++; 271 bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys)); 272 bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys)); 273 bpl->tus.f.bdeSize = FCELSSIZE; 274 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 275 bpl->tus.w = le32_to_cpu(bpl->tus.w); 276 } 277 278 /* prevent preparing iocb with NULL ndlp reference */ 279 elsiocb->context1 = lpfc_nlp_get(ndlp); 280 if (!elsiocb->context1) 281 goto els_iocb_free_pbuf_exit; 282 elsiocb->context2 = pcmd; 283 elsiocb->context3 = pbuflist; 284 elsiocb->retry = retry; 285 elsiocb->vport = vport; 286 elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT; 287 288 if (prsp) { 289 list_add(&prsp->list, &pcmd->list); 290 } 291 if (expectRsp) { 292 /* Xmit ELS command <elsCmd> to remote NPORT <did> */ 293 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 294 "0116 Xmit ELS command x%x to remote " 295 "NPORT x%x I/O tag: x%x, port state: x%x\n", 296 elscmd, did, elsiocb->iotag, 297 vport->port_state); 298 } else { 299 /* Xmit ELS response <elsCmd> to remote NPORT <did> */ 300 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 301 "0117 Xmit ELS response x%x to remote " 302 "NPORT x%x I/O tag: x%x, size: x%x\n", 303 elscmd, ndlp->nlp_DID, elsiocb->iotag, 304 cmdSize); 305 } 306 return elsiocb; 307 308 els_iocb_free_pbuf_exit: 309 if (expectRsp) 310 lpfc_mbuf_free(phba, prsp->virt, prsp->phys); 311 kfree(pbuflist); 312 313 els_iocb_free_prsp_exit: 314 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys); 315 kfree(prsp); 316 317 els_iocb_free_pcmb_exit: 318 kfree(pcmd); 319 lpfc_sli_release_iocbq(phba, elsiocb); 320 return NULL; 321 } 322 323 /** 324 * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport 325 * @vport: pointer to a host virtual N_Port data structure. 326 * 327 * This routine issues a fabric registration login for a @vport. An 328 * active ndlp node with Fabric_DID must already exist for this @vport. 329 * The routine invokes two mailbox commands to carry out fabric registration 330 * login through the HBA firmware: the first mailbox command requests the 331 * HBA to perform link configuration for the @vport; and the second mailbox 332 * command requests the HBA to perform the actual fabric registration login 333 * with the @vport. 334 * 335 * Return code 336 * 0 - successfully issued fabric registration login for @vport 337 * -ENXIO -- failed to issue fabric registration login for @vport 338 **/ 339 int 340 lpfc_issue_fabric_reglogin(struct lpfc_vport *vport) 341 { 342 struct lpfc_hba *phba = vport->phba; 343 LPFC_MBOXQ_t *mbox; 344 struct lpfc_dmabuf *mp; 345 struct lpfc_nodelist *ndlp; 346 struct serv_parm *sp; 347 int rc; 348 int err = 0; 349 350 sp = &phba->fc_fabparam; 351 ndlp = lpfc_findnode_did(vport, Fabric_DID); 352 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { 353 err = 1; 354 goto fail; 355 } 356 357 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 358 if (!mbox) { 359 err = 2; 360 goto fail; 361 } 362 363 vport->port_state = LPFC_FABRIC_CFG_LINK; 364 lpfc_config_link(phba, mbox); 365 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 366 mbox->vport = vport; 367 368 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 369 if (rc == MBX_NOT_FINISHED) { 370 err = 3; 371 goto fail_free_mbox; 372 } 373 374 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 375 if (!mbox) { 376 err = 4; 377 goto fail; 378 } 379 rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, 380 ndlp->nlp_rpi); 381 if (rc) { 382 err = 5; 383 goto fail_free_mbox; 384 } 385 386 mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login; 387 mbox->vport = vport; 388 /* increment the reference count on ndlp to hold reference 389 * for the callback routine. 390 */ 391 mbox->context2 = lpfc_nlp_get(ndlp); 392 393 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 394 if (rc == MBX_NOT_FINISHED) { 395 err = 6; 396 goto fail_issue_reg_login; 397 } 398 399 return 0; 400 401 fail_issue_reg_login: 402 /* decrement the reference count on ndlp just incremented 403 * for the failed mbox command. 404 */ 405 lpfc_nlp_put(ndlp); 406 mp = (struct lpfc_dmabuf *) mbox->context1; 407 lpfc_mbuf_free(phba, mp->virt, mp->phys); 408 kfree(mp); 409 fail_free_mbox: 410 mempool_free(mbox, phba->mbox_mem_pool); 411 412 fail: 413 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 414 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 415 "0249 Cannot issue Register Fabric login: Err %d\n", err); 416 return -ENXIO; 417 } 418 419 /** 420 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login 421 * @vport: pointer to a host virtual N_Port data structure. 422 * 423 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for 424 * the @vport. This mailbox command is necessary for FCoE only. 425 * 426 * Return code 427 * 0 - successfully issued REG_VFI for @vport 428 * A failure code otherwise. 429 **/ 430 static int 431 lpfc_issue_reg_vfi(struct lpfc_vport *vport) 432 { 433 struct lpfc_hba *phba = vport->phba; 434 LPFC_MBOXQ_t *mboxq; 435 struct lpfc_nodelist *ndlp; 436 struct serv_parm *sp; 437 struct lpfc_dmabuf *dmabuf; 438 int rc = 0; 439 440 sp = &phba->fc_fabparam; 441 ndlp = lpfc_findnode_did(vport, Fabric_DID); 442 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { 443 rc = -ENODEV; 444 goto fail; 445 } 446 447 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 448 if (!dmabuf) { 449 rc = -ENOMEM; 450 goto fail; 451 } 452 dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys); 453 if (!dmabuf->virt) { 454 rc = -ENOMEM; 455 goto fail_free_dmabuf; 456 } 457 458 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 459 if (!mboxq) { 460 rc = -ENOMEM; 461 goto fail_free_coherent; 462 } 463 vport->port_state = LPFC_FABRIC_CFG_LINK; 464 memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(vport->fc_sparam)); 465 lpfc_reg_vfi(mboxq, vport, dmabuf->phys); 466 mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi; 467 mboxq->vport = vport; 468 mboxq->context1 = dmabuf; 469 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 470 if (rc == MBX_NOT_FINISHED) { 471 rc = -ENXIO; 472 goto fail_free_mbox; 473 } 474 return 0; 475 476 fail_free_mbox: 477 mempool_free(mboxq, phba->mbox_mem_pool); 478 fail_free_coherent: 479 lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys); 480 fail_free_dmabuf: 481 kfree(dmabuf); 482 fail: 483 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 484 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 485 "0289 Issue Register VFI failed: Err %d\n", rc); 486 return rc; 487 } 488 489 /** 490 * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean. 491 * @vport: pointer to a host virtual N_Port data structure. 492 * @sp: pointer to service parameter data structure. 493 * 494 * This routine is called from FLOGI/FDISC completion handler functions. 495 * lpfc_check_clean_addr_bit return 1 when FCID/Fabric portname/ Fabric 496 * node nodename is changed in the completion service parameter else return 497 * 0. This function also set flag in the vport data structure to delay 498 * NP_Port discovery after the FLOGI/FDISC completion if Clean address bit 499 * in FLOGI/FDISC response is cleared and FCID/Fabric portname/ Fabric 500 * node nodename is changed in the completion service parameter. 501 * 502 * Return code 503 * 0 - FCID and Fabric Nodename and Fabric portname is not changed. 504 * 1 - FCID or Fabric Nodename or Fabric portname is changed. 505 * 506 **/ 507 static uint8_t 508 lpfc_check_clean_addr_bit(struct lpfc_vport *vport, 509 struct serv_parm *sp) 510 { 511 uint8_t fabric_param_changed = 0; 512 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 513 514 if ((vport->fc_prevDID != vport->fc_myDID) || 515 memcmp(&vport->fabric_portname, &sp->portName, 516 sizeof(struct lpfc_name)) || 517 memcmp(&vport->fabric_nodename, &sp->nodeName, 518 sizeof(struct lpfc_name))) 519 fabric_param_changed = 1; 520 521 /* 522 * Word 1 Bit 31 in common service parameter is overloaded. 523 * Word 1 Bit 31 in FLOGI request is multiple NPort request 524 * Word 1 Bit 31 in FLOGI response is clean address bit 525 * 526 * If fabric parameter is changed and clean address bit is 527 * cleared delay nport discovery if 528 * - vport->fc_prevDID != 0 (not initial discovery) OR 529 * - lpfc_delay_discovery module parameter is set. 530 */ 531 if (fabric_param_changed && !sp->cmn.clean_address_bit && 532 (vport->fc_prevDID || lpfc_delay_discovery)) { 533 spin_lock_irq(shost->host_lock); 534 vport->fc_flag |= FC_DISC_DELAYED; 535 spin_unlock_irq(shost->host_lock); 536 } 537 538 return fabric_param_changed; 539 } 540 541 542 /** 543 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port 544 * @vport: pointer to a host virtual N_Port data structure. 545 * @ndlp: pointer to a node-list data structure. 546 * @sp: pointer to service parameter data structure. 547 * @irsp: pointer to the IOCB within the lpfc response IOCB. 548 * 549 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback 550 * function to handle the completion of a Fabric Login (FLOGI) into a fabric 551 * port in a fabric topology. It properly sets up the parameters to the @ndlp 552 * from the IOCB response. It also check the newly assigned N_Port ID to the 553 * @vport against the previously assigned N_Port ID. If it is different from 554 * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine 555 * is invoked on all the remaining nodes with the @vport to unregister the 556 * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin() 557 * is invoked to register login to the fabric. 558 * 559 * Return code 560 * 0 - Success (currently, always return 0) 561 **/ 562 static int 563 lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 564 struct serv_parm *sp, IOCB_t *irsp) 565 { 566 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 567 struct lpfc_hba *phba = vport->phba; 568 struct lpfc_nodelist *np; 569 struct lpfc_nodelist *next_np; 570 uint8_t fabric_param_changed; 571 572 spin_lock_irq(shost->host_lock); 573 vport->fc_flag |= FC_FABRIC; 574 spin_unlock_irq(shost->host_lock); 575 576 phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov); 577 if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */ 578 phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000; 579 580 phba->fc_edtovResol = sp->cmn.edtovResolution; 581 phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000; 582 583 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 584 spin_lock_irq(shost->host_lock); 585 vport->fc_flag |= FC_PUBLIC_LOOP; 586 spin_unlock_irq(shost->host_lock); 587 } 588 589 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID; 590 memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name)); 591 memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name)); 592 ndlp->nlp_class_sup = 0; 593 if (sp->cls1.classValid) 594 ndlp->nlp_class_sup |= FC_COS_CLASS1; 595 if (sp->cls2.classValid) 596 ndlp->nlp_class_sup |= FC_COS_CLASS2; 597 if (sp->cls3.classValid) 598 ndlp->nlp_class_sup |= FC_COS_CLASS3; 599 if (sp->cls4.classValid) 600 ndlp->nlp_class_sup |= FC_COS_CLASS4; 601 ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | 602 sp->cmn.bbRcvSizeLsb; 603 604 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp); 605 memcpy(&vport->fabric_portname, &sp->portName, 606 sizeof(struct lpfc_name)); 607 memcpy(&vport->fabric_nodename, &sp->nodeName, 608 sizeof(struct lpfc_name)); 609 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); 610 611 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { 612 if (sp->cmn.response_multiple_NPort) { 613 lpfc_printf_vlog(vport, KERN_WARNING, 614 LOG_ELS | LOG_VPORT, 615 "1816 FLOGI NPIV supported, " 616 "response data 0x%x\n", 617 sp->cmn.response_multiple_NPort); 618 phba->link_flag |= LS_NPIV_FAB_SUPPORTED; 619 } else { 620 /* Because we asked f/w for NPIV it still expects us 621 to call reg_vnpid atleast for the physcial host */ 622 lpfc_printf_vlog(vport, KERN_WARNING, 623 LOG_ELS | LOG_VPORT, 624 "1817 Fabric does not support NPIV " 625 "- configuring single port mode.\n"); 626 phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED; 627 } 628 } 629 630 if (fabric_param_changed && 631 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 632 633 /* If our NportID changed, we need to ensure all 634 * remaining NPORTs get unreg_login'ed. 635 */ 636 list_for_each_entry_safe(np, next_np, 637 &vport->fc_nodes, nlp_listp) { 638 if (!NLP_CHK_NODE_ACT(np)) 639 continue; 640 if ((np->nlp_state != NLP_STE_NPR_NODE) || 641 !(np->nlp_flag & NLP_NPR_ADISC)) 642 continue; 643 spin_lock_irq(shost->host_lock); 644 np->nlp_flag &= ~NLP_NPR_ADISC; 645 spin_unlock_irq(shost->host_lock); 646 lpfc_unreg_rpi(vport, np); 647 } 648 lpfc_cleanup_pending_mbox(vport); 649 650 if (phba->sli_rev == LPFC_SLI_REV4) 651 lpfc_sli4_unreg_all_rpis(vport); 652 653 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { 654 lpfc_mbx_unreg_vpi(vport); 655 spin_lock_irq(shost->host_lock); 656 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 657 spin_unlock_irq(shost->host_lock); 658 } 659 /* 660 * If VPI is unreged, driver need to do INIT_VPI 661 * before re-registering 662 */ 663 if (phba->sli_rev == LPFC_SLI_REV4) { 664 spin_lock_irq(shost->host_lock); 665 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 666 spin_unlock_irq(shost->host_lock); 667 } 668 } else if ((phba->sli_rev == LPFC_SLI_REV4) && 669 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 670 /* 671 * Driver needs to re-reg VPI in order for f/w 672 * to update the MAC address. 673 */ 674 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 675 lpfc_register_new_vport(phba, vport, ndlp); 676 return 0; 677 } 678 679 if (phba->sli_rev < LPFC_SLI_REV4) { 680 lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE); 681 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED && 682 vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) 683 lpfc_register_new_vport(phba, vport, ndlp); 684 else 685 lpfc_issue_fabric_reglogin(vport); 686 } else { 687 ndlp->nlp_type |= NLP_FABRIC; 688 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 689 if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) && 690 (vport->vpi_state & LPFC_VPI_REGISTERED)) { 691 lpfc_start_fdiscs(phba); 692 lpfc_do_scr_ns_plogi(phba, vport); 693 } else if (vport->fc_flag & FC_VFI_REGISTERED) 694 lpfc_issue_init_vpi(vport); 695 else 696 lpfc_issue_reg_vfi(vport); 697 } 698 return 0; 699 } 700 /** 701 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port 702 * @vport: pointer to a host virtual N_Port data structure. 703 * @ndlp: pointer to a node-list data structure. 704 * @sp: pointer to service parameter data structure. 705 * 706 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback 707 * function to handle the completion of a Fabric Login (FLOGI) into an N_Port 708 * in a point-to-point topology. First, the @vport's N_Port Name is compared 709 * with the received N_Port Name: if the @vport's N_Port Name is greater than 710 * the received N_Port Name lexicographically, this node shall assign local 711 * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and 712 * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise, 713 * this node shall just wait for the remote node to issue PLOGI and assign 714 * N_Port IDs. 715 * 716 * Return code 717 * 0 - Success 718 * -ENXIO - Fail 719 **/ 720 static int 721 lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 722 struct serv_parm *sp) 723 { 724 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 725 struct lpfc_hba *phba = vport->phba; 726 LPFC_MBOXQ_t *mbox; 727 int rc; 728 729 spin_lock_irq(shost->host_lock); 730 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 731 spin_unlock_irq(shost->host_lock); 732 733 phba->fc_edtov = FF_DEF_EDTOV; 734 phba->fc_ratov = FF_DEF_RATOV; 735 rc = memcmp(&vport->fc_portname, &sp->portName, 736 sizeof(vport->fc_portname)); 737 if (rc >= 0) { 738 /* This side will initiate the PLOGI */ 739 spin_lock_irq(shost->host_lock); 740 vport->fc_flag |= FC_PT2PT_PLOGI; 741 spin_unlock_irq(shost->host_lock); 742 743 /* 744 * N_Port ID cannot be 0, set our to LocalID the other 745 * side will be RemoteID. 746 */ 747 748 /* not equal */ 749 if (rc) 750 vport->fc_myDID = PT2PT_LocalID; 751 752 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 753 if (!mbox) 754 goto fail; 755 756 lpfc_config_link(phba, mbox); 757 758 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 759 mbox->vport = vport; 760 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 761 if (rc == MBX_NOT_FINISHED) { 762 mempool_free(mbox, phba->mbox_mem_pool); 763 goto fail; 764 } 765 /* Decrement ndlp reference count indicating that ndlp can be 766 * safely released when other references to it are done. 767 */ 768 lpfc_nlp_put(ndlp); 769 770 ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID); 771 if (!ndlp) { 772 /* 773 * Cannot find existing Fabric ndlp, so allocate a 774 * new one 775 */ 776 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 777 if (!ndlp) 778 goto fail; 779 lpfc_nlp_init(vport, ndlp, PT2PT_RemoteID); 780 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 781 ndlp = lpfc_enable_node(vport, ndlp, 782 NLP_STE_UNUSED_NODE); 783 if(!ndlp) 784 goto fail; 785 } 786 787 memcpy(&ndlp->nlp_portname, &sp->portName, 788 sizeof(struct lpfc_name)); 789 memcpy(&ndlp->nlp_nodename, &sp->nodeName, 790 sizeof(struct lpfc_name)); 791 /* Set state will put ndlp onto node list if not already done */ 792 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 793 spin_lock_irq(shost->host_lock); 794 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 795 spin_unlock_irq(shost->host_lock); 796 } else 797 /* This side will wait for the PLOGI, decrement ndlp reference 798 * count indicating that ndlp can be released when other 799 * references to it are done. 800 */ 801 lpfc_nlp_put(ndlp); 802 803 /* If we are pt2pt with another NPort, force NPIV off! */ 804 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED; 805 806 spin_lock_irq(shost->host_lock); 807 vport->fc_flag |= FC_PT2PT; 808 spin_unlock_irq(shost->host_lock); 809 810 /* Start discovery - this should just do CLEAR_LA */ 811 lpfc_disc_start(vport); 812 return 0; 813 fail: 814 return -ENXIO; 815 } 816 817 /** 818 * lpfc_cmpl_els_flogi - Completion callback function for flogi 819 * @phba: pointer to lpfc hba data structure. 820 * @cmdiocb: pointer to lpfc command iocb data structure. 821 * @rspiocb: pointer to lpfc response iocb data structure. 822 * 823 * This routine is the top-level completion callback function for issuing 824 * a Fabric Login (FLOGI) command. If the response IOCB reported error, 825 * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If 826 * retry has been made (either immediately or delayed with lpfc_els_retry() 827 * returning 1), the command IOCB will be released and function returned. 828 * If the retry attempt has been given up (possibly reach the maximum 829 * number of retries), one additional decrement of ndlp reference shall be 830 * invoked before going out after releasing the command IOCB. This will 831 * actually release the remote node (Note, lpfc_els_free_iocb() will also 832 * invoke one decrement of ndlp reference count). If no error reported in 833 * the IOCB status, the command Port ID field is used to determine whether 834 * this is a point-to-point topology or a fabric topology: if the Port ID 835 * field is assigned, it is a fabric topology; otherwise, it is a 836 * point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or 837 * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the 838 * specific topology completion conditions. 839 **/ 840 static void 841 lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 842 struct lpfc_iocbq *rspiocb) 843 { 844 struct lpfc_vport *vport = cmdiocb->vport; 845 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 846 IOCB_t *irsp = &rspiocb->iocb; 847 struct lpfc_nodelist *ndlp = cmdiocb->context1; 848 struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp; 849 struct serv_parm *sp; 850 uint16_t fcf_index; 851 int rc; 852 853 /* Check to see if link went down during discovery */ 854 if (lpfc_els_chk_latt(vport)) { 855 /* One additional decrement on node reference count to 856 * trigger the release of the node 857 */ 858 lpfc_nlp_put(ndlp); 859 goto out; 860 } 861 862 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 863 "FLOGI cmpl: status:x%x/x%x state:x%x", 864 irsp->ulpStatus, irsp->un.ulpWord[4], 865 vport->port_state); 866 867 if (irsp->ulpStatus) { 868 /* 869 * In case of FIP mode, perform roundrobin FCF failover 870 * due to new FCF discovery 871 */ 872 if ((phba->hba_flag & HBA_FIP_SUPPORT) && 873 (phba->fcf.fcf_flag & FCF_DISCOVERY) && 874 !((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 875 (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED))) { 876 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, 877 "2611 FLOGI failed on FCF (x%x), " 878 "status:x%x/x%x, tmo:x%x, perform " 879 "roundrobin FCF failover\n", 880 phba->fcf.current_rec.fcf_indx, 881 irsp->ulpStatus, irsp->un.ulpWord[4], 882 irsp->ulpTimeout); 883 fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba); 884 rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index); 885 if (rc) 886 goto out; 887 } 888 889 /* FLOGI failure */ 890 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 891 "2858 FLOGI failure Status:x%x/x%x TMO:x%x\n", 892 irsp->ulpStatus, irsp->un.ulpWord[4], 893 irsp->ulpTimeout); 894 895 /* Check for retry */ 896 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) 897 goto out; 898 899 /* FLOGI failure */ 900 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 901 "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n", 902 irsp->ulpStatus, irsp->un.ulpWord[4], 903 irsp->ulpTimeout); 904 905 /* FLOGI failed, so there is no fabric */ 906 spin_lock_irq(shost->host_lock); 907 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 908 spin_unlock_irq(shost->host_lock); 909 910 /* If private loop, then allow max outstanding els to be 911 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no 912 * alpa map would take too long otherwise. 913 */ 914 if (phba->alpa_map[0] == 0) { 915 vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS; 916 if ((phba->sli_rev == LPFC_SLI_REV4) && 917 (!(vport->fc_flag & FC_VFI_REGISTERED) || 918 (vport->fc_prevDID != vport->fc_myDID))) { 919 if (vport->fc_flag & FC_VFI_REGISTERED) 920 lpfc_sli4_unreg_all_rpis(vport); 921 lpfc_issue_reg_vfi(vport); 922 lpfc_nlp_put(ndlp); 923 goto out; 924 } 925 } 926 goto flogifail; 927 } 928 spin_lock_irq(shost->host_lock); 929 vport->fc_flag &= ~FC_VPORT_CVL_RCVD; 930 vport->fc_flag &= ~FC_VPORT_LOGO_RCVD; 931 spin_unlock_irq(shost->host_lock); 932 933 /* 934 * The FLogI succeeded. Sync the data for the CPU before 935 * accessing it. 936 */ 937 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 938 939 sp = prsp->virt + sizeof(uint32_t); 940 941 /* FLOGI completes successfully */ 942 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 943 "0101 FLOGI completes successfully " 944 "Data: x%x x%x x%x x%x\n", 945 irsp->un.ulpWord[4], sp->cmn.e_d_tov, 946 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution); 947 948 if (vport->port_state == LPFC_FLOGI) { 949 /* 950 * If Common Service Parameters indicate Nport 951 * we are point to point, if Fport we are Fabric. 952 */ 953 if (sp->cmn.fPort) 954 rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp); 955 else if (!(phba->hba_flag & HBA_FCOE_MODE)) 956 rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp); 957 else { 958 lpfc_printf_vlog(vport, KERN_ERR, 959 LOG_FIP | LOG_ELS, 960 "2831 FLOGI response with cleared Fabric " 961 "bit fcf_index 0x%x " 962 "Switch Name %02x%02x%02x%02x%02x%02x%02x%02x " 963 "Fabric Name " 964 "%02x%02x%02x%02x%02x%02x%02x%02x\n", 965 phba->fcf.current_rec.fcf_indx, 966 phba->fcf.current_rec.switch_name[0], 967 phba->fcf.current_rec.switch_name[1], 968 phba->fcf.current_rec.switch_name[2], 969 phba->fcf.current_rec.switch_name[3], 970 phba->fcf.current_rec.switch_name[4], 971 phba->fcf.current_rec.switch_name[5], 972 phba->fcf.current_rec.switch_name[6], 973 phba->fcf.current_rec.switch_name[7], 974 phba->fcf.current_rec.fabric_name[0], 975 phba->fcf.current_rec.fabric_name[1], 976 phba->fcf.current_rec.fabric_name[2], 977 phba->fcf.current_rec.fabric_name[3], 978 phba->fcf.current_rec.fabric_name[4], 979 phba->fcf.current_rec.fabric_name[5], 980 phba->fcf.current_rec.fabric_name[6], 981 phba->fcf.current_rec.fabric_name[7]); 982 lpfc_nlp_put(ndlp); 983 spin_lock_irq(&phba->hbalock); 984 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 985 phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO); 986 spin_unlock_irq(&phba->hbalock); 987 goto out; 988 } 989 if (!rc) { 990 /* Mark the FCF discovery process done */ 991 if (phba->hba_flag & HBA_FIP_SUPPORT) 992 lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP | 993 LOG_ELS, 994 "2769 FLOGI to FCF (x%x) " 995 "completed successfully\n", 996 phba->fcf.current_rec.fcf_indx); 997 spin_lock_irq(&phba->hbalock); 998 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 999 phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO); 1000 spin_unlock_irq(&phba->hbalock); 1001 goto out; 1002 } 1003 } 1004 1005 flogifail: 1006 lpfc_nlp_put(ndlp); 1007 1008 if (!lpfc_error_lost_link(irsp)) { 1009 /* FLOGI failed, so just use loop map to make discovery list */ 1010 lpfc_disc_list_loopmap(vport); 1011 1012 /* Start discovery */ 1013 lpfc_disc_start(vport); 1014 } else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) || 1015 ((irsp->un.ulpWord[4] != IOERR_SLI_ABORTED) && 1016 (irsp->un.ulpWord[4] != IOERR_SLI_DOWN))) && 1017 (phba->link_state != LPFC_CLEAR_LA)) { 1018 /* If FLOGI failed enable link interrupt. */ 1019 lpfc_issue_clear_la(phba, vport); 1020 } 1021 out: 1022 lpfc_els_free_iocb(phba, cmdiocb); 1023 } 1024 1025 /** 1026 * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport 1027 * @vport: pointer to a host virtual N_Port data structure. 1028 * @ndlp: pointer to a node-list data structure. 1029 * @retry: number of retries to the command IOCB. 1030 * 1031 * This routine issues a Fabric Login (FLOGI) Request ELS command 1032 * for a @vport. The initiator service parameters are put into the payload 1033 * of the FLOGI Request IOCB and the top-level callback function pointer 1034 * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback 1035 * function field. The lpfc_issue_fabric_iocb routine is invoked to send 1036 * out FLOGI ELS command with one outstanding fabric IOCB at a time. 1037 * 1038 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 1039 * will be incremented by 1 for holding the ndlp and the reference to ndlp 1040 * will be stored into the context1 field of the IOCB for the completion 1041 * callback function to the FLOGI ELS command. 1042 * 1043 * Return code 1044 * 0 - successfully issued flogi iocb for @vport 1045 * 1 - failed to issue flogi iocb for @vport 1046 **/ 1047 static int 1048 lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1049 uint8_t retry) 1050 { 1051 struct lpfc_hba *phba = vport->phba; 1052 struct serv_parm *sp; 1053 IOCB_t *icmd; 1054 struct lpfc_iocbq *elsiocb; 1055 struct lpfc_sli_ring *pring; 1056 uint8_t *pcmd; 1057 uint16_t cmdsize; 1058 uint32_t tmo; 1059 int rc; 1060 1061 pring = &phba->sli.ring[LPFC_ELS_RING]; 1062 1063 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 1064 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 1065 ndlp->nlp_DID, ELS_CMD_FLOGI); 1066 1067 if (!elsiocb) 1068 return 1; 1069 1070 icmd = &elsiocb->iocb; 1071 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 1072 1073 /* For FLOGI request, remainder of payload is service parameters */ 1074 *((uint32_t *) (pcmd)) = ELS_CMD_FLOGI; 1075 pcmd += sizeof(uint32_t); 1076 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); 1077 sp = (struct serv_parm *) pcmd; 1078 1079 /* Setup CSPs accordingly for Fabric */ 1080 sp->cmn.e_d_tov = 0; 1081 sp->cmn.w2.r_a_tov = 0; 1082 sp->cls1.classValid = 0; 1083 sp->cls2.seqDelivery = 1; 1084 sp->cls3.seqDelivery = 1; 1085 if (sp->cmn.fcphLow < FC_PH3) 1086 sp->cmn.fcphLow = FC_PH3; 1087 if (sp->cmn.fcphHigh < FC_PH3) 1088 sp->cmn.fcphHigh = FC_PH3; 1089 1090 if (phba->sli_rev == LPFC_SLI_REV4) { 1091 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 1092 LPFC_SLI_INTF_IF_TYPE_0) { 1093 elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1); 1094 elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1); 1095 /* FLOGI needs to be 3 for WQE FCFI */ 1096 /* Set the fcfi to the fcfi we registered with */ 1097 elsiocb->iocb.ulpContext = phba->fcf.fcfi; 1098 } 1099 } else if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { 1100 sp->cmn.request_multiple_Nport = 1; 1101 /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */ 1102 icmd->ulpCt_h = 1; 1103 icmd->ulpCt_l = 0; 1104 } 1105 1106 if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) { 1107 icmd->un.elsreq64.myID = 0; 1108 icmd->un.elsreq64.fl = 1; 1109 } 1110 1111 tmo = phba->fc_ratov; 1112 phba->fc_ratov = LPFC_DISC_FLOGI_TMO; 1113 lpfc_set_disctmo(vport); 1114 phba->fc_ratov = tmo; 1115 1116 phba->fc_stat.elsXmitFLOGI++; 1117 elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi; 1118 1119 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1120 "Issue FLOGI: opt:x%x", 1121 phba->sli3_options, 0, 0); 1122 1123 rc = lpfc_issue_fabric_iocb(phba, elsiocb); 1124 if (rc == IOCB_ERROR) { 1125 lpfc_els_free_iocb(phba, elsiocb); 1126 return 1; 1127 } 1128 return 0; 1129 } 1130 1131 /** 1132 * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs 1133 * @phba: pointer to lpfc hba data structure. 1134 * 1135 * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs 1136 * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq 1137 * list and issues an abort IOCB commond on each outstanding IOCB that 1138 * contains a active Fabric_DID ndlp. Note that this function is to issue 1139 * the abort IOCB command on all the outstanding IOCBs, thus when this 1140 * function returns, it does not guarantee all the IOCBs are actually aborted. 1141 * 1142 * Return code 1143 * 0 - Successfully issued abort iocb on all outstanding flogis (Always 0) 1144 **/ 1145 int 1146 lpfc_els_abort_flogi(struct lpfc_hba *phba) 1147 { 1148 struct lpfc_sli_ring *pring; 1149 struct lpfc_iocbq *iocb, *next_iocb; 1150 struct lpfc_nodelist *ndlp; 1151 IOCB_t *icmd; 1152 1153 /* Abort outstanding I/O on NPort <nlp_DID> */ 1154 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 1155 "0201 Abort outstanding I/O on NPort x%x\n", 1156 Fabric_DID); 1157 1158 pring = &phba->sli.ring[LPFC_ELS_RING]; 1159 1160 /* 1161 * Check the txcmplq for an iocb that matches the nport the driver is 1162 * searching for. 1163 */ 1164 spin_lock_irq(&phba->hbalock); 1165 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { 1166 icmd = &iocb->iocb; 1167 if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR && 1168 icmd->un.elsreq64.bdl.ulpIoTag32) { 1169 ndlp = (struct lpfc_nodelist *)(iocb->context1); 1170 if (ndlp && NLP_CHK_NODE_ACT(ndlp) && 1171 (ndlp->nlp_DID == Fabric_DID)) 1172 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 1173 } 1174 } 1175 spin_unlock_irq(&phba->hbalock); 1176 1177 return 0; 1178 } 1179 1180 /** 1181 * lpfc_initial_flogi - Issue an initial fabric login for a vport 1182 * @vport: pointer to a host virtual N_Port data structure. 1183 * 1184 * This routine issues an initial Fabric Login (FLOGI) for the @vport 1185 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from 1186 * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and 1187 * put it into the @vport's ndlp list. If an inactive ndlp found on the list, 1188 * it will just be enabled and made active. The lpfc_issue_els_flogi() routine 1189 * is then invoked with the @vport and the ndlp to perform the FLOGI for the 1190 * @vport. 1191 * 1192 * Return code 1193 * 0 - failed to issue initial flogi for @vport 1194 * 1 - successfully issued initial flogi for @vport 1195 **/ 1196 int 1197 lpfc_initial_flogi(struct lpfc_vport *vport) 1198 { 1199 struct lpfc_hba *phba = vport->phba; 1200 struct lpfc_nodelist *ndlp; 1201 1202 vport->port_state = LPFC_FLOGI; 1203 lpfc_set_disctmo(vport); 1204 1205 /* First look for the Fabric ndlp */ 1206 ndlp = lpfc_findnode_did(vport, Fabric_DID); 1207 if (!ndlp) { 1208 /* Cannot find existing Fabric ndlp, so allocate a new one */ 1209 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 1210 if (!ndlp) 1211 return 0; 1212 lpfc_nlp_init(vport, ndlp, Fabric_DID); 1213 /* Set the node type */ 1214 ndlp->nlp_type |= NLP_FABRIC; 1215 /* Put ndlp onto node list */ 1216 lpfc_enqueue_node(vport, ndlp); 1217 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 1218 /* re-setup ndlp without removing from node list */ 1219 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 1220 if (!ndlp) 1221 return 0; 1222 } 1223 1224 if (lpfc_issue_els_flogi(vport, ndlp, 0)) { 1225 /* This decrement of reference count to node shall kick off 1226 * the release of the node. 1227 */ 1228 lpfc_nlp_put(ndlp); 1229 return 0; 1230 } 1231 return 1; 1232 } 1233 1234 /** 1235 * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport 1236 * @vport: pointer to a host virtual N_Port data structure. 1237 * 1238 * This routine issues an initial Fabric Discover (FDISC) for the @vport 1239 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from 1240 * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and 1241 * put it into the @vport's ndlp list. If an inactive ndlp found on the list, 1242 * it will just be enabled and made active. The lpfc_issue_els_fdisc() routine 1243 * is then invoked with the @vport and the ndlp to perform the FDISC for the 1244 * @vport. 1245 * 1246 * Return code 1247 * 0 - failed to issue initial fdisc for @vport 1248 * 1 - successfully issued initial fdisc for @vport 1249 **/ 1250 int 1251 lpfc_initial_fdisc(struct lpfc_vport *vport) 1252 { 1253 struct lpfc_hba *phba = vport->phba; 1254 struct lpfc_nodelist *ndlp; 1255 1256 /* First look for the Fabric ndlp */ 1257 ndlp = lpfc_findnode_did(vport, Fabric_DID); 1258 if (!ndlp) { 1259 /* Cannot find existing Fabric ndlp, so allocate a new one */ 1260 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 1261 if (!ndlp) 1262 return 0; 1263 lpfc_nlp_init(vport, ndlp, Fabric_DID); 1264 /* Put ndlp onto node list */ 1265 lpfc_enqueue_node(vport, ndlp); 1266 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 1267 /* re-setup ndlp without removing from node list */ 1268 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 1269 if (!ndlp) 1270 return 0; 1271 } 1272 1273 if (lpfc_issue_els_fdisc(vport, ndlp, 0)) { 1274 /* decrement node reference count to trigger the release of 1275 * the node. 1276 */ 1277 lpfc_nlp_put(ndlp); 1278 return 0; 1279 } 1280 return 1; 1281 } 1282 1283 /** 1284 * lpfc_more_plogi - Check and issue remaining plogis for a vport 1285 * @vport: pointer to a host virtual N_Port data structure. 1286 * 1287 * This routine checks whether there are more remaining Port Logins 1288 * (PLOGI) to be issued for the @vport. If so, it will invoke the routine 1289 * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes 1290 * to issue ELS PLOGIs up to the configured discover threads with the 1291 * @vport (@vport->cfg_discovery_threads). The function also decrement 1292 * the @vport's num_disc_node by 1 if it is not already 0. 1293 **/ 1294 void 1295 lpfc_more_plogi(struct lpfc_vport *vport) 1296 { 1297 int sentplogi; 1298 1299 if (vport->num_disc_nodes) 1300 vport->num_disc_nodes--; 1301 1302 /* Continue discovery with <num_disc_nodes> PLOGIs to go */ 1303 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 1304 "0232 Continue discovery with %d PLOGIs to go " 1305 "Data: x%x x%x x%x\n", 1306 vport->num_disc_nodes, vport->fc_plogi_cnt, 1307 vport->fc_flag, vport->port_state); 1308 /* Check to see if there are more PLOGIs to be sent */ 1309 if (vport->fc_flag & FC_NLP_MORE) 1310 /* go thru NPR nodes and issue any remaining ELS PLOGIs */ 1311 sentplogi = lpfc_els_disc_plogi(vport); 1312 1313 return; 1314 } 1315 1316 /** 1317 * lpfc_plogi_confirm_nport - Confirm pologi wwpn matches stored ndlp 1318 * @phba: pointer to lpfc hba data structure. 1319 * @prsp: pointer to response IOCB payload. 1320 * @ndlp: pointer to a node-list data structure. 1321 * 1322 * This routine checks and indicates whether the WWPN of an N_Port, retrieved 1323 * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_POrt. 1324 * The following cases are considered N_Port confirmed: 1325 * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on vport list and matches 1326 * the WWPN of the N_Port logged into; 3) The @ndlp is not on vport list but 1327 * it does not have WWPN assigned either. If the WWPN is confirmed, the 1328 * pointer to the @ndlp will be returned. If the WWPN is not confirmed: 1329 * 1) if there is a node on vport list other than the @ndlp with the same 1330 * WWPN of the N_Port PLOGI logged into, the lpfc_unreg_rpi() will be invoked 1331 * on that node to release the RPI associated with the node; 2) if there is 1332 * no node found on vport list with the same WWPN of the N_Port PLOGI logged 1333 * into, a new node shall be allocated (or activated). In either case, the 1334 * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall 1335 * be released and the new_ndlp shall be put on to the vport node list and 1336 * its pointer returned as the confirmed node. 1337 * 1338 * Note that before the @ndlp got "released", the keepDID from not-matching 1339 * or inactive "new_ndlp" on the vport node list is assigned to the nlp_DID 1340 * of the @ndlp. This is because the release of @ndlp is actually to put it 1341 * into an inactive state on the vport node list and the vport node list 1342 * management algorithm does not allow two node with a same DID. 1343 * 1344 * Return code 1345 * pointer to the PLOGI N_Port @ndlp 1346 **/ 1347 static struct lpfc_nodelist * 1348 lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, 1349 struct lpfc_nodelist *ndlp) 1350 { 1351 struct lpfc_vport *vport = ndlp->vport; 1352 struct lpfc_nodelist *new_ndlp; 1353 struct lpfc_rport_data *rdata; 1354 struct fc_rport *rport; 1355 struct serv_parm *sp; 1356 uint8_t name[sizeof(struct lpfc_name)]; 1357 uint32_t rc, keepDID = 0; 1358 int put_node; 1359 int put_rport; 1360 struct lpfc_node_rrqs rrq; 1361 1362 /* Fabric nodes can have the same WWPN so we don't bother searching 1363 * by WWPN. Just return the ndlp that was given to us. 1364 */ 1365 if (ndlp->nlp_type & NLP_FABRIC) 1366 return ndlp; 1367 1368 sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t)); 1369 memset(name, 0, sizeof(struct lpfc_name)); 1370 1371 /* Now we find out if the NPort we are logging into, matches the WWPN 1372 * we have for that ndlp. If not, we have some work to do. 1373 */ 1374 new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName); 1375 1376 if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp)) 1377 return ndlp; 1378 memset(&rrq.xri_bitmap, 0, sizeof(new_ndlp->active_rrqs.xri_bitmap)); 1379 1380 if (!new_ndlp) { 1381 rc = memcmp(&ndlp->nlp_portname, name, 1382 sizeof(struct lpfc_name)); 1383 if (!rc) 1384 return ndlp; 1385 new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC); 1386 if (!new_ndlp) 1387 return ndlp; 1388 lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID); 1389 } else if (!NLP_CHK_NODE_ACT(new_ndlp)) { 1390 rc = memcmp(&ndlp->nlp_portname, name, 1391 sizeof(struct lpfc_name)); 1392 if (!rc) 1393 return ndlp; 1394 new_ndlp = lpfc_enable_node(vport, new_ndlp, 1395 NLP_STE_UNUSED_NODE); 1396 if (!new_ndlp) 1397 return ndlp; 1398 keepDID = new_ndlp->nlp_DID; 1399 if (phba->sli_rev == LPFC_SLI_REV4) 1400 memcpy(&rrq.xri_bitmap, 1401 &new_ndlp->active_rrqs.xri_bitmap, 1402 sizeof(new_ndlp->active_rrqs.xri_bitmap)); 1403 } else { 1404 keepDID = new_ndlp->nlp_DID; 1405 if (phba->sli_rev == LPFC_SLI_REV4) 1406 memcpy(&rrq.xri_bitmap, 1407 &new_ndlp->active_rrqs.xri_bitmap, 1408 sizeof(new_ndlp->active_rrqs.xri_bitmap)); 1409 } 1410 1411 lpfc_unreg_rpi(vport, new_ndlp); 1412 new_ndlp->nlp_DID = ndlp->nlp_DID; 1413 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state; 1414 if (phba->sli_rev == LPFC_SLI_REV4) 1415 memcpy(new_ndlp->active_rrqs.xri_bitmap, 1416 &ndlp->active_rrqs.xri_bitmap, 1417 sizeof(ndlp->active_rrqs.xri_bitmap)); 1418 1419 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) 1420 new_ndlp->nlp_flag |= NLP_NPR_2B_DISC; 1421 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 1422 1423 /* Set state will put new_ndlp on to node list if not already done */ 1424 lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state); 1425 1426 /* Move this back to NPR state */ 1427 if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) { 1428 /* The new_ndlp is replacing ndlp totally, so we need 1429 * to put ndlp on UNUSED list and try to free it. 1430 */ 1431 1432 /* Fix up the rport accordingly */ 1433 rport = ndlp->rport; 1434 if (rport) { 1435 rdata = rport->dd_data; 1436 if (rdata->pnode == ndlp) { 1437 lpfc_nlp_put(ndlp); 1438 ndlp->rport = NULL; 1439 rdata->pnode = lpfc_nlp_get(new_ndlp); 1440 new_ndlp->rport = rport; 1441 } 1442 new_ndlp->nlp_type = ndlp->nlp_type; 1443 } 1444 /* We shall actually free the ndlp with both nlp_DID and 1445 * nlp_portname fields equals 0 to avoid any ndlp on the 1446 * nodelist never to be used. 1447 */ 1448 if (ndlp->nlp_DID == 0) { 1449 spin_lock_irq(&phba->ndlp_lock); 1450 NLP_SET_FREE_REQ(ndlp); 1451 spin_unlock_irq(&phba->ndlp_lock); 1452 } 1453 1454 /* Two ndlps cannot have the same did on the nodelist */ 1455 ndlp->nlp_DID = keepDID; 1456 if (phba->sli_rev == LPFC_SLI_REV4) 1457 memcpy(&ndlp->active_rrqs.xri_bitmap, 1458 &rrq.xri_bitmap, 1459 sizeof(ndlp->active_rrqs.xri_bitmap)); 1460 lpfc_drop_node(vport, ndlp); 1461 } 1462 else { 1463 lpfc_unreg_rpi(vport, ndlp); 1464 /* Two ndlps cannot have the same did */ 1465 ndlp->nlp_DID = keepDID; 1466 if (phba->sli_rev == LPFC_SLI_REV4) 1467 memcpy(&ndlp->active_rrqs.xri_bitmap, 1468 &rrq.xri_bitmap, 1469 sizeof(ndlp->active_rrqs.xri_bitmap)); 1470 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 1471 /* Since we are swapping the ndlp passed in with the new one 1472 * and the did has already been swapped, copy over the 1473 * state and names. 1474 */ 1475 memcpy(&new_ndlp->nlp_portname, &ndlp->nlp_portname, 1476 sizeof(struct lpfc_name)); 1477 memcpy(&new_ndlp->nlp_nodename, &ndlp->nlp_nodename, 1478 sizeof(struct lpfc_name)); 1479 new_ndlp->nlp_state = ndlp->nlp_state; 1480 /* Fix up the rport accordingly */ 1481 rport = ndlp->rport; 1482 if (rport) { 1483 rdata = rport->dd_data; 1484 put_node = rdata->pnode != NULL; 1485 put_rport = ndlp->rport != NULL; 1486 rdata->pnode = NULL; 1487 ndlp->rport = NULL; 1488 if (put_node) 1489 lpfc_nlp_put(ndlp); 1490 if (put_rport) 1491 put_device(&rport->dev); 1492 } 1493 } 1494 return new_ndlp; 1495 } 1496 1497 /** 1498 * lpfc_end_rscn - Check and handle more rscn for a vport 1499 * @vport: pointer to a host virtual N_Port data structure. 1500 * 1501 * This routine checks whether more Registration State Change 1502 * Notifications (RSCNs) came in while the discovery state machine was in 1503 * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be 1504 * invoked to handle the additional RSCNs for the @vport. Otherwise, the 1505 * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of 1506 * handling the RSCNs. 1507 **/ 1508 void 1509 lpfc_end_rscn(struct lpfc_vport *vport) 1510 { 1511 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1512 1513 if (vport->fc_flag & FC_RSCN_MODE) { 1514 /* 1515 * Check to see if more RSCNs came in while we were 1516 * processing this one. 1517 */ 1518 if (vport->fc_rscn_id_cnt || 1519 (vport->fc_flag & FC_RSCN_DISCOVERY) != 0) 1520 lpfc_els_handle_rscn(vport); 1521 else { 1522 spin_lock_irq(shost->host_lock); 1523 vport->fc_flag &= ~FC_RSCN_MODE; 1524 spin_unlock_irq(shost->host_lock); 1525 } 1526 } 1527 } 1528 1529 /** 1530 * lpfc_cmpl_els_rrq - Completion handled for els RRQs. 1531 * @phba: pointer to lpfc hba data structure. 1532 * @cmdiocb: pointer to lpfc command iocb data structure. 1533 * @rspiocb: pointer to lpfc response iocb data structure. 1534 * 1535 * This routine will call the clear rrq function to free the rrq and 1536 * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not 1537 * exist then the clear_rrq is still called because the rrq needs to 1538 * be freed. 1539 **/ 1540 1541 static void 1542 lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1543 struct lpfc_iocbq *rspiocb) 1544 { 1545 struct lpfc_vport *vport = cmdiocb->vport; 1546 IOCB_t *irsp; 1547 struct lpfc_nodelist *ndlp; 1548 struct lpfc_node_rrq *rrq; 1549 1550 /* we pass cmdiocb to state machine which needs rspiocb as well */ 1551 rrq = cmdiocb->context_un.rrq; 1552 cmdiocb->context_un.rsp_iocb = rspiocb; 1553 1554 irsp = &rspiocb->iocb; 1555 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1556 "RRQ cmpl: status:x%x/x%x did:x%x", 1557 irsp->ulpStatus, irsp->un.ulpWord[4], 1558 irsp->un.elsreq64.remoteID); 1559 1560 ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID); 1561 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || ndlp != rrq->ndlp) { 1562 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 1563 "2882 RRQ completes to NPort x%x " 1564 "with no ndlp. Data: x%x x%x x%x\n", 1565 irsp->un.elsreq64.remoteID, 1566 irsp->ulpStatus, irsp->un.ulpWord[4], 1567 irsp->ulpIoTag); 1568 goto out; 1569 } 1570 1571 /* rrq completes to NPort <nlp_DID> */ 1572 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1573 "2880 RRQ completes to NPort x%x " 1574 "Data: x%x x%x x%x x%x x%x\n", 1575 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 1576 irsp->ulpTimeout, rrq->xritag, rrq->rxid); 1577 1578 if (irsp->ulpStatus) { 1579 /* Check for retry */ 1580 /* RRQ failed Don't print the vport to vport rjts */ 1581 if (irsp->ulpStatus != IOSTAT_LS_RJT || 1582 (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) && 1583 ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) || 1584 (phba)->pport->cfg_log_verbose & LOG_ELS) 1585 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 1586 "2881 RRQ failure DID:%06X Status:x%x/x%x\n", 1587 ndlp->nlp_DID, irsp->ulpStatus, 1588 irsp->un.ulpWord[4]); 1589 } 1590 out: 1591 if (rrq) 1592 lpfc_clr_rrq_active(phba, rrq->xritag, rrq); 1593 lpfc_els_free_iocb(phba, cmdiocb); 1594 return; 1595 } 1596 /** 1597 * lpfc_cmpl_els_plogi - Completion callback function for plogi 1598 * @phba: pointer to lpfc hba data structure. 1599 * @cmdiocb: pointer to lpfc command iocb data structure. 1600 * @rspiocb: pointer to lpfc response iocb data structure. 1601 * 1602 * This routine is the completion callback function for issuing the Port 1603 * Login (PLOGI) command. For PLOGI completion, there must be an active 1604 * ndlp on the vport node list that matches the remote node ID from the 1605 * PLOGI response IOCB. If such ndlp does not exist, the PLOGI is simply 1606 * ignored and command IOCB released. The PLOGI response IOCB status is 1607 * checked for error conditons. If there is error status reported, PLOGI 1608 * retry shall be attempted by invoking the lpfc_els_retry() routine. 1609 * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on 1610 * the ndlp and the NLP_EVT_CMPL_PLOGI state to the Discover State Machine 1611 * (DSM) is set for this PLOGI completion. Finally, it checks whether 1612 * there are additional N_Port nodes with the vport that need to perform 1613 * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue addition 1614 * PLOGIs. 1615 **/ 1616 static void 1617 lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1618 struct lpfc_iocbq *rspiocb) 1619 { 1620 struct lpfc_vport *vport = cmdiocb->vport; 1621 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1622 IOCB_t *irsp; 1623 struct lpfc_nodelist *ndlp; 1624 struct lpfc_dmabuf *prsp; 1625 int disc, rc, did, type; 1626 1627 /* we pass cmdiocb to state machine which needs rspiocb as well */ 1628 cmdiocb->context_un.rsp_iocb = rspiocb; 1629 1630 irsp = &rspiocb->iocb; 1631 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1632 "PLOGI cmpl: status:x%x/x%x did:x%x", 1633 irsp->ulpStatus, irsp->un.ulpWord[4], 1634 irsp->un.elsreq64.remoteID); 1635 1636 ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID); 1637 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { 1638 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 1639 "0136 PLOGI completes to NPort x%x " 1640 "with no ndlp. Data: x%x x%x x%x\n", 1641 irsp->un.elsreq64.remoteID, 1642 irsp->ulpStatus, irsp->un.ulpWord[4], 1643 irsp->ulpIoTag); 1644 goto out; 1645 } 1646 1647 /* Since ndlp can be freed in the disc state machine, note if this node 1648 * is being used during discovery. 1649 */ 1650 spin_lock_irq(shost->host_lock); 1651 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 1652 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 1653 spin_unlock_irq(shost->host_lock); 1654 rc = 0; 1655 1656 /* PLOGI completes to NPort <nlp_DID> */ 1657 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1658 "0102 PLOGI completes to NPort x%x " 1659 "Data: x%x x%x x%x x%x x%x\n", 1660 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 1661 irsp->ulpTimeout, disc, vport->num_disc_nodes); 1662 /* Check to see if link went down during discovery */ 1663 if (lpfc_els_chk_latt(vport)) { 1664 spin_lock_irq(shost->host_lock); 1665 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 1666 spin_unlock_irq(shost->host_lock); 1667 goto out; 1668 } 1669 1670 /* ndlp could be freed in DSM, save these values now */ 1671 type = ndlp->nlp_type; 1672 did = ndlp->nlp_DID; 1673 1674 if (irsp->ulpStatus) { 1675 /* Check for retry */ 1676 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 1677 /* ELS command is being retried */ 1678 if (disc) { 1679 spin_lock_irq(shost->host_lock); 1680 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 1681 spin_unlock_irq(shost->host_lock); 1682 } 1683 goto out; 1684 } 1685 /* PLOGI failed Don't print the vport to vport rjts */ 1686 if (irsp->ulpStatus != IOSTAT_LS_RJT || 1687 (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) && 1688 ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) || 1689 (phba)->pport->cfg_log_verbose & LOG_ELS) 1690 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 1691 "2753 PLOGI failure DID:%06X Status:x%x/x%x\n", 1692 ndlp->nlp_DID, irsp->ulpStatus, 1693 irsp->un.ulpWord[4]); 1694 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 1695 if (lpfc_error_lost_link(irsp)) 1696 rc = NLP_STE_FREED_NODE; 1697 else 1698 rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1699 NLP_EVT_CMPL_PLOGI); 1700 } else { 1701 /* Good status, call state machine */ 1702 prsp = list_entry(((struct lpfc_dmabuf *) 1703 cmdiocb->context2)->list.next, 1704 struct lpfc_dmabuf, list); 1705 ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp); 1706 rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1707 NLP_EVT_CMPL_PLOGI); 1708 } 1709 1710 if (disc && vport->num_disc_nodes) { 1711 /* Check to see if there are more PLOGIs to be sent */ 1712 lpfc_more_plogi(vport); 1713 1714 if (vport->num_disc_nodes == 0) { 1715 spin_lock_irq(shost->host_lock); 1716 vport->fc_flag &= ~FC_NDISC_ACTIVE; 1717 spin_unlock_irq(shost->host_lock); 1718 1719 lpfc_can_disctmo(vport); 1720 lpfc_end_rscn(vport); 1721 } 1722 } 1723 1724 out: 1725 lpfc_els_free_iocb(phba, cmdiocb); 1726 return; 1727 } 1728 1729 /** 1730 * lpfc_issue_els_plogi - Issue an plogi iocb command for a vport 1731 * @vport: pointer to a host virtual N_Port data structure. 1732 * @did: destination port identifier. 1733 * @retry: number of retries to the command IOCB. 1734 * 1735 * This routine issues a Port Login (PLOGI) command to a remote N_Port 1736 * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port, 1737 * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list. 1738 * This routine constructs the proper feilds of the PLOGI IOCB and invokes 1739 * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command. 1740 * 1741 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 1742 * will be incremented by 1 for holding the ndlp and the reference to ndlp 1743 * will be stored into the context1 field of the IOCB for the completion 1744 * callback function to the PLOGI ELS command. 1745 * 1746 * Return code 1747 * 0 - Successfully issued a plogi for @vport 1748 * 1 - failed to issue a plogi for @vport 1749 **/ 1750 int 1751 lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) 1752 { 1753 struct lpfc_hba *phba = vport->phba; 1754 struct serv_parm *sp; 1755 IOCB_t *icmd; 1756 struct lpfc_nodelist *ndlp; 1757 struct lpfc_iocbq *elsiocb; 1758 struct lpfc_sli *psli; 1759 uint8_t *pcmd; 1760 uint16_t cmdsize; 1761 int ret; 1762 1763 psli = &phba->sli; 1764 1765 ndlp = lpfc_findnode_did(vport, did); 1766 if (ndlp && !NLP_CHK_NODE_ACT(ndlp)) 1767 ndlp = NULL; 1768 1769 /* If ndlp is not NULL, we will bump the reference count on it */ 1770 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 1771 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 1772 ELS_CMD_PLOGI); 1773 if (!elsiocb) 1774 return 1; 1775 1776 icmd = &elsiocb->iocb; 1777 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 1778 1779 /* For PLOGI request, remainder of payload is service parameters */ 1780 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI; 1781 pcmd += sizeof(uint32_t); 1782 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); 1783 sp = (struct serv_parm *) pcmd; 1784 1785 /* 1786 * If we are a N-port connected to a Fabric, fix-up paramm's so logins 1787 * to device on remote loops work. 1788 */ 1789 if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP)) 1790 sp->cmn.altBbCredit = 1; 1791 1792 if (sp->cmn.fcphLow < FC_PH_4_3) 1793 sp->cmn.fcphLow = FC_PH_4_3; 1794 1795 if (sp->cmn.fcphHigh < FC_PH3) 1796 sp->cmn.fcphHigh = FC_PH3; 1797 1798 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1799 "Issue PLOGI: did:x%x", 1800 did, 0, 0); 1801 1802 phba->fc_stat.elsXmitPLOGI++; 1803 elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi; 1804 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 1805 1806 if (ret == IOCB_ERROR) { 1807 lpfc_els_free_iocb(phba, elsiocb); 1808 return 1; 1809 } 1810 return 0; 1811 } 1812 1813 /** 1814 * lpfc_cmpl_els_prli - Completion callback function for prli 1815 * @phba: pointer to lpfc hba data structure. 1816 * @cmdiocb: pointer to lpfc command iocb data structure. 1817 * @rspiocb: pointer to lpfc response iocb data structure. 1818 * 1819 * This routine is the completion callback function for a Process Login 1820 * (PRLI) ELS command. The PRLI response IOCB status is checked for error 1821 * status. If there is error status reported, PRLI retry shall be attempted 1822 * by invoking the lpfc_els_retry() routine. Otherwise, the state 1823 * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this 1824 * ndlp to mark the PRLI completion. 1825 **/ 1826 static void 1827 lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1828 struct lpfc_iocbq *rspiocb) 1829 { 1830 struct lpfc_vport *vport = cmdiocb->vport; 1831 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1832 IOCB_t *irsp; 1833 struct lpfc_sli *psli; 1834 struct lpfc_nodelist *ndlp; 1835 1836 psli = &phba->sli; 1837 /* we pass cmdiocb to state machine which needs rspiocb as well */ 1838 cmdiocb->context_un.rsp_iocb = rspiocb; 1839 1840 irsp = &(rspiocb->iocb); 1841 ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 1842 spin_lock_irq(shost->host_lock); 1843 ndlp->nlp_flag &= ~NLP_PRLI_SND; 1844 spin_unlock_irq(shost->host_lock); 1845 1846 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1847 "PRLI cmpl: status:x%x/x%x did:x%x", 1848 irsp->ulpStatus, irsp->un.ulpWord[4], 1849 ndlp->nlp_DID); 1850 /* PRLI completes to NPort <nlp_DID> */ 1851 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1852 "0103 PRLI completes to NPort x%x " 1853 "Data: x%x x%x x%x x%x\n", 1854 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 1855 irsp->ulpTimeout, vport->num_disc_nodes); 1856 1857 vport->fc_prli_sent--; 1858 /* Check to see if link went down during discovery */ 1859 if (lpfc_els_chk_latt(vport)) 1860 goto out; 1861 1862 if (irsp->ulpStatus) { 1863 /* Check for retry */ 1864 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 1865 /* ELS command is being retried */ 1866 goto out; 1867 } 1868 /* PRLI failed */ 1869 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 1870 "2754 PRLI failure DID:%06X Status:x%x/x%x\n", 1871 ndlp->nlp_DID, irsp->ulpStatus, 1872 irsp->un.ulpWord[4]); 1873 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 1874 if (lpfc_error_lost_link(irsp)) 1875 goto out; 1876 else 1877 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1878 NLP_EVT_CMPL_PRLI); 1879 } else 1880 /* Good status, call state machine */ 1881 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1882 NLP_EVT_CMPL_PRLI); 1883 out: 1884 lpfc_els_free_iocb(phba, cmdiocb); 1885 return; 1886 } 1887 1888 /** 1889 * lpfc_issue_els_prli - Issue a prli iocb command for a vport 1890 * @vport: pointer to a host virtual N_Port data structure. 1891 * @ndlp: pointer to a node-list data structure. 1892 * @retry: number of retries to the command IOCB. 1893 * 1894 * This routine issues a Process Login (PRLI) ELS command for the 1895 * @vport. The PRLI service parameters are set up in the payload of the 1896 * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine 1897 * is put to the IOCB completion callback func field before invoking the 1898 * routine lpfc_sli_issue_iocb() to send out PRLI command. 1899 * 1900 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 1901 * will be incremented by 1 for holding the ndlp and the reference to ndlp 1902 * will be stored into the context1 field of the IOCB for the completion 1903 * callback function to the PRLI ELS command. 1904 * 1905 * Return code 1906 * 0 - successfully issued prli iocb command for @vport 1907 * 1 - failed to issue prli iocb command for @vport 1908 **/ 1909 int 1910 lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1911 uint8_t retry) 1912 { 1913 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1914 struct lpfc_hba *phba = vport->phba; 1915 PRLI *npr; 1916 IOCB_t *icmd; 1917 struct lpfc_iocbq *elsiocb; 1918 uint8_t *pcmd; 1919 uint16_t cmdsize; 1920 1921 cmdsize = (sizeof(uint32_t) + sizeof(PRLI)); 1922 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 1923 ndlp->nlp_DID, ELS_CMD_PRLI); 1924 if (!elsiocb) 1925 return 1; 1926 1927 icmd = &elsiocb->iocb; 1928 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 1929 1930 /* For PRLI request, remainder of payload is service parameters */ 1931 memset(pcmd, 0, (sizeof(PRLI) + sizeof(uint32_t))); 1932 *((uint32_t *) (pcmd)) = ELS_CMD_PRLI; 1933 pcmd += sizeof(uint32_t); 1934 1935 /* For PRLI, remainder of payload is PRLI parameter page */ 1936 npr = (PRLI *) pcmd; 1937 /* 1938 * If our firmware version is 3.20 or later, 1939 * set the following bits for FC-TAPE support. 1940 */ 1941 if (phba->vpd.rev.feaLevelHigh >= 0x02) { 1942 npr->ConfmComplAllowed = 1; 1943 npr->Retry = 1; 1944 npr->TaskRetryIdReq = 1; 1945 } 1946 npr->estabImagePair = 1; 1947 npr->readXferRdyDis = 1; 1948 1949 /* For FCP support */ 1950 npr->prliType = PRLI_FCP_TYPE; 1951 npr->initiatorFunc = 1; 1952 1953 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1954 "Issue PRLI: did:x%x", 1955 ndlp->nlp_DID, 0, 0); 1956 1957 phba->fc_stat.elsXmitPRLI++; 1958 elsiocb->iocb_cmpl = lpfc_cmpl_els_prli; 1959 spin_lock_irq(shost->host_lock); 1960 ndlp->nlp_flag |= NLP_PRLI_SND; 1961 spin_unlock_irq(shost->host_lock); 1962 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == 1963 IOCB_ERROR) { 1964 spin_lock_irq(shost->host_lock); 1965 ndlp->nlp_flag &= ~NLP_PRLI_SND; 1966 spin_unlock_irq(shost->host_lock); 1967 lpfc_els_free_iocb(phba, elsiocb); 1968 return 1; 1969 } 1970 vport->fc_prli_sent++; 1971 return 0; 1972 } 1973 1974 /** 1975 * lpfc_rscn_disc - Perform rscn discovery for a vport 1976 * @vport: pointer to a host virtual N_Port data structure. 1977 * 1978 * This routine performs Registration State Change Notification (RSCN) 1979 * discovery for a @vport. If the @vport's node port recovery count is not 1980 * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all 1981 * the nodes that need recovery. If none of the PLOGI were needed through 1982 * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be 1983 * invoked to check and handle possible more RSCN came in during the period 1984 * of processing the current ones. 1985 **/ 1986 static void 1987 lpfc_rscn_disc(struct lpfc_vport *vport) 1988 { 1989 lpfc_can_disctmo(vport); 1990 1991 /* RSCN discovery */ 1992 /* go thru NPR nodes and issue ELS PLOGIs */ 1993 if (vport->fc_npr_cnt) 1994 if (lpfc_els_disc_plogi(vport)) 1995 return; 1996 1997 lpfc_end_rscn(vport); 1998 } 1999 2000 /** 2001 * lpfc_adisc_done - Complete the adisc phase of discovery 2002 * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs. 2003 * 2004 * This function is called when the final ADISC is completed during discovery. 2005 * This function handles clearing link attention or issuing reg_vpi depending 2006 * on whether npiv is enabled. This function also kicks off the PLOGI phase of 2007 * discovery. 2008 * This function is called with no locks held. 2009 **/ 2010 static void 2011 lpfc_adisc_done(struct lpfc_vport *vport) 2012 { 2013 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2014 struct lpfc_hba *phba = vport->phba; 2015 2016 /* 2017 * For NPIV, cmpl_reg_vpi will set port_state to READY, 2018 * and continue discovery. 2019 */ 2020 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 2021 !(vport->fc_flag & FC_RSCN_MODE) && 2022 (phba->sli_rev < LPFC_SLI_REV4)) { 2023 lpfc_issue_reg_vpi(phba, vport); 2024 return; 2025 } 2026 /* 2027 * For SLI2, we need to set port_state to READY 2028 * and continue discovery. 2029 */ 2030 if (vport->port_state < LPFC_VPORT_READY) { 2031 /* If we get here, there is nothing to ADISC */ 2032 if (vport->port_type == LPFC_PHYSICAL_PORT) 2033 lpfc_issue_clear_la(phba, vport); 2034 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) { 2035 vport->num_disc_nodes = 0; 2036 /* go thru NPR list, issue ELS PLOGIs */ 2037 if (vport->fc_npr_cnt) 2038 lpfc_els_disc_plogi(vport); 2039 if (!vport->num_disc_nodes) { 2040 spin_lock_irq(shost->host_lock); 2041 vport->fc_flag &= ~FC_NDISC_ACTIVE; 2042 spin_unlock_irq(shost->host_lock); 2043 lpfc_can_disctmo(vport); 2044 lpfc_end_rscn(vport); 2045 } 2046 } 2047 vport->port_state = LPFC_VPORT_READY; 2048 } else 2049 lpfc_rscn_disc(vport); 2050 } 2051 2052 /** 2053 * lpfc_more_adisc - Issue more adisc as needed 2054 * @vport: pointer to a host virtual N_Port data structure. 2055 * 2056 * This routine determines whether there are more ndlps on a @vport 2057 * node list need to have Address Discover (ADISC) issued. If so, it will 2058 * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's 2059 * remaining nodes which need to have ADISC sent. 2060 **/ 2061 void 2062 lpfc_more_adisc(struct lpfc_vport *vport) 2063 { 2064 int sentadisc; 2065 2066 if (vport->num_disc_nodes) 2067 vport->num_disc_nodes--; 2068 /* Continue discovery with <num_disc_nodes> ADISCs to go */ 2069 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2070 "0210 Continue discovery with %d ADISCs to go " 2071 "Data: x%x x%x x%x\n", 2072 vport->num_disc_nodes, vport->fc_adisc_cnt, 2073 vport->fc_flag, vport->port_state); 2074 /* Check to see if there are more ADISCs to be sent */ 2075 if (vport->fc_flag & FC_NLP_MORE) { 2076 lpfc_set_disctmo(vport); 2077 /* go thru NPR nodes and issue any remaining ELS ADISCs */ 2078 sentadisc = lpfc_els_disc_adisc(vport); 2079 } 2080 if (!vport->num_disc_nodes) 2081 lpfc_adisc_done(vport); 2082 return; 2083 } 2084 2085 /** 2086 * lpfc_cmpl_els_adisc - Completion callback function for adisc 2087 * @phba: pointer to lpfc hba data structure. 2088 * @cmdiocb: pointer to lpfc command iocb data structure. 2089 * @rspiocb: pointer to lpfc response iocb data structure. 2090 * 2091 * This routine is the completion function for issuing the Address Discover 2092 * (ADISC) command. It first checks to see whether link went down during 2093 * the discovery process. If so, the node will be marked as node port 2094 * recovery for issuing discover IOCB by the link attention handler and 2095 * exit. Otherwise, the response status is checked. If error was reported 2096 * in the response status, the ADISC command shall be retried by invoking 2097 * the lpfc_els_retry() routine. Otherwise, if no error was reported in 2098 * the response status, the state machine is invoked to set transition 2099 * with respect to NLP_EVT_CMPL_ADISC event. 2100 **/ 2101 static void 2102 lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2103 struct lpfc_iocbq *rspiocb) 2104 { 2105 struct lpfc_vport *vport = cmdiocb->vport; 2106 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2107 IOCB_t *irsp; 2108 struct lpfc_nodelist *ndlp; 2109 int disc; 2110 2111 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2112 cmdiocb->context_un.rsp_iocb = rspiocb; 2113 2114 irsp = &(rspiocb->iocb); 2115 ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 2116 2117 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2118 "ADISC cmpl: status:x%x/x%x did:x%x", 2119 irsp->ulpStatus, irsp->un.ulpWord[4], 2120 ndlp->nlp_DID); 2121 2122 /* Since ndlp can be freed in the disc state machine, note if this node 2123 * is being used during discovery. 2124 */ 2125 spin_lock_irq(shost->host_lock); 2126 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 2127 ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC); 2128 spin_unlock_irq(shost->host_lock); 2129 /* ADISC completes to NPort <nlp_DID> */ 2130 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2131 "0104 ADISC completes to NPort x%x " 2132 "Data: x%x x%x x%x x%x x%x\n", 2133 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 2134 irsp->ulpTimeout, disc, vport->num_disc_nodes); 2135 /* Check to see if link went down during discovery */ 2136 if (lpfc_els_chk_latt(vport)) { 2137 spin_lock_irq(shost->host_lock); 2138 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2139 spin_unlock_irq(shost->host_lock); 2140 goto out; 2141 } 2142 2143 if (irsp->ulpStatus) { 2144 /* Check for retry */ 2145 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2146 /* ELS command is being retried */ 2147 if (disc) { 2148 spin_lock_irq(shost->host_lock); 2149 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2150 spin_unlock_irq(shost->host_lock); 2151 lpfc_set_disctmo(vport); 2152 } 2153 goto out; 2154 } 2155 /* ADISC failed */ 2156 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 2157 "2755 ADISC failure DID:%06X Status:x%x/x%x\n", 2158 ndlp->nlp_DID, irsp->ulpStatus, 2159 irsp->un.ulpWord[4]); 2160 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2161 if (!lpfc_error_lost_link(irsp)) 2162 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2163 NLP_EVT_CMPL_ADISC); 2164 } else 2165 /* Good status, call state machine */ 2166 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2167 NLP_EVT_CMPL_ADISC); 2168 2169 /* Check to see if there are more ADISCs to be sent */ 2170 if (disc && vport->num_disc_nodes) 2171 lpfc_more_adisc(vport); 2172 out: 2173 lpfc_els_free_iocb(phba, cmdiocb); 2174 return; 2175 } 2176 2177 /** 2178 * lpfc_issue_els_adisc - Issue an address discover iocb to an node on a vport 2179 * @vport: pointer to a virtual N_Port data structure. 2180 * @ndlp: pointer to a node-list data structure. 2181 * @retry: number of retries to the command IOCB. 2182 * 2183 * This routine issues an Address Discover (ADISC) for an @ndlp on a 2184 * @vport. It prepares the payload of the ADISC ELS command, updates the 2185 * and states of the ndlp, and invokes the lpfc_sli_issue_iocb() routine 2186 * to issue the ADISC ELS command. 2187 * 2188 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 2189 * will be incremented by 1 for holding the ndlp and the reference to ndlp 2190 * will be stored into the context1 field of the IOCB for the completion 2191 * callback function to the ADISC ELS command. 2192 * 2193 * Return code 2194 * 0 - successfully issued adisc 2195 * 1 - failed to issue adisc 2196 **/ 2197 int 2198 lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2199 uint8_t retry) 2200 { 2201 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2202 struct lpfc_hba *phba = vport->phba; 2203 ADISC *ap; 2204 IOCB_t *icmd; 2205 struct lpfc_iocbq *elsiocb; 2206 uint8_t *pcmd; 2207 uint16_t cmdsize; 2208 2209 cmdsize = (sizeof(uint32_t) + sizeof(ADISC)); 2210 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2211 ndlp->nlp_DID, ELS_CMD_ADISC); 2212 if (!elsiocb) 2213 return 1; 2214 2215 icmd = &elsiocb->iocb; 2216 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2217 2218 /* For ADISC request, remainder of payload is service parameters */ 2219 *((uint32_t *) (pcmd)) = ELS_CMD_ADISC; 2220 pcmd += sizeof(uint32_t); 2221 2222 /* Fill in ADISC payload */ 2223 ap = (ADISC *) pcmd; 2224 ap->hardAL_PA = phba->fc_pref_ALPA; 2225 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 2226 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 2227 ap->DID = be32_to_cpu(vport->fc_myDID); 2228 2229 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2230 "Issue ADISC: did:x%x", 2231 ndlp->nlp_DID, 0, 0); 2232 2233 phba->fc_stat.elsXmitADISC++; 2234 elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc; 2235 spin_lock_irq(shost->host_lock); 2236 ndlp->nlp_flag |= NLP_ADISC_SND; 2237 spin_unlock_irq(shost->host_lock); 2238 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == 2239 IOCB_ERROR) { 2240 spin_lock_irq(shost->host_lock); 2241 ndlp->nlp_flag &= ~NLP_ADISC_SND; 2242 spin_unlock_irq(shost->host_lock); 2243 lpfc_els_free_iocb(phba, elsiocb); 2244 return 1; 2245 } 2246 return 0; 2247 } 2248 2249 /** 2250 * lpfc_cmpl_els_logo - Completion callback function for logo 2251 * @phba: pointer to lpfc hba data structure. 2252 * @cmdiocb: pointer to lpfc command iocb data structure. 2253 * @rspiocb: pointer to lpfc response iocb data structure. 2254 * 2255 * This routine is the completion function for issuing the ELS Logout (LOGO) 2256 * command. If no error status was reported from the LOGO response, the 2257 * state machine of the associated ndlp shall be invoked for transition with 2258 * respect to NLP_EVT_CMPL_LOGO event. Otherwise, if error status was reported, 2259 * the lpfc_els_retry() routine will be invoked to retry the LOGO command. 2260 **/ 2261 static void 2262 lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2263 struct lpfc_iocbq *rspiocb) 2264 { 2265 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 2266 struct lpfc_vport *vport = ndlp->vport; 2267 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2268 IOCB_t *irsp; 2269 struct lpfc_sli *psli; 2270 struct lpfcMboxq *mbox; 2271 2272 psli = &phba->sli; 2273 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2274 cmdiocb->context_un.rsp_iocb = rspiocb; 2275 2276 irsp = &(rspiocb->iocb); 2277 spin_lock_irq(shost->host_lock); 2278 ndlp->nlp_flag &= ~NLP_LOGO_SND; 2279 spin_unlock_irq(shost->host_lock); 2280 2281 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2282 "LOGO cmpl: status:x%x/x%x did:x%x", 2283 irsp->ulpStatus, irsp->un.ulpWord[4], 2284 ndlp->nlp_DID); 2285 /* LOGO completes to NPort <nlp_DID> */ 2286 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2287 "0105 LOGO completes to NPort x%x " 2288 "Data: x%x x%x x%x x%x\n", 2289 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 2290 irsp->ulpTimeout, vport->num_disc_nodes); 2291 /* Check to see if link went down during discovery */ 2292 if (lpfc_els_chk_latt(vport)) 2293 goto out; 2294 2295 if (ndlp->nlp_flag & NLP_TARGET_REMOVE) { 2296 /* NLP_EVT_DEVICE_RM should unregister the RPI 2297 * which should abort all outstanding IOs. 2298 */ 2299 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2300 NLP_EVT_DEVICE_RM); 2301 goto out; 2302 } 2303 2304 if (irsp->ulpStatus) { 2305 /* Check for retry */ 2306 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) 2307 /* ELS command is being retried */ 2308 goto out; 2309 /* LOGO failed */ 2310 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 2311 "2756 LOGO failure DID:%06X Status:x%x/x%x\n", 2312 ndlp->nlp_DID, irsp->ulpStatus, 2313 irsp->un.ulpWord[4]); 2314 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2315 if (lpfc_error_lost_link(irsp)) 2316 goto out; 2317 else 2318 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2319 NLP_EVT_CMPL_LOGO); 2320 } else 2321 /* Good status, call state machine. 2322 * This will unregister the rpi if needed. 2323 */ 2324 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2325 NLP_EVT_CMPL_LOGO); 2326 out: 2327 lpfc_els_free_iocb(phba, cmdiocb); 2328 /* If we are in pt2pt mode, we could rcv new S_ID on PLOGI */ 2329 if ((vport->fc_flag & FC_PT2PT) && 2330 !(vport->fc_flag & FC_PT2PT_PLOGI)) { 2331 phba->pport->fc_myDID = 0; 2332 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2333 if (mbox) { 2334 lpfc_config_link(phba, mbox); 2335 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 2336 mbox->vport = vport; 2337 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == 2338 MBX_NOT_FINISHED) { 2339 mempool_free(mbox, phba->mbox_mem_pool); 2340 } 2341 } 2342 } 2343 return; 2344 } 2345 2346 /** 2347 * lpfc_issue_els_logo - Issue a logo to an node on a vport 2348 * @vport: pointer to a virtual N_Port data structure. 2349 * @ndlp: pointer to a node-list data structure. 2350 * @retry: number of retries to the command IOCB. 2351 * 2352 * This routine constructs and issues an ELS Logout (LOGO) iocb command 2353 * to a remote node, referred by an @ndlp on a @vport. It constructs the 2354 * payload of the IOCB, properly sets up the @ndlp state, and invokes the 2355 * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command. 2356 * 2357 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 2358 * will be incremented by 1 for holding the ndlp and the reference to ndlp 2359 * will be stored into the context1 field of the IOCB for the completion 2360 * callback function to the LOGO ELS command. 2361 * 2362 * Return code 2363 * 0 - successfully issued logo 2364 * 1 - failed to issue logo 2365 **/ 2366 int 2367 lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2368 uint8_t retry) 2369 { 2370 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2371 struct lpfc_hba *phba = vport->phba; 2372 IOCB_t *icmd; 2373 struct lpfc_iocbq *elsiocb; 2374 uint8_t *pcmd; 2375 uint16_t cmdsize; 2376 int rc; 2377 2378 spin_lock_irq(shost->host_lock); 2379 if (ndlp->nlp_flag & NLP_LOGO_SND) { 2380 spin_unlock_irq(shost->host_lock); 2381 return 0; 2382 } 2383 spin_unlock_irq(shost->host_lock); 2384 2385 cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name); 2386 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2387 ndlp->nlp_DID, ELS_CMD_LOGO); 2388 if (!elsiocb) 2389 return 1; 2390 2391 icmd = &elsiocb->iocb; 2392 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2393 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 2394 pcmd += sizeof(uint32_t); 2395 2396 /* Fill in LOGO payload */ 2397 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 2398 pcmd += sizeof(uint32_t); 2399 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 2400 2401 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2402 "Issue LOGO: did:x%x", 2403 ndlp->nlp_DID, 0, 0); 2404 2405 phba->fc_stat.elsXmitLOGO++; 2406 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo; 2407 spin_lock_irq(shost->host_lock); 2408 ndlp->nlp_flag |= NLP_LOGO_SND; 2409 spin_unlock_irq(shost->host_lock); 2410 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2411 2412 if (rc == IOCB_ERROR) { 2413 spin_lock_irq(shost->host_lock); 2414 ndlp->nlp_flag &= ~NLP_LOGO_SND; 2415 spin_unlock_irq(shost->host_lock); 2416 lpfc_els_free_iocb(phba, elsiocb); 2417 return 1; 2418 } 2419 return 0; 2420 } 2421 2422 /** 2423 * lpfc_cmpl_els_cmd - Completion callback function for generic els command 2424 * @phba: pointer to lpfc hba data structure. 2425 * @cmdiocb: pointer to lpfc command iocb data structure. 2426 * @rspiocb: pointer to lpfc response iocb data structure. 2427 * 2428 * This routine is a generic completion callback function for ELS commands. 2429 * Specifically, it is the callback function which does not need to perform 2430 * any command specific operations. It is currently used by the ELS command 2431 * issuing routines for the ELS State Change Request (SCR), 2432 * lpfc_issue_els_scr(), and the ELS Fibre Channel Address Resolution 2433 * Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). Other than 2434 * certain debug loggings, this callback function simply invokes the 2435 * lpfc_els_chk_latt() routine to check whether link went down during the 2436 * discovery process. 2437 **/ 2438 static void 2439 lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2440 struct lpfc_iocbq *rspiocb) 2441 { 2442 struct lpfc_vport *vport = cmdiocb->vport; 2443 IOCB_t *irsp; 2444 2445 irsp = &rspiocb->iocb; 2446 2447 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2448 "ELS cmd cmpl: status:x%x/x%x did:x%x", 2449 irsp->ulpStatus, irsp->un.ulpWord[4], 2450 irsp->un.elsreq64.remoteID); 2451 /* ELS cmd tag <ulpIoTag> completes */ 2452 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2453 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n", 2454 irsp->ulpIoTag, irsp->ulpStatus, 2455 irsp->un.ulpWord[4], irsp->ulpTimeout); 2456 /* Check to see if link went down during discovery */ 2457 lpfc_els_chk_latt(vport); 2458 lpfc_els_free_iocb(phba, cmdiocb); 2459 return; 2460 } 2461 2462 /** 2463 * lpfc_issue_els_scr - Issue a scr to an node on a vport 2464 * @vport: pointer to a host virtual N_Port data structure. 2465 * @nportid: N_Port identifier to the remote node. 2466 * @retry: number of retries to the command IOCB. 2467 * 2468 * This routine issues a State Change Request (SCR) to a fabric node 2469 * on a @vport. The remote node @nportid is passed into the function. It 2470 * first search the @vport node list to find the matching ndlp. If no such 2471 * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An 2472 * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb() 2473 * routine is invoked to send the SCR IOCB. 2474 * 2475 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 2476 * will be incremented by 1 for holding the ndlp and the reference to ndlp 2477 * will be stored into the context1 field of the IOCB for the completion 2478 * callback function to the SCR ELS command. 2479 * 2480 * Return code 2481 * 0 - Successfully issued scr command 2482 * 1 - Failed to issue scr command 2483 **/ 2484 int 2485 lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) 2486 { 2487 struct lpfc_hba *phba = vport->phba; 2488 IOCB_t *icmd; 2489 struct lpfc_iocbq *elsiocb; 2490 struct lpfc_sli *psli; 2491 uint8_t *pcmd; 2492 uint16_t cmdsize; 2493 struct lpfc_nodelist *ndlp; 2494 2495 psli = &phba->sli; 2496 cmdsize = (sizeof(uint32_t) + sizeof(SCR)); 2497 2498 ndlp = lpfc_findnode_did(vport, nportid); 2499 if (!ndlp) { 2500 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 2501 if (!ndlp) 2502 return 1; 2503 lpfc_nlp_init(vport, ndlp, nportid); 2504 lpfc_enqueue_node(vport, ndlp); 2505 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 2506 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 2507 if (!ndlp) 2508 return 1; 2509 } 2510 2511 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2512 ndlp->nlp_DID, ELS_CMD_SCR); 2513 2514 if (!elsiocb) { 2515 /* This will trigger the release of the node just 2516 * allocated 2517 */ 2518 lpfc_nlp_put(ndlp); 2519 return 1; 2520 } 2521 2522 icmd = &elsiocb->iocb; 2523 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2524 2525 *((uint32_t *) (pcmd)) = ELS_CMD_SCR; 2526 pcmd += sizeof(uint32_t); 2527 2528 /* For SCR, remainder of payload is SCR parameter page */ 2529 memset(pcmd, 0, sizeof(SCR)); 2530 ((SCR *) pcmd)->Function = SCR_FUNC_FULL; 2531 2532 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2533 "Issue SCR: did:x%x", 2534 ndlp->nlp_DID, 0, 0); 2535 2536 phba->fc_stat.elsXmitSCR++; 2537 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; 2538 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == 2539 IOCB_ERROR) { 2540 /* The additional lpfc_nlp_put will cause the following 2541 * lpfc_els_free_iocb routine to trigger the rlease of 2542 * the node. 2543 */ 2544 lpfc_nlp_put(ndlp); 2545 lpfc_els_free_iocb(phba, elsiocb); 2546 return 1; 2547 } 2548 /* This will cause the callback-function lpfc_cmpl_els_cmd to 2549 * trigger the release of node. 2550 */ 2551 lpfc_nlp_put(ndlp); 2552 return 0; 2553 } 2554 2555 /** 2556 * lpfc_issue_els_farpr - Issue a farp to an node on a vport 2557 * @vport: pointer to a host virtual N_Port data structure. 2558 * @nportid: N_Port identifier to the remote node. 2559 * @retry: number of retries to the command IOCB. 2560 * 2561 * This routine issues a Fibre Channel Address Resolution Response 2562 * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid) 2563 * is passed into the function. It first search the @vport node list to find 2564 * the matching ndlp. If no such ndlp is found, a new ndlp shall be created 2565 * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the 2566 * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command. 2567 * 2568 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 2569 * will be incremented by 1 for holding the ndlp and the reference to ndlp 2570 * will be stored into the context1 field of the IOCB for the completion 2571 * callback function to the PARPR ELS command. 2572 * 2573 * Return code 2574 * 0 - Successfully issued farpr command 2575 * 1 - Failed to issue farpr command 2576 **/ 2577 static int 2578 lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) 2579 { 2580 struct lpfc_hba *phba = vport->phba; 2581 IOCB_t *icmd; 2582 struct lpfc_iocbq *elsiocb; 2583 struct lpfc_sli *psli; 2584 FARP *fp; 2585 uint8_t *pcmd; 2586 uint32_t *lp; 2587 uint16_t cmdsize; 2588 struct lpfc_nodelist *ondlp; 2589 struct lpfc_nodelist *ndlp; 2590 2591 psli = &phba->sli; 2592 cmdsize = (sizeof(uint32_t) + sizeof(FARP)); 2593 2594 ndlp = lpfc_findnode_did(vport, nportid); 2595 if (!ndlp) { 2596 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 2597 if (!ndlp) 2598 return 1; 2599 lpfc_nlp_init(vport, ndlp, nportid); 2600 lpfc_enqueue_node(vport, ndlp); 2601 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 2602 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 2603 if (!ndlp) 2604 return 1; 2605 } 2606 2607 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2608 ndlp->nlp_DID, ELS_CMD_RNID); 2609 if (!elsiocb) { 2610 /* This will trigger the release of the node just 2611 * allocated 2612 */ 2613 lpfc_nlp_put(ndlp); 2614 return 1; 2615 } 2616 2617 icmd = &elsiocb->iocb; 2618 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2619 2620 *((uint32_t *) (pcmd)) = ELS_CMD_FARPR; 2621 pcmd += sizeof(uint32_t); 2622 2623 /* Fill in FARPR payload */ 2624 fp = (FARP *) (pcmd); 2625 memset(fp, 0, sizeof(FARP)); 2626 lp = (uint32_t *) pcmd; 2627 *lp++ = be32_to_cpu(nportid); 2628 *lp++ = be32_to_cpu(vport->fc_myDID); 2629 fp->Rflags = 0; 2630 fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE); 2631 2632 memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name)); 2633 memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 2634 ondlp = lpfc_findnode_did(vport, nportid); 2635 if (ondlp && NLP_CHK_NODE_ACT(ondlp)) { 2636 memcpy(&fp->OportName, &ondlp->nlp_portname, 2637 sizeof(struct lpfc_name)); 2638 memcpy(&fp->OnodeName, &ondlp->nlp_nodename, 2639 sizeof(struct lpfc_name)); 2640 } 2641 2642 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2643 "Issue FARPR: did:x%x", 2644 ndlp->nlp_DID, 0, 0); 2645 2646 phba->fc_stat.elsXmitFARPR++; 2647 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; 2648 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == 2649 IOCB_ERROR) { 2650 /* The additional lpfc_nlp_put will cause the following 2651 * lpfc_els_free_iocb routine to trigger the release of 2652 * the node. 2653 */ 2654 lpfc_nlp_put(ndlp); 2655 lpfc_els_free_iocb(phba, elsiocb); 2656 return 1; 2657 } 2658 /* This will cause the callback-function lpfc_cmpl_els_cmd to 2659 * trigger the release of the node. 2660 */ 2661 lpfc_nlp_put(ndlp); 2662 return 0; 2663 } 2664 2665 /** 2666 * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry 2667 * @vport: pointer to a host virtual N_Port data structure. 2668 * @nlp: pointer to a node-list data structure. 2669 * 2670 * This routine cancels the timer with a delayed IOCB-command retry for 2671 * a @vport's @ndlp. It stops the timer for the delayed function retrial and 2672 * removes the ELS retry event if it presents. In addition, if the 2673 * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB 2674 * commands are sent for the @vport's nodes that require issuing discovery 2675 * ADISC. 2676 **/ 2677 void 2678 lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) 2679 { 2680 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2681 struct lpfc_work_evt *evtp; 2682 2683 if (!(nlp->nlp_flag & NLP_DELAY_TMO)) 2684 return; 2685 spin_lock_irq(shost->host_lock); 2686 nlp->nlp_flag &= ~NLP_DELAY_TMO; 2687 spin_unlock_irq(shost->host_lock); 2688 del_timer_sync(&nlp->nlp_delayfunc); 2689 nlp->nlp_last_elscmd = 0; 2690 if (!list_empty(&nlp->els_retry_evt.evt_listp)) { 2691 list_del_init(&nlp->els_retry_evt.evt_listp); 2692 /* Decrement nlp reference count held for the delayed retry */ 2693 evtp = &nlp->els_retry_evt; 2694 lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1); 2695 } 2696 if (nlp->nlp_flag & NLP_NPR_2B_DISC) { 2697 spin_lock_irq(shost->host_lock); 2698 nlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2699 spin_unlock_irq(shost->host_lock); 2700 if (vport->num_disc_nodes) { 2701 if (vport->port_state < LPFC_VPORT_READY) { 2702 /* Check if there are more ADISCs to be sent */ 2703 lpfc_more_adisc(vport); 2704 } else { 2705 /* Check if there are more PLOGIs to be sent */ 2706 lpfc_more_plogi(vport); 2707 if (vport->num_disc_nodes == 0) { 2708 spin_lock_irq(shost->host_lock); 2709 vport->fc_flag &= ~FC_NDISC_ACTIVE; 2710 spin_unlock_irq(shost->host_lock); 2711 lpfc_can_disctmo(vport); 2712 lpfc_end_rscn(vport); 2713 } 2714 } 2715 } 2716 } 2717 return; 2718 } 2719 2720 /** 2721 * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer 2722 * @ptr: holder for the pointer to the timer function associated data (ndlp). 2723 * 2724 * This routine is invoked by the ndlp delayed-function timer to check 2725 * whether there is any pending ELS retry event(s) with the node. If not, it 2726 * simply returns. Otherwise, if there is at least one ELS delayed event, it 2727 * adds the delayed events to the HBA work list and invokes the 2728 * lpfc_worker_wake_up() routine to wake up worker thread to process the 2729 * event. Note that lpfc_nlp_get() is called before posting the event to 2730 * the work list to hold reference count of ndlp so that it guarantees the 2731 * reference to ndlp will still be available when the worker thread gets 2732 * to the event associated with the ndlp. 2733 **/ 2734 void 2735 lpfc_els_retry_delay(unsigned long ptr) 2736 { 2737 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr; 2738 struct lpfc_vport *vport = ndlp->vport; 2739 struct lpfc_hba *phba = vport->phba; 2740 unsigned long flags; 2741 struct lpfc_work_evt *evtp = &ndlp->els_retry_evt; 2742 2743 spin_lock_irqsave(&phba->hbalock, flags); 2744 if (!list_empty(&evtp->evt_listp)) { 2745 spin_unlock_irqrestore(&phba->hbalock, flags); 2746 return; 2747 } 2748 2749 /* We need to hold the node by incrementing the reference 2750 * count until the queued work is done 2751 */ 2752 evtp->evt_arg1 = lpfc_nlp_get(ndlp); 2753 if (evtp->evt_arg1) { 2754 evtp->evt = LPFC_EVT_ELS_RETRY; 2755 list_add_tail(&evtp->evt_listp, &phba->work_list); 2756 lpfc_worker_wake_up(phba); 2757 } 2758 spin_unlock_irqrestore(&phba->hbalock, flags); 2759 return; 2760 } 2761 2762 /** 2763 * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function 2764 * @ndlp: pointer to a node-list data structure. 2765 * 2766 * This routine is the worker-thread handler for processing the @ndlp delayed 2767 * event(s), posted by the lpfc_els_retry_delay() routine. It simply retrieves 2768 * the last ELS command from the associated ndlp and invokes the proper ELS 2769 * function according to the delayed ELS command to retry the command. 2770 **/ 2771 void 2772 lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) 2773 { 2774 struct lpfc_vport *vport = ndlp->vport; 2775 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2776 uint32_t cmd, did, retry; 2777 2778 spin_lock_irq(shost->host_lock); 2779 did = ndlp->nlp_DID; 2780 cmd = ndlp->nlp_last_elscmd; 2781 ndlp->nlp_last_elscmd = 0; 2782 2783 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { 2784 spin_unlock_irq(shost->host_lock); 2785 return; 2786 } 2787 2788 ndlp->nlp_flag &= ~NLP_DELAY_TMO; 2789 spin_unlock_irq(shost->host_lock); 2790 /* 2791 * If a discovery event readded nlp_delayfunc after timer 2792 * firing and before processing the timer, cancel the 2793 * nlp_delayfunc. 2794 */ 2795 del_timer_sync(&ndlp->nlp_delayfunc); 2796 retry = ndlp->nlp_retry; 2797 ndlp->nlp_retry = 0; 2798 2799 switch (cmd) { 2800 case ELS_CMD_FLOGI: 2801 lpfc_issue_els_flogi(vport, ndlp, retry); 2802 break; 2803 case ELS_CMD_PLOGI: 2804 if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) { 2805 ndlp->nlp_prev_state = ndlp->nlp_state; 2806 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 2807 } 2808 break; 2809 case ELS_CMD_ADISC: 2810 if (!lpfc_issue_els_adisc(vport, ndlp, retry)) { 2811 ndlp->nlp_prev_state = ndlp->nlp_state; 2812 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 2813 } 2814 break; 2815 case ELS_CMD_PRLI: 2816 if (!lpfc_issue_els_prli(vport, ndlp, retry)) { 2817 ndlp->nlp_prev_state = ndlp->nlp_state; 2818 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 2819 } 2820 break; 2821 case ELS_CMD_LOGO: 2822 if (!lpfc_issue_els_logo(vport, ndlp, retry)) { 2823 ndlp->nlp_prev_state = ndlp->nlp_state; 2824 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 2825 } 2826 break; 2827 case ELS_CMD_FDISC: 2828 if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)) 2829 lpfc_issue_els_fdisc(vport, ndlp, retry); 2830 break; 2831 } 2832 return; 2833 } 2834 2835 /** 2836 * lpfc_els_retry - Make retry decision on an els command iocb 2837 * @phba: pointer to lpfc hba data structure. 2838 * @cmdiocb: pointer to lpfc command iocb data structure. 2839 * @rspiocb: pointer to lpfc response iocb data structure. 2840 * 2841 * This routine makes a retry decision on an ELS command IOCB, which has 2842 * failed. The following ELS IOCBs use this function for retrying the command 2843 * when previously issued command responsed with error status: FLOGI, PLOGI, 2844 * PRLI, ADISC, LOGO, and FDISC. Based on the ELS command type and the 2845 * returned error status, it makes the decision whether a retry shall be 2846 * issued for the command, and whether a retry shall be made immediately or 2847 * delayed. In the former case, the corresponding ELS command issuing-function 2848 * is called to retry the command. In the later case, the ELS command shall 2849 * be posted to the ndlp delayed event and delayed function timer set to the 2850 * ndlp for the delayed command issusing. 2851 * 2852 * Return code 2853 * 0 - No retry of els command is made 2854 * 1 - Immediate or delayed retry of els command is made 2855 **/ 2856 static int 2857 lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2858 struct lpfc_iocbq *rspiocb) 2859 { 2860 struct lpfc_vport *vport = cmdiocb->vport; 2861 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2862 IOCB_t *irsp = &rspiocb->iocb; 2863 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 2864 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 2865 uint32_t *elscmd; 2866 struct ls_rjt stat; 2867 int retry = 0, maxretry = lpfc_max_els_tries, delay = 0; 2868 int logerr = 0; 2869 uint32_t cmd = 0; 2870 uint32_t did; 2871 2872 2873 /* Note: context2 may be 0 for internal driver abort 2874 * of delays ELS command. 2875 */ 2876 2877 if (pcmd && pcmd->virt) { 2878 elscmd = (uint32_t *) (pcmd->virt); 2879 cmd = *elscmd++; 2880 } 2881 2882 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) 2883 did = ndlp->nlp_DID; 2884 else { 2885 /* We should only hit this case for retrying PLOGI */ 2886 did = irsp->un.elsreq64.remoteID; 2887 ndlp = lpfc_findnode_did(vport, did); 2888 if ((!ndlp || !NLP_CHK_NODE_ACT(ndlp)) 2889 && (cmd != ELS_CMD_PLOGI)) 2890 return 1; 2891 } 2892 2893 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2894 "Retry ELS: wd7:x%x wd4:x%x did:x%x", 2895 *(((uint32_t *) irsp) + 7), irsp->un.ulpWord[4], ndlp->nlp_DID); 2896 2897 switch (irsp->ulpStatus) { 2898 case IOSTAT_FCP_RSP_ERROR: 2899 break; 2900 case IOSTAT_REMOTE_STOP: 2901 if (phba->sli_rev == LPFC_SLI_REV4) { 2902 /* This IO was aborted by the target, we don't 2903 * know the rxid and because we did not send the 2904 * ABTS we cannot generate and RRQ. 2905 */ 2906 lpfc_set_rrq_active(phba, ndlp, 2907 cmdiocb->sli4_xritag, 0, 0); 2908 } 2909 break; 2910 case IOSTAT_LOCAL_REJECT: 2911 switch ((irsp->un.ulpWord[4] & 0xff)) { 2912 case IOERR_LOOP_OPEN_FAILURE: 2913 if (cmd == ELS_CMD_FLOGI) { 2914 if (PCI_DEVICE_ID_HORNET == 2915 phba->pcidev->device) { 2916 phba->fc_topology = LPFC_TOPOLOGY_LOOP; 2917 phba->pport->fc_myDID = 0; 2918 phba->alpa_map[0] = 0; 2919 phba->alpa_map[1] = 0; 2920 } 2921 } 2922 if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0) 2923 delay = 1000; 2924 retry = 1; 2925 break; 2926 2927 case IOERR_ILLEGAL_COMMAND: 2928 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 2929 "0124 Retry illegal cmd x%x " 2930 "retry:x%x delay:x%x\n", 2931 cmd, cmdiocb->retry, delay); 2932 retry = 1; 2933 /* All command's retry policy */ 2934 maxretry = 8; 2935 if (cmdiocb->retry > 2) 2936 delay = 1000; 2937 break; 2938 2939 case IOERR_NO_RESOURCES: 2940 logerr = 1; /* HBA out of resources */ 2941 retry = 1; 2942 if (cmdiocb->retry > 100) 2943 delay = 100; 2944 maxretry = 250; 2945 break; 2946 2947 case IOERR_ILLEGAL_FRAME: 2948 delay = 100; 2949 retry = 1; 2950 break; 2951 2952 case IOERR_SEQUENCE_TIMEOUT: 2953 case IOERR_INVALID_RPI: 2954 retry = 1; 2955 break; 2956 } 2957 break; 2958 2959 case IOSTAT_NPORT_RJT: 2960 case IOSTAT_FABRIC_RJT: 2961 if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) { 2962 retry = 1; 2963 break; 2964 } 2965 break; 2966 2967 case IOSTAT_NPORT_BSY: 2968 case IOSTAT_FABRIC_BSY: 2969 logerr = 1; /* Fabric / Remote NPort out of resources */ 2970 retry = 1; 2971 break; 2972 2973 case IOSTAT_LS_RJT: 2974 stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]); 2975 /* Added for Vendor specifc support 2976 * Just keep retrying for these Rsn / Exp codes 2977 */ 2978 switch (stat.un.b.lsRjtRsnCode) { 2979 case LSRJT_UNABLE_TPC: 2980 if (stat.un.b.lsRjtRsnCodeExp == 2981 LSEXP_CMD_IN_PROGRESS) { 2982 if (cmd == ELS_CMD_PLOGI) { 2983 delay = 1000; 2984 maxretry = 48; 2985 } 2986 retry = 1; 2987 break; 2988 } 2989 if (stat.un.b.lsRjtRsnCodeExp == 2990 LSEXP_CANT_GIVE_DATA) { 2991 if (cmd == ELS_CMD_PLOGI) { 2992 delay = 1000; 2993 maxretry = 48; 2994 } 2995 retry = 1; 2996 break; 2997 } 2998 if (cmd == ELS_CMD_PLOGI) { 2999 delay = 1000; 3000 maxretry = lpfc_max_els_tries + 1; 3001 retry = 1; 3002 break; 3003 } 3004 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 3005 (cmd == ELS_CMD_FDISC) && 3006 (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){ 3007 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 3008 "0125 FDISC Failed (x%x). " 3009 "Fabric out of resources\n", 3010 stat.un.lsRjtError); 3011 lpfc_vport_set_state(vport, 3012 FC_VPORT_NO_FABRIC_RSCS); 3013 } 3014 break; 3015 3016 case LSRJT_LOGICAL_BSY: 3017 if ((cmd == ELS_CMD_PLOGI) || 3018 (cmd == ELS_CMD_PRLI)) { 3019 delay = 1000; 3020 maxretry = 48; 3021 } else if (cmd == ELS_CMD_FDISC) { 3022 /* FDISC retry policy */ 3023 maxretry = 48; 3024 if (cmdiocb->retry >= 32) 3025 delay = 1000; 3026 } 3027 retry = 1; 3028 break; 3029 3030 case LSRJT_LOGICAL_ERR: 3031 /* There are some cases where switches return this 3032 * error when they are not ready and should be returning 3033 * Logical Busy. We should delay every time. 3034 */ 3035 if (cmd == ELS_CMD_FDISC && 3036 stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) { 3037 maxretry = 3; 3038 delay = 1000; 3039 retry = 1; 3040 break; 3041 } 3042 case LSRJT_PROTOCOL_ERR: 3043 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 3044 (cmd == ELS_CMD_FDISC) && 3045 ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) || 3046 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID)) 3047 ) { 3048 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 3049 "0122 FDISC Failed (x%x). " 3050 "Fabric Detected Bad WWN\n", 3051 stat.un.lsRjtError); 3052 lpfc_vport_set_state(vport, 3053 FC_VPORT_FABRIC_REJ_WWN); 3054 } 3055 break; 3056 } 3057 break; 3058 3059 case IOSTAT_INTERMED_RSP: 3060 case IOSTAT_BA_RJT: 3061 break; 3062 3063 default: 3064 break; 3065 } 3066 3067 if (did == FDMI_DID) 3068 retry = 1; 3069 3070 if (((cmd == ELS_CMD_FLOGI) || (cmd == ELS_CMD_FDISC)) && 3071 (phba->fc_topology != LPFC_TOPOLOGY_LOOP) && 3072 !lpfc_error_lost_link(irsp)) { 3073 /* FLOGI retry policy */ 3074 retry = 1; 3075 /* retry forever */ 3076 maxretry = 0; 3077 if (cmdiocb->retry >= 100) 3078 delay = 5000; 3079 else if (cmdiocb->retry >= 32) 3080 delay = 1000; 3081 } 3082 3083 cmdiocb->retry++; 3084 if (maxretry && (cmdiocb->retry >= maxretry)) { 3085 phba->fc_stat.elsRetryExceeded++; 3086 retry = 0; 3087 } 3088 3089 if ((vport->load_flag & FC_UNLOADING) != 0) 3090 retry = 0; 3091 3092 if (retry) { 3093 if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) { 3094 /* Stop retrying PLOGI and FDISC if in FCF discovery */ 3095 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 3096 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3097 "2849 Stop retry ELS command " 3098 "x%x to remote NPORT x%x, " 3099 "Data: x%x x%x\n", cmd, did, 3100 cmdiocb->retry, delay); 3101 return 0; 3102 } 3103 } 3104 3105 /* Retry ELS command <elsCmd> to remote NPORT <did> */ 3106 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3107 "0107 Retry ELS command x%x to remote " 3108 "NPORT x%x Data: x%x x%x\n", 3109 cmd, did, cmdiocb->retry, delay); 3110 3111 if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) && 3112 ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) || 3113 ((irsp->un.ulpWord[4] & 0xff) != IOERR_NO_RESOURCES))) { 3114 /* Don't reset timer for no resources */ 3115 3116 /* If discovery / RSCN timer is running, reset it */ 3117 if (timer_pending(&vport->fc_disctmo) || 3118 (vport->fc_flag & FC_RSCN_MODE)) 3119 lpfc_set_disctmo(vport); 3120 } 3121 3122 phba->fc_stat.elsXmitRetry++; 3123 if (ndlp && NLP_CHK_NODE_ACT(ndlp) && delay) { 3124 phba->fc_stat.elsDelayRetry++; 3125 ndlp->nlp_retry = cmdiocb->retry; 3126 3127 /* delay is specified in milliseconds */ 3128 mod_timer(&ndlp->nlp_delayfunc, 3129 jiffies + msecs_to_jiffies(delay)); 3130 spin_lock_irq(shost->host_lock); 3131 ndlp->nlp_flag |= NLP_DELAY_TMO; 3132 spin_unlock_irq(shost->host_lock); 3133 3134 ndlp->nlp_prev_state = ndlp->nlp_state; 3135 if (cmd == ELS_CMD_PRLI) 3136 lpfc_nlp_set_state(vport, ndlp, 3137 NLP_STE_REG_LOGIN_ISSUE); 3138 else 3139 lpfc_nlp_set_state(vport, ndlp, 3140 NLP_STE_NPR_NODE); 3141 ndlp->nlp_last_elscmd = cmd; 3142 3143 return 1; 3144 } 3145 switch (cmd) { 3146 case ELS_CMD_FLOGI: 3147 lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry); 3148 return 1; 3149 case ELS_CMD_FDISC: 3150 lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry); 3151 return 1; 3152 case ELS_CMD_PLOGI: 3153 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { 3154 ndlp->nlp_prev_state = ndlp->nlp_state; 3155 lpfc_nlp_set_state(vport, ndlp, 3156 NLP_STE_PLOGI_ISSUE); 3157 } 3158 lpfc_issue_els_plogi(vport, did, cmdiocb->retry); 3159 return 1; 3160 case ELS_CMD_ADISC: 3161 ndlp->nlp_prev_state = ndlp->nlp_state; 3162 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 3163 lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry); 3164 return 1; 3165 case ELS_CMD_PRLI: 3166 ndlp->nlp_prev_state = ndlp->nlp_state; 3167 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 3168 lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry); 3169 return 1; 3170 case ELS_CMD_LOGO: 3171 ndlp->nlp_prev_state = ndlp->nlp_state; 3172 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 3173 lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry); 3174 return 1; 3175 } 3176 } 3177 /* No retry ELS command <elsCmd> to remote NPORT <did> */ 3178 if (logerr) { 3179 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 3180 "0137 No retry ELS command x%x to remote " 3181 "NPORT x%x: Out of Resources: Error:x%x/%x\n", 3182 cmd, did, irsp->ulpStatus, 3183 irsp->un.ulpWord[4]); 3184 } 3185 else { 3186 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3187 "0108 No retry ELS command x%x to remote " 3188 "NPORT x%x Retried:%d Error:x%x/%x\n", 3189 cmd, did, cmdiocb->retry, irsp->ulpStatus, 3190 irsp->un.ulpWord[4]); 3191 } 3192 return 0; 3193 } 3194 3195 /** 3196 * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb 3197 * @phba: pointer to lpfc hba data structure. 3198 * @buf_ptr1: pointer to the lpfc DMA buffer data structure. 3199 * 3200 * This routine releases the lpfc DMA (Direct Memory Access) buffer(s) 3201 * associated with a command IOCB back to the lpfc DMA buffer pool. It first 3202 * checks to see whether there is a lpfc DMA buffer associated with the 3203 * response of the command IOCB. If so, it will be released before releasing 3204 * the lpfc DMA buffer associated with the IOCB itself. 3205 * 3206 * Return code 3207 * 0 - Successfully released lpfc DMA buffer (currently, always return 0) 3208 **/ 3209 static int 3210 lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1) 3211 { 3212 struct lpfc_dmabuf *buf_ptr; 3213 3214 /* Free the response before processing the command. */ 3215 if (!list_empty(&buf_ptr1->list)) { 3216 list_remove_head(&buf_ptr1->list, buf_ptr, 3217 struct lpfc_dmabuf, 3218 list); 3219 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 3220 kfree(buf_ptr); 3221 } 3222 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys); 3223 kfree(buf_ptr1); 3224 return 0; 3225 } 3226 3227 /** 3228 * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl 3229 * @phba: pointer to lpfc hba data structure. 3230 * @buf_ptr: pointer to the lpfc dma buffer data structure. 3231 * 3232 * This routine releases the lpfc Direct Memory Access (DMA) buffer 3233 * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer 3234 * pool. 3235 * 3236 * Return code 3237 * 0 - Successfully released lpfc DMA buffer (currently, always return 0) 3238 **/ 3239 static int 3240 lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr) 3241 { 3242 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 3243 kfree(buf_ptr); 3244 return 0; 3245 } 3246 3247 /** 3248 * lpfc_els_free_iocb - Free a command iocb and its associated resources 3249 * @phba: pointer to lpfc hba data structure. 3250 * @elsiocb: pointer to lpfc els command iocb data structure. 3251 * 3252 * This routine frees a command IOCB and its associated resources. The 3253 * command IOCB data structure contains the reference to various associated 3254 * resources, these fields must be set to NULL if the associated reference 3255 * not present: 3256 * context1 - reference to ndlp 3257 * context2 - reference to cmd 3258 * context2->next - reference to rsp 3259 * context3 - reference to bpl 3260 * 3261 * It first properly decrements the reference count held on ndlp for the 3262 * IOCB completion callback function. If LPFC_DELAY_MEM_FREE flag is not 3263 * set, it invokes the lpfc_els_free_data() routine to release the Direct 3264 * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it 3265 * adds the DMA buffer the @phba data structure for the delayed release. 3266 * If reference to the Buffer Pointer List (BPL) is present, the 3267 * lpfc_els_free_bpl() routine is invoked to release the DMA memory 3268 * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is 3269 * invoked to release the IOCB data structure back to @phba IOCBQ list. 3270 * 3271 * Return code 3272 * 0 - Success (currently, always return 0) 3273 **/ 3274 int 3275 lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb) 3276 { 3277 struct lpfc_dmabuf *buf_ptr, *buf_ptr1; 3278 struct lpfc_nodelist *ndlp; 3279 3280 ndlp = (struct lpfc_nodelist *)elsiocb->context1; 3281 if (ndlp) { 3282 if (ndlp->nlp_flag & NLP_DEFER_RM) { 3283 lpfc_nlp_put(ndlp); 3284 3285 /* If the ndlp is not being used by another discovery 3286 * thread, free it. 3287 */ 3288 if (!lpfc_nlp_not_used(ndlp)) { 3289 /* If ndlp is being used by another discovery 3290 * thread, just clear NLP_DEFER_RM 3291 */ 3292 ndlp->nlp_flag &= ~NLP_DEFER_RM; 3293 } 3294 } 3295 else 3296 lpfc_nlp_put(ndlp); 3297 elsiocb->context1 = NULL; 3298 } 3299 /* context2 = cmd, context2->next = rsp, context3 = bpl */ 3300 if (elsiocb->context2) { 3301 if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) { 3302 /* Firmware could still be in progress of DMAing 3303 * payload, so don't free data buffer till after 3304 * a hbeat. 3305 */ 3306 elsiocb->iocb_flag &= ~LPFC_DELAY_MEM_FREE; 3307 buf_ptr = elsiocb->context2; 3308 elsiocb->context2 = NULL; 3309 if (buf_ptr) { 3310 buf_ptr1 = NULL; 3311 spin_lock_irq(&phba->hbalock); 3312 if (!list_empty(&buf_ptr->list)) { 3313 list_remove_head(&buf_ptr->list, 3314 buf_ptr1, struct lpfc_dmabuf, 3315 list); 3316 INIT_LIST_HEAD(&buf_ptr1->list); 3317 list_add_tail(&buf_ptr1->list, 3318 &phba->elsbuf); 3319 phba->elsbuf_cnt++; 3320 } 3321 INIT_LIST_HEAD(&buf_ptr->list); 3322 list_add_tail(&buf_ptr->list, &phba->elsbuf); 3323 phba->elsbuf_cnt++; 3324 spin_unlock_irq(&phba->hbalock); 3325 } 3326 } else { 3327 buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2; 3328 lpfc_els_free_data(phba, buf_ptr1); 3329 } 3330 } 3331 3332 if (elsiocb->context3) { 3333 buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3; 3334 lpfc_els_free_bpl(phba, buf_ptr); 3335 } 3336 lpfc_sli_release_iocbq(phba, elsiocb); 3337 return 0; 3338 } 3339 3340 /** 3341 * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response 3342 * @phba: pointer to lpfc hba data structure. 3343 * @cmdiocb: pointer to lpfc command iocb data structure. 3344 * @rspiocb: pointer to lpfc response iocb data structure. 3345 * 3346 * This routine is the completion callback function to the Logout (LOGO) 3347 * Accept (ACC) Response ELS command. This routine is invoked to indicate 3348 * the completion of the LOGO process. It invokes the lpfc_nlp_not_used() to 3349 * release the ndlp if it has the last reference remaining (reference count 3350 * is 1). If succeeded (meaning ndlp released), it sets the IOCB context1 3351 * field to NULL to inform the following lpfc_els_free_iocb() routine no 3352 * ndlp reference count needs to be decremented. Otherwise, the ndlp 3353 * reference use-count shall be decremented by the lpfc_els_free_iocb() 3354 * routine. Finally, the lpfc_els_free_iocb() is invoked to release the 3355 * IOCB data structure. 3356 **/ 3357 static void 3358 lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3359 struct lpfc_iocbq *rspiocb) 3360 { 3361 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 3362 struct lpfc_vport *vport = cmdiocb->vport; 3363 IOCB_t *irsp; 3364 3365 irsp = &rspiocb->iocb; 3366 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 3367 "ACC LOGO cmpl: status:x%x/x%x did:x%x", 3368 irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID); 3369 /* ACC to LOGO completes to NPort <nlp_DID> */ 3370 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3371 "0109 ACC to LOGO completes to NPort x%x " 3372 "Data: x%x x%x x%x\n", 3373 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 3374 ndlp->nlp_rpi); 3375 3376 if (ndlp->nlp_state == NLP_STE_NPR_NODE) { 3377 /* NPort Recovery mode or node is just allocated */ 3378 if (!lpfc_nlp_not_used(ndlp)) { 3379 /* If the ndlp is being used by another discovery 3380 * thread, just unregister the RPI. 3381 */ 3382 lpfc_unreg_rpi(vport, ndlp); 3383 } else { 3384 /* Indicate the node has already released, should 3385 * not reference to it from within lpfc_els_free_iocb. 3386 */ 3387 cmdiocb->context1 = NULL; 3388 } 3389 } 3390 lpfc_els_free_iocb(phba, cmdiocb); 3391 return; 3392 } 3393 3394 /** 3395 * lpfc_mbx_cmpl_dflt_rpi - Completion callbk func for unreg dflt rpi mbox cmd 3396 * @phba: pointer to lpfc hba data structure. 3397 * @pmb: pointer to the driver internal queue element for mailbox command. 3398 * 3399 * This routine is the completion callback function for unregister default 3400 * RPI (Remote Port Index) mailbox command to the @phba. It simply releases 3401 * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and 3402 * decrements the ndlp reference count held for this completion callback 3403 * function. After that, it invokes the lpfc_nlp_not_used() to check 3404 * whether there is only one reference left on the ndlp. If so, it will 3405 * perform one more decrement and trigger the release of the ndlp. 3406 **/ 3407 void 3408 lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 3409 { 3410 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 3411 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 3412 3413 pmb->context1 = NULL; 3414 pmb->context2 = NULL; 3415 3416 lpfc_mbuf_free(phba, mp->virt, mp->phys); 3417 kfree(mp); 3418 mempool_free(pmb, phba->mbox_mem_pool); 3419 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { 3420 lpfc_nlp_put(ndlp); 3421 /* This is the end of the default RPI cleanup logic for this 3422 * ndlp. If no other discovery threads are using this ndlp. 3423 * we should free all resources associated with it. 3424 */ 3425 lpfc_nlp_not_used(ndlp); 3426 } 3427 3428 return; 3429 } 3430 3431 /** 3432 * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd 3433 * @phba: pointer to lpfc hba data structure. 3434 * @cmdiocb: pointer to lpfc command iocb data structure. 3435 * @rspiocb: pointer to lpfc response iocb data structure. 3436 * 3437 * This routine is the completion callback function for ELS Response IOCB 3438 * command. In normal case, this callback function just properly sets the 3439 * nlp_flag bitmap in the ndlp data structure, if the mbox command reference 3440 * field in the command IOCB is not NULL, the referred mailbox command will 3441 * be send out, and then invokes the lpfc_els_free_iocb() routine to release 3442 * the IOCB. Under error conditions, such as when a LS_RJT is returned or a 3443 * link down event occurred during the discovery, the lpfc_nlp_not_used() 3444 * routine shall be invoked trying to release the ndlp if no other threads 3445 * are currently referring it. 3446 **/ 3447 static void 3448 lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3449 struct lpfc_iocbq *rspiocb) 3450 { 3451 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 3452 struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL; 3453 struct Scsi_Host *shost = vport ? lpfc_shost_from_vport(vport) : NULL; 3454 IOCB_t *irsp; 3455 uint8_t *pcmd; 3456 LPFC_MBOXQ_t *mbox = NULL; 3457 struct lpfc_dmabuf *mp = NULL; 3458 uint32_t ls_rjt = 0; 3459 3460 irsp = &rspiocb->iocb; 3461 3462 if (cmdiocb->context_un.mbox) 3463 mbox = cmdiocb->context_un.mbox; 3464 3465 /* First determine if this is a LS_RJT cmpl. Note, this callback 3466 * function can have cmdiocb->contest1 (ndlp) field set to NULL. 3467 */ 3468 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt); 3469 if (ndlp && NLP_CHK_NODE_ACT(ndlp) && 3470 (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) { 3471 /* A LS_RJT associated with Default RPI cleanup has its own 3472 * separate code path. 3473 */ 3474 if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI)) 3475 ls_rjt = 1; 3476 } 3477 3478 /* Check to see if link went down during discovery */ 3479 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || lpfc_els_chk_latt(vport)) { 3480 if (mbox) { 3481 mp = (struct lpfc_dmabuf *) mbox->context1; 3482 if (mp) { 3483 lpfc_mbuf_free(phba, mp->virt, mp->phys); 3484 kfree(mp); 3485 } 3486 mempool_free(mbox, phba->mbox_mem_pool); 3487 } 3488 if (ndlp && NLP_CHK_NODE_ACT(ndlp) && 3489 (ndlp->nlp_flag & NLP_RM_DFLT_RPI)) 3490 if (lpfc_nlp_not_used(ndlp)) { 3491 ndlp = NULL; 3492 /* Indicate the node has already released, 3493 * should not reference to it from within 3494 * the routine lpfc_els_free_iocb. 3495 */ 3496 cmdiocb->context1 = NULL; 3497 } 3498 goto out; 3499 } 3500 3501 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 3502 "ELS rsp cmpl: status:x%x/x%x did:x%x", 3503 irsp->ulpStatus, irsp->un.ulpWord[4], 3504 cmdiocb->iocb.un.elsreq64.remoteID); 3505 /* ELS response tag <ulpIoTag> completes */ 3506 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3507 "0110 ELS response tag x%x completes " 3508 "Data: x%x x%x x%x x%x x%x x%x x%x\n", 3509 cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus, 3510 rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout, 3511 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 3512 ndlp->nlp_rpi); 3513 if (mbox) { 3514 if ((rspiocb->iocb.ulpStatus == 0) 3515 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { 3516 lpfc_unreg_rpi(vport, ndlp); 3517 /* Increment reference count to ndlp to hold the 3518 * reference to ndlp for the callback function. 3519 */ 3520 mbox->context2 = lpfc_nlp_get(ndlp); 3521 mbox->vport = vport; 3522 if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) { 3523 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG; 3524 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 3525 } 3526 else { 3527 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; 3528 ndlp->nlp_prev_state = ndlp->nlp_state; 3529 lpfc_nlp_set_state(vport, ndlp, 3530 NLP_STE_REG_LOGIN_ISSUE); 3531 } 3532 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 3533 != MBX_NOT_FINISHED) 3534 goto out; 3535 else 3536 /* Decrement the ndlp reference count we 3537 * set for this failed mailbox command. 3538 */ 3539 lpfc_nlp_put(ndlp); 3540 3541 /* ELS rsp: Cannot issue reg_login for <NPortid> */ 3542 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 3543 "0138 ELS rsp: Cannot issue reg_login for x%x " 3544 "Data: x%x x%x x%x\n", 3545 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 3546 ndlp->nlp_rpi); 3547 3548 if (lpfc_nlp_not_used(ndlp)) { 3549 ndlp = NULL; 3550 /* Indicate node has already been released, 3551 * should not reference to it from within 3552 * the routine lpfc_els_free_iocb. 3553 */ 3554 cmdiocb->context1 = NULL; 3555 } 3556 } else { 3557 /* Do not drop node for lpfc_els_abort'ed ELS cmds */ 3558 if (!lpfc_error_lost_link(irsp) && 3559 ndlp->nlp_flag & NLP_ACC_REGLOGIN) { 3560 if (lpfc_nlp_not_used(ndlp)) { 3561 ndlp = NULL; 3562 /* Indicate node has already been 3563 * released, should not reference 3564 * to it from within the routine 3565 * lpfc_els_free_iocb. 3566 */ 3567 cmdiocb->context1 = NULL; 3568 } 3569 } 3570 } 3571 mp = (struct lpfc_dmabuf *) mbox->context1; 3572 if (mp) { 3573 lpfc_mbuf_free(phba, mp->virt, mp->phys); 3574 kfree(mp); 3575 } 3576 mempool_free(mbox, phba->mbox_mem_pool); 3577 } 3578 out: 3579 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { 3580 spin_lock_irq(shost->host_lock); 3581 ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI); 3582 spin_unlock_irq(shost->host_lock); 3583 3584 /* If the node is not being used by another discovery thread, 3585 * and we are sending a reject, we are done with it. 3586 * Release driver reference count here and free associated 3587 * resources. 3588 */ 3589 if (ls_rjt) 3590 if (lpfc_nlp_not_used(ndlp)) 3591 /* Indicate node has already been released, 3592 * should not reference to it from within 3593 * the routine lpfc_els_free_iocb. 3594 */ 3595 cmdiocb->context1 = NULL; 3596 } 3597 3598 lpfc_els_free_iocb(phba, cmdiocb); 3599 return; 3600 } 3601 3602 /** 3603 * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command 3604 * @vport: pointer to a host virtual N_Port data structure. 3605 * @flag: the els command code to be accepted. 3606 * @oldiocb: pointer to the original lpfc command iocb data structure. 3607 * @ndlp: pointer to a node-list data structure. 3608 * @mbox: pointer to the driver internal queue element for mailbox command. 3609 * 3610 * This routine prepares and issues an Accept (ACC) response IOCB 3611 * command. It uses the @flag to properly set up the IOCB field for the 3612 * specific ACC response command to be issued and invokes the 3613 * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a 3614 * @mbox pointer is passed in, it will be put into the context_un.mbox 3615 * field of the IOCB for the completion callback function to issue the 3616 * mailbox command to the HBA later when callback is invoked. 3617 * 3618 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 3619 * will be incremented by 1 for holding the ndlp and the reference to ndlp 3620 * will be stored into the context1 field of the IOCB for the completion 3621 * callback function to the corresponding response ELS IOCB command. 3622 * 3623 * Return code 3624 * 0 - Successfully issued acc response 3625 * 1 - Failed to issue acc response 3626 **/ 3627 int 3628 lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, 3629 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 3630 LPFC_MBOXQ_t *mbox) 3631 { 3632 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3633 struct lpfc_hba *phba = vport->phba; 3634 IOCB_t *icmd; 3635 IOCB_t *oldcmd; 3636 struct lpfc_iocbq *elsiocb; 3637 struct lpfc_sli *psli; 3638 uint8_t *pcmd; 3639 uint16_t cmdsize; 3640 int rc; 3641 ELS_PKT *els_pkt_ptr; 3642 3643 psli = &phba->sli; 3644 oldcmd = &oldiocb->iocb; 3645 3646 switch (flag) { 3647 case ELS_CMD_ACC: 3648 cmdsize = sizeof(uint32_t); 3649 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 3650 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 3651 if (!elsiocb) { 3652 spin_lock_irq(shost->host_lock); 3653 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 3654 spin_unlock_irq(shost->host_lock); 3655 return 1; 3656 } 3657 3658 icmd = &elsiocb->iocb; 3659 icmd->ulpContext = oldcmd->ulpContext; /* Xri */ 3660 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3661 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 3662 pcmd += sizeof(uint32_t); 3663 3664 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 3665 "Issue ACC: did:x%x flg:x%x", 3666 ndlp->nlp_DID, ndlp->nlp_flag, 0); 3667 break; 3668 case ELS_CMD_PLOGI: 3669 cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t)); 3670 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 3671 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 3672 if (!elsiocb) 3673 return 1; 3674 3675 icmd = &elsiocb->iocb; 3676 icmd->ulpContext = oldcmd->ulpContext; /* Xri */ 3677 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3678 3679 if (mbox) 3680 elsiocb->context_un.mbox = mbox; 3681 3682 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 3683 pcmd += sizeof(uint32_t); 3684 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); 3685 3686 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 3687 "Issue ACC PLOGI: did:x%x flg:x%x", 3688 ndlp->nlp_DID, ndlp->nlp_flag, 0); 3689 break; 3690 case ELS_CMD_PRLO: 3691 cmdsize = sizeof(uint32_t) + sizeof(PRLO); 3692 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 3693 ndlp, ndlp->nlp_DID, ELS_CMD_PRLO); 3694 if (!elsiocb) 3695 return 1; 3696 3697 icmd = &elsiocb->iocb; 3698 icmd->ulpContext = oldcmd->ulpContext; /* Xri */ 3699 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3700 3701 memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt, 3702 sizeof(uint32_t) + sizeof(PRLO)); 3703 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC; 3704 els_pkt_ptr = (ELS_PKT *) pcmd; 3705 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED; 3706 3707 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 3708 "Issue ACC PRLO: did:x%x flg:x%x", 3709 ndlp->nlp_DID, ndlp->nlp_flag, 0); 3710 break; 3711 default: 3712 return 1; 3713 } 3714 /* Xmit ELS ACC response tag <ulpIoTag> */ 3715 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3716 "0128 Xmit ELS ACC response tag x%x, XRI: x%x, " 3717 "DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x\n", 3718 elsiocb->iotag, elsiocb->iocb.ulpContext, 3719 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 3720 ndlp->nlp_rpi); 3721 if (ndlp->nlp_flag & NLP_LOGO_ACC) { 3722 spin_lock_irq(shost->host_lock); 3723 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 3724 spin_unlock_irq(shost->host_lock); 3725 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc; 3726 } else { 3727 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 3728 } 3729 3730 phba->fc_stat.elsXmitACC++; 3731 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3732 if (rc == IOCB_ERROR) { 3733 lpfc_els_free_iocb(phba, elsiocb); 3734 return 1; 3735 } 3736 return 0; 3737 } 3738 3739 /** 3740 * lpfc_els_rsp_reject - Propare and issue a rjt response iocb command 3741 * @vport: pointer to a virtual N_Port data structure. 3742 * @rejectError: 3743 * @oldiocb: pointer to the original lpfc command iocb data structure. 3744 * @ndlp: pointer to a node-list data structure. 3745 * @mbox: pointer to the driver internal queue element for mailbox command. 3746 * 3747 * This routine prepares and issue an Reject (RJT) response IOCB 3748 * command. If a @mbox pointer is passed in, it will be put into the 3749 * context_un.mbox field of the IOCB for the completion callback function 3750 * to issue to the HBA later. 3751 * 3752 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 3753 * will be incremented by 1 for holding the ndlp and the reference to ndlp 3754 * will be stored into the context1 field of the IOCB for the completion 3755 * callback function to the reject response ELS IOCB command. 3756 * 3757 * Return code 3758 * 0 - Successfully issued reject response 3759 * 1 - Failed to issue reject response 3760 **/ 3761 int 3762 lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, 3763 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 3764 LPFC_MBOXQ_t *mbox) 3765 { 3766 struct lpfc_hba *phba = vport->phba; 3767 IOCB_t *icmd; 3768 IOCB_t *oldcmd; 3769 struct lpfc_iocbq *elsiocb; 3770 struct lpfc_sli *psli; 3771 uint8_t *pcmd; 3772 uint16_t cmdsize; 3773 int rc; 3774 3775 psli = &phba->sli; 3776 cmdsize = 2 * sizeof(uint32_t); 3777 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 3778 ndlp->nlp_DID, ELS_CMD_LS_RJT); 3779 if (!elsiocb) 3780 return 1; 3781 3782 icmd = &elsiocb->iocb; 3783 oldcmd = &oldiocb->iocb; 3784 icmd->ulpContext = oldcmd->ulpContext; /* Xri */ 3785 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3786 3787 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; 3788 pcmd += sizeof(uint32_t); 3789 *((uint32_t *) (pcmd)) = rejectError; 3790 3791 if (mbox) 3792 elsiocb->context_un.mbox = mbox; 3793 3794 /* Xmit ELS RJT <err> response tag <ulpIoTag> */ 3795 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3796 "0129 Xmit ELS RJT x%x response tag x%x " 3797 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " 3798 "rpi x%x\n", 3799 rejectError, elsiocb->iotag, 3800 elsiocb->iocb.ulpContext, ndlp->nlp_DID, 3801 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); 3802 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 3803 "Issue LS_RJT: did:x%x flg:x%x err:x%x", 3804 ndlp->nlp_DID, ndlp->nlp_flag, rejectError); 3805 3806 phba->fc_stat.elsXmitLSRJT++; 3807 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 3808 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3809 3810 if (rc == IOCB_ERROR) { 3811 lpfc_els_free_iocb(phba, elsiocb); 3812 return 1; 3813 } 3814 return 0; 3815 } 3816 3817 /** 3818 * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd 3819 * @vport: pointer to a virtual N_Port data structure. 3820 * @oldiocb: pointer to the original lpfc command iocb data structure. 3821 * @ndlp: pointer to a node-list data structure. 3822 * 3823 * This routine prepares and issues an Accept (ACC) response to Address 3824 * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB 3825 * and invokes the lpfc_sli_issue_iocb() routine to send out the command. 3826 * 3827 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 3828 * will be incremented by 1 for holding the ndlp and the reference to ndlp 3829 * will be stored into the context1 field of the IOCB for the completion 3830 * callback function to the ADISC Accept response ELS IOCB command. 3831 * 3832 * Return code 3833 * 0 - Successfully issued acc adisc response 3834 * 1 - Failed to issue adisc acc response 3835 **/ 3836 int 3837 lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 3838 struct lpfc_nodelist *ndlp) 3839 { 3840 struct lpfc_hba *phba = vport->phba; 3841 ADISC *ap; 3842 IOCB_t *icmd, *oldcmd; 3843 struct lpfc_iocbq *elsiocb; 3844 uint8_t *pcmd; 3845 uint16_t cmdsize; 3846 int rc; 3847 3848 cmdsize = sizeof(uint32_t) + sizeof(ADISC); 3849 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 3850 ndlp->nlp_DID, ELS_CMD_ACC); 3851 if (!elsiocb) 3852 return 1; 3853 3854 icmd = &elsiocb->iocb; 3855 oldcmd = &oldiocb->iocb; 3856 icmd->ulpContext = oldcmd->ulpContext; /* Xri */ 3857 3858 /* Xmit ADISC ACC response tag <ulpIoTag> */ 3859 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3860 "0130 Xmit ADISC ACC response iotag x%x xri: " 3861 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n", 3862 elsiocb->iotag, elsiocb->iocb.ulpContext, 3863 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 3864 ndlp->nlp_rpi); 3865 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3866 3867 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 3868 pcmd += sizeof(uint32_t); 3869 3870 ap = (ADISC *) (pcmd); 3871 ap->hardAL_PA = phba->fc_pref_ALPA; 3872 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 3873 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 3874 ap->DID = be32_to_cpu(vport->fc_myDID); 3875 3876 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 3877 "Issue ACC ADISC: did:x%x flg:x%x", 3878 ndlp->nlp_DID, ndlp->nlp_flag, 0); 3879 3880 phba->fc_stat.elsXmitACC++; 3881 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 3882 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3883 if (rc == IOCB_ERROR) { 3884 lpfc_els_free_iocb(phba, elsiocb); 3885 return 1; 3886 } 3887 return 0; 3888 } 3889 3890 /** 3891 * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd 3892 * @vport: pointer to a virtual N_Port data structure. 3893 * @oldiocb: pointer to the original lpfc command iocb data structure. 3894 * @ndlp: pointer to a node-list data structure. 3895 * 3896 * This routine prepares and issues an Accept (ACC) response to Process 3897 * Login (PRLI) ELS command. It simply prepares the payload of the IOCB 3898 * and invokes the lpfc_sli_issue_iocb() routine to send out the command. 3899 * 3900 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 3901 * will be incremented by 1 for holding the ndlp and the reference to ndlp 3902 * will be stored into the context1 field of the IOCB for the completion 3903 * callback function to the PRLI Accept response ELS IOCB command. 3904 * 3905 * Return code 3906 * 0 - Successfully issued acc prli response 3907 * 1 - Failed to issue acc prli response 3908 **/ 3909 int 3910 lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 3911 struct lpfc_nodelist *ndlp) 3912 { 3913 struct lpfc_hba *phba = vport->phba; 3914 PRLI *npr; 3915 lpfc_vpd_t *vpd; 3916 IOCB_t *icmd; 3917 IOCB_t *oldcmd; 3918 struct lpfc_iocbq *elsiocb; 3919 struct lpfc_sli *psli; 3920 uint8_t *pcmd; 3921 uint16_t cmdsize; 3922 int rc; 3923 3924 psli = &phba->sli; 3925 3926 cmdsize = sizeof(uint32_t) + sizeof(PRLI); 3927 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 3928 ndlp->nlp_DID, (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK))); 3929 if (!elsiocb) 3930 return 1; 3931 3932 icmd = &elsiocb->iocb; 3933 oldcmd = &oldiocb->iocb; 3934 icmd->ulpContext = oldcmd->ulpContext; /* Xri */ 3935 /* Xmit PRLI ACC response tag <ulpIoTag> */ 3936 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3937 "0131 Xmit PRLI ACC response tag x%x xri x%x, " 3938 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 3939 elsiocb->iotag, elsiocb->iocb.ulpContext, 3940 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 3941 ndlp->nlp_rpi); 3942 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3943 3944 *((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)); 3945 pcmd += sizeof(uint32_t); 3946 3947 /* For PRLI, remainder of payload is PRLI parameter page */ 3948 memset(pcmd, 0, sizeof(PRLI)); 3949 3950 npr = (PRLI *) pcmd; 3951 vpd = &phba->vpd; 3952 /* 3953 * If the remote port is a target and our firmware version is 3.20 or 3954 * later, set the following bits for FC-TAPE support. 3955 */ 3956 if ((ndlp->nlp_type & NLP_FCP_TARGET) && 3957 (vpd->rev.feaLevelHigh >= 0x02)) { 3958 npr->ConfmComplAllowed = 1; 3959 npr->Retry = 1; 3960 npr->TaskRetryIdReq = 1; 3961 } 3962 3963 npr->acceptRspCode = PRLI_REQ_EXECUTED; 3964 npr->estabImagePair = 1; 3965 npr->readXferRdyDis = 1; 3966 npr->ConfmComplAllowed = 1; 3967 3968 npr->prliType = PRLI_FCP_TYPE; 3969 npr->initiatorFunc = 1; 3970 3971 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 3972 "Issue ACC PRLI: did:x%x flg:x%x", 3973 ndlp->nlp_DID, ndlp->nlp_flag, 0); 3974 3975 phba->fc_stat.elsXmitACC++; 3976 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 3977 3978 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3979 if (rc == IOCB_ERROR) { 3980 lpfc_els_free_iocb(phba, elsiocb); 3981 return 1; 3982 } 3983 return 0; 3984 } 3985 3986 /** 3987 * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command 3988 * @vport: pointer to a virtual N_Port data structure. 3989 * @format: rnid command format. 3990 * @oldiocb: pointer to the original lpfc command iocb data structure. 3991 * @ndlp: pointer to a node-list data structure. 3992 * 3993 * This routine issues a Request Node Identification Data (RNID) Accept 3994 * (ACC) response. It constructs the RNID ACC response command according to 3995 * the proper @format and then calls the lpfc_sli_issue_iocb() routine to 3996 * issue the response. Note that this command does not need to hold the ndlp 3997 * reference count for the callback. So, the ndlp reference count taken by 3998 * the lpfc_prep_els_iocb() routine is put back and the context1 field of 3999 * IOCB is set to NULL to indicate to the lpfc_els_free_iocb() routine that 4000 * there is no ndlp reference available. 4001 * 4002 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 4003 * will be incremented by 1 for holding the ndlp and the reference to ndlp 4004 * will be stored into the context1 field of the IOCB for the completion 4005 * callback function. However, for the RNID Accept Response ELS command, 4006 * this is undone later by this routine after the IOCB is allocated. 4007 * 4008 * Return code 4009 * 0 - Successfully issued acc rnid response 4010 * 1 - Failed to issue acc rnid response 4011 **/ 4012 static int 4013 lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, 4014 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 4015 { 4016 struct lpfc_hba *phba = vport->phba; 4017 RNID *rn; 4018 IOCB_t *icmd, *oldcmd; 4019 struct lpfc_iocbq *elsiocb; 4020 struct lpfc_sli *psli; 4021 uint8_t *pcmd; 4022 uint16_t cmdsize; 4023 int rc; 4024 4025 psli = &phba->sli; 4026 cmdsize = sizeof(uint32_t) + sizeof(uint32_t) 4027 + (2 * sizeof(struct lpfc_name)); 4028 if (format) 4029 cmdsize += sizeof(RNID_TOP_DISC); 4030 4031 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 4032 ndlp->nlp_DID, ELS_CMD_ACC); 4033 if (!elsiocb) 4034 return 1; 4035 4036 icmd = &elsiocb->iocb; 4037 oldcmd = &oldiocb->iocb; 4038 icmd->ulpContext = oldcmd->ulpContext; /* Xri */ 4039 /* Xmit RNID ACC response tag <ulpIoTag> */ 4040 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4041 "0132 Xmit RNID ACC response tag x%x xri x%x\n", 4042 elsiocb->iotag, elsiocb->iocb.ulpContext); 4043 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 4044 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 4045 pcmd += sizeof(uint32_t); 4046 4047 memset(pcmd, 0, sizeof(RNID)); 4048 rn = (RNID *) (pcmd); 4049 rn->Format = format; 4050 rn->CommonLen = (2 * sizeof(struct lpfc_name)); 4051 memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 4052 memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 4053 switch (format) { 4054 case 0: 4055 rn->SpecificLen = 0; 4056 break; 4057 case RNID_TOPOLOGY_DISC: 4058 rn->SpecificLen = sizeof(RNID_TOP_DISC); 4059 memcpy(&rn->un.topologyDisc.portName, 4060 &vport->fc_portname, sizeof(struct lpfc_name)); 4061 rn->un.topologyDisc.unitType = RNID_HBA; 4062 rn->un.topologyDisc.physPort = 0; 4063 rn->un.topologyDisc.attachedNodes = 0; 4064 break; 4065 default: 4066 rn->CommonLen = 0; 4067 rn->SpecificLen = 0; 4068 break; 4069 } 4070 4071 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4072 "Issue ACC RNID: did:x%x flg:x%x", 4073 ndlp->nlp_DID, ndlp->nlp_flag, 0); 4074 4075 phba->fc_stat.elsXmitACC++; 4076 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 4077 lpfc_nlp_put(ndlp); 4078 elsiocb->context1 = NULL; /* Don't need ndlp for cmpl, 4079 * it could be freed */ 4080 4081 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 4082 if (rc == IOCB_ERROR) { 4083 lpfc_els_free_iocb(phba, elsiocb); 4084 return 1; 4085 } 4086 return 0; 4087 } 4088 4089 /** 4090 * lpfc_els_clear_rrq - Clear the rq that this rrq describes. 4091 * @vport: pointer to a virtual N_Port data structure. 4092 * @iocb: pointer to the lpfc command iocb data structure. 4093 * @ndlp: pointer to a node-list data structure. 4094 * 4095 * Return 4096 **/ 4097 static void 4098 lpfc_els_clear_rrq(struct lpfc_vport *vport, 4099 struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp) 4100 { 4101 struct lpfc_hba *phba = vport->phba; 4102 uint8_t *pcmd; 4103 struct RRQ *rrq; 4104 uint16_t rxid; 4105 uint16_t xri; 4106 struct lpfc_node_rrq *prrq; 4107 4108 4109 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) iocb->context2)->virt); 4110 pcmd += sizeof(uint32_t); 4111 rrq = (struct RRQ *)pcmd; 4112 rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg); 4113 rxid = bf_get(rrq_rxid, rrq); 4114 4115 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4116 "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x" 4117 " x%x x%x\n", 4118 be32_to_cpu(bf_get(rrq_did, rrq)), 4119 bf_get(rrq_oxid, rrq), 4120 rxid, 4121 iocb->iotag, iocb->iocb.ulpContext); 4122 4123 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4124 "Clear RRQ: did:x%x flg:x%x exchg:x%.08x", 4125 ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg); 4126 if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq))) 4127 xri = bf_get(rrq_oxid, rrq); 4128 else 4129 xri = rxid; 4130 prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID); 4131 if (prrq) 4132 lpfc_clr_rrq_active(phba, xri, prrq); 4133 return; 4134 } 4135 4136 /** 4137 * lpfc_els_rsp_echo_acc - Issue echo acc response 4138 * @vport: pointer to a virtual N_Port data structure. 4139 * @data: pointer to echo data to return in the accept. 4140 * @oldiocb: pointer to the original lpfc command iocb data structure. 4141 * @ndlp: pointer to a node-list data structure. 4142 * 4143 * Return code 4144 * 0 - Successfully issued acc echo response 4145 * 1 - Failed to issue acc echo response 4146 **/ 4147 static int 4148 lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data, 4149 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 4150 { 4151 struct lpfc_hba *phba = vport->phba; 4152 struct lpfc_iocbq *elsiocb; 4153 struct lpfc_sli *psli; 4154 uint8_t *pcmd; 4155 uint16_t cmdsize; 4156 int rc; 4157 4158 psli = &phba->sli; 4159 cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len; 4160 4161 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 4162 ndlp->nlp_DID, ELS_CMD_ACC); 4163 if (!elsiocb) 4164 return 1; 4165 4166 elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext; /* Xri */ 4167 /* Xmit ECHO ACC response tag <ulpIoTag> */ 4168 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4169 "2876 Xmit ECHO ACC response tag x%x xri x%x\n", 4170 elsiocb->iotag, elsiocb->iocb.ulpContext); 4171 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 4172 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 4173 pcmd += sizeof(uint32_t); 4174 memcpy(pcmd, data, cmdsize - sizeof(uint32_t)); 4175 4176 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4177 "Issue ACC ECHO: did:x%x flg:x%x", 4178 ndlp->nlp_DID, ndlp->nlp_flag, 0); 4179 4180 phba->fc_stat.elsXmitACC++; 4181 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 4182 lpfc_nlp_put(ndlp); 4183 elsiocb->context1 = NULL; /* Don't need ndlp for cmpl, 4184 * it could be freed */ 4185 4186 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 4187 if (rc == IOCB_ERROR) { 4188 lpfc_els_free_iocb(phba, elsiocb); 4189 return 1; 4190 } 4191 return 0; 4192 } 4193 4194 /** 4195 * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport 4196 * @vport: pointer to a host virtual N_Port data structure. 4197 * 4198 * This routine issues Address Discover (ADISC) ELS commands to those 4199 * N_Ports which are in node port recovery state and ADISC has not been issued 4200 * for the @vport. Each time an ELS ADISC IOCB is issued by invoking the 4201 * lpfc_issue_els_adisc() routine, the per @vport number of discover count 4202 * (num_disc_nodes) shall be incremented. If the num_disc_nodes reaches a 4203 * pre-configured threshold (cfg_discovery_threads), the @vport fc_flag will 4204 * be marked with FC_NLP_MORE bit and the process of issuing remaining ADISC 4205 * IOCBs quit for later pick up. On the other hand, after walking through 4206 * all the ndlps with the @vport and there is none ADISC IOCB issued, the 4207 * @vport fc_flag shall be cleared with FC_NLP_MORE bit indicating there is 4208 * no more ADISC need to be sent. 4209 * 4210 * Return code 4211 * The number of N_Ports with adisc issued. 4212 **/ 4213 int 4214 lpfc_els_disc_adisc(struct lpfc_vport *vport) 4215 { 4216 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4217 struct lpfc_nodelist *ndlp, *next_ndlp; 4218 int sentadisc = 0; 4219 4220 /* go thru NPR nodes and issue any remaining ELS ADISCs */ 4221 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 4222 if (!NLP_CHK_NODE_ACT(ndlp)) 4223 continue; 4224 if (ndlp->nlp_state == NLP_STE_NPR_NODE && 4225 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && 4226 (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) { 4227 spin_lock_irq(shost->host_lock); 4228 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 4229 spin_unlock_irq(shost->host_lock); 4230 ndlp->nlp_prev_state = ndlp->nlp_state; 4231 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 4232 lpfc_issue_els_adisc(vport, ndlp, 0); 4233 sentadisc++; 4234 vport->num_disc_nodes++; 4235 if (vport->num_disc_nodes >= 4236 vport->cfg_discovery_threads) { 4237 spin_lock_irq(shost->host_lock); 4238 vport->fc_flag |= FC_NLP_MORE; 4239 spin_unlock_irq(shost->host_lock); 4240 break; 4241 } 4242 } 4243 } 4244 if (sentadisc == 0) { 4245 spin_lock_irq(shost->host_lock); 4246 vport->fc_flag &= ~FC_NLP_MORE; 4247 spin_unlock_irq(shost->host_lock); 4248 } 4249 return sentadisc; 4250 } 4251 4252 /** 4253 * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc 4254 * @vport: pointer to a host virtual N_Port data structure. 4255 * 4256 * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports 4257 * which are in node port recovery state, with a @vport. Each time an ELS 4258 * ADISC PLOGI IOCB is issued by invoking the lpfc_issue_els_plogi() routine, 4259 * the per @vport number of discover count (num_disc_nodes) shall be 4260 * incremented. If the num_disc_nodes reaches a pre-configured threshold 4261 * (cfg_discovery_threads), the @vport fc_flag will be marked with FC_NLP_MORE 4262 * bit set and quit the process of issuing remaining ADISC PLOGIN IOCBs for 4263 * later pick up. On the other hand, after walking through all the ndlps with 4264 * the @vport and there is none ADISC PLOGI IOCB issued, the @vport fc_flag 4265 * shall be cleared with the FC_NLP_MORE bit indicating there is no more ADISC 4266 * PLOGI need to be sent. 4267 * 4268 * Return code 4269 * The number of N_Ports with plogi issued. 4270 **/ 4271 int 4272 lpfc_els_disc_plogi(struct lpfc_vport *vport) 4273 { 4274 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4275 struct lpfc_nodelist *ndlp, *next_ndlp; 4276 int sentplogi = 0; 4277 4278 /* go thru NPR nodes and issue any remaining ELS PLOGIs */ 4279 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 4280 if (!NLP_CHK_NODE_ACT(ndlp)) 4281 continue; 4282 if (ndlp->nlp_state == NLP_STE_NPR_NODE && 4283 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && 4284 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 && 4285 (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) { 4286 ndlp->nlp_prev_state = ndlp->nlp_state; 4287 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 4288 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 4289 sentplogi++; 4290 vport->num_disc_nodes++; 4291 if (vport->num_disc_nodes >= 4292 vport->cfg_discovery_threads) { 4293 spin_lock_irq(shost->host_lock); 4294 vport->fc_flag |= FC_NLP_MORE; 4295 spin_unlock_irq(shost->host_lock); 4296 break; 4297 } 4298 } 4299 } 4300 if (sentplogi) { 4301 lpfc_set_disctmo(vport); 4302 } 4303 else { 4304 spin_lock_irq(shost->host_lock); 4305 vport->fc_flag &= ~FC_NLP_MORE; 4306 spin_unlock_irq(shost->host_lock); 4307 } 4308 return sentplogi; 4309 } 4310 4311 /** 4312 * lpfc_els_flush_rscn - Clean up any rscn activities with a vport 4313 * @vport: pointer to a host virtual N_Port data structure. 4314 * 4315 * This routine cleans up any Registration State Change Notification 4316 * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the 4317 * @vport together with the host_lock is used to prevent multiple thread 4318 * trying to access the RSCN array on a same @vport at the same time. 4319 **/ 4320 void 4321 lpfc_els_flush_rscn(struct lpfc_vport *vport) 4322 { 4323 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4324 struct lpfc_hba *phba = vport->phba; 4325 int i; 4326 4327 spin_lock_irq(shost->host_lock); 4328 if (vport->fc_rscn_flush) { 4329 /* Another thread is walking fc_rscn_id_list on this vport */ 4330 spin_unlock_irq(shost->host_lock); 4331 return; 4332 } 4333 /* Indicate we are walking lpfc_els_flush_rscn on this vport */ 4334 vport->fc_rscn_flush = 1; 4335 spin_unlock_irq(shost->host_lock); 4336 4337 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 4338 lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]); 4339 vport->fc_rscn_id_list[i] = NULL; 4340 } 4341 spin_lock_irq(shost->host_lock); 4342 vport->fc_rscn_id_cnt = 0; 4343 vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY); 4344 spin_unlock_irq(shost->host_lock); 4345 lpfc_can_disctmo(vport); 4346 /* Indicate we are done walking this fc_rscn_id_list */ 4347 vport->fc_rscn_flush = 0; 4348 } 4349 4350 /** 4351 * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did 4352 * @vport: pointer to a host virtual N_Port data structure. 4353 * @did: remote destination port identifier. 4354 * 4355 * This routine checks whether there is any pending Registration State 4356 * Configuration Notification (RSCN) to a @did on @vport. 4357 * 4358 * Return code 4359 * None zero - The @did matched with a pending rscn 4360 * 0 - not able to match @did with a pending rscn 4361 **/ 4362 int 4363 lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did) 4364 { 4365 D_ID ns_did; 4366 D_ID rscn_did; 4367 uint32_t *lp; 4368 uint32_t payload_len, i; 4369 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4370 4371 ns_did.un.word = did; 4372 4373 /* Never match fabric nodes for RSCNs */ 4374 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 4375 return 0; 4376 4377 /* If we are doing a FULL RSCN rediscovery, match everything */ 4378 if (vport->fc_flag & FC_RSCN_DISCOVERY) 4379 return did; 4380 4381 spin_lock_irq(shost->host_lock); 4382 if (vport->fc_rscn_flush) { 4383 /* Another thread is walking fc_rscn_id_list on this vport */ 4384 spin_unlock_irq(shost->host_lock); 4385 return 0; 4386 } 4387 /* Indicate we are walking fc_rscn_id_list on this vport */ 4388 vport->fc_rscn_flush = 1; 4389 spin_unlock_irq(shost->host_lock); 4390 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 4391 lp = vport->fc_rscn_id_list[i]->virt; 4392 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); 4393 payload_len -= sizeof(uint32_t); /* take off word 0 */ 4394 while (payload_len) { 4395 rscn_did.un.word = be32_to_cpu(*lp++); 4396 payload_len -= sizeof(uint32_t); 4397 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) { 4398 case RSCN_ADDRESS_FORMAT_PORT: 4399 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 4400 && (ns_did.un.b.area == rscn_did.un.b.area) 4401 && (ns_did.un.b.id == rscn_did.un.b.id)) 4402 goto return_did_out; 4403 break; 4404 case RSCN_ADDRESS_FORMAT_AREA: 4405 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 4406 && (ns_did.un.b.area == rscn_did.un.b.area)) 4407 goto return_did_out; 4408 break; 4409 case RSCN_ADDRESS_FORMAT_DOMAIN: 4410 if (ns_did.un.b.domain == rscn_did.un.b.domain) 4411 goto return_did_out; 4412 break; 4413 case RSCN_ADDRESS_FORMAT_FABRIC: 4414 goto return_did_out; 4415 } 4416 } 4417 } 4418 /* Indicate we are done with walking fc_rscn_id_list on this vport */ 4419 vport->fc_rscn_flush = 0; 4420 return 0; 4421 return_did_out: 4422 /* Indicate we are done with walking fc_rscn_id_list on this vport */ 4423 vport->fc_rscn_flush = 0; 4424 return did; 4425 } 4426 4427 /** 4428 * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn 4429 * @vport: pointer to a host virtual N_Port data structure. 4430 * 4431 * This routine sends recovery (NLP_EVT_DEVICE_RECOVERY) event to the 4432 * state machine for a @vport's nodes that are with pending RSCN (Registration 4433 * State Change Notification). 4434 * 4435 * Return code 4436 * 0 - Successful (currently alway return 0) 4437 **/ 4438 static int 4439 lpfc_rscn_recovery_check(struct lpfc_vport *vport) 4440 { 4441 struct lpfc_nodelist *ndlp = NULL; 4442 4443 /* Move all affected nodes by pending RSCNs to NPR state. */ 4444 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 4445 if (!NLP_CHK_NODE_ACT(ndlp) || 4446 (ndlp->nlp_state == NLP_STE_UNUSED_NODE) || 4447 !lpfc_rscn_payload_check(vport, ndlp->nlp_DID)) 4448 continue; 4449 lpfc_disc_state_machine(vport, ndlp, NULL, 4450 NLP_EVT_DEVICE_RECOVERY); 4451 lpfc_cancel_retry_delay_tmo(vport, ndlp); 4452 } 4453 return 0; 4454 } 4455 4456 /** 4457 * lpfc_send_rscn_event - Send an RSCN event to management application 4458 * @vport: pointer to a host virtual N_Port data structure. 4459 * @cmdiocb: pointer to lpfc command iocb data structure. 4460 * 4461 * lpfc_send_rscn_event sends an RSCN netlink event to management 4462 * applications. 4463 */ 4464 static void 4465 lpfc_send_rscn_event(struct lpfc_vport *vport, 4466 struct lpfc_iocbq *cmdiocb) 4467 { 4468 struct lpfc_dmabuf *pcmd; 4469 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4470 uint32_t *payload_ptr; 4471 uint32_t payload_len; 4472 struct lpfc_rscn_event_header *rscn_event_data; 4473 4474 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 4475 payload_ptr = (uint32_t *) pcmd->virt; 4476 payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK); 4477 4478 rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) + 4479 payload_len, GFP_KERNEL); 4480 if (!rscn_event_data) { 4481 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 4482 "0147 Failed to allocate memory for RSCN event\n"); 4483 return; 4484 } 4485 rscn_event_data->event_type = FC_REG_RSCN_EVENT; 4486 rscn_event_data->payload_length = payload_len; 4487 memcpy(rscn_event_data->rscn_payload, payload_ptr, 4488 payload_len); 4489 4490 fc_host_post_vendor_event(shost, 4491 fc_get_event_number(), 4492 sizeof(struct lpfc_els_event_header) + payload_len, 4493 (char *)rscn_event_data, 4494 LPFC_NL_VENDOR_ID); 4495 4496 kfree(rscn_event_data); 4497 } 4498 4499 /** 4500 * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb 4501 * @vport: pointer to a host virtual N_Port data structure. 4502 * @cmdiocb: pointer to lpfc command iocb data structure. 4503 * @ndlp: pointer to a node-list data structure. 4504 * 4505 * This routine processes an unsolicited RSCN (Registration State Change 4506 * Notification) IOCB. First, the payload of the unsolicited RSCN is walked 4507 * to invoke fc_host_post_event() routine to the FC transport layer. If the 4508 * discover state machine is about to begin discovery, it just accepts the 4509 * RSCN and the discovery process will satisfy the RSCN. If this RSCN only 4510 * contains N_Port IDs for other vports on this HBA, it just accepts the 4511 * RSCN and ignore processing it. If the state machine is in the recovery 4512 * state, the fc_rscn_id_list of this @vport is walked and the 4513 * lpfc_rscn_recovery_check() routine is invoked to send recovery event for 4514 * all nodes that match RSCN payload. Otherwise, the lpfc_els_handle_rscn() 4515 * routine is invoked to handle the RSCN event. 4516 * 4517 * Return code 4518 * 0 - Just sent the acc response 4519 * 1 - Sent the acc response and waited for name server completion 4520 **/ 4521 static int 4522 lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 4523 struct lpfc_nodelist *ndlp) 4524 { 4525 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4526 struct lpfc_hba *phba = vport->phba; 4527 struct lpfc_dmabuf *pcmd; 4528 uint32_t *lp, *datap; 4529 IOCB_t *icmd; 4530 uint32_t payload_len, length, nportid, *cmd; 4531 int rscn_cnt; 4532 int rscn_id = 0, hba_id = 0; 4533 int i; 4534 4535 icmd = &cmdiocb->iocb; 4536 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 4537 lp = (uint32_t *) pcmd->virt; 4538 4539 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); 4540 payload_len -= sizeof(uint32_t); /* take off word 0 */ 4541 /* RSCN received */ 4542 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 4543 "0214 RSCN received Data: x%x x%x x%x x%x\n", 4544 vport->fc_flag, payload_len, *lp, 4545 vport->fc_rscn_id_cnt); 4546 4547 /* Send an RSCN event to the management application */ 4548 lpfc_send_rscn_event(vport, cmdiocb); 4549 4550 for (i = 0; i < payload_len/sizeof(uint32_t); i++) 4551 fc_host_post_event(shost, fc_get_event_number(), 4552 FCH_EVT_RSCN, lp[i]); 4553 4554 /* If we are about to begin discovery, just ACC the RSCN. 4555 * Discovery processing will satisfy it. 4556 */ 4557 if (vport->port_state <= LPFC_NS_QRY) { 4558 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 4559 "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x", 4560 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 4561 4562 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 4563 return 0; 4564 } 4565 4566 /* If this RSCN just contains NPortIDs for other vports on this HBA, 4567 * just ACC and ignore it. 4568 */ 4569 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 4570 !(vport->cfg_peer_port_login)) { 4571 i = payload_len; 4572 datap = lp; 4573 while (i > 0) { 4574 nportid = *datap++; 4575 nportid = ((be32_to_cpu(nportid)) & Mask_DID); 4576 i -= sizeof(uint32_t); 4577 rscn_id++; 4578 if (lpfc_find_vport_by_did(phba, nportid)) 4579 hba_id++; 4580 } 4581 if (rscn_id == hba_id) { 4582 /* ALL NPortIDs in RSCN are on HBA */ 4583 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 4584 "0219 Ignore RSCN " 4585 "Data: x%x x%x x%x x%x\n", 4586 vport->fc_flag, payload_len, 4587 *lp, vport->fc_rscn_id_cnt); 4588 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 4589 "RCV RSCN vport: did:x%x/ste:x%x flg:x%x", 4590 ndlp->nlp_DID, vport->port_state, 4591 ndlp->nlp_flag); 4592 4593 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, 4594 ndlp, NULL); 4595 return 0; 4596 } 4597 } 4598 4599 spin_lock_irq(shost->host_lock); 4600 if (vport->fc_rscn_flush) { 4601 /* Another thread is walking fc_rscn_id_list on this vport */ 4602 vport->fc_flag |= FC_RSCN_DISCOVERY; 4603 spin_unlock_irq(shost->host_lock); 4604 /* Send back ACC */ 4605 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 4606 return 0; 4607 } 4608 /* Indicate we are walking fc_rscn_id_list on this vport */ 4609 vport->fc_rscn_flush = 1; 4610 spin_unlock_irq(shost->host_lock); 4611 /* Get the array count after successfully have the token */ 4612 rscn_cnt = vport->fc_rscn_id_cnt; 4613 /* If we are already processing an RSCN, save the received 4614 * RSCN payload buffer, cmdiocb->context2 to process later. 4615 */ 4616 if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) { 4617 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 4618 "RCV RSCN defer: did:x%x/ste:x%x flg:x%x", 4619 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 4620 4621 spin_lock_irq(shost->host_lock); 4622 vport->fc_flag |= FC_RSCN_DEFERRED; 4623 if ((rscn_cnt < FC_MAX_HOLD_RSCN) && 4624 !(vport->fc_flag & FC_RSCN_DISCOVERY)) { 4625 vport->fc_flag |= FC_RSCN_MODE; 4626 spin_unlock_irq(shost->host_lock); 4627 if (rscn_cnt) { 4628 cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt; 4629 length = be32_to_cpu(*cmd & ~ELS_CMD_MASK); 4630 } 4631 if ((rscn_cnt) && 4632 (payload_len + length <= LPFC_BPL_SIZE)) { 4633 *cmd &= ELS_CMD_MASK; 4634 *cmd |= cpu_to_be32(payload_len + length); 4635 memcpy(((uint8_t *)cmd) + length, lp, 4636 payload_len); 4637 } else { 4638 vport->fc_rscn_id_list[rscn_cnt] = pcmd; 4639 vport->fc_rscn_id_cnt++; 4640 /* If we zero, cmdiocb->context2, the calling 4641 * routine will not try to free it. 4642 */ 4643 cmdiocb->context2 = NULL; 4644 } 4645 /* Deferred RSCN */ 4646 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 4647 "0235 Deferred RSCN " 4648 "Data: x%x x%x x%x\n", 4649 vport->fc_rscn_id_cnt, vport->fc_flag, 4650 vport->port_state); 4651 } else { 4652 vport->fc_flag |= FC_RSCN_DISCOVERY; 4653 spin_unlock_irq(shost->host_lock); 4654 /* ReDiscovery RSCN */ 4655 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 4656 "0234 ReDiscovery RSCN " 4657 "Data: x%x x%x x%x\n", 4658 vport->fc_rscn_id_cnt, vport->fc_flag, 4659 vport->port_state); 4660 } 4661 /* Indicate we are done walking fc_rscn_id_list on this vport */ 4662 vport->fc_rscn_flush = 0; 4663 /* Send back ACC */ 4664 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 4665 /* send RECOVERY event for ALL nodes that match RSCN payload */ 4666 lpfc_rscn_recovery_check(vport); 4667 spin_lock_irq(shost->host_lock); 4668 vport->fc_flag &= ~FC_RSCN_DEFERRED; 4669 spin_unlock_irq(shost->host_lock); 4670 return 0; 4671 } 4672 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 4673 "RCV RSCN: did:x%x/ste:x%x flg:x%x", 4674 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 4675 4676 spin_lock_irq(shost->host_lock); 4677 vport->fc_flag |= FC_RSCN_MODE; 4678 spin_unlock_irq(shost->host_lock); 4679 vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd; 4680 /* Indicate we are done walking fc_rscn_id_list on this vport */ 4681 vport->fc_rscn_flush = 0; 4682 /* 4683 * If we zero, cmdiocb->context2, the calling routine will 4684 * not try to free it. 4685 */ 4686 cmdiocb->context2 = NULL; 4687 lpfc_set_disctmo(vport); 4688 /* Send back ACC */ 4689 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 4690 /* send RECOVERY event for ALL nodes that match RSCN payload */ 4691 lpfc_rscn_recovery_check(vport); 4692 return lpfc_els_handle_rscn(vport); 4693 } 4694 4695 /** 4696 * lpfc_els_handle_rscn - Handle rscn for a vport 4697 * @vport: pointer to a host virtual N_Port data structure. 4698 * 4699 * This routine handles the Registration State Configuration Notification 4700 * (RSCN) for a @vport. If login to NameServer does not exist, a new ndlp shall 4701 * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise, 4702 * if the ndlp to NameServer exists, a Common Transport (CT) command to the 4703 * NameServer shall be issued. If CT command to the NameServer fails to be 4704 * issued, the lpfc_els_flush_rscn() routine shall be invoked to clean up any 4705 * RSCN activities with the @vport. 4706 * 4707 * Return code 4708 * 0 - Cleaned up rscn on the @vport 4709 * 1 - Wait for plogi to name server before proceed 4710 **/ 4711 int 4712 lpfc_els_handle_rscn(struct lpfc_vport *vport) 4713 { 4714 struct lpfc_nodelist *ndlp; 4715 struct lpfc_hba *phba = vport->phba; 4716 4717 /* Ignore RSCN if the port is being torn down. */ 4718 if (vport->load_flag & FC_UNLOADING) { 4719 lpfc_els_flush_rscn(vport); 4720 return 0; 4721 } 4722 4723 /* Start timer for RSCN processing */ 4724 lpfc_set_disctmo(vport); 4725 4726 /* RSCN processed */ 4727 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 4728 "0215 RSCN processed Data: x%x x%x x%x x%x\n", 4729 vport->fc_flag, 0, vport->fc_rscn_id_cnt, 4730 vport->port_state); 4731 4732 /* To process RSCN, first compare RSCN data with NameServer */ 4733 vport->fc_ns_retry = 0; 4734 vport->num_disc_nodes = 0; 4735 4736 ndlp = lpfc_findnode_did(vport, NameServer_DID); 4737 if (ndlp && NLP_CHK_NODE_ACT(ndlp) 4738 && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { 4739 /* Good ndlp, issue CT Request to NameServer */ 4740 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0) == 0) 4741 /* Wait for NameServer query cmpl before we can 4742 continue */ 4743 return 1; 4744 } else { 4745 /* If login to NameServer does not exist, issue one */ 4746 /* Good status, issue PLOGI to NameServer */ 4747 ndlp = lpfc_findnode_did(vport, NameServer_DID); 4748 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) 4749 /* Wait for NameServer login cmpl before we can 4750 continue */ 4751 return 1; 4752 4753 if (ndlp) { 4754 ndlp = lpfc_enable_node(vport, ndlp, 4755 NLP_STE_PLOGI_ISSUE); 4756 if (!ndlp) { 4757 lpfc_els_flush_rscn(vport); 4758 return 0; 4759 } 4760 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE; 4761 } else { 4762 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 4763 if (!ndlp) { 4764 lpfc_els_flush_rscn(vport); 4765 return 0; 4766 } 4767 lpfc_nlp_init(vport, ndlp, NameServer_DID); 4768 ndlp->nlp_prev_state = ndlp->nlp_state; 4769 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 4770 } 4771 ndlp->nlp_type |= NLP_FABRIC; 4772 lpfc_issue_els_plogi(vport, NameServer_DID, 0); 4773 /* Wait for NameServer login cmpl before we can 4774 * continue 4775 */ 4776 return 1; 4777 } 4778 4779 lpfc_els_flush_rscn(vport); 4780 return 0; 4781 } 4782 4783 /** 4784 * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb 4785 * @vport: pointer to a host virtual N_Port data structure. 4786 * @cmdiocb: pointer to lpfc command iocb data structure. 4787 * @ndlp: pointer to a node-list data structure. 4788 * 4789 * This routine processes Fabric Login (FLOGI) IOCB received as an ELS 4790 * unsolicited event. An unsolicited FLOGI can be received in a point-to- 4791 * point topology. As an unsolicited FLOGI should not be received in a loop 4792 * mode, any unsolicited FLOGI received in loop mode shall be ignored. The 4793 * lpfc_check_sparm() routine is invoked to check the parameters in the 4794 * unsolicited FLOGI. If parameters validation failed, the routine 4795 * lpfc_els_rsp_reject() shall be called with reject reason code set to 4796 * LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the 4797 * FLOGI shall be compared with the Port WWN of the @vport to determine who 4798 * will initiate PLOGI. The higher lexicographical value party shall has 4799 * higher priority (as the winning port) and will initiate PLOGI and 4800 * communicate Port_IDs (Addresses) for both nodes in PLOGI. The result 4801 * of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI 4802 * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI. 4803 * 4804 * Return code 4805 * 0 - Successfully processed the unsolicited flogi 4806 * 1 - Failed to process the unsolicited flogi 4807 **/ 4808 static int 4809 lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 4810 struct lpfc_nodelist *ndlp) 4811 { 4812 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4813 struct lpfc_hba *phba = vport->phba; 4814 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 4815 uint32_t *lp = (uint32_t *) pcmd->virt; 4816 IOCB_t *icmd = &cmdiocb->iocb; 4817 struct serv_parm *sp; 4818 LPFC_MBOXQ_t *mbox; 4819 struct ls_rjt stat; 4820 uint32_t cmd, did; 4821 int rc; 4822 4823 cmd = *lp++; 4824 sp = (struct serv_parm *) lp; 4825 4826 /* FLOGI received */ 4827 4828 lpfc_set_disctmo(vport); 4829 4830 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 4831 /* We should never receive a FLOGI in loop mode, ignore it */ 4832 did = icmd->un.elsreq64.remoteID; 4833 4834 /* An FLOGI ELS command <elsCmd> was received from DID <did> in 4835 Loop Mode */ 4836 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 4837 "0113 An FLOGI ELS command x%x was " 4838 "received from DID x%x in Loop Mode\n", 4839 cmd, did); 4840 return 1; 4841 } 4842 4843 did = Fabric_DID; 4844 4845 if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1))) { 4846 /* For a FLOGI we accept, then if our portname is greater 4847 * then the remote portname we initiate Nport login. 4848 */ 4849 4850 rc = memcmp(&vport->fc_portname, &sp->portName, 4851 sizeof(struct lpfc_name)); 4852 4853 if (!rc) { 4854 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4855 if (!mbox) 4856 return 1; 4857 4858 lpfc_linkdown(phba); 4859 lpfc_init_link(phba, mbox, 4860 phba->cfg_topology, 4861 phba->cfg_link_speed); 4862 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; 4863 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4864 mbox->vport = vport; 4865 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 4866 lpfc_set_loopback_flag(phba); 4867 if (rc == MBX_NOT_FINISHED) { 4868 mempool_free(mbox, phba->mbox_mem_pool); 4869 } 4870 return 1; 4871 } else if (rc > 0) { /* greater than */ 4872 spin_lock_irq(shost->host_lock); 4873 vport->fc_flag |= FC_PT2PT_PLOGI; 4874 spin_unlock_irq(shost->host_lock); 4875 } 4876 spin_lock_irq(shost->host_lock); 4877 vport->fc_flag |= FC_PT2PT; 4878 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 4879 spin_unlock_irq(shost->host_lock); 4880 } else { 4881 /* Reject this request because invalid parameters */ 4882 stat.un.b.lsRjtRsvd0 = 0; 4883 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 4884 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS; 4885 stat.un.b.vendorUnique = 0; 4886 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 4887 NULL); 4888 return 1; 4889 } 4890 4891 /* Send back ACC */ 4892 lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL); 4893 4894 return 0; 4895 } 4896 4897 /** 4898 * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb 4899 * @vport: pointer to a host virtual N_Port data structure. 4900 * @cmdiocb: pointer to lpfc command iocb data structure. 4901 * @ndlp: pointer to a node-list data structure. 4902 * 4903 * This routine processes Request Node Identification Data (RNID) IOCB 4904 * received as an ELS unsolicited event. Only when the RNID specified format 4905 * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data) 4906 * present, this routine will invoke the lpfc_els_rsp_rnid_acc() routine to 4907 * Accept (ACC) the RNID ELS command. All the other RNID formats are 4908 * rejected by invoking the lpfc_els_rsp_reject() routine. 4909 * 4910 * Return code 4911 * 0 - Successfully processed rnid iocb (currently always return 0) 4912 **/ 4913 static int 4914 lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 4915 struct lpfc_nodelist *ndlp) 4916 { 4917 struct lpfc_dmabuf *pcmd; 4918 uint32_t *lp; 4919 IOCB_t *icmd; 4920 RNID *rn; 4921 struct ls_rjt stat; 4922 uint32_t cmd, did; 4923 4924 icmd = &cmdiocb->iocb; 4925 did = icmd->un.elsreq64.remoteID; 4926 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 4927 lp = (uint32_t *) pcmd->virt; 4928 4929 cmd = *lp++; 4930 rn = (RNID *) lp; 4931 4932 /* RNID received */ 4933 4934 switch (rn->Format) { 4935 case 0: 4936 case RNID_TOPOLOGY_DISC: 4937 /* Send back ACC */ 4938 lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp); 4939 break; 4940 default: 4941 /* Reject this request because format not supported */ 4942 stat.un.b.lsRjtRsvd0 = 0; 4943 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 4944 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 4945 stat.un.b.vendorUnique = 0; 4946 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 4947 NULL); 4948 } 4949 return 0; 4950 } 4951 4952 /** 4953 * lpfc_els_rcv_echo - Process an unsolicited echo iocb 4954 * @vport: pointer to a host virtual N_Port data structure. 4955 * @cmdiocb: pointer to lpfc command iocb data structure. 4956 * @ndlp: pointer to a node-list data structure. 4957 * 4958 * Return code 4959 * 0 - Successfully processed echo iocb (currently always return 0) 4960 **/ 4961 static int 4962 lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 4963 struct lpfc_nodelist *ndlp) 4964 { 4965 uint8_t *pcmd; 4966 4967 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt); 4968 4969 /* skip over first word of echo command to find echo data */ 4970 pcmd += sizeof(uint32_t); 4971 4972 lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp); 4973 return 0; 4974 } 4975 4976 /** 4977 * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb 4978 * @vport: pointer to a host virtual N_Port data structure. 4979 * @cmdiocb: pointer to lpfc command iocb data structure. 4980 * @ndlp: pointer to a node-list data structure. 4981 * 4982 * This routine processes a Link Incident Report Registration(LIRR) IOCB 4983 * received as an ELS unsolicited event. Currently, this function just invokes 4984 * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally. 4985 * 4986 * Return code 4987 * 0 - Successfully processed lirr iocb (currently always return 0) 4988 **/ 4989 static int 4990 lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 4991 struct lpfc_nodelist *ndlp) 4992 { 4993 struct ls_rjt stat; 4994 4995 /* For now, unconditionally reject this command */ 4996 stat.un.b.lsRjtRsvd0 = 0; 4997 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 4998 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 4999 stat.un.b.vendorUnique = 0; 5000 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 5001 return 0; 5002 } 5003 5004 /** 5005 * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb 5006 * @vport: pointer to a host virtual N_Port data structure. 5007 * @cmdiocb: pointer to lpfc command iocb data structure. 5008 * @ndlp: pointer to a node-list data structure. 5009 * 5010 * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB 5011 * received as an ELS unsolicited event. A request to RRQ shall only 5012 * be accepted if the Originator Nx_Port N_Port_ID or the Responder 5013 * Nx_Port N_Port_ID of the target Exchange is the same as the 5014 * N_Port_ID of the Nx_Port that makes the request. If the RRQ is 5015 * not accepted, an LS_RJT with reason code "Unable to perform 5016 * command request" and reason code explanation "Invalid Originator 5017 * S_ID" shall be returned. For now, we just unconditionally accept 5018 * RRQ from the target. 5019 **/ 5020 static void 5021 lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5022 struct lpfc_nodelist *ndlp) 5023 { 5024 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 5025 if (vport->phba->sli_rev == LPFC_SLI_REV4) 5026 lpfc_els_clear_rrq(vport, cmdiocb, ndlp); 5027 } 5028 5029 /** 5030 * lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd 5031 * @phba: pointer to lpfc hba data structure. 5032 * @pmb: pointer to the driver internal queue element for mailbox command. 5033 * 5034 * This routine is the completion callback function for the MBX_READ_LNK_STAT 5035 * mailbox command. This callback function is to actually send the Accept 5036 * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It 5037 * collects the link statistics from the completion of the MBX_READ_LNK_STAT 5038 * mailbox command, constructs the RPS response with the link statistics 5039 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC 5040 * response to the RPS. 5041 * 5042 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 5043 * will be incremented by 1 for holding the ndlp and the reference to ndlp 5044 * will be stored into the context1 field of the IOCB for the completion 5045 * callback function to the RPS Accept Response ELS IOCB command. 5046 * 5047 **/ 5048 static void 5049 lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 5050 { 5051 MAILBOX_t *mb; 5052 IOCB_t *icmd; 5053 struct RLS_RSP *rls_rsp; 5054 uint8_t *pcmd; 5055 struct lpfc_iocbq *elsiocb; 5056 struct lpfc_nodelist *ndlp; 5057 uint16_t xri; 5058 uint32_t cmdsize; 5059 5060 mb = &pmb->u.mb; 5061 5062 ndlp = (struct lpfc_nodelist *) pmb->context2; 5063 xri = (uint16_t) ((unsigned long)(pmb->context1)); 5064 pmb->context1 = NULL; 5065 pmb->context2 = NULL; 5066 5067 if (mb->mbxStatus) { 5068 mempool_free(pmb, phba->mbox_mem_pool); 5069 return; 5070 } 5071 5072 cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t); 5073 mempool_free(pmb, phba->mbox_mem_pool); 5074 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 5075 lpfc_max_els_tries, ndlp, 5076 ndlp->nlp_DID, ELS_CMD_ACC); 5077 5078 /* Decrement the ndlp reference count from previous mbox command */ 5079 lpfc_nlp_put(ndlp); 5080 5081 if (!elsiocb) 5082 return; 5083 5084 icmd = &elsiocb->iocb; 5085 icmd->ulpContext = xri; 5086 5087 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5088 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5089 pcmd += sizeof(uint32_t); /* Skip past command */ 5090 rls_rsp = (struct RLS_RSP *)pcmd; 5091 5092 rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt); 5093 rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt); 5094 rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt); 5095 rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt); 5096 rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord); 5097 rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt); 5098 5099 /* Xmit ELS RLS ACC response tag <ulpIoTag> */ 5100 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 5101 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, " 5102 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 5103 elsiocb->iotag, elsiocb->iocb.ulpContext, 5104 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5105 ndlp->nlp_rpi); 5106 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5107 phba->fc_stat.elsXmitACC++; 5108 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR) 5109 lpfc_els_free_iocb(phba, elsiocb); 5110 } 5111 5112 /** 5113 * lpfc_els_rsp_rps_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd 5114 * @phba: pointer to lpfc hba data structure. 5115 * @pmb: pointer to the driver internal queue element for mailbox command. 5116 * 5117 * This routine is the completion callback function for the MBX_READ_LNK_STAT 5118 * mailbox command. This callback function is to actually send the Accept 5119 * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It 5120 * collects the link statistics from the completion of the MBX_READ_LNK_STAT 5121 * mailbox command, constructs the RPS response with the link statistics 5122 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC 5123 * response to the RPS. 5124 * 5125 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 5126 * will be incremented by 1 for holding the ndlp and the reference to ndlp 5127 * will be stored into the context1 field of the IOCB for the completion 5128 * callback function to the RPS Accept Response ELS IOCB command. 5129 * 5130 **/ 5131 static void 5132 lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 5133 { 5134 MAILBOX_t *mb; 5135 IOCB_t *icmd; 5136 RPS_RSP *rps_rsp; 5137 uint8_t *pcmd; 5138 struct lpfc_iocbq *elsiocb; 5139 struct lpfc_nodelist *ndlp; 5140 uint16_t xri, status; 5141 uint32_t cmdsize; 5142 5143 mb = &pmb->u.mb; 5144 5145 ndlp = (struct lpfc_nodelist *) pmb->context2; 5146 xri = (uint16_t) ((unsigned long)(pmb->context1)); 5147 pmb->context1 = NULL; 5148 pmb->context2 = NULL; 5149 5150 if (mb->mbxStatus) { 5151 mempool_free(pmb, phba->mbox_mem_pool); 5152 return; 5153 } 5154 5155 cmdsize = sizeof(RPS_RSP) + sizeof(uint32_t); 5156 mempool_free(pmb, phba->mbox_mem_pool); 5157 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 5158 lpfc_max_els_tries, ndlp, 5159 ndlp->nlp_DID, ELS_CMD_ACC); 5160 5161 /* Decrement the ndlp reference count from previous mbox command */ 5162 lpfc_nlp_put(ndlp); 5163 5164 if (!elsiocb) 5165 return; 5166 5167 icmd = &elsiocb->iocb; 5168 icmd->ulpContext = xri; 5169 5170 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5171 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5172 pcmd += sizeof(uint32_t); /* Skip past command */ 5173 rps_rsp = (RPS_RSP *)pcmd; 5174 5175 if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) 5176 status = 0x10; 5177 else 5178 status = 0x8; 5179 if (phba->pport->fc_flag & FC_FABRIC) 5180 status |= 0x4; 5181 5182 rps_rsp->rsvd1 = 0; 5183 rps_rsp->portStatus = cpu_to_be16(status); 5184 rps_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt); 5185 rps_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt); 5186 rps_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt); 5187 rps_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt); 5188 rps_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord); 5189 rps_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt); 5190 /* Xmit ELS RPS ACC response tag <ulpIoTag> */ 5191 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 5192 "0118 Xmit ELS RPS ACC response tag x%x xri x%x, " 5193 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 5194 elsiocb->iotag, elsiocb->iocb.ulpContext, 5195 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5196 ndlp->nlp_rpi); 5197 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5198 phba->fc_stat.elsXmitACC++; 5199 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR) 5200 lpfc_els_free_iocb(phba, elsiocb); 5201 return; 5202 } 5203 5204 /** 5205 * lpfc_els_rcv_rls - Process an unsolicited rls iocb 5206 * @vport: pointer to a host virtual N_Port data structure. 5207 * @cmdiocb: pointer to lpfc command iocb data structure. 5208 * @ndlp: pointer to a node-list data structure. 5209 * 5210 * This routine processes Read Port Status (RPL) IOCB received as an 5211 * ELS unsolicited event. It first checks the remote port state. If the 5212 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE 5213 * state, it invokes the lpfc_els_rsl_reject() routine to send the reject 5214 * response. Otherwise, it issue the MBX_READ_LNK_STAT mailbox command 5215 * for reading the HBA link statistics. It is for the callback function, 5216 * lpfc_els_rsp_rls_acc(), set to the MBX_READ_LNK_STAT mailbox command 5217 * to actually sending out RPL Accept (ACC) response. 5218 * 5219 * Return codes 5220 * 0 - Successfully processed rls iocb (currently always return 0) 5221 **/ 5222 static int 5223 lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5224 struct lpfc_nodelist *ndlp) 5225 { 5226 struct lpfc_hba *phba = vport->phba; 5227 LPFC_MBOXQ_t *mbox; 5228 struct lpfc_dmabuf *pcmd; 5229 struct ls_rjt stat; 5230 5231 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 5232 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) 5233 /* reject the unsolicited RPS request and done with it */ 5234 goto reject_out; 5235 5236 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 5237 5238 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC); 5239 if (mbox) { 5240 lpfc_read_lnk_stat(phba, mbox); 5241 mbox->context1 = 5242 (void *)((unsigned long) cmdiocb->iocb.ulpContext); 5243 mbox->context2 = lpfc_nlp_get(ndlp); 5244 mbox->vport = vport; 5245 mbox->mbox_cmpl = lpfc_els_rsp_rls_acc; 5246 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 5247 != MBX_NOT_FINISHED) 5248 /* Mbox completion will send ELS Response */ 5249 return 0; 5250 /* Decrement reference count used for the failed mbox 5251 * command. 5252 */ 5253 lpfc_nlp_put(ndlp); 5254 mempool_free(mbox, phba->mbox_mem_pool); 5255 } 5256 reject_out: 5257 /* issue rejection response */ 5258 stat.un.b.lsRjtRsvd0 = 0; 5259 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 5260 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 5261 stat.un.b.vendorUnique = 0; 5262 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 5263 return 0; 5264 } 5265 5266 /** 5267 * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb 5268 * @vport: pointer to a host virtual N_Port data structure. 5269 * @cmdiocb: pointer to lpfc command iocb data structure. 5270 * @ndlp: pointer to a node-list data structure. 5271 * 5272 * This routine processes Read Timout Value (RTV) IOCB received as an 5273 * ELS unsolicited event. It first checks the remote port state. If the 5274 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE 5275 * state, it invokes the lpfc_els_rsl_reject() routine to send the reject 5276 * response. Otherwise, it sends the Accept(ACC) response to a Read Timeout 5277 * Value (RTV) unsolicited IOCB event. 5278 * 5279 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 5280 * will be incremented by 1 for holding the ndlp and the reference to ndlp 5281 * will be stored into the context1 field of the IOCB for the completion 5282 * callback function to the RPS Accept Response ELS IOCB command. 5283 * 5284 * Return codes 5285 * 0 - Successfully processed rtv iocb (currently always return 0) 5286 **/ 5287 static int 5288 lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5289 struct lpfc_nodelist *ndlp) 5290 { 5291 struct lpfc_hba *phba = vport->phba; 5292 struct ls_rjt stat; 5293 struct RTV_RSP *rtv_rsp; 5294 uint8_t *pcmd; 5295 struct lpfc_iocbq *elsiocb; 5296 uint32_t cmdsize; 5297 5298 5299 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 5300 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) 5301 /* reject the unsolicited RPS request and done with it */ 5302 goto reject_out; 5303 5304 cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t); 5305 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 5306 lpfc_max_els_tries, ndlp, 5307 ndlp->nlp_DID, ELS_CMD_ACC); 5308 5309 if (!elsiocb) 5310 return 1; 5311 5312 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5313 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5314 pcmd += sizeof(uint32_t); /* Skip past command */ 5315 5316 /* use the command's xri in the response */ 5317 elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext; 5318 5319 rtv_rsp = (struct RTV_RSP *)pcmd; 5320 5321 /* populate RTV payload */ 5322 rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */ 5323 rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov); 5324 bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0); 5325 bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */ 5326 rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov); 5327 5328 /* Xmit ELS RLS ACC response tag <ulpIoTag> */ 5329 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 5330 "2875 Xmit ELS RTV ACC response tag x%x xri x%x, " 5331 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, " 5332 "Data: x%x x%x x%x\n", 5333 elsiocb->iotag, elsiocb->iocb.ulpContext, 5334 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5335 ndlp->nlp_rpi, 5336 rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov); 5337 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5338 phba->fc_stat.elsXmitACC++; 5339 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR) 5340 lpfc_els_free_iocb(phba, elsiocb); 5341 return 0; 5342 5343 reject_out: 5344 /* issue rejection response */ 5345 stat.un.b.lsRjtRsvd0 = 0; 5346 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 5347 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 5348 stat.un.b.vendorUnique = 0; 5349 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 5350 return 0; 5351 } 5352 5353 /* lpfc_els_rcv_rps - Process an unsolicited rps iocb 5354 * @vport: pointer to a host virtual N_Port data structure. 5355 * @cmdiocb: pointer to lpfc command iocb data structure. 5356 * @ndlp: pointer to a node-list data structure. 5357 * 5358 * This routine processes Read Port Status (RPS) IOCB received as an 5359 * ELS unsolicited event. It first checks the remote port state. If the 5360 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE 5361 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject 5362 * response. Otherwise, it issue the MBX_READ_LNK_STAT mailbox command 5363 * for reading the HBA link statistics. It is for the callback function, 5364 * lpfc_els_rsp_rps_acc(), set to the MBX_READ_LNK_STAT mailbox command 5365 * to actually sending out RPS Accept (ACC) response. 5366 * 5367 * Return codes 5368 * 0 - Successfully processed rps iocb (currently always return 0) 5369 **/ 5370 static int 5371 lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5372 struct lpfc_nodelist *ndlp) 5373 { 5374 struct lpfc_hba *phba = vport->phba; 5375 uint32_t *lp; 5376 uint8_t flag; 5377 LPFC_MBOXQ_t *mbox; 5378 struct lpfc_dmabuf *pcmd; 5379 RPS *rps; 5380 struct ls_rjt stat; 5381 5382 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 5383 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) 5384 /* reject the unsolicited RPS request and done with it */ 5385 goto reject_out; 5386 5387 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 5388 lp = (uint32_t *) pcmd->virt; 5389 flag = (be32_to_cpu(*lp++) & 0xf); 5390 rps = (RPS *) lp; 5391 5392 if ((flag == 0) || 5393 ((flag == 1) && (be32_to_cpu(rps->un.portNum) == 0)) || 5394 ((flag == 2) && (memcmp(&rps->un.portName, &vport->fc_portname, 5395 sizeof(struct lpfc_name)) == 0))) { 5396 5397 printk("Fix me....\n"); 5398 dump_stack(); 5399 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC); 5400 if (mbox) { 5401 lpfc_read_lnk_stat(phba, mbox); 5402 mbox->context1 = 5403 (void *)((unsigned long) cmdiocb->iocb.ulpContext); 5404 mbox->context2 = lpfc_nlp_get(ndlp); 5405 mbox->vport = vport; 5406 mbox->mbox_cmpl = lpfc_els_rsp_rps_acc; 5407 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 5408 != MBX_NOT_FINISHED) 5409 /* Mbox completion will send ELS Response */ 5410 return 0; 5411 /* Decrement reference count used for the failed mbox 5412 * command. 5413 */ 5414 lpfc_nlp_put(ndlp); 5415 mempool_free(mbox, phba->mbox_mem_pool); 5416 } 5417 } 5418 5419 reject_out: 5420 /* issue rejection response */ 5421 stat.un.b.lsRjtRsvd0 = 0; 5422 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 5423 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 5424 stat.un.b.vendorUnique = 0; 5425 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 5426 return 0; 5427 } 5428 5429 /* lpfc_issue_els_rrq - Process an unsolicited rps iocb 5430 * @vport: pointer to a host virtual N_Port data structure. 5431 * @ndlp: pointer to a node-list data structure. 5432 * @did: DID of the target. 5433 * @rrq: Pointer to the rrq struct. 5434 * 5435 * Build a ELS RRQ command and send it to the target. If the issue_iocb is 5436 * Successful the the completion handler will clear the RRQ. 5437 * 5438 * Return codes 5439 * 0 - Successfully sent rrq els iocb. 5440 * 1 - Failed to send rrq els iocb. 5441 **/ 5442 static int 5443 lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 5444 uint32_t did, struct lpfc_node_rrq *rrq) 5445 { 5446 struct lpfc_hba *phba = vport->phba; 5447 struct RRQ *els_rrq; 5448 IOCB_t *icmd; 5449 struct lpfc_iocbq *elsiocb; 5450 uint8_t *pcmd; 5451 uint16_t cmdsize; 5452 int ret; 5453 5454 5455 if (ndlp != rrq->ndlp) 5456 ndlp = rrq->ndlp; 5457 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) 5458 return 1; 5459 5460 /* If ndlp is not NULL, we will bump the reference count on it */ 5461 cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ)); 5462 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did, 5463 ELS_CMD_RRQ); 5464 if (!elsiocb) 5465 return 1; 5466 5467 icmd = &elsiocb->iocb; 5468 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5469 5470 /* For RRQ request, remainder of payload is Exchange IDs */ 5471 *((uint32_t *) (pcmd)) = ELS_CMD_RRQ; 5472 pcmd += sizeof(uint32_t); 5473 els_rrq = (struct RRQ *) pcmd; 5474 5475 bf_set(rrq_oxid, els_rrq, rrq->xritag); 5476 bf_set(rrq_rxid, els_rrq, rrq->rxid); 5477 bf_set(rrq_did, els_rrq, vport->fc_myDID); 5478 els_rrq->rrq = cpu_to_be32(els_rrq->rrq); 5479 els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg); 5480 5481 5482 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 5483 "Issue RRQ: did:x%x", 5484 did, rrq->xritag, rrq->rxid); 5485 elsiocb->context_un.rrq = rrq; 5486 elsiocb->iocb_cmpl = lpfc_cmpl_els_rrq; 5487 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5488 5489 if (ret == IOCB_ERROR) { 5490 lpfc_els_free_iocb(phba, elsiocb); 5491 return 1; 5492 } 5493 return 0; 5494 } 5495 5496 /** 5497 * lpfc_send_rrq - Sends ELS RRQ if needed. 5498 * @phba: pointer to lpfc hba data structure. 5499 * @rrq: pointer to the active rrq. 5500 * 5501 * This routine will call the lpfc_issue_els_rrq if the rrq is 5502 * still active for the xri. If this function returns a failure then 5503 * the caller needs to clean up the RRQ by calling lpfc_clr_active_rrq. 5504 * 5505 * Returns 0 Success. 5506 * 1 Failure. 5507 **/ 5508 int 5509 lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq) 5510 { 5511 struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport, 5512 rrq->nlp_DID); 5513 if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag)) 5514 return lpfc_issue_els_rrq(rrq->vport, ndlp, 5515 rrq->nlp_DID, rrq); 5516 else 5517 return 1; 5518 } 5519 5520 /** 5521 * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command 5522 * @vport: pointer to a host virtual N_Port data structure. 5523 * @cmdsize: size of the ELS command. 5524 * @oldiocb: pointer to the original lpfc command iocb data structure. 5525 * @ndlp: pointer to a node-list data structure. 5526 * 5527 * This routine issuees an Accept (ACC) Read Port List (RPL) ELS command. 5528 * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL. 5529 * 5530 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 5531 * will be incremented by 1 for holding the ndlp and the reference to ndlp 5532 * will be stored into the context1 field of the IOCB for the completion 5533 * callback function to the RPL Accept Response ELS command. 5534 * 5535 * Return code 5536 * 0 - Successfully issued ACC RPL ELS command 5537 * 1 - Failed to issue ACC RPL ELS command 5538 **/ 5539 static int 5540 lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize, 5541 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 5542 { 5543 struct lpfc_hba *phba = vport->phba; 5544 IOCB_t *icmd, *oldcmd; 5545 RPL_RSP rpl_rsp; 5546 struct lpfc_iocbq *elsiocb; 5547 uint8_t *pcmd; 5548 5549 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 5550 ndlp->nlp_DID, ELS_CMD_ACC); 5551 5552 if (!elsiocb) 5553 return 1; 5554 5555 icmd = &elsiocb->iocb; 5556 oldcmd = &oldiocb->iocb; 5557 icmd->ulpContext = oldcmd->ulpContext; /* Xri */ 5558 5559 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5560 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5561 pcmd += sizeof(uint16_t); 5562 *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize); 5563 pcmd += sizeof(uint16_t); 5564 5565 /* Setup the RPL ACC payload */ 5566 rpl_rsp.listLen = be32_to_cpu(1); 5567 rpl_rsp.index = 0; 5568 rpl_rsp.port_num_blk.portNum = 0; 5569 rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID); 5570 memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname, 5571 sizeof(struct lpfc_name)); 5572 memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t)); 5573 /* Xmit ELS RPL ACC response tag <ulpIoTag> */ 5574 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5575 "0120 Xmit ELS RPL ACC response tag x%x " 5576 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " 5577 "rpi x%x\n", 5578 elsiocb->iotag, elsiocb->iocb.ulpContext, 5579 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5580 ndlp->nlp_rpi); 5581 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5582 phba->fc_stat.elsXmitACC++; 5583 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == 5584 IOCB_ERROR) { 5585 lpfc_els_free_iocb(phba, elsiocb); 5586 return 1; 5587 } 5588 return 0; 5589 } 5590 5591 /** 5592 * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb 5593 * @vport: pointer to a host virtual N_Port data structure. 5594 * @cmdiocb: pointer to lpfc command iocb data structure. 5595 * @ndlp: pointer to a node-list data structure. 5596 * 5597 * This routine processes Read Port List (RPL) IOCB received as an ELS 5598 * unsolicited event. It first checks the remote port state. If the remote 5599 * port is not in NLP_STE_UNMAPPED_NODE and NLP_STE_MAPPED_NODE states, it 5600 * invokes the lpfc_els_rsp_reject() routine to send reject response. 5601 * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine 5602 * to accept the RPL. 5603 * 5604 * Return code 5605 * 0 - Successfully processed rpl iocb (currently always return 0) 5606 **/ 5607 static int 5608 lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5609 struct lpfc_nodelist *ndlp) 5610 { 5611 struct lpfc_dmabuf *pcmd; 5612 uint32_t *lp; 5613 uint32_t maxsize; 5614 uint16_t cmdsize; 5615 RPL *rpl; 5616 struct ls_rjt stat; 5617 5618 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 5619 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { 5620 /* issue rejection response */ 5621 stat.un.b.lsRjtRsvd0 = 0; 5622 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 5623 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 5624 stat.un.b.vendorUnique = 0; 5625 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 5626 NULL); 5627 /* rejected the unsolicited RPL request and done with it */ 5628 return 0; 5629 } 5630 5631 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 5632 lp = (uint32_t *) pcmd->virt; 5633 rpl = (RPL *) (lp + 1); 5634 maxsize = be32_to_cpu(rpl->maxsize); 5635 5636 /* We support only one port */ 5637 if ((rpl->index == 0) && 5638 ((maxsize == 0) || 5639 ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) { 5640 cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP); 5641 } else { 5642 cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t); 5643 } 5644 lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp); 5645 5646 return 0; 5647 } 5648 5649 /** 5650 * lpfc_els_rcv_farp - Process an unsolicited farp request els command 5651 * @vport: pointer to a virtual N_Port data structure. 5652 * @cmdiocb: pointer to lpfc command iocb data structure. 5653 * @ndlp: pointer to a node-list data structure. 5654 * 5655 * This routine processes Fibre Channel Address Resolution Protocol 5656 * (FARP) Request IOCB received as an ELS unsolicited event. Currently, 5657 * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such, 5658 * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the 5659 * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the 5660 * remote PortName is compared against the FC PortName stored in the @vport 5661 * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is 5662 * compared against the FC NodeName stored in the @vport data structure. 5663 * If any of these matches and the FARP_REQUEST_FARPR flag is set in the 5664 * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is 5665 * invoked to send out FARP Response to the remote node. Before sending the 5666 * FARP Response, however, the FARP_REQUEST_PLOGI flag is check in the FARP 5667 * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi() 5668 * routine is invoked to log into the remote port first. 5669 * 5670 * Return code 5671 * 0 - Either the FARP Match Mode not supported or successfully processed 5672 **/ 5673 static int 5674 lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5675 struct lpfc_nodelist *ndlp) 5676 { 5677 struct lpfc_dmabuf *pcmd; 5678 uint32_t *lp; 5679 IOCB_t *icmd; 5680 FARP *fp; 5681 uint32_t cmd, cnt, did; 5682 5683 icmd = &cmdiocb->iocb; 5684 did = icmd->un.elsreq64.remoteID; 5685 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 5686 lp = (uint32_t *) pcmd->virt; 5687 5688 cmd = *lp++; 5689 fp = (FARP *) lp; 5690 /* FARP-REQ received from DID <did> */ 5691 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5692 "0601 FARP-REQ received from DID x%x\n", did); 5693 /* We will only support match on WWPN or WWNN */ 5694 if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) { 5695 return 0; 5696 } 5697 5698 cnt = 0; 5699 /* If this FARP command is searching for my portname */ 5700 if (fp->Mflags & FARP_MATCH_PORT) { 5701 if (memcmp(&fp->RportName, &vport->fc_portname, 5702 sizeof(struct lpfc_name)) == 0) 5703 cnt = 1; 5704 } 5705 5706 /* If this FARP command is searching for my nodename */ 5707 if (fp->Mflags & FARP_MATCH_NODE) { 5708 if (memcmp(&fp->RnodeName, &vport->fc_nodename, 5709 sizeof(struct lpfc_name)) == 0) 5710 cnt = 1; 5711 } 5712 5713 if (cnt) { 5714 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || 5715 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) { 5716 /* Log back into the node before sending the FARP. */ 5717 if (fp->Rflags & FARP_REQUEST_PLOGI) { 5718 ndlp->nlp_prev_state = ndlp->nlp_state; 5719 lpfc_nlp_set_state(vport, ndlp, 5720 NLP_STE_PLOGI_ISSUE); 5721 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 5722 } 5723 5724 /* Send a FARP response to that node */ 5725 if (fp->Rflags & FARP_REQUEST_FARPR) 5726 lpfc_issue_els_farpr(vport, did, 0); 5727 } 5728 } 5729 return 0; 5730 } 5731 5732 /** 5733 * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb 5734 * @vport: pointer to a host virtual N_Port data structure. 5735 * @cmdiocb: pointer to lpfc command iocb data structure. 5736 * @ndlp: pointer to a node-list data structure. 5737 * 5738 * This routine processes Fibre Channel Address Resolution Protocol 5739 * Response (FARPR) IOCB received as an ELS unsolicited event. It simply 5740 * invokes the lpfc_els_rsp_acc() routine to the remote node to accept 5741 * the FARP response request. 5742 * 5743 * Return code 5744 * 0 - Successfully processed FARPR IOCB (currently always return 0) 5745 **/ 5746 static int 5747 lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5748 struct lpfc_nodelist *ndlp) 5749 { 5750 struct lpfc_dmabuf *pcmd; 5751 uint32_t *lp; 5752 IOCB_t *icmd; 5753 uint32_t cmd, did; 5754 5755 icmd = &cmdiocb->iocb; 5756 did = icmd->un.elsreq64.remoteID; 5757 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 5758 lp = (uint32_t *) pcmd->virt; 5759 5760 cmd = *lp++; 5761 /* FARP-RSP received from DID <did> */ 5762 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5763 "0600 FARP-RSP received from DID x%x\n", did); 5764 /* ACCEPT the Farp resp request */ 5765 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 5766 5767 return 0; 5768 } 5769 5770 /** 5771 * lpfc_els_rcv_fan - Process an unsolicited fan iocb command 5772 * @vport: pointer to a host virtual N_Port data structure. 5773 * @cmdiocb: pointer to lpfc command iocb data structure. 5774 * @fan_ndlp: pointer to a node-list data structure. 5775 * 5776 * This routine processes a Fabric Address Notification (FAN) IOCB 5777 * command received as an ELS unsolicited event. The FAN ELS command will 5778 * only be processed on a physical port (i.e., the @vport represents the 5779 * physical port). The fabric NodeName and PortName from the FAN IOCB are 5780 * compared against those in the phba data structure. If any of those is 5781 * different, the lpfc_initial_flogi() routine is invoked to initialize 5782 * Fabric Login (FLOGI) to the fabric to start the discover over. Otherwise, 5783 * if both of those are identical, the lpfc_issue_fabric_reglogin() routine 5784 * is invoked to register login to the fabric. 5785 * 5786 * Return code 5787 * 0 - Successfully processed fan iocb (currently always return 0). 5788 **/ 5789 static int 5790 lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5791 struct lpfc_nodelist *fan_ndlp) 5792 { 5793 struct lpfc_hba *phba = vport->phba; 5794 uint32_t *lp; 5795 FAN *fp; 5796 5797 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n"); 5798 lp = (uint32_t *)((struct lpfc_dmabuf *)cmdiocb->context2)->virt; 5799 fp = (FAN *) ++lp; 5800 /* FAN received; Fan does not have a reply sequence */ 5801 if ((vport == phba->pport) && 5802 (vport->port_state == LPFC_LOCAL_CFG_LINK)) { 5803 if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName, 5804 sizeof(struct lpfc_name))) || 5805 (memcmp(&phba->fc_fabparam.portName, &fp->FportName, 5806 sizeof(struct lpfc_name)))) { 5807 /* This port has switched fabrics. FLOGI is required */ 5808 lpfc_issue_init_vfi(vport); 5809 } else { 5810 /* FAN verified - skip FLOGI */ 5811 vport->fc_myDID = vport->fc_prevDID; 5812 if (phba->sli_rev < LPFC_SLI_REV4) 5813 lpfc_issue_fabric_reglogin(vport); 5814 else 5815 lpfc_issue_reg_vfi(vport); 5816 } 5817 } 5818 return 0; 5819 } 5820 5821 /** 5822 * lpfc_els_timeout - Handler funciton to the els timer 5823 * @ptr: holder for the timer function associated data. 5824 * 5825 * This routine is invoked by the ELS timer after timeout. It posts the ELS 5826 * timer timeout event by setting the WORKER_ELS_TMO bit to the work port 5827 * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake 5828 * up the worker thread. It is for the worker thread to invoke the routine 5829 * lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO. 5830 **/ 5831 void 5832 lpfc_els_timeout(unsigned long ptr) 5833 { 5834 struct lpfc_vport *vport = (struct lpfc_vport *) ptr; 5835 struct lpfc_hba *phba = vport->phba; 5836 uint32_t tmo_posted; 5837 unsigned long iflag; 5838 5839 spin_lock_irqsave(&vport->work_port_lock, iflag); 5840 tmo_posted = vport->work_port_events & WORKER_ELS_TMO; 5841 if (!tmo_posted) 5842 vport->work_port_events |= WORKER_ELS_TMO; 5843 spin_unlock_irqrestore(&vport->work_port_lock, iflag); 5844 5845 if (!tmo_posted) 5846 lpfc_worker_wake_up(phba); 5847 return; 5848 } 5849 5850 5851 /** 5852 * lpfc_els_timeout_handler - Process an els timeout event 5853 * @vport: pointer to a virtual N_Port data structure. 5854 * 5855 * This routine is the actual handler function that processes an ELS timeout 5856 * event. It walks the ELS ring to get and abort all the IOCBs (except the 5857 * ABORT/CLOSE/FARP/FARPR/FDISC), which are associated with the @vport by 5858 * invoking the lpfc_sli_issue_abort_iotag() routine. 5859 **/ 5860 void 5861 lpfc_els_timeout_handler(struct lpfc_vport *vport) 5862 { 5863 struct lpfc_hba *phba = vport->phba; 5864 struct lpfc_sli_ring *pring; 5865 struct lpfc_iocbq *tmp_iocb, *piocb; 5866 IOCB_t *cmd = NULL; 5867 struct lpfc_dmabuf *pcmd; 5868 uint32_t els_command = 0; 5869 uint32_t timeout; 5870 uint32_t remote_ID = 0xffffffff; 5871 LIST_HEAD(txcmplq_completions); 5872 LIST_HEAD(abort_list); 5873 5874 5875 timeout = (uint32_t)(phba->fc_ratov << 1); 5876 5877 pring = &phba->sli.ring[LPFC_ELS_RING]; 5878 5879 spin_lock_irq(&phba->hbalock); 5880 list_splice_init(&pring->txcmplq, &txcmplq_completions); 5881 spin_unlock_irq(&phba->hbalock); 5882 5883 list_for_each_entry_safe(piocb, tmp_iocb, &txcmplq_completions, list) { 5884 cmd = &piocb->iocb; 5885 5886 if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 || 5887 piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || 5888 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) 5889 continue; 5890 5891 if (piocb->vport != vport) 5892 continue; 5893 5894 pcmd = (struct lpfc_dmabuf *) piocb->context2; 5895 if (pcmd) 5896 els_command = *(uint32_t *) (pcmd->virt); 5897 5898 if (els_command == ELS_CMD_FARP || 5899 els_command == ELS_CMD_FARPR || 5900 els_command == ELS_CMD_FDISC) 5901 continue; 5902 5903 if (piocb->drvrTimeout > 0) { 5904 if (piocb->drvrTimeout >= timeout) 5905 piocb->drvrTimeout -= timeout; 5906 else 5907 piocb->drvrTimeout = 0; 5908 continue; 5909 } 5910 5911 remote_ID = 0xffffffff; 5912 if (cmd->ulpCommand != CMD_GEN_REQUEST64_CR) 5913 remote_ID = cmd->un.elsreq64.remoteID; 5914 else { 5915 struct lpfc_nodelist *ndlp; 5916 ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext); 5917 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) 5918 remote_ID = ndlp->nlp_DID; 5919 } 5920 list_add_tail(&piocb->dlist, &abort_list); 5921 } 5922 spin_lock_irq(&phba->hbalock); 5923 list_splice(&txcmplq_completions, &pring->txcmplq); 5924 spin_unlock_irq(&phba->hbalock); 5925 5926 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { 5927 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 5928 "0127 ELS timeout Data: x%x x%x x%x " 5929 "x%x\n", els_command, 5930 remote_ID, cmd->ulpCommand, cmd->ulpIoTag); 5931 spin_lock_irq(&phba->hbalock); 5932 list_del_init(&piocb->dlist); 5933 lpfc_sli_issue_abort_iotag(phba, pring, piocb); 5934 spin_unlock_irq(&phba->hbalock); 5935 } 5936 5937 if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt) 5938 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout); 5939 } 5940 5941 /** 5942 * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport 5943 * @vport: pointer to a host virtual N_Port data structure. 5944 * 5945 * This routine is used to clean up all the outstanding ELS commands on a 5946 * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport() 5947 * routine. After that, it walks the ELS transmit queue to remove all the 5948 * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. For 5949 * the IOCBs with a non-NULL completion callback function, the callback 5950 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and 5951 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion 5952 * callback function, the IOCB will simply be released. Finally, it walks 5953 * the ELS transmit completion queue to issue an abort IOCB to any transmit 5954 * completion queue IOCB that is associated with the @vport and is not 5955 * an IOCB from libdfc (i.e., the management plane IOCBs that are not 5956 * part of the discovery state machine) out to HBA by invoking the 5957 * lpfc_sli_issue_abort_iotag() routine. Note that this function issues the 5958 * abort IOCB to any transmit completion queueed IOCB, it does not guarantee 5959 * the IOCBs are aborted when this function returns. 5960 **/ 5961 void 5962 lpfc_els_flush_cmd(struct lpfc_vport *vport) 5963 { 5964 LIST_HEAD(completions); 5965 struct lpfc_hba *phba = vport->phba; 5966 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 5967 struct lpfc_iocbq *tmp_iocb, *piocb; 5968 IOCB_t *cmd = NULL; 5969 5970 lpfc_fabric_abort_vport(vport); 5971 5972 spin_lock_irq(&phba->hbalock); 5973 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) { 5974 cmd = &piocb->iocb; 5975 5976 if (piocb->iocb_flag & LPFC_IO_LIBDFC) { 5977 continue; 5978 } 5979 5980 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */ 5981 if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN || 5982 cmd->ulpCommand == CMD_QUE_RING_BUF64_CN || 5983 cmd->ulpCommand == CMD_CLOSE_XRI_CN || 5984 cmd->ulpCommand == CMD_ABORT_XRI_CN) 5985 continue; 5986 5987 if (piocb->vport != vport) 5988 continue; 5989 5990 list_move_tail(&piocb->list, &completions); 5991 pring->txq_cnt--; 5992 } 5993 5994 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 5995 if (piocb->iocb_flag & LPFC_IO_LIBDFC) { 5996 continue; 5997 } 5998 5999 if (piocb->vport != vport) 6000 continue; 6001 6002 lpfc_sli_issue_abort_iotag(phba, pring, piocb); 6003 } 6004 spin_unlock_irq(&phba->hbalock); 6005 6006 /* Cancell all the IOCBs from the completions list */ 6007 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 6008 IOERR_SLI_ABORTED); 6009 6010 return; 6011 } 6012 6013 /** 6014 * lpfc_els_flush_all_cmd - Clean up all the outstanding els commands to a HBA 6015 * @phba: pointer to lpfc hba data structure. 6016 * 6017 * This routine is used to clean up all the outstanding ELS commands on a 6018 * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba() 6019 * routine. After that, it walks the ELS transmit queue to remove all the 6020 * IOCBs to the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For 6021 * the IOCBs with the completion callback function associated, the callback 6022 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and 6023 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without the completion 6024 * callback function associated, the IOCB will simply be released. Finally, 6025 * it walks the ELS transmit completion queue to issue an abort IOCB to any 6026 * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the 6027 * management plane IOCBs that are not part of the discovery state machine) 6028 * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine. 6029 **/ 6030 void 6031 lpfc_els_flush_all_cmd(struct lpfc_hba *phba) 6032 { 6033 LIST_HEAD(completions); 6034 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 6035 struct lpfc_iocbq *tmp_iocb, *piocb; 6036 IOCB_t *cmd = NULL; 6037 6038 lpfc_fabric_abort_hba(phba); 6039 spin_lock_irq(&phba->hbalock); 6040 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) { 6041 cmd = &piocb->iocb; 6042 if (piocb->iocb_flag & LPFC_IO_LIBDFC) 6043 continue; 6044 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */ 6045 if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN || 6046 cmd->ulpCommand == CMD_QUE_RING_BUF64_CN || 6047 cmd->ulpCommand == CMD_CLOSE_XRI_CN || 6048 cmd->ulpCommand == CMD_ABORT_XRI_CN) 6049 continue; 6050 list_move_tail(&piocb->list, &completions); 6051 pring->txq_cnt--; 6052 } 6053 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 6054 if (piocb->iocb_flag & LPFC_IO_LIBDFC) 6055 continue; 6056 lpfc_sli_issue_abort_iotag(phba, pring, piocb); 6057 } 6058 spin_unlock_irq(&phba->hbalock); 6059 6060 /* Cancel all the IOCBs from the completions list */ 6061 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 6062 IOERR_SLI_ABORTED); 6063 6064 return; 6065 } 6066 6067 /** 6068 * lpfc_send_els_failure_event - Posts an ELS command failure event 6069 * @phba: Pointer to hba context object. 6070 * @cmdiocbp: Pointer to command iocb which reported error. 6071 * @rspiocbp: Pointer to response iocb which reported error. 6072 * 6073 * This function sends an event when there is an ELS command 6074 * failure. 6075 **/ 6076 void 6077 lpfc_send_els_failure_event(struct lpfc_hba *phba, 6078 struct lpfc_iocbq *cmdiocbp, 6079 struct lpfc_iocbq *rspiocbp) 6080 { 6081 struct lpfc_vport *vport = cmdiocbp->vport; 6082 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6083 struct lpfc_lsrjt_event lsrjt_event; 6084 struct lpfc_fabric_event_header fabric_event; 6085 struct ls_rjt stat; 6086 struct lpfc_nodelist *ndlp; 6087 uint32_t *pcmd; 6088 6089 ndlp = cmdiocbp->context1; 6090 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) 6091 return; 6092 6093 if (rspiocbp->iocb.ulpStatus == IOSTAT_LS_RJT) { 6094 lsrjt_event.header.event_type = FC_REG_ELS_EVENT; 6095 lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV; 6096 memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname, 6097 sizeof(struct lpfc_name)); 6098 memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename, 6099 sizeof(struct lpfc_name)); 6100 pcmd = (uint32_t *) (((struct lpfc_dmabuf *) 6101 cmdiocbp->context2)->virt); 6102 lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0; 6103 stat.un.lsRjtError = be32_to_cpu(rspiocbp->iocb.un.ulpWord[4]); 6104 lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode; 6105 lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp; 6106 fc_host_post_vendor_event(shost, 6107 fc_get_event_number(), 6108 sizeof(lsrjt_event), 6109 (char *)&lsrjt_event, 6110 LPFC_NL_VENDOR_ID); 6111 return; 6112 } 6113 if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) || 6114 (rspiocbp->iocb.ulpStatus == IOSTAT_FABRIC_BSY)) { 6115 fabric_event.event_type = FC_REG_FABRIC_EVENT; 6116 if (rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) 6117 fabric_event.subcategory = LPFC_EVENT_PORT_BUSY; 6118 else 6119 fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY; 6120 memcpy(fabric_event.wwpn, &ndlp->nlp_portname, 6121 sizeof(struct lpfc_name)); 6122 memcpy(fabric_event.wwnn, &ndlp->nlp_nodename, 6123 sizeof(struct lpfc_name)); 6124 fc_host_post_vendor_event(shost, 6125 fc_get_event_number(), 6126 sizeof(fabric_event), 6127 (char *)&fabric_event, 6128 LPFC_NL_VENDOR_ID); 6129 return; 6130 } 6131 6132 } 6133 6134 /** 6135 * lpfc_send_els_event - Posts unsolicited els event 6136 * @vport: Pointer to vport object. 6137 * @ndlp: Pointer FC node object. 6138 * @cmd: ELS command code. 6139 * 6140 * This function posts an event when there is an incoming 6141 * unsolicited ELS command. 6142 **/ 6143 static void 6144 lpfc_send_els_event(struct lpfc_vport *vport, 6145 struct lpfc_nodelist *ndlp, 6146 uint32_t *payload) 6147 { 6148 struct lpfc_els_event_header *els_data = NULL; 6149 struct lpfc_logo_event *logo_data = NULL; 6150 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6151 6152 if (*payload == ELS_CMD_LOGO) { 6153 logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL); 6154 if (!logo_data) { 6155 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 6156 "0148 Failed to allocate memory " 6157 "for LOGO event\n"); 6158 return; 6159 } 6160 els_data = &logo_data->header; 6161 } else { 6162 els_data = kmalloc(sizeof(struct lpfc_els_event_header), 6163 GFP_KERNEL); 6164 if (!els_data) { 6165 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 6166 "0149 Failed to allocate memory " 6167 "for ELS event\n"); 6168 return; 6169 } 6170 } 6171 els_data->event_type = FC_REG_ELS_EVENT; 6172 switch (*payload) { 6173 case ELS_CMD_PLOGI: 6174 els_data->subcategory = LPFC_EVENT_PLOGI_RCV; 6175 break; 6176 case ELS_CMD_PRLO: 6177 els_data->subcategory = LPFC_EVENT_PRLO_RCV; 6178 break; 6179 case ELS_CMD_ADISC: 6180 els_data->subcategory = LPFC_EVENT_ADISC_RCV; 6181 break; 6182 case ELS_CMD_LOGO: 6183 els_data->subcategory = LPFC_EVENT_LOGO_RCV; 6184 /* Copy the WWPN in the LOGO payload */ 6185 memcpy(logo_data->logo_wwpn, &payload[2], 6186 sizeof(struct lpfc_name)); 6187 break; 6188 default: 6189 kfree(els_data); 6190 return; 6191 } 6192 memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name)); 6193 memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name)); 6194 if (*payload == ELS_CMD_LOGO) { 6195 fc_host_post_vendor_event(shost, 6196 fc_get_event_number(), 6197 sizeof(struct lpfc_logo_event), 6198 (char *)logo_data, 6199 LPFC_NL_VENDOR_ID); 6200 kfree(logo_data); 6201 } else { 6202 fc_host_post_vendor_event(shost, 6203 fc_get_event_number(), 6204 sizeof(struct lpfc_els_event_header), 6205 (char *)els_data, 6206 LPFC_NL_VENDOR_ID); 6207 kfree(els_data); 6208 } 6209 6210 return; 6211 } 6212 6213 6214 /** 6215 * lpfc_els_unsol_buffer - Process an unsolicited event data buffer 6216 * @phba: pointer to lpfc hba data structure. 6217 * @pring: pointer to a SLI ring. 6218 * @vport: pointer to a host virtual N_Port data structure. 6219 * @elsiocb: pointer to lpfc els command iocb data structure. 6220 * 6221 * This routine is used for processing the IOCB associated with a unsolicited 6222 * event. It first determines whether there is an existing ndlp that matches 6223 * the DID from the unsolicited IOCB. If not, it will create a new one with 6224 * the DID from the unsolicited IOCB. The ELS command from the unsolicited 6225 * IOCB is then used to invoke the proper routine and to set up proper state 6226 * of the discovery state machine. 6227 **/ 6228 static void 6229 lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 6230 struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb) 6231 { 6232 struct Scsi_Host *shost; 6233 struct lpfc_nodelist *ndlp; 6234 struct ls_rjt stat; 6235 uint32_t *payload; 6236 uint32_t cmd, did, newnode, rjt_err = 0; 6237 IOCB_t *icmd = &elsiocb->iocb; 6238 6239 if (!vport || !(elsiocb->context2)) 6240 goto dropit; 6241 6242 newnode = 0; 6243 payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt; 6244 cmd = *payload; 6245 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0) 6246 lpfc_post_buffer(phba, pring, 1); 6247 6248 did = icmd->un.rcvels.remoteID; 6249 if (icmd->ulpStatus) { 6250 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6251 "RCV Unsol ELS: status:x%x/x%x did:x%x", 6252 icmd->ulpStatus, icmd->un.ulpWord[4], did); 6253 goto dropit; 6254 } 6255 6256 /* Check to see if link went down during discovery */ 6257 if (lpfc_els_chk_latt(vport)) 6258 goto dropit; 6259 6260 /* Ignore traffic received during vport shutdown. */ 6261 if (vport->load_flag & FC_UNLOADING) 6262 goto dropit; 6263 6264 /* If NPort discovery is delayed drop incoming ELS */ 6265 if ((vport->fc_flag & FC_DISC_DELAYED) && 6266 (cmd != ELS_CMD_PLOGI)) 6267 goto dropit; 6268 6269 ndlp = lpfc_findnode_did(vport, did); 6270 if (!ndlp) { 6271 /* Cannot find existing Fabric ndlp, so allocate a new one */ 6272 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 6273 if (!ndlp) 6274 goto dropit; 6275 6276 lpfc_nlp_init(vport, ndlp, did); 6277 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 6278 newnode = 1; 6279 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 6280 ndlp->nlp_type |= NLP_FABRIC; 6281 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 6282 ndlp = lpfc_enable_node(vport, ndlp, 6283 NLP_STE_UNUSED_NODE); 6284 if (!ndlp) 6285 goto dropit; 6286 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 6287 newnode = 1; 6288 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 6289 ndlp->nlp_type |= NLP_FABRIC; 6290 } else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 6291 /* This is similar to the new node path */ 6292 ndlp = lpfc_nlp_get(ndlp); 6293 if (!ndlp) 6294 goto dropit; 6295 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 6296 newnode = 1; 6297 } 6298 6299 phba->fc_stat.elsRcvFrame++; 6300 6301 elsiocb->context1 = lpfc_nlp_get(ndlp); 6302 elsiocb->vport = vport; 6303 6304 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) { 6305 cmd &= ELS_CMD_MASK; 6306 } 6307 /* ELS command <elsCmd> received from NPORT <did> */ 6308 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6309 "0112 ELS command x%x received from NPORT x%x " 6310 "Data: x%x\n", cmd, did, vport->port_state); 6311 switch (cmd) { 6312 case ELS_CMD_PLOGI: 6313 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6314 "RCV PLOGI: did:x%x/ste:x%x flg:x%x", 6315 did, vport->port_state, ndlp->nlp_flag); 6316 6317 phba->fc_stat.elsRcvPLOGI++; 6318 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); 6319 6320 lpfc_send_els_event(vport, ndlp, payload); 6321 6322 /* If Nport discovery is delayed, reject PLOGIs */ 6323 if (vport->fc_flag & FC_DISC_DELAYED) { 6324 rjt_err = LSRJT_UNABLE_TPC; 6325 break; 6326 } 6327 if (vport->port_state < LPFC_DISC_AUTH) { 6328 if (!(phba->pport->fc_flag & FC_PT2PT) || 6329 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { 6330 rjt_err = LSRJT_UNABLE_TPC; 6331 break; 6332 } 6333 /* We get here, and drop thru, if we are PT2PT with 6334 * another NPort and the other side has initiated 6335 * the PLOGI before responding to our FLOGI. 6336 */ 6337 } 6338 6339 shost = lpfc_shost_from_vport(vport); 6340 spin_lock_irq(shost->host_lock); 6341 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE; 6342 spin_unlock_irq(shost->host_lock); 6343 6344 lpfc_disc_state_machine(vport, ndlp, elsiocb, 6345 NLP_EVT_RCV_PLOGI); 6346 6347 break; 6348 case ELS_CMD_FLOGI: 6349 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6350 "RCV FLOGI: did:x%x/ste:x%x flg:x%x", 6351 did, vport->port_state, ndlp->nlp_flag); 6352 6353 phba->fc_stat.elsRcvFLOGI++; 6354 lpfc_els_rcv_flogi(vport, elsiocb, ndlp); 6355 if (newnode) 6356 lpfc_nlp_put(ndlp); 6357 break; 6358 case ELS_CMD_LOGO: 6359 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6360 "RCV LOGO: did:x%x/ste:x%x flg:x%x", 6361 did, vport->port_state, ndlp->nlp_flag); 6362 6363 phba->fc_stat.elsRcvLOGO++; 6364 lpfc_send_els_event(vport, ndlp, payload); 6365 if (vport->port_state < LPFC_DISC_AUTH) { 6366 rjt_err = LSRJT_UNABLE_TPC; 6367 break; 6368 } 6369 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO); 6370 break; 6371 case ELS_CMD_PRLO: 6372 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6373 "RCV PRLO: did:x%x/ste:x%x flg:x%x", 6374 did, vport->port_state, ndlp->nlp_flag); 6375 6376 phba->fc_stat.elsRcvPRLO++; 6377 lpfc_send_els_event(vport, ndlp, payload); 6378 if (vport->port_state < LPFC_DISC_AUTH) { 6379 rjt_err = LSRJT_UNABLE_TPC; 6380 break; 6381 } 6382 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO); 6383 break; 6384 case ELS_CMD_RSCN: 6385 phba->fc_stat.elsRcvRSCN++; 6386 lpfc_els_rcv_rscn(vport, elsiocb, ndlp); 6387 if (newnode) 6388 lpfc_nlp_put(ndlp); 6389 break; 6390 case ELS_CMD_ADISC: 6391 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6392 "RCV ADISC: did:x%x/ste:x%x flg:x%x", 6393 did, vport->port_state, ndlp->nlp_flag); 6394 6395 lpfc_send_els_event(vport, ndlp, payload); 6396 phba->fc_stat.elsRcvADISC++; 6397 if (vport->port_state < LPFC_DISC_AUTH) { 6398 rjt_err = LSRJT_UNABLE_TPC; 6399 break; 6400 } 6401 lpfc_disc_state_machine(vport, ndlp, elsiocb, 6402 NLP_EVT_RCV_ADISC); 6403 break; 6404 case ELS_CMD_PDISC: 6405 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6406 "RCV PDISC: did:x%x/ste:x%x flg:x%x", 6407 did, vport->port_state, ndlp->nlp_flag); 6408 6409 phba->fc_stat.elsRcvPDISC++; 6410 if (vport->port_state < LPFC_DISC_AUTH) { 6411 rjt_err = LSRJT_UNABLE_TPC; 6412 break; 6413 } 6414 lpfc_disc_state_machine(vport, ndlp, elsiocb, 6415 NLP_EVT_RCV_PDISC); 6416 break; 6417 case ELS_CMD_FARPR: 6418 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6419 "RCV FARPR: did:x%x/ste:x%x flg:x%x", 6420 did, vport->port_state, ndlp->nlp_flag); 6421 6422 phba->fc_stat.elsRcvFARPR++; 6423 lpfc_els_rcv_farpr(vport, elsiocb, ndlp); 6424 break; 6425 case ELS_CMD_FARP: 6426 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6427 "RCV FARP: did:x%x/ste:x%x flg:x%x", 6428 did, vport->port_state, ndlp->nlp_flag); 6429 6430 phba->fc_stat.elsRcvFARP++; 6431 lpfc_els_rcv_farp(vport, elsiocb, ndlp); 6432 break; 6433 case ELS_CMD_FAN: 6434 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6435 "RCV FAN: did:x%x/ste:x%x flg:x%x", 6436 did, vport->port_state, ndlp->nlp_flag); 6437 6438 phba->fc_stat.elsRcvFAN++; 6439 lpfc_els_rcv_fan(vport, elsiocb, ndlp); 6440 break; 6441 case ELS_CMD_PRLI: 6442 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6443 "RCV PRLI: did:x%x/ste:x%x flg:x%x", 6444 did, vport->port_state, ndlp->nlp_flag); 6445 6446 phba->fc_stat.elsRcvPRLI++; 6447 if (vport->port_state < LPFC_DISC_AUTH) { 6448 rjt_err = LSRJT_UNABLE_TPC; 6449 break; 6450 } 6451 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI); 6452 break; 6453 case ELS_CMD_LIRR: 6454 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6455 "RCV LIRR: did:x%x/ste:x%x flg:x%x", 6456 did, vport->port_state, ndlp->nlp_flag); 6457 6458 phba->fc_stat.elsRcvLIRR++; 6459 lpfc_els_rcv_lirr(vport, elsiocb, ndlp); 6460 if (newnode) 6461 lpfc_nlp_put(ndlp); 6462 break; 6463 case ELS_CMD_RLS: 6464 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6465 "RCV RLS: did:x%x/ste:x%x flg:x%x", 6466 did, vport->port_state, ndlp->nlp_flag); 6467 6468 phba->fc_stat.elsRcvRLS++; 6469 lpfc_els_rcv_rls(vport, elsiocb, ndlp); 6470 if (newnode) 6471 lpfc_nlp_put(ndlp); 6472 break; 6473 case ELS_CMD_RPS: 6474 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6475 "RCV RPS: did:x%x/ste:x%x flg:x%x", 6476 did, vport->port_state, ndlp->nlp_flag); 6477 6478 phba->fc_stat.elsRcvRPS++; 6479 lpfc_els_rcv_rps(vport, elsiocb, ndlp); 6480 if (newnode) 6481 lpfc_nlp_put(ndlp); 6482 break; 6483 case ELS_CMD_RPL: 6484 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6485 "RCV RPL: did:x%x/ste:x%x flg:x%x", 6486 did, vport->port_state, ndlp->nlp_flag); 6487 6488 phba->fc_stat.elsRcvRPL++; 6489 lpfc_els_rcv_rpl(vport, elsiocb, ndlp); 6490 if (newnode) 6491 lpfc_nlp_put(ndlp); 6492 break; 6493 case ELS_CMD_RNID: 6494 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6495 "RCV RNID: did:x%x/ste:x%x flg:x%x", 6496 did, vport->port_state, ndlp->nlp_flag); 6497 6498 phba->fc_stat.elsRcvRNID++; 6499 lpfc_els_rcv_rnid(vport, elsiocb, ndlp); 6500 if (newnode) 6501 lpfc_nlp_put(ndlp); 6502 break; 6503 case ELS_CMD_RTV: 6504 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6505 "RCV RTV: did:x%x/ste:x%x flg:x%x", 6506 did, vport->port_state, ndlp->nlp_flag); 6507 phba->fc_stat.elsRcvRTV++; 6508 lpfc_els_rcv_rtv(vport, elsiocb, ndlp); 6509 if (newnode) 6510 lpfc_nlp_put(ndlp); 6511 break; 6512 case ELS_CMD_RRQ: 6513 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6514 "RCV RRQ: did:x%x/ste:x%x flg:x%x", 6515 did, vport->port_state, ndlp->nlp_flag); 6516 6517 phba->fc_stat.elsRcvRRQ++; 6518 lpfc_els_rcv_rrq(vport, elsiocb, ndlp); 6519 if (newnode) 6520 lpfc_nlp_put(ndlp); 6521 break; 6522 case ELS_CMD_ECHO: 6523 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6524 "RCV ECHO: did:x%x/ste:x%x flg:x%x", 6525 did, vport->port_state, ndlp->nlp_flag); 6526 6527 phba->fc_stat.elsRcvECHO++; 6528 lpfc_els_rcv_echo(vport, elsiocb, ndlp); 6529 if (newnode) 6530 lpfc_nlp_put(ndlp); 6531 break; 6532 default: 6533 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6534 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x", 6535 cmd, did, vport->port_state); 6536 6537 /* Unsupported ELS command, reject */ 6538 rjt_err = LSRJT_CMD_UNSUPPORTED; 6539 6540 /* Unknown ELS command <elsCmd> received from NPORT <did> */ 6541 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 6542 "0115 Unknown ELS command x%x " 6543 "received from NPORT x%x\n", cmd, did); 6544 if (newnode) 6545 lpfc_nlp_put(ndlp); 6546 break; 6547 } 6548 6549 /* check if need to LS_RJT received ELS cmd */ 6550 if (rjt_err) { 6551 memset(&stat, 0, sizeof(stat)); 6552 stat.un.b.lsRjtRsnCode = rjt_err; 6553 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; 6554 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp, 6555 NULL); 6556 } 6557 6558 lpfc_nlp_put(elsiocb->context1); 6559 elsiocb->context1 = NULL; 6560 return; 6561 6562 dropit: 6563 if (vport && !(vport->load_flag & FC_UNLOADING)) 6564 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 6565 "0111 Dropping received ELS cmd " 6566 "Data: x%x x%x x%x\n", 6567 icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout); 6568 phba->fc_stat.elsRcvDrop++; 6569 } 6570 6571 /** 6572 * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier 6573 * @phba: pointer to lpfc hba data structure. 6574 * @vpi: host virtual N_Port identifier. 6575 * 6576 * This routine finds a vport on a HBA (referred by @phba) through a 6577 * @vpi. The function walks the HBA's vport list and returns the address 6578 * of the vport with the matching @vpi. 6579 * 6580 * Return code 6581 * NULL - No vport with the matching @vpi found 6582 * Otherwise - Address to the vport with the matching @vpi. 6583 **/ 6584 struct lpfc_vport * 6585 lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi) 6586 { 6587 struct lpfc_vport *vport; 6588 unsigned long flags; 6589 int i; 6590 6591 /* The physical ports are always vpi 0 - translate is unnecessary. */ 6592 if (vpi > 0) { 6593 /* 6594 * Translate the physical vpi to the logical vpi. The 6595 * vport stores the logical vpi. 6596 */ 6597 for (i = 0; i < phba->max_vpi; i++) { 6598 if (vpi == phba->vpi_ids[i]) 6599 break; 6600 } 6601 6602 if (i >= phba->max_vpi) { 6603 lpfc_printf_log(phba, KERN_ERR, LOG_ELS, 6604 "2936 Could not find Vport mapped " 6605 "to vpi %d\n", vpi); 6606 return NULL; 6607 } 6608 } 6609 6610 spin_lock_irqsave(&phba->hbalock, flags); 6611 list_for_each_entry(vport, &phba->port_list, listentry) { 6612 if (vport->vpi == vpi) { 6613 spin_unlock_irqrestore(&phba->hbalock, flags); 6614 return vport; 6615 } 6616 } 6617 spin_unlock_irqrestore(&phba->hbalock, flags); 6618 return NULL; 6619 } 6620 6621 /** 6622 * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring 6623 * @phba: pointer to lpfc hba data structure. 6624 * @pring: pointer to a SLI ring. 6625 * @elsiocb: pointer to lpfc els iocb data structure. 6626 * 6627 * This routine is used to process an unsolicited event received from a SLI 6628 * (Service Level Interface) ring. The actual processing of the data buffer 6629 * associated with the unsolicited event is done by invoking the routine 6630 * lpfc_els_unsol_buffer() after properly set up the iocb buffer from the 6631 * SLI ring on which the unsolicited event was received. 6632 **/ 6633 void 6634 lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 6635 struct lpfc_iocbq *elsiocb) 6636 { 6637 struct lpfc_vport *vport = phba->pport; 6638 IOCB_t *icmd = &elsiocb->iocb; 6639 dma_addr_t paddr; 6640 struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2; 6641 struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3; 6642 6643 elsiocb->context1 = NULL; 6644 elsiocb->context2 = NULL; 6645 elsiocb->context3 = NULL; 6646 6647 if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) { 6648 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); 6649 } else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT && 6650 (icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING) { 6651 phba->fc_stat.NoRcvBuf++; 6652 /* Not enough posted buffers; Try posting more buffers */ 6653 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) 6654 lpfc_post_buffer(phba, pring, 0); 6655 return; 6656 } 6657 6658 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 6659 (icmd->ulpCommand == CMD_IOCB_RCV_ELS64_CX || 6660 icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 6661 if (icmd->unsli3.rcvsli3.vpi == 0xffff) 6662 vport = phba->pport; 6663 else 6664 vport = lpfc_find_vport_by_vpid(phba, 6665 icmd->unsli3.rcvsli3.vpi); 6666 } 6667 6668 /* If there are no BDEs associated 6669 * with this IOCB, there is nothing to do. 6670 */ 6671 if (icmd->ulpBdeCount == 0) 6672 return; 6673 6674 /* type of ELS cmd is first 32bit word 6675 * in packet 6676 */ 6677 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 6678 elsiocb->context2 = bdeBuf1; 6679 } else { 6680 paddr = getPaddr(icmd->un.cont64[0].addrHigh, 6681 icmd->un.cont64[0].addrLow); 6682 elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring, 6683 paddr); 6684 } 6685 6686 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb); 6687 /* 6688 * The different unsolicited event handlers would tell us 6689 * if they are done with "mp" by setting context2 to NULL. 6690 */ 6691 if (elsiocb->context2) { 6692 lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2); 6693 elsiocb->context2 = NULL; 6694 } 6695 6696 /* RCV_ELS64_CX provide for 2 BDEs - process 2nd if included */ 6697 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) && 6698 icmd->ulpBdeCount == 2) { 6699 elsiocb->context2 = bdeBuf2; 6700 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb); 6701 /* free mp if we are done with it */ 6702 if (elsiocb->context2) { 6703 lpfc_in_buf_free(phba, elsiocb->context2); 6704 elsiocb->context2 = NULL; 6705 } 6706 } 6707 } 6708 6709 /** 6710 * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr 6711 * @phba: pointer to lpfc hba data structure. 6712 * @vport: pointer to a virtual N_Port data structure. 6713 * 6714 * This routine issues a Port Login (PLOGI) to the Name Server with 6715 * State Change Request (SCR) for a @vport. This routine will create an 6716 * ndlp for the Name Server associated to the @vport if such node does 6717 * not already exist. The PLOGI to Name Server is issued by invoking the 6718 * lpfc_issue_els_plogi() routine. If Fabric-Device Management Interface 6719 * (FDMI) is configured to the @vport, a FDMI node will be created and 6720 * the PLOGI to FDMI is issued by invoking lpfc_issue_els_plogi() routine. 6721 **/ 6722 void 6723 lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) 6724 { 6725 struct lpfc_nodelist *ndlp, *ndlp_fdmi; 6726 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6727 6728 /* 6729 * If lpfc_delay_discovery parameter is set and the clean address 6730 * bit is cleared and fc fabric parameters chenged, delay FC NPort 6731 * discovery. 6732 */ 6733 spin_lock_irq(shost->host_lock); 6734 if (vport->fc_flag & FC_DISC_DELAYED) { 6735 spin_unlock_irq(shost->host_lock); 6736 mod_timer(&vport->delayed_disc_tmo, 6737 jiffies + HZ * phba->fc_ratov); 6738 return; 6739 } 6740 spin_unlock_irq(shost->host_lock); 6741 6742 ndlp = lpfc_findnode_did(vport, NameServer_DID); 6743 if (!ndlp) { 6744 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 6745 if (!ndlp) { 6746 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 6747 lpfc_disc_start(vport); 6748 return; 6749 } 6750 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 6751 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 6752 "0251 NameServer login: no memory\n"); 6753 return; 6754 } 6755 lpfc_nlp_init(vport, ndlp, NameServer_DID); 6756 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 6757 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 6758 if (!ndlp) { 6759 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 6760 lpfc_disc_start(vport); 6761 return; 6762 } 6763 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 6764 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 6765 "0348 NameServer login: node freed\n"); 6766 return; 6767 } 6768 } 6769 ndlp->nlp_type |= NLP_FABRIC; 6770 6771 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 6772 6773 if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) { 6774 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 6775 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 6776 "0252 Cannot issue NameServer login\n"); 6777 return; 6778 } 6779 6780 if (vport->cfg_fdmi_on) { 6781 /* If this is the first time, allocate an ndlp and initialize 6782 * it. Otherwise, make sure the node is enabled and then do the 6783 * login. 6784 */ 6785 ndlp_fdmi = lpfc_findnode_did(vport, FDMI_DID); 6786 if (!ndlp_fdmi) { 6787 ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool, 6788 GFP_KERNEL); 6789 if (ndlp_fdmi) { 6790 lpfc_nlp_init(vport, ndlp_fdmi, FDMI_DID); 6791 ndlp_fdmi->nlp_type |= NLP_FABRIC; 6792 } else 6793 return; 6794 } 6795 if (!NLP_CHK_NODE_ACT(ndlp_fdmi)) 6796 ndlp_fdmi = lpfc_enable_node(vport, 6797 ndlp_fdmi, 6798 NLP_STE_NPR_NODE); 6799 6800 if (ndlp_fdmi) { 6801 lpfc_nlp_set_state(vport, ndlp_fdmi, 6802 NLP_STE_PLOGI_ISSUE); 6803 lpfc_issue_els_plogi(vport, ndlp_fdmi->nlp_DID, 0); 6804 } 6805 } 6806 } 6807 6808 /** 6809 * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport 6810 * @phba: pointer to lpfc hba data structure. 6811 * @pmb: pointer to the driver internal queue element for mailbox command. 6812 * 6813 * This routine is the completion callback function to register new vport 6814 * mailbox command. If the new vport mailbox command completes successfully, 6815 * the fabric registration login shall be performed on physical port (the 6816 * new vport created is actually a physical port, with VPI 0) or the port 6817 * login to Name Server for State Change Request (SCR) will be performed 6818 * on virtual port (real virtual port, with VPI greater than 0). 6819 **/ 6820 static void 6821 lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 6822 { 6823 struct lpfc_vport *vport = pmb->vport; 6824 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6825 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 6826 MAILBOX_t *mb = &pmb->u.mb; 6827 int rc; 6828 6829 spin_lock_irq(shost->host_lock); 6830 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 6831 spin_unlock_irq(shost->host_lock); 6832 6833 if (mb->mbxStatus) { 6834 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 6835 "0915 Register VPI failed : Status: x%x" 6836 " upd bit: x%x \n", mb->mbxStatus, 6837 mb->un.varRegVpi.upd); 6838 if (phba->sli_rev == LPFC_SLI_REV4 && 6839 mb->un.varRegVpi.upd) 6840 goto mbox_err_exit ; 6841 6842 switch (mb->mbxStatus) { 6843 case 0x11: /* unsupported feature */ 6844 case 0x9603: /* max_vpi exceeded */ 6845 case 0x9602: /* Link event since CLEAR_LA */ 6846 /* giving up on vport registration */ 6847 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 6848 spin_lock_irq(shost->host_lock); 6849 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 6850 spin_unlock_irq(shost->host_lock); 6851 lpfc_can_disctmo(vport); 6852 break; 6853 /* If reg_vpi fail with invalid VPI status, re-init VPI */ 6854 case 0x20: 6855 spin_lock_irq(shost->host_lock); 6856 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 6857 spin_unlock_irq(shost->host_lock); 6858 lpfc_init_vpi(phba, pmb, vport->vpi); 6859 pmb->vport = vport; 6860 pmb->mbox_cmpl = lpfc_init_vpi_cmpl; 6861 rc = lpfc_sli_issue_mbox(phba, pmb, 6862 MBX_NOWAIT); 6863 if (rc == MBX_NOT_FINISHED) { 6864 lpfc_printf_vlog(vport, 6865 KERN_ERR, LOG_MBOX, 6866 "2732 Failed to issue INIT_VPI" 6867 " mailbox command\n"); 6868 } else { 6869 lpfc_nlp_put(ndlp); 6870 return; 6871 } 6872 6873 default: 6874 /* Try to recover from this error */ 6875 if (phba->sli_rev == LPFC_SLI_REV4) 6876 lpfc_sli4_unreg_all_rpis(vport); 6877 lpfc_mbx_unreg_vpi(vport); 6878 spin_lock_irq(shost->host_lock); 6879 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 6880 spin_unlock_irq(shost->host_lock); 6881 if (vport->port_type == LPFC_PHYSICAL_PORT 6882 && !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) 6883 lpfc_issue_init_vfi(vport); 6884 else 6885 lpfc_initial_fdisc(vport); 6886 break; 6887 } 6888 } else { 6889 spin_lock_irq(shost->host_lock); 6890 vport->vpi_state |= LPFC_VPI_REGISTERED; 6891 spin_unlock_irq(shost->host_lock); 6892 if (vport == phba->pport) { 6893 if (phba->sli_rev < LPFC_SLI_REV4) 6894 lpfc_issue_fabric_reglogin(vport); 6895 else { 6896 /* 6897 * If the physical port is instantiated using 6898 * FDISC, do not start vport discovery. 6899 */ 6900 if (vport->port_state != LPFC_FDISC) 6901 lpfc_start_fdiscs(phba); 6902 lpfc_do_scr_ns_plogi(phba, vport); 6903 } 6904 } else 6905 lpfc_do_scr_ns_plogi(phba, vport); 6906 } 6907 mbox_err_exit: 6908 /* Now, we decrement the ndlp reference count held for this 6909 * callback function 6910 */ 6911 lpfc_nlp_put(ndlp); 6912 6913 mempool_free(pmb, phba->mbox_mem_pool); 6914 return; 6915 } 6916 6917 /** 6918 * lpfc_register_new_vport - Register a new vport with a HBA 6919 * @phba: pointer to lpfc hba data structure. 6920 * @vport: pointer to a host virtual N_Port data structure. 6921 * @ndlp: pointer to a node-list data structure. 6922 * 6923 * This routine registers the @vport as a new virtual port with a HBA. 6924 * It is done through a registering vpi mailbox command. 6925 **/ 6926 void 6927 lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport, 6928 struct lpfc_nodelist *ndlp) 6929 { 6930 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6931 LPFC_MBOXQ_t *mbox; 6932 6933 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6934 if (mbox) { 6935 lpfc_reg_vpi(vport, mbox); 6936 mbox->vport = vport; 6937 mbox->context2 = lpfc_nlp_get(ndlp); 6938 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport; 6939 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 6940 == MBX_NOT_FINISHED) { 6941 /* mailbox command not success, decrement ndlp 6942 * reference count for this command 6943 */ 6944 lpfc_nlp_put(ndlp); 6945 mempool_free(mbox, phba->mbox_mem_pool); 6946 6947 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 6948 "0253 Register VPI: Can't send mbox\n"); 6949 goto mbox_err_exit; 6950 } 6951 } else { 6952 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 6953 "0254 Register VPI: no memory\n"); 6954 goto mbox_err_exit; 6955 } 6956 return; 6957 6958 mbox_err_exit: 6959 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 6960 spin_lock_irq(shost->host_lock); 6961 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 6962 spin_unlock_irq(shost->host_lock); 6963 return; 6964 } 6965 6966 /** 6967 * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer 6968 * @phba: pointer to lpfc hba data structure. 6969 * 6970 * This routine cancels the retry delay timers to all the vports. 6971 **/ 6972 void 6973 lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba) 6974 { 6975 struct lpfc_vport **vports; 6976 struct lpfc_nodelist *ndlp; 6977 uint32_t link_state; 6978 int i; 6979 6980 /* Treat this failure as linkdown for all vports */ 6981 link_state = phba->link_state; 6982 lpfc_linkdown(phba); 6983 phba->link_state = link_state; 6984 6985 vports = lpfc_create_vport_work_array(phba); 6986 6987 if (vports) { 6988 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 6989 ndlp = lpfc_findnode_did(vports[i], Fabric_DID); 6990 if (ndlp) 6991 lpfc_cancel_retry_delay_tmo(vports[i], ndlp); 6992 lpfc_els_flush_cmd(vports[i]); 6993 } 6994 lpfc_destroy_vport_work_array(phba, vports); 6995 } 6996 } 6997 6998 /** 6999 * lpfc_retry_pport_discovery - Start timer to retry FLOGI. 7000 * @phba: pointer to lpfc hba data structure. 7001 * 7002 * This routine abort all pending discovery commands and 7003 * start a timer to retry FLOGI for the physical port 7004 * discovery. 7005 **/ 7006 void 7007 lpfc_retry_pport_discovery(struct lpfc_hba *phba) 7008 { 7009 struct lpfc_nodelist *ndlp; 7010 struct Scsi_Host *shost; 7011 7012 /* Cancel the all vports retry delay retry timers */ 7013 lpfc_cancel_all_vport_retry_delay_timer(phba); 7014 7015 /* If fabric require FLOGI, then re-instantiate physical login */ 7016 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); 7017 if (!ndlp) 7018 return; 7019 7020 shost = lpfc_shost_from_vport(phba->pport); 7021 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); 7022 spin_lock_irq(shost->host_lock); 7023 ndlp->nlp_flag |= NLP_DELAY_TMO; 7024 spin_unlock_irq(shost->host_lock); 7025 ndlp->nlp_last_elscmd = ELS_CMD_FLOGI; 7026 phba->pport->port_state = LPFC_FLOGI; 7027 return; 7028 } 7029 7030 /** 7031 * lpfc_fabric_login_reqd - Check if FLOGI required. 7032 * @phba: pointer to lpfc hba data structure. 7033 * @cmdiocb: pointer to FDISC command iocb. 7034 * @rspiocb: pointer to FDISC response iocb. 7035 * 7036 * This routine checks if a FLOGI is reguired for FDISC 7037 * to succeed. 7038 **/ 7039 static int 7040 lpfc_fabric_login_reqd(struct lpfc_hba *phba, 7041 struct lpfc_iocbq *cmdiocb, 7042 struct lpfc_iocbq *rspiocb) 7043 { 7044 7045 if ((rspiocb->iocb.ulpStatus != IOSTAT_FABRIC_RJT) || 7046 (rspiocb->iocb.un.ulpWord[4] != RJT_LOGIN_REQUIRED)) 7047 return 0; 7048 else 7049 return 1; 7050 } 7051 7052 /** 7053 * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command 7054 * @phba: pointer to lpfc hba data structure. 7055 * @cmdiocb: pointer to lpfc command iocb data structure. 7056 * @rspiocb: pointer to lpfc response iocb data structure. 7057 * 7058 * This routine is the completion callback function to a Fabric Discover 7059 * (FDISC) ELS command. Since all the FDISC ELS commands are issued 7060 * single threaded, each FDISC completion callback function will reset 7061 * the discovery timer for all vports such that the timers will not get 7062 * unnecessary timeout. The function checks the FDISC IOCB status. If error 7063 * detected, the vport will be set to FC_VPORT_FAILED state. Otherwise,the 7064 * vport will set to FC_VPORT_ACTIVE state. It then checks whether the DID 7065 * assigned to the vport has been changed with the completion of the FDISC 7066 * command. If so, both RPI (Remote Port Index) and VPI (Virtual Port Index) 7067 * are unregistered from the HBA, and then the lpfc_register_new_vport() 7068 * routine is invoked to register new vport with the HBA. Otherwise, the 7069 * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name 7070 * Server for State Change Request (SCR). 7071 **/ 7072 static void 7073 lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 7074 struct lpfc_iocbq *rspiocb) 7075 { 7076 struct lpfc_vport *vport = cmdiocb->vport; 7077 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7078 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 7079 struct lpfc_nodelist *np; 7080 struct lpfc_nodelist *next_np; 7081 IOCB_t *irsp = &rspiocb->iocb; 7082 struct lpfc_iocbq *piocb; 7083 struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp; 7084 struct serv_parm *sp; 7085 uint8_t fabric_param_changed; 7086 7087 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7088 "0123 FDISC completes. x%x/x%x prevDID: x%x\n", 7089 irsp->ulpStatus, irsp->un.ulpWord[4], 7090 vport->fc_prevDID); 7091 /* Since all FDISCs are being single threaded, we 7092 * must reset the discovery timer for ALL vports 7093 * waiting to send FDISC when one completes. 7094 */ 7095 list_for_each_entry(piocb, &phba->fabric_iocb_list, list) { 7096 lpfc_set_disctmo(piocb->vport); 7097 } 7098 7099 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 7100 "FDISC cmpl: status:x%x/x%x prevdid:x%x", 7101 irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID); 7102 7103 if (irsp->ulpStatus) { 7104 7105 if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) { 7106 lpfc_retry_pport_discovery(phba); 7107 goto out; 7108 } 7109 7110 /* Check for retry */ 7111 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) 7112 goto out; 7113 /* FDISC failed */ 7114 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 7115 "0126 FDISC failed. (%d/%d)\n", 7116 irsp->ulpStatus, irsp->un.ulpWord[4]); 7117 goto fdisc_failed; 7118 } 7119 spin_lock_irq(shost->host_lock); 7120 vport->fc_flag &= ~FC_VPORT_CVL_RCVD; 7121 vport->fc_flag &= ~FC_VPORT_LOGO_RCVD; 7122 vport->fc_flag |= FC_FABRIC; 7123 if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP) 7124 vport->fc_flag |= FC_PUBLIC_LOOP; 7125 spin_unlock_irq(shost->host_lock); 7126 7127 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID; 7128 lpfc_vport_set_state(vport, FC_VPORT_ACTIVE); 7129 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 7130 sp = prsp->virt + sizeof(uint32_t); 7131 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp); 7132 memcpy(&vport->fabric_portname, &sp->portName, 7133 sizeof(struct lpfc_name)); 7134 memcpy(&vport->fabric_nodename, &sp->nodeName, 7135 sizeof(struct lpfc_name)); 7136 if (fabric_param_changed && 7137 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 7138 /* If our NportID changed, we need to ensure all 7139 * remaining NPORTs get unreg_login'ed so we can 7140 * issue unreg_vpi. 7141 */ 7142 list_for_each_entry_safe(np, next_np, 7143 &vport->fc_nodes, nlp_listp) { 7144 if (!NLP_CHK_NODE_ACT(ndlp) || 7145 (np->nlp_state != NLP_STE_NPR_NODE) || 7146 !(np->nlp_flag & NLP_NPR_ADISC)) 7147 continue; 7148 spin_lock_irq(shost->host_lock); 7149 np->nlp_flag &= ~NLP_NPR_ADISC; 7150 spin_unlock_irq(shost->host_lock); 7151 lpfc_unreg_rpi(vport, np); 7152 } 7153 lpfc_cleanup_pending_mbox(vport); 7154 7155 if (phba->sli_rev == LPFC_SLI_REV4) 7156 lpfc_sli4_unreg_all_rpis(vport); 7157 7158 lpfc_mbx_unreg_vpi(vport); 7159 spin_lock_irq(shost->host_lock); 7160 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 7161 if (phba->sli_rev == LPFC_SLI_REV4) 7162 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 7163 else 7164 vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG; 7165 spin_unlock_irq(shost->host_lock); 7166 } else if ((phba->sli_rev == LPFC_SLI_REV4) && 7167 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 7168 /* 7169 * Driver needs to re-reg VPI in order for f/w 7170 * to update the MAC address. 7171 */ 7172 lpfc_register_new_vport(phba, vport, ndlp); 7173 goto out; 7174 } 7175 7176 if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI) 7177 lpfc_issue_init_vpi(vport); 7178 else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) 7179 lpfc_register_new_vport(phba, vport, ndlp); 7180 else 7181 lpfc_do_scr_ns_plogi(phba, vport); 7182 goto out; 7183 fdisc_failed: 7184 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 7185 /* Cancel discovery timer */ 7186 lpfc_can_disctmo(vport); 7187 lpfc_nlp_put(ndlp); 7188 out: 7189 lpfc_els_free_iocb(phba, cmdiocb); 7190 } 7191 7192 /** 7193 * lpfc_issue_els_fdisc - Issue a fdisc iocb command 7194 * @vport: pointer to a virtual N_Port data structure. 7195 * @ndlp: pointer to a node-list data structure. 7196 * @retry: number of retries to the command IOCB. 7197 * 7198 * This routine prepares and issues a Fabric Discover (FDISC) IOCB to 7199 * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb() 7200 * routine to issue the IOCB, which makes sure only one outstanding fabric 7201 * IOCB will be sent off HBA at any given time. 7202 * 7203 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 7204 * will be incremented by 1 for holding the ndlp and the reference to ndlp 7205 * will be stored into the context1 field of the IOCB for the completion 7206 * callback function to the FDISC ELS command. 7207 * 7208 * Return code 7209 * 0 - Successfully issued fdisc iocb command 7210 * 1 - Failed to issue fdisc iocb command 7211 **/ 7212 static int 7213 lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 7214 uint8_t retry) 7215 { 7216 struct lpfc_hba *phba = vport->phba; 7217 IOCB_t *icmd; 7218 struct lpfc_iocbq *elsiocb; 7219 struct serv_parm *sp; 7220 uint8_t *pcmd; 7221 uint16_t cmdsize; 7222 int did = ndlp->nlp_DID; 7223 int rc; 7224 7225 vport->port_state = LPFC_FDISC; 7226 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 7227 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 7228 ELS_CMD_FDISC); 7229 if (!elsiocb) { 7230 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 7231 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 7232 "0255 Issue FDISC: no IOCB\n"); 7233 return 1; 7234 } 7235 7236 icmd = &elsiocb->iocb; 7237 icmd->un.elsreq64.myID = 0; 7238 icmd->un.elsreq64.fl = 1; 7239 7240 if ((phba->sli_rev == LPFC_SLI_REV4) && 7241 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 7242 LPFC_SLI_INTF_IF_TYPE_0)) { 7243 /* FDISC needs to be 1 for WQE VPI */ 7244 elsiocb->iocb.ulpCt_h = (SLI4_CT_VPI >> 1) & 1; 7245 elsiocb->iocb.ulpCt_l = SLI4_CT_VPI & 1 ; 7246 /* Set the ulpContext to the vpi */ 7247 elsiocb->iocb.ulpContext = phba->vpi_ids[vport->vpi]; 7248 } else { 7249 /* For FDISC, Let FDISC rsp set the NPortID for this VPI */ 7250 icmd->ulpCt_h = 1; 7251 icmd->ulpCt_l = 0; 7252 } 7253 7254 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 7255 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC; 7256 pcmd += sizeof(uint32_t); /* CSP Word 1 */ 7257 memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm)); 7258 sp = (struct serv_parm *) pcmd; 7259 /* Setup CSPs accordingly for Fabric */ 7260 sp->cmn.e_d_tov = 0; 7261 sp->cmn.w2.r_a_tov = 0; 7262 sp->cls1.classValid = 0; 7263 sp->cls2.seqDelivery = 1; 7264 sp->cls3.seqDelivery = 1; 7265 7266 pcmd += sizeof(uint32_t); /* CSP Word 2 */ 7267 pcmd += sizeof(uint32_t); /* CSP Word 3 */ 7268 pcmd += sizeof(uint32_t); /* CSP Word 4 */ 7269 pcmd += sizeof(uint32_t); /* Port Name */ 7270 memcpy(pcmd, &vport->fc_portname, 8); 7271 pcmd += sizeof(uint32_t); /* Node Name */ 7272 pcmd += sizeof(uint32_t); /* Node Name */ 7273 memcpy(pcmd, &vport->fc_nodename, 8); 7274 7275 lpfc_set_disctmo(vport); 7276 7277 phba->fc_stat.elsXmitFDISC++; 7278 elsiocb->iocb_cmpl = lpfc_cmpl_els_fdisc; 7279 7280 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 7281 "Issue FDISC: did:x%x", 7282 did, 0, 0); 7283 7284 rc = lpfc_issue_fabric_iocb(phba, elsiocb); 7285 if (rc == IOCB_ERROR) { 7286 lpfc_els_free_iocb(phba, elsiocb); 7287 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 7288 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 7289 "0256 Issue FDISC: Cannot send IOCB\n"); 7290 return 1; 7291 } 7292 lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING); 7293 return 0; 7294 } 7295 7296 /** 7297 * lpfc_cmpl_els_npiv_logo - Completion function with vport logo 7298 * @phba: pointer to lpfc hba data structure. 7299 * @cmdiocb: pointer to lpfc command iocb data structure. 7300 * @rspiocb: pointer to lpfc response iocb data structure. 7301 * 7302 * This routine is the completion callback function to the issuing of a LOGO 7303 * ELS command off a vport. It frees the command IOCB and then decrement the 7304 * reference count held on ndlp for this completion function, indicating that 7305 * the reference to the ndlp is no long needed. Note that the 7306 * lpfc_els_free_iocb() routine decrements the ndlp reference held for this 7307 * callback function and an additional explicit ndlp reference decrementation 7308 * will trigger the actual release of the ndlp. 7309 **/ 7310 static void 7311 lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 7312 struct lpfc_iocbq *rspiocb) 7313 { 7314 struct lpfc_vport *vport = cmdiocb->vport; 7315 IOCB_t *irsp; 7316 struct lpfc_nodelist *ndlp; 7317 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7318 7319 ndlp = (struct lpfc_nodelist *)cmdiocb->context1; 7320 irsp = &rspiocb->iocb; 7321 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 7322 "LOGO npiv cmpl: status:x%x/x%x did:x%x", 7323 irsp->ulpStatus, irsp->un.ulpWord[4], irsp->un.rcvels.remoteID); 7324 7325 lpfc_els_free_iocb(phba, cmdiocb); 7326 vport->unreg_vpi_cmpl = VPORT_ERROR; 7327 7328 /* Trigger the release of the ndlp after logo */ 7329 lpfc_nlp_put(ndlp); 7330 7331 /* NPIV LOGO completes to NPort <nlp_DID> */ 7332 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7333 "2928 NPIV LOGO completes to NPort x%x " 7334 "Data: x%x x%x x%x x%x\n", 7335 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 7336 irsp->ulpTimeout, vport->num_disc_nodes); 7337 7338 if (irsp->ulpStatus == IOSTAT_SUCCESS) { 7339 spin_lock_irq(shost->host_lock); 7340 vport->fc_flag &= ~FC_FABRIC; 7341 spin_unlock_irq(shost->host_lock); 7342 } 7343 } 7344 7345 /** 7346 * lpfc_issue_els_npiv_logo - Issue a logo off a vport 7347 * @vport: pointer to a virtual N_Port data structure. 7348 * @ndlp: pointer to a node-list data structure. 7349 * 7350 * This routine issues a LOGO ELS command to an @ndlp off a @vport. 7351 * 7352 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 7353 * will be incremented by 1 for holding the ndlp and the reference to ndlp 7354 * will be stored into the context1 field of the IOCB for the completion 7355 * callback function to the LOGO ELS command. 7356 * 7357 * Return codes 7358 * 0 - Successfully issued logo off the @vport 7359 * 1 - Failed to issue logo off the @vport 7360 **/ 7361 int 7362 lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 7363 { 7364 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7365 struct lpfc_hba *phba = vport->phba; 7366 IOCB_t *icmd; 7367 struct lpfc_iocbq *elsiocb; 7368 uint8_t *pcmd; 7369 uint16_t cmdsize; 7370 7371 cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name); 7372 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID, 7373 ELS_CMD_LOGO); 7374 if (!elsiocb) 7375 return 1; 7376 7377 icmd = &elsiocb->iocb; 7378 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 7379 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 7380 pcmd += sizeof(uint32_t); 7381 7382 /* Fill in LOGO payload */ 7383 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 7384 pcmd += sizeof(uint32_t); 7385 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 7386 7387 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 7388 "Issue LOGO npiv did:x%x flg:x%x", 7389 ndlp->nlp_DID, ndlp->nlp_flag, 0); 7390 7391 elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo; 7392 spin_lock_irq(shost->host_lock); 7393 ndlp->nlp_flag |= NLP_LOGO_SND; 7394 spin_unlock_irq(shost->host_lock); 7395 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == 7396 IOCB_ERROR) { 7397 spin_lock_irq(shost->host_lock); 7398 ndlp->nlp_flag &= ~NLP_LOGO_SND; 7399 spin_unlock_irq(shost->host_lock); 7400 lpfc_els_free_iocb(phba, elsiocb); 7401 return 1; 7402 } 7403 return 0; 7404 } 7405 7406 /** 7407 * lpfc_fabric_block_timeout - Handler function to the fabric block timer 7408 * @ptr: holder for the timer function associated data. 7409 * 7410 * This routine is invoked by the fabric iocb block timer after 7411 * timeout. It posts the fabric iocb block timeout event by setting the 7412 * WORKER_FABRIC_BLOCK_TMO bit to work port event bitmap and then invokes 7413 * lpfc_worker_wake_up() routine to wake up the worker thread. It is for 7414 * the worker thread to invoke the lpfc_unblock_fabric_iocbs() on the 7415 * posted event WORKER_FABRIC_BLOCK_TMO. 7416 **/ 7417 void 7418 lpfc_fabric_block_timeout(unsigned long ptr) 7419 { 7420 struct lpfc_hba *phba = (struct lpfc_hba *) ptr; 7421 unsigned long iflags; 7422 uint32_t tmo_posted; 7423 7424 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 7425 tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO; 7426 if (!tmo_posted) 7427 phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO; 7428 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); 7429 7430 if (!tmo_posted) 7431 lpfc_worker_wake_up(phba); 7432 return; 7433 } 7434 7435 /** 7436 * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list 7437 * @phba: pointer to lpfc hba data structure. 7438 * 7439 * This routine issues one fabric iocb from the driver internal list to 7440 * the HBA. It first checks whether it's ready to issue one fabric iocb to 7441 * the HBA (whether there is no outstanding fabric iocb). If so, it shall 7442 * remove one pending fabric iocb from the driver internal list and invokes 7443 * lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA. 7444 **/ 7445 static void 7446 lpfc_resume_fabric_iocbs(struct lpfc_hba *phba) 7447 { 7448 struct lpfc_iocbq *iocb; 7449 unsigned long iflags; 7450 int ret; 7451 IOCB_t *cmd; 7452 7453 repeat: 7454 iocb = NULL; 7455 spin_lock_irqsave(&phba->hbalock, iflags); 7456 /* Post any pending iocb to the SLI layer */ 7457 if (atomic_read(&phba->fabric_iocb_count) == 0) { 7458 list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb), 7459 list); 7460 if (iocb) 7461 /* Increment fabric iocb count to hold the position */ 7462 atomic_inc(&phba->fabric_iocb_count); 7463 } 7464 spin_unlock_irqrestore(&phba->hbalock, iflags); 7465 if (iocb) { 7466 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl; 7467 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb; 7468 iocb->iocb_flag |= LPFC_IO_FABRIC; 7469 7470 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD, 7471 "Fabric sched1: ste:x%x", 7472 iocb->vport->port_state, 0, 0); 7473 7474 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); 7475 7476 if (ret == IOCB_ERROR) { 7477 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl; 7478 iocb->fabric_iocb_cmpl = NULL; 7479 iocb->iocb_flag &= ~LPFC_IO_FABRIC; 7480 cmd = &iocb->iocb; 7481 cmd->ulpStatus = IOSTAT_LOCAL_REJECT; 7482 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED; 7483 iocb->iocb_cmpl(phba, iocb, iocb); 7484 7485 atomic_dec(&phba->fabric_iocb_count); 7486 goto repeat; 7487 } 7488 } 7489 7490 return; 7491 } 7492 7493 /** 7494 * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command 7495 * @phba: pointer to lpfc hba data structure. 7496 * 7497 * This routine unblocks the issuing fabric iocb command. The function 7498 * will clear the fabric iocb block bit and then invoke the routine 7499 * lpfc_resume_fabric_iocbs() to issue one of the pending fabric iocb 7500 * from the driver internal fabric iocb list. 7501 **/ 7502 void 7503 lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba) 7504 { 7505 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 7506 7507 lpfc_resume_fabric_iocbs(phba); 7508 return; 7509 } 7510 7511 /** 7512 * lpfc_block_fabric_iocbs - Block issuing fabric iocb command 7513 * @phba: pointer to lpfc hba data structure. 7514 * 7515 * This routine blocks the issuing fabric iocb for a specified amount of 7516 * time (currently 100 ms). This is done by set the fabric iocb block bit 7517 * and set up a timeout timer for 100ms. When the block bit is set, no more 7518 * fabric iocb will be issued out of the HBA. 7519 **/ 7520 static void 7521 lpfc_block_fabric_iocbs(struct lpfc_hba *phba) 7522 { 7523 int blocked; 7524 7525 blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 7526 /* Start a timer to unblock fabric iocbs after 100ms */ 7527 if (!blocked) 7528 mod_timer(&phba->fabric_block_timer, jiffies + HZ/10 ); 7529 7530 return; 7531 } 7532 7533 /** 7534 * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb 7535 * @phba: pointer to lpfc hba data structure. 7536 * @cmdiocb: pointer to lpfc command iocb data structure. 7537 * @rspiocb: pointer to lpfc response iocb data structure. 7538 * 7539 * This routine is the callback function that is put to the fabric iocb's 7540 * callback function pointer (iocb->iocb_cmpl). The original iocb's callback 7541 * function pointer has been stored in iocb->fabric_iocb_cmpl. This callback 7542 * function first restores and invokes the original iocb's callback function 7543 * and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next 7544 * fabric bound iocb from the driver internal fabric iocb list onto the wire. 7545 **/ 7546 static void 7547 lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 7548 struct lpfc_iocbq *rspiocb) 7549 { 7550 struct ls_rjt stat; 7551 7552 if ((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC) 7553 BUG(); 7554 7555 switch (rspiocb->iocb.ulpStatus) { 7556 case IOSTAT_NPORT_RJT: 7557 case IOSTAT_FABRIC_RJT: 7558 if (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) { 7559 lpfc_block_fabric_iocbs(phba); 7560 } 7561 break; 7562 7563 case IOSTAT_NPORT_BSY: 7564 case IOSTAT_FABRIC_BSY: 7565 lpfc_block_fabric_iocbs(phba); 7566 break; 7567 7568 case IOSTAT_LS_RJT: 7569 stat.un.lsRjtError = 7570 be32_to_cpu(rspiocb->iocb.un.ulpWord[4]); 7571 if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) || 7572 (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY)) 7573 lpfc_block_fabric_iocbs(phba); 7574 break; 7575 } 7576 7577 if (atomic_read(&phba->fabric_iocb_count) == 0) 7578 BUG(); 7579 7580 cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl; 7581 cmdiocb->fabric_iocb_cmpl = NULL; 7582 cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC; 7583 cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb); 7584 7585 atomic_dec(&phba->fabric_iocb_count); 7586 if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) { 7587 /* Post any pending iocbs to HBA */ 7588 lpfc_resume_fabric_iocbs(phba); 7589 } 7590 } 7591 7592 /** 7593 * lpfc_issue_fabric_iocb - Issue a fabric iocb command 7594 * @phba: pointer to lpfc hba data structure. 7595 * @iocb: pointer to lpfc command iocb data structure. 7596 * 7597 * This routine is used as the top-level API for issuing a fabric iocb command 7598 * such as FLOGI and FDISC. To accommodate certain switch fabric, this driver 7599 * function makes sure that only one fabric bound iocb will be outstanding at 7600 * any given time. As such, this function will first check to see whether there 7601 * is already an outstanding fabric iocb on the wire. If so, it will put the 7602 * newly issued iocb onto the driver internal fabric iocb list, waiting to be 7603 * issued later. Otherwise, it will issue the iocb on the wire and update the 7604 * fabric iocb count it indicate that there is one fabric iocb on the wire. 7605 * 7606 * Note, this implementation has a potential sending out fabric IOCBs out of 7607 * order. The problem is caused by the construction of the "ready" boolen does 7608 * not include the condition that the internal fabric IOCB list is empty. As 7609 * such, it is possible a fabric IOCB issued by this routine might be "jump" 7610 * ahead of the fabric IOCBs in the internal list. 7611 * 7612 * Return code 7613 * IOCB_SUCCESS - either fabric iocb put on the list or issued successfully 7614 * IOCB_ERROR - failed to issue fabric iocb 7615 **/ 7616 static int 7617 lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb) 7618 { 7619 unsigned long iflags; 7620 int ready; 7621 int ret; 7622 7623 if (atomic_read(&phba->fabric_iocb_count) > 1) 7624 BUG(); 7625 7626 spin_lock_irqsave(&phba->hbalock, iflags); 7627 ready = atomic_read(&phba->fabric_iocb_count) == 0 && 7628 !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 7629 7630 if (ready) 7631 /* Increment fabric iocb count to hold the position */ 7632 atomic_inc(&phba->fabric_iocb_count); 7633 spin_unlock_irqrestore(&phba->hbalock, iflags); 7634 if (ready) { 7635 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl; 7636 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb; 7637 iocb->iocb_flag |= LPFC_IO_FABRIC; 7638 7639 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD, 7640 "Fabric sched2: ste:x%x", 7641 iocb->vport->port_state, 0, 0); 7642 7643 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); 7644 7645 if (ret == IOCB_ERROR) { 7646 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl; 7647 iocb->fabric_iocb_cmpl = NULL; 7648 iocb->iocb_flag &= ~LPFC_IO_FABRIC; 7649 atomic_dec(&phba->fabric_iocb_count); 7650 } 7651 } else { 7652 spin_lock_irqsave(&phba->hbalock, iflags); 7653 list_add_tail(&iocb->list, &phba->fabric_iocb_list); 7654 spin_unlock_irqrestore(&phba->hbalock, iflags); 7655 ret = IOCB_SUCCESS; 7656 } 7657 return ret; 7658 } 7659 7660 /** 7661 * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list 7662 * @vport: pointer to a virtual N_Port data structure. 7663 * 7664 * This routine aborts all the IOCBs associated with a @vport from the 7665 * driver internal fabric IOCB list. The list contains fabric IOCBs to be 7666 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB 7667 * list, removes each IOCB associated with the @vport off the list, set the 7668 * status feild to IOSTAT_LOCAL_REJECT, and invokes the callback function 7669 * associated with the IOCB. 7670 **/ 7671 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport) 7672 { 7673 LIST_HEAD(completions); 7674 struct lpfc_hba *phba = vport->phba; 7675 struct lpfc_iocbq *tmp_iocb, *piocb; 7676 7677 spin_lock_irq(&phba->hbalock); 7678 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, 7679 list) { 7680 7681 if (piocb->vport != vport) 7682 continue; 7683 7684 list_move_tail(&piocb->list, &completions); 7685 } 7686 spin_unlock_irq(&phba->hbalock); 7687 7688 /* Cancel all the IOCBs from the completions list */ 7689 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 7690 IOERR_SLI_ABORTED); 7691 } 7692 7693 /** 7694 * lpfc_fabric_abort_nport - Abort a ndlp's iocbs from driver fabric iocb list 7695 * @ndlp: pointer to a node-list data structure. 7696 * 7697 * This routine aborts all the IOCBs associated with an @ndlp from the 7698 * driver internal fabric IOCB list. The list contains fabric IOCBs to be 7699 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB 7700 * list, removes each IOCB associated with the @ndlp off the list, set the 7701 * status feild to IOSTAT_LOCAL_REJECT, and invokes the callback function 7702 * associated with the IOCB. 7703 **/ 7704 void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp) 7705 { 7706 LIST_HEAD(completions); 7707 struct lpfc_hba *phba = ndlp->phba; 7708 struct lpfc_iocbq *tmp_iocb, *piocb; 7709 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 7710 7711 spin_lock_irq(&phba->hbalock); 7712 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, 7713 list) { 7714 if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) { 7715 7716 list_move_tail(&piocb->list, &completions); 7717 } 7718 } 7719 spin_unlock_irq(&phba->hbalock); 7720 7721 /* Cancel all the IOCBs from the completions list */ 7722 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 7723 IOERR_SLI_ABORTED); 7724 } 7725 7726 /** 7727 * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list 7728 * @phba: pointer to lpfc hba data structure. 7729 * 7730 * This routine aborts all the IOCBs currently on the driver internal 7731 * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS 7732 * IOCB ring. This function takes the entire IOCB list off the fabric IOCB 7733 * list, removes IOCBs off the list, set the status feild to 7734 * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with 7735 * the IOCB. 7736 **/ 7737 void lpfc_fabric_abort_hba(struct lpfc_hba *phba) 7738 { 7739 LIST_HEAD(completions); 7740 7741 spin_lock_irq(&phba->hbalock); 7742 list_splice_init(&phba->fabric_iocb_list, &completions); 7743 spin_unlock_irq(&phba->hbalock); 7744 7745 /* Cancel all the IOCBs from the completions list */ 7746 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 7747 IOERR_SLI_ABORTED); 7748 } 7749 7750 /** 7751 * lpfc_sli4_vport_delete_els_xri_aborted -Remove all ndlp references for vport 7752 * @vport: pointer to lpfc vport data structure. 7753 * 7754 * This routine is invoked by the vport cleanup for deletions and the cleanup 7755 * for an ndlp on removal. 7756 **/ 7757 void 7758 lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport) 7759 { 7760 struct lpfc_hba *phba = vport->phba; 7761 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 7762 unsigned long iflag = 0; 7763 7764 spin_lock_irqsave(&phba->hbalock, iflag); 7765 spin_lock(&phba->sli4_hba.abts_sgl_list_lock); 7766 list_for_each_entry_safe(sglq_entry, sglq_next, 7767 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { 7768 if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport) 7769 sglq_entry->ndlp = NULL; 7770 } 7771 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); 7772 spin_unlock_irqrestore(&phba->hbalock, iflag); 7773 return; 7774 } 7775 7776 /** 7777 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort 7778 * @phba: pointer to lpfc hba data structure. 7779 * @axri: pointer to the els xri abort wcqe structure. 7780 * 7781 * This routine is invoked by the worker thread to process a SLI4 slow-path 7782 * ELS aborted xri. 7783 **/ 7784 void 7785 lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba, 7786 struct sli4_wcqe_xri_aborted *axri) 7787 { 7788 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); 7789 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); 7790 7791 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 7792 unsigned long iflag = 0; 7793 struct lpfc_nodelist *ndlp; 7794 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 7795 7796 spin_lock_irqsave(&phba->hbalock, iflag); 7797 spin_lock(&phba->sli4_hba.abts_sgl_list_lock); 7798 list_for_each_entry_safe(sglq_entry, sglq_next, 7799 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { 7800 if (sglq_entry->sli4_xritag == xri) { 7801 list_del(&sglq_entry->list); 7802 ndlp = sglq_entry->ndlp; 7803 sglq_entry->ndlp = NULL; 7804 list_add_tail(&sglq_entry->list, 7805 &phba->sli4_hba.lpfc_sgl_list); 7806 sglq_entry->state = SGL_FREED; 7807 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); 7808 spin_unlock_irqrestore(&phba->hbalock, iflag); 7809 lpfc_set_rrq_active(phba, ndlp, xri, rxid, 1); 7810 7811 /* Check if TXQ queue needs to be serviced */ 7812 if (pring->txq_cnt) 7813 lpfc_worker_wake_up(phba); 7814 return; 7815 } 7816 } 7817 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); 7818 sglq_entry = __lpfc_get_active_sglq(phba, xri); 7819 if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) { 7820 spin_unlock_irqrestore(&phba->hbalock, iflag); 7821 return; 7822 } 7823 sglq_entry->state = SGL_XRI_ABORTED; 7824 spin_unlock_irqrestore(&phba->hbalock, iflag); 7825 return; 7826 } 7827