1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term * 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 * EMULEX and SLI are trademarks of Emulex. * 8 * www.broadcom.com * 9 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 10 * * 11 * This program is free software; you can redistribute it and/or * 12 * modify it under the terms of version 2 of the GNU General * 13 * Public License as published by the Free Software Foundation. * 14 * This program is distributed in the hope that it will be useful. * 15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 19 * TO BE LEGALLY INVALID. See the GNU General Public License for * 20 * more details, a copy of which can be found in the file COPYING * 21 * included with this package. * 22 *******************************************************************/ 23 24 #include <linux/blkdev.h> 25 #include <linux/pci.h> 26 #include <linux/slab.h> 27 #include <linux/interrupt.h> 28 29 #include <scsi/scsi.h> 30 #include <scsi/scsi_device.h> 31 #include <scsi/scsi_host.h> 32 #include <scsi/scsi_transport_fc.h> 33 #include <scsi/fc/fc_fs.h> 34 35 #include "lpfc_hw4.h" 36 #include "lpfc_hw.h" 37 #include "lpfc_sli.h" 38 #include "lpfc_sli4.h" 39 #include "lpfc_nl.h" 40 #include "lpfc_disc.h" 41 #include "lpfc.h" 42 #include "lpfc_scsi.h" 43 #include "lpfc_nvme.h" 44 #include "lpfc_logmsg.h" 45 #include "lpfc_crtn.h" 46 #include "lpfc_vport.h" 47 #include "lpfc_debugfs.h" 48 49 50 /* Called to verify a rcv'ed ADISC was intended for us. */ 51 static int 52 lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 53 struct lpfc_name *nn, struct lpfc_name *pn) 54 { 55 /* First, we MUST have a RPI registered */ 56 if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) 57 return 0; 58 59 /* Compare the ADISC rsp WWNN / WWPN matches our internal node 60 * table entry for that node. 61 */ 62 if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name))) 63 return 0; 64 65 if (memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name))) 66 return 0; 67 68 /* we match, return success */ 69 return 1; 70 } 71 72 int 73 lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 74 struct serv_parm *sp, uint32_t class, int flogi) 75 { 76 volatile struct serv_parm *hsp = &vport->fc_sparam; 77 uint16_t hsp_value, ssp_value = 0; 78 79 /* 80 * The receive data field size and buffer-to-buffer receive data field 81 * size entries are 16 bits but are represented as two 8-bit fields in 82 * the driver data structure to account for rsvd bits and other control 83 * bits. Reconstruct and compare the fields as a 16-bit values before 84 * correcting the byte values. 85 */ 86 if (sp->cls1.classValid) { 87 if (!flogi) { 88 hsp_value = ((hsp->cls1.rcvDataSizeMsb << 8) | 89 hsp->cls1.rcvDataSizeLsb); 90 ssp_value = ((sp->cls1.rcvDataSizeMsb << 8) | 91 sp->cls1.rcvDataSizeLsb); 92 if (!ssp_value) 93 goto bad_service_param; 94 if (ssp_value > hsp_value) { 95 sp->cls1.rcvDataSizeLsb = 96 hsp->cls1.rcvDataSizeLsb; 97 sp->cls1.rcvDataSizeMsb = 98 hsp->cls1.rcvDataSizeMsb; 99 } 100 } 101 } else if (class == CLASS1) 102 goto bad_service_param; 103 if (sp->cls2.classValid) { 104 if (!flogi) { 105 hsp_value = ((hsp->cls2.rcvDataSizeMsb << 8) | 106 hsp->cls2.rcvDataSizeLsb); 107 ssp_value = ((sp->cls2.rcvDataSizeMsb << 8) | 108 sp->cls2.rcvDataSizeLsb); 109 if (!ssp_value) 110 goto bad_service_param; 111 if (ssp_value > hsp_value) { 112 sp->cls2.rcvDataSizeLsb = 113 hsp->cls2.rcvDataSizeLsb; 114 sp->cls2.rcvDataSizeMsb = 115 hsp->cls2.rcvDataSizeMsb; 116 } 117 } 118 } else if (class == CLASS2) 119 goto bad_service_param; 120 if (sp->cls3.classValid) { 121 if (!flogi) { 122 hsp_value = ((hsp->cls3.rcvDataSizeMsb << 8) | 123 hsp->cls3.rcvDataSizeLsb); 124 ssp_value = ((sp->cls3.rcvDataSizeMsb << 8) | 125 sp->cls3.rcvDataSizeLsb); 126 if (!ssp_value) 127 goto bad_service_param; 128 if (ssp_value > hsp_value) { 129 sp->cls3.rcvDataSizeLsb = 130 hsp->cls3.rcvDataSizeLsb; 131 sp->cls3.rcvDataSizeMsb = 132 hsp->cls3.rcvDataSizeMsb; 133 } 134 } 135 } else if (class == CLASS3) 136 goto bad_service_param; 137 138 /* 139 * Preserve the upper four bits of the MSB from the PLOGI response. 140 * These bits contain the Buffer-to-Buffer State Change Number 141 * from the target and need to be passed to the FW. 142 */ 143 hsp_value = (hsp->cmn.bbRcvSizeMsb << 8) | hsp->cmn.bbRcvSizeLsb; 144 ssp_value = (sp->cmn.bbRcvSizeMsb << 8) | sp->cmn.bbRcvSizeLsb; 145 if (ssp_value > hsp_value) { 146 sp->cmn.bbRcvSizeLsb = hsp->cmn.bbRcvSizeLsb; 147 sp->cmn.bbRcvSizeMsb = (sp->cmn.bbRcvSizeMsb & 0xF0) | 148 (hsp->cmn.bbRcvSizeMsb & 0x0F); 149 } 150 151 memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name)); 152 memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name)); 153 return 1; 154 bad_service_param: 155 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 156 "0207 Device %x " 157 "(%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x) sent " 158 "invalid service parameters. Ignoring device.\n", 159 ndlp->nlp_DID, 160 sp->nodeName.u.wwn[0], sp->nodeName.u.wwn[1], 161 sp->nodeName.u.wwn[2], sp->nodeName.u.wwn[3], 162 sp->nodeName.u.wwn[4], sp->nodeName.u.wwn[5], 163 sp->nodeName.u.wwn[6], sp->nodeName.u.wwn[7]); 164 return 0; 165 } 166 167 static void * 168 lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 169 struct lpfc_iocbq *rspiocb) 170 { 171 struct lpfc_dmabuf *pcmd, *prsp; 172 uint32_t *lp; 173 void *ptr = NULL; 174 IOCB_t *irsp; 175 176 irsp = &rspiocb->iocb; 177 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 178 179 /* For lpfc_els_abort, context2 could be zero'ed to delay 180 * freeing associated memory till after ABTS completes. 181 */ 182 if (pcmd) { 183 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, 184 list); 185 if (prsp) { 186 lp = (uint32_t *) prsp->virt; 187 ptr = (void *)((uint8_t *)lp + sizeof(uint32_t)); 188 } 189 } else { 190 /* Force ulpStatus error since we are returning NULL ptr */ 191 if (!(irsp->ulpStatus)) { 192 irsp->ulpStatus = IOSTAT_LOCAL_REJECT; 193 irsp->un.ulpWord[4] = IOERR_SLI_ABORTED; 194 } 195 ptr = NULL; 196 } 197 return ptr; 198 } 199 200 201 202 /* 203 * Free resources / clean up outstanding I/Os 204 * associated with a LPFC_NODELIST entry. This 205 * routine effectively results in a "software abort". 206 */ 207 void 208 lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) 209 { 210 LIST_HEAD(abort_list); 211 struct lpfc_sli_ring *pring; 212 struct lpfc_iocbq *iocb, *next_iocb; 213 214 pring = lpfc_phba_elsring(phba); 215 216 /* In case of error recovery path, we might have a NULL pring here */ 217 if (unlikely(!pring)) 218 return; 219 220 /* Abort outstanding I/O on NPort <nlp_DID> */ 221 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY, 222 "2819 Abort outstanding I/O on NPort x%x " 223 "Data: x%x x%x x%x\n", 224 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 225 ndlp->nlp_rpi); 226 /* Clean up all fabric IOs first.*/ 227 lpfc_fabric_abort_nport(ndlp); 228 229 /* 230 * Lock the ELS ring txcmplq for SLI3/SLI4 and build a local list 231 * of all ELS IOs that need an ABTS. The IOs need to stay on the 232 * txcmplq so that the abort operation completes them successfully. 233 */ 234 spin_lock_irq(&phba->hbalock); 235 if (phba->sli_rev == LPFC_SLI_REV4) 236 spin_lock(&pring->ring_lock); 237 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { 238 /* Add to abort_list on on NDLP match. */ 239 if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) 240 list_add_tail(&iocb->dlist, &abort_list); 241 } 242 if (phba->sli_rev == LPFC_SLI_REV4) 243 spin_unlock(&pring->ring_lock); 244 spin_unlock_irq(&phba->hbalock); 245 246 /* Abort the targeted IOs and remove them from the abort list. */ 247 list_for_each_entry_safe(iocb, next_iocb, &abort_list, dlist) { 248 spin_lock_irq(&phba->hbalock); 249 list_del_init(&iocb->dlist); 250 lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL); 251 spin_unlock_irq(&phba->hbalock); 252 } 253 254 INIT_LIST_HEAD(&abort_list); 255 256 /* Now process the txq */ 257 spin_lock_irq(&phba->hbalock); 258 if (phba->sli_rev == LPFC_SLI_REV4) 259 spin_lock(&pring->ring_lock); 260 261 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { 262 /* Check to see if iocb matches the nport we are looking for */ 263 if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) { 264 list_del_init(&iocb->list); 265 list_add_tail(&iocb->list, &abort_list); 266 } 267 } 268 269 if (phba->sli_rev == LPFC_SLI_REV4) 270 spin_unlock(&pring->ring_lock); 271 spin_unlock_irq(&phba->hbalock); 272 273 /* Cancel all the IOCBs from the completions list */ 274 lpfc_sli_cancel_iocbs(phba, &abort_list, 275 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); 276 277 lpfc_cancel_retry_delay_tmo(phba->pport, ndlp); 278 } 279 280 /* lpfc_defer_pt2pt_acc - Complete SLI3 pt2pt processing on link up 281 * @phba: pointer to lpfc hba data structure. 282 * @link_mbox: pointer to CONFIG_LINK mailbox object 283 * 284 * This routine is only called if we are SLI3, direct connect pt2pt 285 * mode and the remote NPort issues the PLOGI after link up. 286 */ 287 static void 288 lpfc_defer_pt2pt_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *link_mbox) 289 { 290 LPFC_MBOXQ_t *login_mbox; 291 MAILBOX_t *mb = &link_mbox->u.mb; 292 struct lpfc_iocbq *save_iocb; 293 struct lpfc_nodelist *ndlp; 294 int rc; 295 296 ndlp = link_mbox->ctx_ndlp; 297 login_mbox = link_mbox->context3; 298 save_iocb = login_mbox->context3; 299 link_mbox->context3 = NULL; 300 login_mbox->context3 = NULL; 301 302 /* Check for CONFIG_LINK error */ 303 if (mb->mbxStatus) { 304 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 305 "4575 CONFIG_LINK fails pt2pt discovery: %x\n", 306 mb->mbxStatus); 307 mempool_free(login_mbox, phba->mbox_mem_pool); 308 mempool_free(link_mbox, phba->mbox_mem_pool); 309 kfree(save_iocb); 310 return; 311 } 312 313 /* Now that CONFIG_LINK completed, and our SID is configured, 314 * we can now proceed with sending the PLOGI ACC. 315 */ 316 rc = lpfc_els_rsp_acc(link_mbox->vport, ELS_CMD_PLOGI, 317 save_iocb, ndlp, login_mbox); 318 if (rc) { 319 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 320 "4576 PLOGI ACC fails pt2pt discovery: %x\n", 321 rc); 322 mempool_free(login_mbox, phba->mbox_mem_pool); 323 } 324 325 mempool_free(link_mbox, phba->mbox_mem_pool); 326 kfree(save_iocb); 327 } 328 329 /** 330 * lpfc_defer_tgt_acc - Progress SLI4 target rcv PLOGI handler 331 * @phba: Pointer to HBA context object. 332 * @pmb: Pointer to mailbox object. 333 * 334 * This function provides the unreg rpi mailbox completion handler for a tgt. 335 * The routine frees the memory resources associated with the completed 336 * mailbox command and transmits the ELS ACC. 337 * 338 * This routine is only called if we are SLI4, acting in target 339 * mode and the remote NPort issues the PLOGI after link up. 340 **/ 341 static void 342 lpfc_defer_acc_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 343 { 344 struct lpfc_vport *vport = pmb->vport; 345 struct lpfc_nodelist *ndlp = pmb->ctx_ndlp; 346 LPFC_MBOXQ_t *mbox = pmb->context3; 347 struct lpfc_iocbq *piocb = NULL; 348 int rc; 349 350 if (mbox) { 351 pmb->context3 = NULL; 352 piocb = mbox->context3; 353 mbox->context3 = NULL; 354 } 355 356 /* 357 * Complete the unreg rpi mbx request, and update flags. 358 * This will also restart any deferred events. 359 */ 360 lpfc_sli4_unreg_rpi_cmpl_clr(phba, pmb); 361 362 if (!piocb) { 363 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 364 "4578 PLOGI ACC fail\n"); 365 if (mbox) 366 mempool_free(mbox, phba->mbox_mem_pool); 367 return; 368 } 369 370 rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, piocb, ndlp, mbox); 371 if (rc) { 372 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 373 "4579 PLOGI ACC fail %x\n", rc); 374 if (mbox) 375 mempool_free(mbox, phba->mbox_mem_pool); 376 } 377 kfree(piocb); 378 } 379 380 static int 381 lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 382 struct lpfc_iocbq *cmdiocb) 383 { 384 struct lpfc_hba *phba = vport->phba; 385 struct lpfc_dmabuf *pcmd; 386 uint64_t nlp_portwwn = 0; 387 uint32_t *lp; 388 IOCB_t *icmd; 389 struct serv_parm *sp; 390 uint32_t ed_tov; 391 LPFC_MBOXQ_t *link_mbox; 392 LPFC_MBOXQ_t *login_mbox; 393 struct lpfc_iocbq *save_iocb; 394 struct ls_rjt stat; 395 uint32_t vid, flag; 396 u16 rpi; 397 int rc, defer_acc; 398 399 memset(&stat, 0, sizeof (struct ls_rjt)); 400 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 401 lp = (uint32_t *) pcmd->virt; 402 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t)); 403 if (wwn_to_u64(sp->portName.u.wwn) == 0) { 404 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 405 "0140 PLOGI Reject: invalid nname\n"); 406 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 407 stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_PNAME; 408 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 409 NULL); 410 return 0; 411 } 412 if (wwn_to_u64(sp->nodeName.u.wwn) == 0) { 413 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 414 "0141 PLOGI Reject: invalid pname\n"); 415 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 416 stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_NNAME; 417 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 418 NULL); 419 return 0; 420 } 421 422 nlp_portwwn = wwn_to_u64(ndlp->nlp_portname.u.wwn); 423 if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0) == 0)) { 424 /* Reject this request because invalid parameters */ 425 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 426 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS; 427 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 428 NULL); 429 return 0; 430 } 431 icmd = &cmdiocb->iocb; 432 433 /* PLOGI chkparm OK */ 434 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 435 "0114 PLOGI chkparm OK Data: x%x x%x x%x " 436 "x%x x%x x%x\n", 437 ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, 438 ndlp->nlp_rpi, vport->port_state, 439 vport->fc_flag); 440 441 if (vport->cfg_fcp_class == 2 && sp->cls2.classValid) 442 ndlp->nlp_fcp_info |= CLASS2; 443 else 444 ndlp->nlp_fcp_info |= CLASS3; 445 446 defer_acc = 0; 447 ndlp->nlp_class_sup = 0; 448 if (sp->cls1.classValid) 449 ndlp->nlp_class_sup |= FC_COS_CLASS1; 450 if (sp->cls2.classValid) 451 ndlp->nlp_class_sup |= FC_COS_CLASS2; 452 if (sp->cls3.classValid) 453 ndlp->nlp_class_sup |= FC_COS_CLASS3; 454 if (sp->cls4.classValid) 455 ndlp->nlp_class_sup |= FC_COS_CLASS4; 456 ndlp->nlp_maxframe = 457 ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb; 458 /* if already logged in, do implicit logout */ 459 switch (ndlp->nlp_state) { 460 case NLP_STE_NPR_NODE: 461 if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) 462 break; 463 fallthrough; 464 case NLP_STE_REG_LOGIN_ISSUE: 465 case NLP_STE_PRLI_ISSUE: 466 case NLP_STE_UNMAPPED_NODE: 467 case NLP_STE_MAPPED_NODE: 468 /* For initiators, lpfc_plogi_confirm_nport skips fabric did. 469 * For target mode, execute implicit logo. 470 * Fabric nodes go into NPR. 471 */ 472 if (!(ndlp->nlp_type & NLP_FABRIC) && 473 !(phba->nvmet_support)) { 474 lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, 475 ndlp, NULL); 476 return 1; 477 } 478 if (nlp_portwwn != 0 && 479 nlp_portwwn != wwn_to_u64(sp->portName.u.wwn)) 480 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 481 "0143 PLOGI recv'd from DID: x%x " 482 "WWPN changed: old %llx new %llx\n", 483 ndlp->nlp_DID, 484 (unsigned long long)nlp_portwwn, 485 (unsigned long long) 486 wwn_to_u64(sp->portName.u.wwn)); 487 488 /* Notify transport of connectivity loss to trigger cleanup. */ 489 if (phba->nvmet_support && 490 ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) 491 lpfc_nvmet_invalidate_host(phba, ndlp); 492 493 ndlp->nlp_prev_state = ndlp->nlp_state; 494 /* rport needs to be unregistered first */ 495 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 496 break; 497 } 498 499 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); 500 ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR); 501 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 502 ndlp->nlp_flag &= ~NLP_FIRSTBURST; 503 504 login_mbox = NULL; 505 link_mbox = NULL; 506 save_iocb = NULL; 507 508 /* Check for Nport to NPort pt2pt protocol */ 509 if ((vport->fc_flag & FC_PT2PT) && 510 !(vport->fc_flag & FC_PT2PT_PLOGI)) { 511 /* rcv'ed PLOGI decides what our NPortId will be */ 512 vport->fc_myDID = icmd->un.rcvels.parmRo; 513 514 ed_tov = be32_to_cpu(sp->cmn.e_d_tov); 515 if (sp->cmn.edtovResolution) { 516 /* E_D_TOV ticks are in nanoseconds */ 517 ed_tov = (phba->fc_edtov + 999999) / 1000000; 518 } 519 520 /* 521 * For pt-to-pt, use the larger EDTOV 522 * RATOV = 2 * EDTOV 523 */ 524 if (ed_tov > phba->fc_edtov) 525 phba->fc_edtov = ed_tov; 526 phba->fc_ratov = (2 * phba->fc_edtov) / 1000; 527 528 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); 529 530 /* Issue config_link / reg_vfi to account for updated TOV's */ 531 532 if (phba->sli_rev == LPFC_SLI_REV4) 533 lpfc_issue_reg_vfi(vport); 534 else { 535 defer_acc = 1; 536 link_mbox = mempool_alloc(phba->mbox_mem_pool, 537 GFP_KERNEL); 538 if (!link_mbox) 539 goto out; 540 lpfc_config_link(phba, link_mbox); 541 link_mbox->mbox_cmpl = lpfc_defer_pt2pt_acc; 542 link_mbox->vport = vport; 543 link_mbox->ctx_ndlp = ndlp; 544 545 save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL); 546 if (!save_iocb) 547 goto out; 548 /* Save info from cmd IOCB used in rsp */ 549 memcpy((uint8_t *)save_iocb, (uint8_t *)cmdiocb, 550 sizeof(struct lpfc_iocbq)); 551 } 552 553 lpfc_can_disctmo(vport); 554 } 555 556 ndlp->nlp_flag &= ~NLP_SUPPRESS_RSP; 557 if ((phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) && 558 sp->cmn.valid_vendor_ver_level) { 559 vid = be32_to_cpu(sp->un.vv.vid); 560 flag = be32_to_cpu(sp->un.vv.flags); 561 if ((vid == LPFC_VV_EMLX_ID) && (flag & LPFC_VV_SUPPRESS_RSP)) 562 ndlp->nlp_flag |= NLP_SUPPRESS_RSP; 563 } 564 565 login_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 566 if (!login_mbox) 567 goto out; 568 569 /* Registering an existing RPI behaves differently for SLI3 vs SLI4 */ 570 if (phba->nvmet_support && !defer_acc) { 571 link_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 572 if (!link_mbox) 573 goto out; 574 575 /* As unique identifiers such as iotag would be overwritten 576 * with those from the cmdiocb, allocate separate temporary 577 * storage for the copy. 578 */ 579 save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL); 580 if (!save_iocb) 581 goto out; 582 583 /* Unreg RPI is required for SLI4. */ 584 rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; 585 lpfc_unreg_login(phba, vport->vpi, rpi, link_mbox); 586 link_mbox->vport = vport; 587 link_mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 588 if (!link_mbox->ctx_ndlp) 589 goto out; 590 591 link_mbox->mbox_cmpl = lpfc_defer_acc_rsp; 592 593 if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) && 594 (!(vport->fc_flag & FC_OFFLINE_MODE))) 595 ndlp->nlp_flag |= NLP_UNREG_INP; 596 597 /* Save info from cmd IOCB used in rsp */ 598 memcpy(save_iocb, cmdiocb, sizeof(*save_iocb)); 599 600 /* Delay sending ACC till unreg RPI completes. */ 601 defer_acc = 1; 602 } else if (phba->sli_rev == LPFC_SLI_REV4) 603 lpfc_unreg_rpi(vport, ndlp); 604 605 rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID, 606 (uint8_t *)sp, login_mbox, ndlp->nlp_rpi); 607 if (rc) 608 goto out; 609 610 /* ACC PLOGI rsp command needs to execute first, 611 * queue this login_mbox command to be processed later. 612 */ 613 login_mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; 614 /* 615 * login_mbox->ctx_ndlp = lpfc_nlp_get(ndlp) deferred until mailbox 616 * command issued in lpfc_cmpl_els_acc(). 617 */ 618 login_mbox->vport = vport; 619 spin_lock_irq(&ndlp->lock); 620 ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI); 621 spin_unlock_irq(&ndlp->lock); 622 623 /* 624 * If there is an outstanding PLOGI issued, abort it before 625 * sending ACC rsp for received PLOGI. If pending plogi 626 * is not canceled here, the plogi will be rejected by 627 * remote port and will be retried. On a configuration with 628 * single discovery thread, this will cause a huge delay in 629 * discovery. Also this will cause multiple state machines 630 * running in parallel for this node. 631 * This only applies to a fabric environment. 632 */ 633 if ((ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) && 634 (vport->fc_flag & FC_FABRIC)) { 635 /* software abort outstanding PLOGI */ 636 lpfc_els_abort(phba, ndlp); 637 } 638 639 if ((vport->port_type == LPFC_NPIV_PORT && 640 vport->cfg_restrict_login)) { 641 642 /* no deferred ACC */ 643 kfree(save_iocb); 644 645 /* In order to preserve RPIs, we want to cleanup 646 * the default RPI the firmware created to rcv 647 * this ELS request. The only way to do this is 648 * to register, then unregister the RPI. 649 */ 650 spin_lock_irq(&ndlp->lock); 651 ndlp->nlp_flag |= NLP_RM_DFLT_RPI; 652 spin_unlock_irq(&ndlp->lock); 653 stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD; 654 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; 655 rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, 656 ndlp, login_mbox); 657 if (rc) 658 mempool_free(login_mbox, phba->mbox_mem_pool); 659 return 1; 660 } 661 if (defer_acc) { 662 /* So the order here should be: 663 * SLI3 pt2pt 664 * Issue CONFIG_LINK mbox 665 * CONFIG_LINK cmpl 666 * SLI4 tgt 667 * Issue UNREG RPI mbx 668 * UNREG RPI cmpl 669 * Issue PLOGI ACC 670 * PLOGI ACC cmpl 671 * Issue REG_LOGIN mbox 672 */ 673 674 /* Save the REG_LOGIN mbox for and rcv IOCB copy later */ 675 link_mbox->context3 = login_mbox; 676 login_mbox->context3 = save_iocb; 677 678 /* Start the ball rolling by issuing CONFIG_LINK here */ 679 rc = lpfc_sli_issue_mbox(phba, link_mbox, MBX_NOWAIT); 680 if (rc == MBX_NOT_FINISHED) 681 goto out; 682 return 1; 683 } 684 685 rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, login_mbox); 686 if (rc) 687 mempool_free(login_mbox, phba->mbox_mem_pool); 688 return 1; 689 out: 690 if (defer_acc) 691 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 692 "4577 discovery failure: %p %p %p\n", 693 save_iocb, link_mbox, login_mbox); 694 kfree(save_iocb); 695 if (link_mbox) 696 mempool_free(link_mbox, phba->mbox_mem_pool); 697 if (login_mbox) 698 mempool_free(login_mbox, phba->mbox_mem_pool); 699 700 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 701 stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE; 702 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 703 return 0; 704 } 705 706 /** 707 * lpfc_mbx_cmpl_resume_rpi - Resume RPI completion routine 708 * @phba: pointer to lpfc hba data structure. 709 * @mboxq: pointer to mailbox object 710 * 711 * This routine is invoked to issue a completion to a rcv'ed 712 * ADISC or PDISC after the paused RPI has been resumed. 713 **/ 714 static void 715 lpfc_mbx_cmpl_resume_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 716 { 717 struct lpfc_vport *vport; 718 struct lpfc_iocbq *elsiocb; 719 struct lpfc_nodelist *ndlp; 720 uint32_t cmd; 721 722 elsiocb = (struct lpfc_iocbq *)mboxq->ctx_buf; 723 ndlp = (struct lpfc_nodelist *)mboxq->ctx_ndlp; 724 vport = mboxq->vport; 725 cmd = elsiocb->drvrTimeout; 726 727 if (cmd == ELS_CMD_ADISC) { 728 lpfc_els_rsp_adisc_acc(vport, elsiocb, ndlp); 729 } else { 730 lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, elsiocb, 731 ndlp, NULL); 732 } 733 kfree(elsiocb); 734 mempool_free(mboxq, phba->mbox_mem_pool); 735 } 736 737 static int 738 lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 739 struct lpfc_iocbq *cmdiocb) 740 { 741 struct lpfc_iocbq *elsiocb; 742 struct lpfc_dmabuf *pcmd; 743 struct serv_parm *sp; 744 struct lpfc_name *pnn, *ppn; 745 struct ls_rjt stat; 746 ADISC *ap; 747 IOCB_t *icmd; 748 uint32_t *lp; 749 uint32_t cmd; 750 751 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 752 lp = (uint32_t *) pcmd->virt; 753 754 cmd = *lp++; 755 if (cmd == ELS_CMD_ADISC) { 756 ap = (ADISC *) lp; 757 pnn = (struct lpfc_name *) & ap->nodeName; 758 ppn = (struct lpfc_name *) & ap->portName; 759 } else { 760 sp = (struct serv_parm *) lp; 761 pnn = (struct lpfc_name *) & sp->nodeName; 762 ppn = (struct lpfc_name *) & sp->portName; 763 } 764 765 icmd = &cmdiocb->iocb; 766 if (icmd->ulpStatus == 0 && lpfc_check_adisc(vport, ndlp, pnn, ppn)) { 767 768 /* 769 * As soon as we send ACC, the remote NPort can 770 * start sending us data. Thus, for SLI4 we must 771 * resume the RPI before the ACC goes out. 772 */ 773 if (vport->phba->sli_rev == LPFC_SLI_REV4) { 774 elsiocb = kmalloc(sizeof(struct lpfc_iocbq), 775 GFP_KERNEL); 776 if (elsiocb) { 777 778 /* Save info from cmd IOCB used in rsp */ 779 memcpy((uint8_t *)elsiocb, (uint8_t *)cmdiocb, 780 sizeof(struct lpfc_iocbq)); 781 782 /* Save the ELS cmd */ 783 elsiocb->drvrTimeout = cmd; 784 785 lpfc_sli4_resume_rpi(ndlp, 786 lpfc_mbx_cmpl_resume_rpi, elsiocb); 787 goto out; 788 } 789 } 790 791 if (cmd == ELS_CMD_ADISC) { 792 lpfc_els_rsp_adisc_acc(vport, cmdiocb, ndlp); 793 } else { 794 lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, 795 ndlp, NULL); 796 } 797 out: 798 /* If we are authenticated, move to the proper state. 799 * It is possible an ADISC arrived and the remote nport 800 * is already in MAPPED or UNMAPPED state. Catch this 801 * condition and don't set the nlp_state again because 802 * it causes an unnecessary transport unregister/register. 803 */ 804 if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET)) { 805 if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) 806 lpfc_nlp_set_state(vport, ndlp, 807 NLP_STE_MAPPED_NODE); 808 } 809 810 return 1; 811 } 812 /* Reject this request because invalid parameters */ 813 stat.un.b.lsRjtRsvd0 = 0; 814 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 815 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS; 816 stat.un.b.vendorUnique = 0; 817 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 818 819 /* 1 sec timeout */ 820 mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); 821 822 spin_lock_irq(&ndlp->lock); 823 ndlp->nlp_flag |= NLP_DELAY_TMO; 824 spin_unlock_irq(&ndlp->lock); 825 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; 826 ndlp->nlp_prev_state = ndlp->nlp_state; 827 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 828 return 0; 829 } 830 831 static int 832 lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 833 struct lpfc_iocbq *cmdiocb, uint32_t els_cmd) 834 { 835 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 836 struct lpfc_hba *phba = vport->phba; 837 struct lpfc_vport **vports; 838 int i, active_vlink_present = 0 ; 839 840 /* Put ndlp in NPR state with 1 sec timeout for plogi, ACC logo */ 841 /* Only call LOGO ACC for first LOGO, this avoids sending unnecessary 842 * PLOGIs during LOGO storms from a device. 843 */ 844 spin_lock_irq(&ndlp->lock); 845 ndlp->nlp_flag |= NLP_LOGO_ACC; 846 spin_unlock_irq(&ndlp->lock); 847 if (els_cmd == ELS_CMD_PRLO) 848 lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL); 849 else 850 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 851 852 /* Notify transport of connectivity loss to trigger cleanup. */ 853 if (phba->nvmet_support && 854 ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) 855 lpfc_nvmet_invalidate_host(phba, ndlp); 856 857 if (ndlp->nlp_DID == Fabric_DID) { 858 if (vport->port_state <= LPFC_FDISC) 859 goto out; 860 lpfc_linkdown_port(vport); 861 spin_lock_irq(shost->host_lock); 862 vport->fc_flag |= FC_VPORT_LOGO_RCVD; 863 spin_unlock_irq(shost->host_lock); 864 vports = lpfc_create_vport_work_array(phba); 865 if (vports) { 866 for (i = 0; i <= phba->max_vports && vports[i] != NULL; 867 i++) { 868 if ((!(vports[i]->fc_flag & 869 FC_VPORT_LOGO_RCVD)) && 870 (vports[i]->port_state > LPFC_FDISC)) { 871 active_vlink_present = 1; 872 break; 873 } 874 } 875 lpfc_destroy_vport_work_array(phba, vports); 876 } 877 878 /* 879 * Don't re-instantiate if vport is marked for deletion. 880 * If we are here first then vport_delete is going to wait 881 * for discovery to complete. 882 */ 883 if (!(vport->load_flag & FC_UNLOADING) && 884 active_vlink_present) { 885 /* 886 * If there are other active VLinks present, 887 * re-instantiate the Vlink using FDISC. 888 */ 889 mod_timer(&ndlp->nlp_delayfunc, 890 jiffies + msecs_to_jiffies(1000)); 891 spin_lock_irq(&ndlp->lock); 892 ndlp->nlp_flag |= NLP_DELAY_TMO; 893 spin_unlock_irq(&ndlp->lock); 894 ndlp->nlp_last_elscmd = ELS_CMD_FDISC; 895 vport->port_state = LPFC_FDISC; 896 } else { 897 spin_lock_irq(shost->host_lock); 898 phba->pport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG; 899 spin_unlock_irq(shost->host_lock); 900 lpfc_retry_pport_discovery(phba); 901 } 902 } else if ((!(ndlp->nlp_type & NLP_FABRIC) && 903 ((ndlp->nlp_type & NLP_FCP_TARGET) || 904 !(ndlp->nlp_type & NLP_FCP_INITIATOR))) || 905 (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) { 906 /* Only try to re-login if this is NOT a Fabric Node */ 907 mod_timer(&ndlp->nlp_delayfunc, 908 jiffies + msecs_to_jiffies(1000 * 1)); 909 spin_lock_irq(&ndlp->lock); 910 ndlp->nlp_flag |= NLP_DELAY_TMO; 911 spin_unlock_irq(&ndlp->lock); 912 913 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; 914 } 915 out: 916 ndlp->nlp_prev_state = ndlp->nlp_state; 917 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 918 919 spin_lock_irq(&ndlp->lock); 920 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 921 spin_unlock_irq(&ndlp->lock); 922 /* The driver has to wait until the ACC completes before it continues 923 * processing the LOGO. The action will resume in 924 * lpfc_cmpl_els_logo_acc routine. Since part of processing includes an 925 * unreg_login, the driver waits so the ACC does not get aborted. 926 */ 927 return 0; 928 } 929 930 static uint32_t 931 lpfc_rcv_prli_support_check(struct lpfc_vport *vport, 932 struct lpfc_nodelist *ndlp, 933 struct lpfc_iocbq *cmdiocb) 934 { 935 struct ls_rjt stat; 936 uint32_t *payload; 937 uint32_t cmd; 938 939 payload = ((struct lpfc_dmabuf *)cmdiocb->context2)->virt; 940 cmd = *payload; 941 if (vport->phba->nvmet_support) { 942 /* Must be a NVME PRLI */ 943 if (cmd == ELS_CMD_PRLI) 944 goto out; 945 } else { 946 /* Initiator mode. */ 947 if (!vport->nvmei_support && (cmd == ELS_CMD_NVMEPRLI)) 948 goto out; 949 } 950 return 1; 951 out: 952 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME_DISC, 953 "6115 Rcv PRLI (%x) check failed: ndlp rpi %d " 954 "state x%x flags x%x\n", 955 cmd, ndlp->nlp_rpi, ndlp->nlp_state, 956 ndlp->nlp_flag); 957 memset(&stat, 0, sizeof(struct ls_rjt)); 958 stat.un.b.lsRjtRsnCode = LSRJT_CMD_UNSUPPORTED; 959 stat.un.b.lsRjtRsnCodeExp = LSEXP_REQ_UNSUPPORTED; 960 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, 961 ndlp, NULL); 962 return 0; 963 } 964 965 static void 966 lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 967 struct lpfc_iocbq *cmdiocb) 968 { 969 struct lpfc_hba *phba = vport->phba; 970 struct lpfc_dmabuf *pcmd; 971 uint32_t *lp; 972 PRLI *npr; 973 struct fc_rport *rport = ndlp->rport; 974 u32 roles; 975 976 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 977 lp = (uint32_t *) pcmd->virt; 978 npr = (PRLI *) ((uint8_t *) lp + sizeof (uint32_t)); 979 980 if ((npr->prliType == PRLI_FCP_TYPE) || 981 (npr->prliType == PRLI_NVME_TYPE)) { 982 if (npr->initiatorFunc) { 983 if (npr->prliType == PRLI_FCP_TYPE) 984 ndlp->nlp_type |= NLP_FCP_INITIATOR; 985 if (npr->prliType == PRLI_NVME_TYPE) 986 ndlp->nlp_type |= NLP_NVME_INITIATOR; 987 } 988 if (npr->targetFunc) { 989 if (npr->prliType == PRLI_FCP_TYPE) 990 ndlp->nlp_type |= NLP_FCP_TARGET; 991 if (npr->prliType == PRLI_NVME_TYPE) 992 ndlp->nlp_type |= NLP_NVME_TARGET; 993 if (npr->writeXferRdyDis) 994 ndlp->nlp_flag |= NLP_FIRSTBURST; 995 } 996 if (npr->Retry && ndlp->nlp_type & 997 (NLP_FCP_INITIATOR | NLP_FCP_TARGET)) 998 ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE; 999 1000 if (npr->Retry && phba->nsler && 1001 ndlp->nlp_type & (NLP_NVME_INITIATOR | NLP_NVME_TARGET)) 1002 ndlp->nlp_nvme_info |= NLP_NVME_NSLER; 1003 1004 1005 /* If this driver is in nvme target mode, set the ndlp's fc4 1006 * type to NVME provided the PRLI response claims NVME FC4 1007 * type. Target mode does not issue gft_id so doesn't get 1008 * the fc4 type set until now. 1009 */ 1010 if (phba->nvmet_support && (npr->prliType == PRLI_NVME_TYPE)) { 1011 ndlp->nlp_fc4_type |= NLP_FC4_NVME; 1012 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 1013 } 1014 if (npr->prliType == PRLI_FCP_TYPE) 1015 ndlp->nlp_fc4_type |= NLP_FC4_FCP; 1016 } 1017 if (rport) { 1018 /* We need to update the rport role values */ 1019 roles = FC_RPORT_ROLE_UNKNOWN; 1020 if (ndlp->nlp_type & NLP_FCP_INITIATOR) 1021 roles |= FC_RPORT_ROLE_FCP_INITIATOR; 1022 if (ndlp->nlp_type & NLP_FCP_TARGET) 1023 roles |= FC_RPORT_ROLE_FCP_TARGET; 1024 1025 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, 1026 "rport rolechg: role:x%x did:x%x flg:x%x", 1027 roles, ndlp->nlp_DID, ndlp->nlp_flag); 1028 1029 if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME) 1030 fc_remote_port_rolechg(rport, roles); 1031 } 1032 } 1033 1034 static uint32_t 1035 lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 1036 { 1037 if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) { 1038 spin_lock_irq(&ndlp->lock); 1039 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 1040 spin_unlock_irq(&ndlp->lock); 1041 return 0; 1042 } 1043 1044 if (!(vport->fc_flag & FC_PT2PT)) { 1045 /* Check config parameter use-adisc or FCP-2 */ 1046 if (vport->cfg_use_adisc && ((vport->fc_flag & FC_RSCN_MODE) || 1047 ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) && 1048 (ndlp->nlp_type & NLP_FCP_TARGET)))) { 1049 spin_lock_irq(&ndlp->lock); 1050 ndlp->nlp_flag |= NLP_NPR_ADISC; 1051 spin_unlock_irq(&ndlp->lock); 1052 return 1; 1053 } 1054 } 1055 1056 spin_lock_irq(&ndlp->lock); 1057 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 1058 spin_unlock_irq(&ndlp->lock); 1059 lpfc_unreg_rpi(vport, ndlp); 1060 return 0; 1061 } 1062 1063 /** 1064 * lpfc_release_rpi - Release a RPI by issuing unreg_login mailbox cmd. 1065 * @phba : Pointer to lpfc_hba structure. 1066 * @vport: Pointer to lpfc_vport structure. 1067 * @ndlp: Pointer to lpfc_nodelist structure. 1068 * @rpi : rpi to be release. 1069 * 1070 * This function will send a unreg_login mailbox command to the firmware 1071 * to release a rpi. 1072 **/ 1073 static void 1074 lpfc_release_rpi(struct lpfc_hba *phba, struct lpfc_vport *vport, 1075 struct lpfc_nodelist *ndlp, uint16_t rpi) 1076 { 1077 LPFC_MBOXQ_t *pmb; 1078 int rc; 1079 1080 /* If there is already an UNREG in progress for this ndlp, 1081 * no need to queue up another one. 1082 */ 1083 if (ndlp->nlp_flag & NLP_UNREG_INP) { 1084 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 1085 "1435 release_rpi SKIP UNREG x%x on " 1086 "NPort x%x deferred x%x flg x%x " 1087 "Data: x%px\n", 1088 ndlp->nlp_rpi, ndlp->nlp_DID, 1089 ndlp->nlp_defer_did, 1090 ndlp->nlp_flag, ndlp); 1091 return; 1092 } 1093 1094 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 1095 GFP_KERNEL); 1096 if (!pmb) 1097 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1098 "2796 mailbox memory allocation failed \n"); 1099 else { 1100 lpfc_unreg_login(phba, vport->vpi, rpi, pmb); 1101 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 1102 pmb->vport = vport; 1103 pmb->ctx_ndlp = lpfc_nlp_get(ndlp); 1104 if (!pmb->ctx_ndlp) { 1105 mempool_free(pmb, phba->mbox_mem_pool); 1106 return; 1107 } 1108 1109 if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) && 1110 (!(vport->fc_flag & FC_OFFLINE_MODE))) 1111 ndlp->nlp_flag |= NLP_UNREG_INP; 1112 1113 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 1114 "1437 release_rpi UNREG x%x " 1115 "on NPort x%x flg x%x\n", 1116 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag); 1117 1118 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 1119 if (rc == MBX_NOT_FINISHED) 1120 mempool_free(pmb, phba->mbox_mem_pool); 1121 } 1122 } 1123 1124 static uint32_t 1125 lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1126 void *arg, uint32_t evt) 1127 { 1128 struct lpfc_hba *phba; 1129 LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg; 1130 uint16_t rpi; 1131 1132 phba = vport->phba; 1133 /* Release the RPI if reglogin completing */ 1134 if (!(phba->pport->load_flag & FC_UNLOADING) && 1135 (evt == NLP_EVT_CMPL_REG_LOGIN) && 1136 (!pmb->u.mb.mbxStatus)) { 1137 rpi = pmb->u.mb.un.varWords[0]; 1138 lpfc_release_rpi(phba, vport, ndlp, rpi); 1139 } 1140 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1141 "0271 Illegal State Transition: node x%x " 1142 "event x%x, state x%x Data: x%x x%x\n", 1143 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi, 1144 ndlp->nlp_flag); 1145 return ndlp->nlp_state; 1146 } 1147 1148 static uint32_t 1149 lpfc_cmpl_plogi_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1150 void *arg, uint32_t evt) 1151 { 1152 /* This transition is only legal if we previously 1153 * rcv'ed a PLOGI. Since we don't want 2 discovery threads 1154 * working on the same NPortID, do nothing for this thread 1155 * to stop it. 1156 */ 1157 if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) { 1158 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1159 "0272 Illegal State Transition: node x%x " 1160 "event x%x, state x%x Data: x%x x%x\n", 1161 ndlp->nlp_DID, evt, ndlp->nlp_state, 1162 ndlp->nlp_rpi, ndlp->nlp_flag); 1163 } 1164 return ndlp->nlp_state; 1165 } 1166 1167 /* Start of Discovery State Machine routines */ 1168 1169 static uint32_t 1170 lpfc_rcv_plogi_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1171 void *arg, uint32_t evt) 1172 { 1173 struct lpfc_iocbq *cmdiocb; 1174 1175 cmdiocb = (struct lpfc_iocbq *) arg; 1176 1177 if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) { 1178 return ndlp->nlp_state; 1179 } 1180 return NLP_STE_FREED_NODE; 1181 } 1182 1183 static uint32_t 1184 lpfc_rcv_els_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1185 void *arg, uint32_t evt) 1186 { 1187 lpfc_issue_els_logo(vport, ndlp, 0); 1188 return ndlp->nlp_state; 1189 } 1190 1191 static uint32_t 1192 lpfc_rcv_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1193 void *arg, uint32_t evt) 1194 { 1195 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 1196 1197 spin_lock_irq(&ndlp->lock); 1198 ndlp->nlp_flag |= NLP_LOGO_ACC; 1199 spin_unlock_irq(&ndlp->lock); 1200 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 1201 1202 return ndlp->nlp_state; 1203 } 1204 1205 static uint32_t 1206 lpfc_cmpl_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1207 void *arg, uint32_t evt) 1208 { 1209 return NLP_STE_FREED_NODE; 1210 } 1211 1212 static uint32_t 1213 lpfc_device_rm_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1214 void *arg, uint32_t evt) 1215 { 1216 return NLP_STE_FREED_NODE; 1217 } 1218 1219 static uint32_t 1220 lpfc_device_recov_unused_node(struct lpfc_vport *vport, 1221 struct lpfc_nodelist *ndlp, 1222 void *arg, uint32_t evt) 1223 { 1224 return ndlp->nlp_state; 1225 } 1226 1227 static uint32_t 1228 lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1229 void *arg, uint32_t evt) 1230 { 1231 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1232 struct lpfc_hba *phba = vport->phba; 1233 struct lpfc_iocbq *cmdiocb = arg; 1234 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 1235 uint32_t *lp = (uint32_t *) pcmd->virt; 1236 struct serv_parm *sp = (struct serv_parm *) (lp + 1); 1237 struct ls_rjt stat; 1238 int port_cmp; 1239 1240 memset(&stat, 0, sizeof (struct ls_rjt)); 1241 1242 /* For a PLOGI, we only accept if our portname is less 1243 * than the remote portname. 1244 */ 1245 phba->fc_stat.elsLogiCol++; 1246 port_cmp = memcmp(&vport->fc_portname, &sp->portName, 1247 sizeof(struct lpfc_name)); 1248 1249 if (port_cmp >= 0) { 1250 /* Reject this request because the remote node will accept 1251 ours */ 1252 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 1253 stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS; 1254 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 1255 NULL); 1256 } else { 1257 if (lpfc_rcv_plogi(vport, ndlp, cmdiocb) && 1258 (ndlp->nlp_flag & NLP_NPR_2B_DISC) && 1259 (vport->num_disc_nodes)) { 1260 spin_lock_irq(&ndlp->lock); 1261 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 1262 spin_unlock_irq(&ndlp->lock); 1263 /* Check if there are more PLOGIs to be sent */ 1264 lpfc_more_plogi(vport); 1265 if (vport->num_disc_nodes == 0) { 1266 spin_lock_irq(shost->host_lock); 1267 vport->fc_flag &= ~FC_NDISC_ACTIVE; 1268 spin_unlock_irq(shost->host_lock); 1269 lpfc_can_disctmo(vport); 1270 lpfc_end_rscn(vport); 1271 } 1272 } 1273 } /* If our portname was less */ 1274 1275 return ndlp->nlp_state; 1276 } 1277 1278 static uint32_t 1279 lpfc_rcv_prli_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1280 void *arg, uint32_t evt) 1281 { 1282 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 1283 struct ls_rjt stat; 1284 1285 memset(&stat, 0, sizeof (struct ls_rjt)); 1286 stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY; 1287 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; 1288 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 1289 return ndlp->nlp_state; 1290 } 1291 1292 static uint32_t 1293 lpfc_rcv_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1294 void *arg, uint32_t evt) 1295 { 1296 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 1297 1298 /* Retrieve RPI from LOGO IOCB. RPI is used for CMD_ABORT_XRI_CN */ 1299 if (vport->phba->sli_rev == LPFC_SLI_REV3) 1300 ndlp->nlp_rpi = cmdiocb->iocb.ulpIoTag; 1301 /* software abort outstanding PLOGI */ 1302 lpfc_els_abort(vport->phba, ndlp); 1303 1304 lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO); 1305 return ndlp->nlp_state; 1306 } 1307 1308 static uint32_t 1309 lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1310 void *arg, uint32_t evt) 1311 { 1312 struct lpfc_hba *phba = vport->phba; 1313 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 1314 1315 /* software abort outstanding PLOGI */ 1316 lpfc_els_abort(phba, ndlp); 1317 1318 if (evt == NLP_EVT_RCV_LOGO) { 1319 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 1320 } else { 1321 lpfc_issue_els_logo(vport, ndlp, 0); 1322 } 1323 1324 /* Put ndlp in npr state set plogi timer for 1 sec */ 1325 mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1)); 1326 spin_lock_irq(&ndlp->lock); 1327 ndlp->nlp_flag |= NLP_DELAY_TMO; 1328 spin_unlock_irq(&ndlp->lock); 1329 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; 1330 ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE; 1331 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 1332 1333 return ndlp->nlp_state; 1334 } 1335 1336 static uint32_t 1337 lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, 1338 struct lpfc_nodelist *ndlp, 1339 void *arg, 1340 uint32_t evt) 1341 { 1342 struct lpfc_hba *phba = vport->phba; 1343 struct lpfc_iocbq *cmdiocb, *rspiocb; 1344 struct lpfc_dmabuf *pcmd, *prsp, *mp; 1345 uint32_t *lp; 1346 uint32_t vid, flag; 1347 IOCB_t *irsp; 1348 struct serv_parm *sp; 1349 uint32_t ed_tov; 1350 LPFC_MBOXQ_t *mbox; 1351 int rc; 1352 1353 cmdiocb = (struct lpfc_iocbq *) arg; 1354 rspiocb = cmdiocb->context_un.rsp_iocb; 1355 1356 if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) { 1357 /* Recovery from PLOGI collision logic */ 1358 return ndlp->nlp_state; 1359 } 1360 1361 irsp = &rspiocb->iocb; 1362 1363 if (irsp->ulpStatus) 1364 goto out; 1365 1366 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 1367 1368 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 1369 if (!prsp) 1370 goto out; 1371 1372 lp = (uint32_t *) prsp->virt; 1373 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t)); 1374 1375 /* Some switches have FDMI servers returning 0 for WWN */ 1376 if ((ndlp->nlp_DID != FDMI_DID) && 1377 (wwn_to_u64(sp->portName.u.wwn) == 0 || 1378 wwn_to_u64(sp->nodeName.u.wwn) == 0)) { 1379 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1380 "0142 PLOGI RSP: Invalid WWN.\n"); 1381 goto out; 1382 } 1383 if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0)) 1384 goto out; 1385 /* PLOGI chkparm OK */ 1386 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1387 "0121 PLOGI chkparm OK Data: x%x x%x x%x x%x\n", 1388 ndlp->nlp_DID, ndlp->nlp_state, 1389 ndlp->nlp_flag, ndlp->nlp_rpi); 1390 if (vport->cfg_fcp_class == 2 && (sp->cls2.classValid)) 1391 ndlp->nlp_fcp_info |= CLASS2; 1392 else 1393 ndlp->nlp_fcp_info |= CLASS3; 1394 1395 ndlp->nlp_class_sup = 0; 1396 if (sp->cls1.classValid) 1397 ndlp->nlp_class_sup |= FC_COS_CLASS1; 1398 if (sp->cls2.classValid) 1399 ndlp->nlp_class_sup |= FC_COS_CLASS2; 1400 if (sp->cls3.classValid) 1401 ndlp->nlp_class_sup |= FC_COS_CLASS3; 1402 if (sp->cls4.classValid) 1403 ndlp->nlp_class_sup |= FC_COS_CLASS4; 1404 ndlp->nlp_maxframe = 1405 ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb; 1406 1407 if ((vport->fc_flag & FC_PT2PT) && 1408 (vport->fc_flag & FC_PT2PT_PLOGI)) { 1409 ed_tov = be32_to_cpu(sp->cmn.e_d_tov); 1410 if (sp->cmn.edtovResolution) { 1411 /* E_D_TOV ticks are in nanoseconds */ 1412 ed_tov = (phba->fc_edtov + 999999) / 1000000; 1413 } 1414 1415 ndlp->nlp_flag &= ~NLP_SUPPRESS_RSP; 1416 if ((phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) && 1417 sp->cmn.valid_vendor_ver_level) { 1418 vid = be32_to_cpu(sp->un.vv.vid); 1419 flag = be32_to_cpu(sp->un.vv.flags); 1420 if ((vid == LPFC_VV_EMLX_ID) && 1421 (flag & LPFC_VV_SUPPRESS_RSP)) 1422 ndlp->nlp_flag |= NLP_SUPPRESS_RSP; 1423 } 1424 1425 /* 1426 * Use the larger EDTOV 1427 * RATOV = 2 * EDTOV for pt-to-pt 1428 */ 1429 if (ed_tov > phba->fc_edtov) 1430 phba->fc_edtov = ed_tov; 1431 phba->fc_ratov = (2 * phba->fc_edtov) / 1000; 1432 1433 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); 1434 1435 /* Issue config_link / reg_vfi to account for updated TOV's */ 1436 if (phba->sli_rev == LPFC_SLI_REV4) { 1437 lpfc_issue_reg_vfi(vport); 1438 } else { 1439 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1440 if (!mbox) { 1441 lpfc_printf_vlog(vport, KERN_ERR, 1442 LOG_TRACE_EVENT, 1443 "0133 PLOGI: no memory " 1444 "for config_link " 1445 "Data: x%x x%x x%x x%x\n", 1446 ndlp->nlp_DID, ndlp->nlp_state, 1447 ndlp->nlp_flag, ndlp->nlp_rpi); 1448 goto out; 1449 } 1450 1451 lpfc_config_link(phba, mbox); 1452 1453 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 1454 mbox->vport = vport; 1455 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 1456 if (rc == MBX_NOT_FINISHED) { 1457 mempool_free(mbox, phba->mbox_mem_pool); 1458 goto out; 1459 } 1460 } 1461 } 1462 1463 lpfc_unreg_rpi(vport, ndlp); 1464 1465 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1466 if (!mbox) { 1467 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1468 "0018 PLOGI: no memory for reg_login " 1469 "Data: x%x x%x x%x x%x\n", 1470 ndlp->nlp_DID, ndlp->nlp_state, 1471 ndlp->nlp_flag, ndlp->nlp_rpi); 1472 goto out; 1473 } 1474 1475 if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID, 1476 (uint8_t *) sp, mbox, ndlp->nlp_rpi) == 0) { 1477 switch (ndlp->nlp_DID) { 1478 case NameServer_DID: 1479 mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login; 1480 break; 1481 case FDMI_DID: 1482 mbox->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login; 1483 break; 1484 default: 1485 ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; 1486 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; 1487 } 1488 1489 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 1490 if (!mbox->ctx_ndlp) 1491 goto out; 1492 1493 mbox->vport = vport; 1494 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 1495 != MBX_NOT_FINISHED) { 1496 lpfc_nlp_set_state(vport, ndlp, 1497 NLP_STE_REG_LOGIN_ISSUE); 1498 return ndlp->nlp_state; 1499 } 1500 if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) 1501 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 1502 /* decrement node reference count to the failed mbox 1503 * command 1504 */ 1505 lpfc_nlp_put(ndlp); 1506 mp = (struct lpfc_dmabuf *)mbox->ctx_buf; 1507 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1508 kfree(mp); 1509 mempool_free(mbox, phba->mbox_mem_pool); 1510 1511 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1512 "0134 PLOGI: cannot issue reg_login " 1513 "Data: x%x x%x x%x x%x\n", 1514 ndlp->nlp_DID, ndlp->nlp_state, 1515 ndlp->nlp_flag, ndlp->nlp_rpi); 1516 } else { 1517 mempool_free(mbox, phba->mbox_mem_pool); 1518 1519 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1520 "0135 PLOGI: cannot format reg_login " 1521 "Data: x%x x%x x%x x%x\n", 1522 ndlp->nlp_DID, ndlp->nlp_state, 1523 ndlp->nlp_flag, ndlp->nlp_rpi); 1524 } 1525 1526 1527 out: 1528 if (ndlp->nlp_DID == NameServer_DID) { 1529 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 1530 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1531 "0261 Cannot Register NameServer login\n"); 1532 } 1533 1534 /* 1535 ** In case the node reference counter does not go to zero, ensure that 1536 ** the stale state for the node is not processed. 1537 */ 1538 1539 ndlp->nlp_prev_state = ndlp->nlp_state; 1540 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 1541 return NLP_STE_FREED_NODE; 1542 } 1543 1544 static uint32_t 1545 lpfc_cmpl_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1546 void *arg, uint32_t evt) 1547 { 1548 return ndlp->nlp_state; 1549 } 1550 1551 static uint32_t 1552 lpfc_cmpl_reglogin_plogi_issue(struct lpfc_vport *vport, 1553 struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) 1554 { 1555 struct lpfc_hba *phba; 1556 LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg; 1557 MAILBOX_t *mb = &pmb->u.mb; 1558 uint16_t rpi; 1559 1560 phba = vport->phba; 1561 /* Release the RPI */ 1562 if (!(phba->pport->load_flag & FC_UNLOADING) && 1563 !mb->mbxStatus) { 1564 rpi = pmb->u.mb.un.varWords[0]; 1565 lpfc_release_rpi(phba, vport, ndlp, rpi); 1566 } 1567 return ndlp->nlp_state; 1568 } 1569 1570 static uint32_t 1571 lpfc_device_rm_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1572 void *arg, uint32_t evt) 1573 { 1574 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { 1575 spin_lock_irq(&ndlp->lock); 1576 ndlp->nlp_flag |= NLP_NODEV_REMOVE; 1577 spin_unlock_irq(&ndlp->lock); 1578 return ndlp->nlp_state; 1579 } else { 1580 /* software abort outstanding PLOGI */ 1581 lpfc_els_abort(vport->phba, ndlp); 1582 1583 lpfc_drop_node(vport, ndlp); 1584 return NLP_STE_FREED_NODE; 1585 } 1586 } 1587 1588 static uint32_t 1589 lpfc_device_recov_plogi_issue(struct lpfc_vport *vport, 1590 struct lpfc_nodelist *ndlp, 1591 void *arg, 1592 uint32_t evt) 1593 { 1594 struct lpfc_hba *phba = vport->phba; 1595 1596 /* Don't do anything that will mess up processing of the 1597 * previous RSCN. 1598 */ 1599 if (vport->fc_flag & FC_RSCN_DEFERRED) 1600 return ndlp->nlp_state; 1601 1602 /* software abort outstanding PLOGI */ 1603 lpfc_els_abort(phba, ndlp); 1604 1605 ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE; 1606 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 1607 spin_lock_irq(&ndlp->lock); 1608 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 1609 spin_unlock_irq(&ndlp->lock); 1610 1611 return ndlp->nlp_state; 1612 } 1613 1614 static uint32_t 1615 lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1616 void *arg, uint32_t evt) 1617 { 1618 struct lpfc_hba *phba = vport->phba; 1619 struct lpfc_iocbq *cmdiocb; 1620 1621 /* software abort outstanding ADISC */ 1622 lpfc_els_abort(phba, ndlp); 1623 1624 cmdiocb = (struct lpfc_iocbq *) arg; 1625 1626 if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) { 1627 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { 1628 spin_lock_irq(&ndlp->lock); 1629 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 1630 spin_unlock_irq(&ndlp->lock); 1631 if (vport->num_disc_nodes) 1632 lpfc_more_adisc(vport); 1633 } 1634 return ndlp->nlp_state; 1635 } 1636 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; 1637 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 1638 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 1639 1640 return ndlp->nlp_state; 1641 } 1642 1643 static uint32_t 1644 lpfc_rcv_prli_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1645 void *arg, uint32_t evt) 1646 { 1647 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 1648 1649 if (lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb)) 1650 lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp); 1651 return ndlp->nlp_state; 1652 } 1653 1654 static uint32_t 1655 lpfc_rcv_logo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1656 void *arg, uint32_t evt) 1657 { 1658 struct lpfc_hba *phba = vport->phba; 1659 struct lpfc_iocbq *cmdiocb; 1660 1661 cmdiocb = (struct lpfc_iocbq *) arg; 1662 1663 /* software abort outstanding ADISC */ 1664 lpfc_els_abort(phba, ndlp); 1665 1666 lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO); 1667 return ndlp->nlp_state; 1668 } 1669 1670 static uint32_t 1671 lpfc_rcv_padisc_adisc_issue(struct lpfc_vport *vport, 1672 struct lpfc_nodelist *ndlp, 1673 void *arg, uint32_t evt) 1674 { 1675 struct lpfc_iocbq *cmdiocb; 1676 1677 cmdiocb = (struct lpfc_iocbq *) arg; 1678 1679 lpfc_rcv_padisc(vport, ndlp, cmdiocb); 1680 return ndlp->nlp_state; 1681 } 1682 1683 static uint32_t 1684 lpfc_rcv_prlo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1685 void *arg, uint32_t evt) 1686 { 1687 struct lpfc_iocbq *cmdiocb; 1688 1689 cmdiocb = (struct lpfc_iocbq *) arg; 1690 1691 /* Treat like rcv logo */ 1692 lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO); 1693 return ndlp->nlp_state; 1694 } 1695 1696 static uint32_t 1697 lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport, 1698 struct lpfc_nodelist *ndlp, 1699 void *arg, uint32_t evt) 1700 { 1701 struct lpfc_hba *phba = vport->phba; 1702 struct lpfc_iocbq *cmdiocb, *rspiocb; 1703 IOCB_t *irsp; 1704 ADISC *ap; 1705 int rc; 1706 1707 cmdiocb = (struct lpfc_iocbq *) arg; 1708 rspiocb = cmdiocb->context_un.rsp_iocb; 1709 1710 ap = (ADISC *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb); 1711 irsp = &rspiocb->iocb; 1712 1713 if ((irsp->ulpStatus) || 1714 (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) { 1715 /* 1 sec timeout */ 1716 mod_timer(&ndlp->nlp_delayfunc, 1717 jiffies + msecs_to_jiffies(1000)); 1718 spin_lock_irq(&ndlp->lock); 1719 ndlp->nlp_flag |= NLP_DELAY_TMO; 1720 spin_unlock_irq(&ndlp->lock); 1721 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; 1722 1723 memset(&ndlp->nlp_nodename, 0, sizeof(struct lpfc_name)); 1724 memset(&ndlp->nlp_portname, 0, sizeof(struct lpfc_name)); 1725 1726 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; 1727 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 1728 lpfc_unreg_rpi(vport, ndlp); 1729 return ndlp->nlp_state; 1730 } 1731 1732 if (phba->sli_rev == LPFC_SLI_REV4) { 1733 rc = lpfc_sli4_resume_rpi(ndlp, NULL, NULL); 1734 if (rc) { 1735 /* Stay in state and retry. */ 1736 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; 1737 return ndlp->nlp_state; 1738 } 1739 } 1740 1741 if (ndlp->nlp_type & NLP_FCP_TARGET) 1742 ndlp->nlp_fc4_type |= NLP_FC4_FCP; 1743 1744 if (ndlp->nlp_type & NLP_NVME_TARGET) 1745 ndlp->nlp_fc4_type |= NLP_FC4_NVME; 1746 1747 if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET)) { 1748 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; 1749 lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE); 1750 } else { 1751 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; 1752 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 1753 } 1754 1755 return ndlp->nlp_state; 1756 } 1757 1758 static uint32_t 1759 lpfc_device_rm_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1760 void *arg, uint32_t evt) 1761 { 1762 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { 1763 spin_lock_irq(&ndlp->lock); 1764 ndlp->nlp_flag |= NLP_NODEV_REMOVE; 1765 spin_unlock_irq(&ndlp->lock); 1766 return ndlp->nlp_state; 1767 } else { 1768 /* software abort outstanding ADISC */ 1769 lpfc_els_abort(vport->phba, ndlp); 1770 1771 lpfc_drop_node(vport, ndlp); 1772 return NLP_STE_FREED_NODE; 1773 } 1774 } 1775 1776 static uint32_t 1777 lpfc_device_recov_adisc_issue(struct lpfc_vport *vport, 1778 struct lpfc_nodelist *ndlp, 1779 void *arg, 1780 uint32_t evt) 1781 { 1782 struct lpfc_hba *phba = vport->phba; 1783 1784 /* Don't do anything that will mess up processing of the 1785 * previous RSCN. 1786 */ 1787 if (vport->fc_flag & FC_RSCN_DEFERRED) 1788 return ndlp->nlp_state; 1789 1790 /* software abort outstanding ADISC */ 1791 lpfc_els_abort(phba, ndlp); 1792 1793 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; 1794 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 1795 spin_lock_irq(&ndlp->lock); 1796 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 1797 spin_unlock_irq(&ndlp->lock); 1798 lpfc_disc_set_adisc(vport, ndlp); 1799 return ndlp->nlp_state; 1800 } 1801 1802 static uint32_t 1803 lpfc_rcv_plogi_reglogin_issue(struct lpfc_vport *vport, 1804 struct lpfc_nodelist *ndlp, 1805 void *arg, 1806 uint32_t evt) 1807 { 1808 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 1809 1810 lpfc_rcv_plogi(vport, ndlp, cmdiocb); 1811 return ndlp->nlp_state; 1812 } 1813 1814 static uint32_t 1815 lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport, 1816 struct lpfc_nodelist *ndlp, 1817 void *arg, 1818 uint32_t evt) 1819 { 1820 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 1821 struct ls_rjt stat; 1822 1823 if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb)) { 1824 return ndlp->nlp_state; 1825 } 1826 if (vport->phba->nvmet_support) { 1827 /* NVME Target mode. Handle and respond to the PRLI and 1828 * transition to UNMAPPED provided the RPI has completed 1829 * registration. 1830 */ 1831 if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { 1832 lpfc_rcv_prli(vport, ndlp, cmdiocb); 1833 lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp); 1834 } else { 1835 /* RPI registration has not completed. Reject the PRLI 1836 * to prevent an illegal state transition when the 1837 * rpi registration does complete. 1838 */ 1839 memset(&stat, 0, sizeof(struct ls_rjt)); 1840 stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY; 1841 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; 1842 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, 1843 ndlp, NULL); 1844 return ndlp->nlp_state; 1845 } 1846 } else { 1847 /* Initiator mode. */ 1848 lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp); 1849 } 1850 return ndlp->nlp_state; 1851 } 1852 1853 static uint32_t 1854 lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport, 1855 struct lpfc_nodelist *ndlp, 1856 void *arg, 1857 uint32_t evt) 1858 { 1859 struct lpfc_hba *phba = vport->phba; 1860 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 1861 LPFC_MBOXQ_t *mb; 1862 LPFC_MBOXQ_t *nextmb; 1863 struct lpfc_dmabuf *mp; 1864 struct lpfc_nodelist *ns_ndlp; 1865 1866 cmdiocb = (struct lpfc_iocbq *) arg; 1867 1868 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ 1869 if ((mb = phba->sli.mbox_active)) { 1870 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && 1871 (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) { 1872 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 1873 lpfc_nlp_put(ndlp); 1874 mb->ctx_ndlp = NULL; 1875 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 1876 } 1877 } 1878 1879 spin_lock_irq(&phba->hbalock); 1880 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 1881 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && 1882 (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) { 1883 mp = (struct lpfc_dmabuf *)(mb->ctx_buf); 1884 if (mp) { 1885 __lpfc_mbuf_free(phba, mp->virt, mp->phys); 1886 kfree(mp); 1887 } 1888 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 1889 lpfc_nlp_put(ndlp); 1890 list_del(&mb->list); 1891 phba->sli.mboxq_cnt--; 1892 mempool_free(mb, phba->mbox_mem_pool); 1893 } 1894 } 1895 spin_unlock_irq(&phba->hbalock); 1896 1897 /* software abort if any GID_FT is outstanding */ 1898 if (vport->cfg_enable_fc4_type != LPFC_ENABLE_FCP) { 1899 ns_ndlp = lpfc_findnode_did(vport, NameServer_DID); 1900 if (ns_ndlp) 1901 lpfc_els_abort(phba, ns_ndlp); 1902 } 1903 1904 lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO); 1905 return ndlp->nlp_state; 1906 } 1907 1908 static uint32_t 1909 lpfc_rcv_padisc_reglogin_issue(struct lpfc_vport *vport, 1910 struct lpfc_nodelist *ndlp, 1911 void *arg, 1912 uint32_t evt) 1913 { 1914 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 1915 1916 lpfc_rcv_padisc(vport, ndlp, cmdiocb); 1917 return ndlp->nlp_state; 1918 } 1919 1920 static uint32_t 1921 lpfc_rcv_prlo_reglogin_issue(struct lpfc_vport *vport, 1922 struct lpfc_nodelist *ndlp, 1923 void *arg, 1924 uint32_t evt) 1925 { 1926 struct lpfc_iocbq *cmdiocb; 1927 1928 cmdiocb = (struct lpfc_iocbq *) arg; 1929 lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL); 1930 return ndlp->nlp_state; 1931 } 1932 1933 static uint32_t 1934 lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport, 1935 struct lpfc_nodelist *ndlp, 1936 void *arg, 1937 uint32_t evt) 1938 { 1939 struct lpfc_hba *phba = vport->phba; 1940 LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg; 1941 MAILBOX_t *mb = &pmb->u.mb; 1942 uint32_t did = mb->un.varWords[1]; 1943 1944 if (mb->mbxStatus) { 1945 /* RegLogin failed */ 1946 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1947 "0246 RegLogin failed Data: x%x x%x x%x x%x " 1948 "x%x\n", 1949 did, mb->mbxStatus, vport->port_state, 1950 mb->un.varRegLogin.vpi, 1951 mb->un.varRegLogin.rpi); 1952 /* 1953 * If RegLogin failed due to lack of HBA resources do not 1954 * retry discovery. 1955 */ 1956 if (mb->mbxStatus == MBXERR_RPI_FULL) { 1957 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; 1958 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 1959 return ndlp->nlp_state; 1960 } 1961 1962 /* Put ndlp in npr state set plogi timer for 1 sec */ 1963 mod_timer(&ndlp->nlp_delayfunc, 1964 jiffies + msecs_to_jiffies(1000 * 1)); 1965 spin_lock_irq(&ndlp->lock); 1966 ndlp->nlp_flag |= NLP_DELAY_TMO; 1967 spin_unlock_irq(&ndlp->lock); 1968 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; 1969 1970 lpfc_issue_els_logo(vport, ndlp, 0); 1971 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; 1972 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 1973 return ndlp->nlp_state; 1974 } 1975 1976 /* SLI4 ports have preallocated logical rpis. */ 1977 if (phba->sli_rev < LPFC_SLI_REV4) 1978 ndlp->nlp_rpi = mb->un.varWords[0]; 1979 1980 ndlp->nlp_flag |= NLP_RPI_REGISTERED; 1981 1982 /* Only if we are not a fabric nport do we issue PRLI */ 1983 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 1984 "3066 RegLogin Complete on x%x x%x x%x\n", 1985 did, ndlp->nlp_type, ndlp->nlp_fc4_type); 1986 if (!(ndlp->nlp_type & NLP_FABRIC) && 1987 (phba->nvmet_support == 0)) { 1988 /* The driver supports FCP and NVME concurrently. If the 1989 * ndlp's nlp_fc4_type is still zero, the driver doesn't 1990 * know what PRLI to send yet. Figure that out now and 1991 * call PRLI depending on the outcome. 1992 */ 1993 if (vport->fc_flag & FC_PT2PT) { 1994 /* If we are pt2pt, there is no Fabric to determine 1995 * the FC4 type of the remote nport. So if NVME 1996 * is configured try it. 1997 */ 1998 ndlp->nlp_fc4_type |= NLP_FC4_FCP; 1999 if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || 2000 (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { 2001 ndlp->nlp_fc4_type |= NLP_FC4_NVME; 2002 /* We need to update the localport also */ 2003 lpfc_nvme_update_localport(vport); 2004 } 2005 2006 } else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 2007 ndlp->nlp_fc4_type |= NLP_FC4_FCP; 2008 2009 } else if (ndlp->nlp_fc4_type == 0) { 2010 /* If we are only configured for FCP, the driver 2011 * should just issue PRLI for FCP. Otherwise issue 2012 * GFT_ID to determine if remote port supports NVME. 2013 */ 2014 if (vport->cfg_enable_fc4_type != LPFC_ENABLE_FCP) { 2015 lpfc_ns_cmd(vport, SLI_CTNS_GFT_ID, 0, 2016 ndlp->nlp_DID); 2017 return ndlp->nlp_state; 2018 } 2019 ndlp->nlp_fc4_type = NLP_FC4_FCP; 2020 } 2021 2022 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; 2023 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 2024 if (lpfc_issue_els_prli(vport, ndlp, 0)) { 2025 lpfc_issue_els_logo(vport, ndlp, 0); 2026 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; 2027 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 2028 } 2029 } else { 2030 if ((vport->fc_flag & FC_PT2PT) && phba->nvmet_support) 2031 phba->targetport->port_id = vport->fc_myDID; 2032 2033 /* Only Fabric ports should transition. NVME target 2034 * must complete PRLI. 2035 */ 2036 if (ndlp->nlp_type & NLP_FABRIC) { 2037 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; 2038 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 2039 } 2040 } 2041 return ndlp->nlp_state; 2042 } 2043 2044 static uint32_t 2045 lpfc_device_rm_reglogin_issue(struct lpfc_vport *vport, 2046 struct lpfc_nodelist *ndlp, 2047 void *arg, 2048 uint32_t evt) 2049 { 2050 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { 2051 spin_lock_irq(&ndlp->lock); 2052 ndlp->nlp_flag |= NLP_NODEV_REMOVE; 2053 spin_unlock_irq(&ndlp->lock); 2054 return ndlp->nlp_state; 2055 } else { 2056 lpfc_drop_node(vport, ndlp); 2057 return NLP_STE_FREED_NODE; 2058 } 2059 } 2060 2061 static uint32_t 2062 lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport, 2063 struct lpfc_nodelist *ndlp, 2064 void *arg, 2065 uint32_t evt) 2066 { 2067 /* Don't do anything that will mess up processing of the 2068 * previous RSCN. 2069 */ 2070 if (vport->fc_flag & FC_RSCN_DEFERRED) 2071 return ndlp->nlp_state; 2072 2073 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; 2074 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 2075 spin_lock_irq(&ndlp->lock); 2076 2077 /* If we are a target we won't immediately transition into PRLI, 2078 * so if REG_LOGIN already completed we don't need to ignore it. 2079 */ 2080 if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED) || 2081 !vport->phba->nvmet_support) 2082 ndlp->nlp_flag |= NLP_IGNR_REG_CMPL; 2083 2084 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 2085 spin_unlock_irq(&ndlp->lock); 2086 lpfc_disc_set_adisc(vport, ndlp); 2087 return ndlp->nlp_state; 2088 } 2089 2090 static uint32_t 2091 lpfc_rcv_plogi_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2092 void *arg, uint32_t evt) 2093 { 2094 struct lpfc_iocbq *cmdiocb; 2095 2096 cmdiocb = (struct lpfc_iocbq *) arg; 2097 2098 lpfc_rcv_plogi(vport, ndlp, cmdiocb); 2099 return ndlp->nlp_state; 2100 } 2101 2102 static uint32_t 2103 lpfc_rcv_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2104 void *arg, uint32_t evt) 2105 { 2106 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 2107 2108 if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb)) 2109 return ndlp->nlp_state; 2110 lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp); 2111 return ndlp->nlp_state; 2112 } 2113 2114 static uint32_t 2115 lpfc_rcv_logo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2116 void *arg, uint32_t evt) 2117 { 2118 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 2119 2120 /* Software abort outstanding PRLI before sending acc */ 2121 lpfc_els_abort(vport->phba, ndlp); 2122 2123 lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO); 2124 return ndlp->nlp_state; 2125 } 2126 2127 static uint32_t 2128 lpfc_rcv_padisc_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2129 void *arg, uint32_t evt) 2130 { 2131 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 2132 2133 lpfc_rcv_padisc(vport, ndlp, cmdiocb); 2134 return ndlp->nlp_state; 2135 } 2136 2137 /* This routine is envoked when we rcv a PRLO request from a nport 2138 * we are logged into. We should send back a PRLO rsp setting the 2139 * appropriate bits. 2140 * NEXT STATE = PRLI_ISSUE 2141 */ 2142 static uint32_t 2143 lpfc_rcv_prlo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2144 void *arg, uint32_t evt) 2145 { 2146 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 2147 2148 lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL); 2149 return ndlp->nlp_state; 2150 } 2151 2152 static uint32_t 2153 lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2154 void *arg, uint32_t evt) 2155 { 2156 struct lpfc_iocbq *cmdiocb, *rspiocb; 2157 struct lpfc_hba *phba = vport->phba; 2158 IOCB_t *irsp; 2159 PRLI *npr; 2160 struct lpfc_nvme_prli *nvpr; 2161 void *temp_ptr; 2162 2163 cmdiocb = (struct lpfc_iocbq *) arg; 2164 rspiocb = cmdiocb->context_un.rsp_iocb; 2165 2166 /* A solicited PRLI is either FCP or NVME. The PRLI cmd/rsp 2167 * format is different so NULL the two PRLI types so that the 2168 * driver correctly gets the correct context. 2169 */ 2170 npr = NULL; 2171 nvpr = NULL; 2172 temp_ptr = lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb); 2173 if (cmdiocb->iocb_flag & LPFC_PRLI_FCP_REQ) 2174 npr = (PRLI *) temp_ptr; 2175 else if (cmdiocb->iocb_flag & LPFC_PRLI_NVME_REQ) 2176 nvpr = (struct lpfc_nvme_prli *) temp_ptr; 2177 2178 irsp = &rspiocb->iocb; 2179 if (irsp->ulpStatus) { 2180 if ((vport->port_type == LPFC_NPIV_PORT) && 2181 vport->cfg_restrict_login) { 2182 goto out; 2183 } 2184 2185 /* Adjust the nlp_type accordingly if the PRLI failed */ 2186 if (npr) 2187 ndlp->nlp_fc4_type &= ~NLP_FC4_FCP; 2188 if (nvpr) 2189 ndlp->nlp_fc4_type &= ~NLP_FC4_NVME; 2190 2191 /* We can't set the DSM state till BOTH PRLIs complete */ 2192 goto out_err; 2193 } 2194 2195 if (npr && (npr->acceptRspCode == PRLI_REQ_EXECUTED) && 2196 (npr->prliType == PRLI_FCP_TYPE)) { 2197 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 2198 "6028 FCP NPR PRLI Cmpl Init %d Target %d\n", 2199 npr->initiatorFunc, 2200 npr->targetFunc); 2201 if (npr->initiatorFunc) 2202 ndlp->nlp_type |= NLP_FCP_INITIATOR; 2203 if (npr->targetFunc) { 2204 ndlp->nlp_type |= NLP_FCP_TARGET; 2205 if (npr->writeXferRdyDis) 2206 ndlp->nlp_flag |= NLP_FIRSTBURST; 2207 } 2208 if (npr->Retry) 2209 ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE; 2210 2211 } else if (nvpr && 2212 (bf_get_be32(prli_acc_rsp_code, nvpr) == 2213 PRLI_REQ_EXECUTED) && 2214 (bf_get_be32(prli_type_code, nvpr) == 2215 PRLI_NVME_TYPE)) { 2216 2217 /* Complete setting up the remote ndlp personality. */ 2218 if (bf_get_be32(prli_init, nvpr)) 2219 ndlp->nlp_type |= NLP_NVME_INITIATOR; 2220 2221 if (phba->nsler && bf_get_be32(prli_nsler, nvpr) && 2222 bf_get_be32(prli_conf, nvpr)) 2223 2224 ndlp->nlp_nvme_info |= NLP_NVME_NSLER; 2225 else 2226 ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER; 2227 2228 /* Target driver cannot solicit NVME FB. */ 2229 if (bf_get_be32(prli_tgt, nvpr)) { 2230 /* Complete the nvme target roles. The transport 2231 * needs to know if the rport is capable of 2232 * discovery in addition to its role. 2233 */ 2234 ndlp->nlp_type |= NLP_NVME_TARGET; 2235 if (bf_get_be32(prli_disc, nvpr)) 2236 ndlp->nlp_type |= NLP_NVME_DISCOVERY; 2237 2238 /* 2239 * If prli_fba is set, the Target supports FirstBurst. 2240 * If prli_fb_sz is 0, the FirstBurst size is unlimited, 2241 * otherwise it defines the actual size supported by 2242 * the NVME Target. 2243 */ 2244 if ((bf_get_be32(prli_fba, nvpr) == 1) && 2245 (phba->cfg_nvme_enable_fb) && 2246 (!phba->nvmet_support)) { 2247 /* Both sides support FB. The target's first 2248 * burst size is a 512 byte encoded value. 2249 */ 2250 ndlp->nlp_flag |= NLP_FIRSTBURST; 2251 ndlp->nvme_fb_size = bf_get_be32(prli_fb_sz, 2252 nvpr); 2253 2254 /* Expressed in units of 512 bytes */ 2255 if (ndlp->nvme_fb_size) 2256 ndlp->nvme_fb_size <<= 2257 LPFC_NVME_FB_SHIFT; 2258 else 2259 ndlp->nvme_fb_size = LPFC_NVME_MAX_FB; 2260 } 2261 } 2262 2263 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 2264 "6029 NVME PRLI Cmpl w1 x%08x " 2265 "w4 x%08x w5 x%08x flag x%x, " 2266 "fcp_info x%x nlp_type x%x\n", 2267 be32_to_cpu(nvpr->word1), 2268 be32_to_cpu(nvpr->word4), 2269 be32_to_cpu(nvpr->word5), 2270 ndlp->nlp_flag, ndlp->nlp_fcp_info, 2271 ndlp->nlp_type); 2272 } 2273 if (!(ndlp->nlp_type & NLP_FCP_TARGET) && 2274 (vport->port_type == LPFC_NPIV_PORT) && 2275 vport->cfg_restrict_login) { 2276 out: 2277 spin_lock_irq(&ndlp->lock); 2278 ndlp->nlp_flag |= NLP_TARGET_REMOVE; 2279 spin_unlock_irq(&ndlp->lock); 2280 lpfc_issue_els_logo(vport, ndlp, 0); 2281 2282 ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; 2283 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 2284 return ndlp->nlp_state; 2285 } 2286 2287 out_err: 2288 /* The ndlp state cannot move to MAPPED or UNMAPPED before all PRLIs 2289 * are complete. 2290 */ 2291 if (ndlp->fc4_prli_sent == 0) { 2292 ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; 2293 if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET)) 2294 lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE); 2295 else if (ndlp->nlp_type & 2296 (NLP_FCP_INITIATOR | NLP_NVME_INITIATOR)) 2297 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 2298 } else 2299 lpfc_printf_vlog(vport, 2300 KERN_INFO, LOG_ELS, 2301 "3067 PRLI's still outstanding " 2302 "on x%06x - count %d, Pend Node Mode " 2303 "transition...\n", 2304 ndlp->nlp_DID, ndlp->fc4_prli_sent); 2305 2306 return ndlp->nlp_state; 2307 } 2308 2309 /*! lpfc_device_rm_prli_issue 2310 * 2311 * \pre 2312 * \post 2313 * \param phba 2314 * \param ndlp 2315 * \param arg 2316 * \param evt 2317 * \return uint32_t 2318 * 2319 * \b Description: 2320 * This routine is envoked when we a request to remove a nport we are in the 2321 * process of PRLIing. We should software abort outstanding prli, unreg 2322 * login, send a logout. We will change node state to UNUSED_NODE, put it 2323 * on plogi list so it can be freed when LOGO completes. 2324 * 2325 */ 2326 2327 static uint32_t 2328 lpfc_device_rm_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2329 void *arg, uint32_t evt) 2330 { 2331 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { 2332 spin_lock_irq(&ndlp->lock); 2333 ndlp->nlp_flag |= NLP_NODEV_REMOVE; 2334 spin_unlock_irq(&ndlp->lock); 2335 return ndlp->nlp_state; 2336 } else { 2337 /* software abort outstanding PLOGI */ 2338 lpfc_els_abort(vport->phba, ndlp); 2339 2340 lpfc_drop_node(vport, ndlp); 2341 return NLP_STE_FREED_NODE; 2342 } 2343 } 2344 2345 2346 /*! lpfc_device_recov_prli_issue 2347 * 2348 * \pre 2349 * \post 2350 * \param phba 2351 * \param ndlp 2352 * \param arg 2353 * \param evt 2354 * \return uint32_t 2355 * 2356 * \b Description: 2357 * The routine is envoked when the state of a device is unknown, like 2358 * during a link down. We should remove the nodelist entry from the 2359 * unmapped list, issue a UNREG_LOGIN, do a software abort of the 2360 * outstanding PRLI command, then free the node entry. 2361 */ 2362 static uint32_t 2363 lpfc_device_recov_prli_issue(struct lpfc_vport *vport, 2364 struct lpfc_nodelist *ndlp, 2365 void *arg, 2366 uint32_t evt) 2367 { 2368 struct lpfc_hba *phba = vport->phba; 2369 2370 /* Don't do anything that will mess up processing of the 2371 * previous RSCN. 2372 */ 2373 if (vport->fc_flag & FC_RSCN_DEFERRED) 2374 return ndlp->nlp_state; 2375 2376 /* software abort outstanding PRLI */ 2377 lpfc_els_abort(phba, ndlp); 2378 2379 ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; 2380 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 2381 spin_lock_irq(&ndlp->lock); 2382 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 2383 spin_unlock_irq(&ndlp->lock); 2384 lpfc_disc_set_adisc(vport, ndlp); 2385 return ndlp->nlp_state; 2386 } 2387 2388 static uint32_t 2389 lpfc_rcv_plogi_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2390 void *arg, uint32_t evt) 2391 { 2392 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg; 2393 struct ls_rjt stat; 2394 2395 memset(&stat, 0, sizeof(struct ls_rjt)); 2396 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 2397 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; 2398 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 2399 return ndlp->nlp_state; 2400 } 2401 2402 static uint32_t 2403 lpfc_rcv_prli_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2404 void *arg, uint32_t evt) 2405 { 2406 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg; 2407 struct ls_rjt stat; 2408 2409 memset(&stat, 0, sizeof(struct ls_rjt)); 2410 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 2411 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; 2412 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 2413 return ndlp->nlp_state; 2414 } 2415 2416 static uint32_t 2417 lpfc_rcv_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2418 void *arg, uint32_t evt) 2419 { 2420 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg; 2421 2422 spin_lock_irq(&ndlp->lock); 2423 ndlp->nlp_flag |= NLP_LOGO_ACC; 2424 spin_unlock_irq(&ndlp->lock); 2425 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 2426 return ndlp->nlp_state; 2427 } 2428 2429 static uint32_t 2430 lpfc_rcv_padisc_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2431 void *arg, uint32_t evt) 2432 { 2433 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg; 2434 struct ls_rjt stat; 2435 2436 memset(&stat, 0, sizeof(struct ls_rjt)); 2437 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 2438 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; 2439 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 2440 return ndlp->nlp_state; 2441 } 2442 2443 static uint32_t 2444 lpfc_rcv_prlo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2445 void *arg, uint32_t evt) 2446 { 2447 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg; 2448 struct ls_rjt stat; 2449 2450 memset(&stat, 0, sizeof(struct ls_rjt)); 2451 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 2452 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; 2453 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 2454 return ndlp->nlp_state; 2455 } 2456 2457 static uint32_t 2458 lpfc_cmpl_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2459 void *arg, uint32_t evt) 2460 { 2461 ndlp->nlp_prev_state = NLP_STE_LOGO_ISSUE; 2462 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 2463 spin_lock_irq(&ndlp->lock); 2464 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 2465 spin_unlock_irq(&ndlp->lock); 2466 lpfc_disc_set_adisc(vport, ndlp); 2467 return ndlp->nlp_state; 2468 } 2469 2470 static uint32_t 2471 lpfc_device_rm_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2472 void *arg, uint32_t evt) 2473 { 2474 /* 2475 * DevLoss has timed out and is calling for Device Remove. 2476 * In this case, abort the LOGO and cleanup the ndlp 2477 */ 2478 2479 lpfc_unreg_rpi(vport, ndlp); 2480 /* software abort outstanding PLOGI */ 2481 lpfc_els_abort(vport->phba, ndlp); 2482 lpfc_drop_node(vport, ndlp); 2483 return NLP_STE_FREED_NODE; 2484 } 2485 2486 static uint32_t 2487 lpfc_device_recov_logo_issue(struct lpfc_vport *vport, 2488 struct lpfc_nodelist *ndlp, 2489 void *arg, uint32_t evt) 2490 { 2491 /* 2492 * Device Recovery events have no meaning for a node with a LOGO 2493 * outstanding. The LOGO has to complete first and handle the 2494 * node from that point. 2495 */ 2496 return ndlp->nlp_state; 2497 } 2498 2499 static uint32_t 2500 lpfc_rcv_plogi_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2501 void *arg, uint32_t evt) 2502 { 2503 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 2504 2505 lpfc_rcv_plogi(vport, ndlp, cmdiocb); 2506 return ndlp->nlp_state; 2507 } 2508 2509 static uint32_t 2510 lpfc_rcv_prli_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2511 void *arg, uint32_t evt) 2512 { 2513 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 2514 2515 if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb)) 2516 return ndlp->nlp_state; 2517 2518 lpfc_rcv_prli(vport, ndlp, cmdiocb); 2519 lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp); 2520 return ndlp->nlp_state; 2521 } 2522 2523 static uint32_t 2524 lpfc_rcv_logo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2525 void *arg, uint32_t evt) 2526 { 2527 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 2528 2529 lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO); 2530 return ndlp->nlp_state; 2531 } 2532 2533 static uint32_t 2534 lpfc_rcv_padisc_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2535 void *arg, uint32_t evt) 2536 { 2537 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 2538 2539 lpfc_rcv_padisc(vport, ndlp, cmdiocb); 2540 return ndlp->nlp_state; 2541 } 2542 2543 static uint32_t 2544 lpfc_rcv_prlo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2545 void *arg, uint32_t evt) 2546 { 2547 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 2548 2549 lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL); 2550 return ndlp->nlp_state; 2551 } 2552 2553 static uint32_t 2554 lpfc_device_recov_unmap_node(struct lpfc_vport *vport, 2555 struct lpfc_nodelist *ndlp, 2556 void *arg, 2557 uint32_t evt) 2558 { 2559 ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE; 2560 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 2561 spin_lock_irq(&ndlp->lock); 2562 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 2563 ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME); 2564 spin_unlock_irq(&ndlp->lock); 2565 lpfc_disc_set_adisc(vport, ndlp); 2566 2567 return ndlp->nlp_state; 2568 } 2569 2570 static uint32_t 2571 lpfc_rcv_plogi_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2572 void *arg, uint32_t evt) 2573 { 2574 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 2575 2576 lpfc_rcv_plogi(vport, ndlp, cmdiocb); 2577 return ndlp->nlp_state; 2578 } 2579 2580 static uint32_t 2581 lpfc_rcv_prli_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2582 void *arg, uint32_t evt) 2583 { 2584 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 2585 2586 if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb)) 2587 return ndlp->nlp_state; 2588 lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp); 2589 return ndlp->nlp_state; 2590 } 2591 2592 static uint32_t 2593 lpfc_rcv_logo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2594 void *arg, uint32_t evt) 2595 { 2596 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 2597 2598 lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO); 2599 return ndlp->nlp_state; 2600 } 2601 2602 static uint32_t 2603 lpfc_rcv_padisc_mapped_node(struct lpfc_vport *vport, 2604 struct lpfc_nodelist *ndlp, 2605 void *arg, uint32_t evt) 2606 { 2607 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 2608 2609 lpfc_rcv_padisc(vport, ndlp, cmdiocb); 2610 return ndlp->nlp_state; 2611 } 2612 2613 static uint32_t 2614 lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2615 void *arg, uint32_t evt) 2616 { 2617 struct lpfc_hba *phba = vport->phba; 2618 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 2619 2620 /* flush the target */ 2621 lpfc_sli_abort_iocb(vport, &phba->sli.sli3_ring[LPFC_FCP_RING], 2622 ndlp->nlp_sid, 0, LPFC_CTX_TGT); 2623 2624 /* Treat like rcv logo */ 2625 lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO); 2626 return ndlp->nlp_state; 2627 } 2628 2629 static uint32_t 2630 lpfc_device_recov_mapped_node(struct lpfc_vport *vport, 2631 struct lpfc_nodelist *ndlp, 2632 void *arg, 2633 uint32_t evt) 2634 { 2635 ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE; 2636 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 2637 spin_lock_irq(&ndlp->lock); 2638 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 2639 ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME); 2640 spin_unlock_irq(&ndlp->lock); 2641 lpfc_disc_set_adisc(vport, ndlp); 2642 return ndlp->nlp_state; 2643 } 2644 2645 static uint32_t 2646 lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2647 void *arg, uint32_t evt) 2648 { 2649 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 2650 2651 /* Ignore PLOGI if we have an outstanding LOGO */ 2652 if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC)) 2653 return ndlp->nlp_state; 2654 if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) { 2655 lpfc_cancel_retry_delay_tmo(vport, ndlp); 2656 spin_lock_irq(&ndlp->lock); 2657 ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC); 2658 spin_unlock_irq(&ndlp->lock); 2659 } else if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) { 2660 /* send PLOGI immediately, move to PLOGI issue state */ 2661 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { 2662 ndlp->nlp_prev_state = NLP_STE_NPR_NODE; 2663 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 2664 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 2665 } 2666 } 2667 return ndlp->nlp_state; 2668 } 2669 2670 static uint32_t 2671 lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2672 void *arg, uint32_t evt) 2673 { 2674 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 2675 struct ls_rjt stat; 2676 2677 memset(&stat, 0, sizeof (struct ls_rjt)); 2678 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 2679 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; 2680 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 2681 2682 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { 2683 if (ndlp->nlp_flag & NLP_NPR_ADISC) { 2684 spin_lock_irq(&ndlp->lock); 2685 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 2686 ndlp->nlp_prev_state = NLP_STE_NPR_NODE; 2687 spin_unlock_irq(&ndlp->lock); 2688 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 2689 lpfc_issue_els_adisc(vport, ndlp, 0); 2690 } else { 2691 ndlp->nlp_prev_state = NLP_STE_NPR_NODE; 2692 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 2693 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 2694 } 2695 } 2696 return ndlp->nlp_state; 2697 } 2698 2699 static uint32_t 2700 lpfc_rcv_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2701 void *arg, uint32_t evt) 2702 { 2703 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 2704 2705 lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO); 2706 return ndlp->nlp_state; 2707 } 2708 2709 static uint32_t 2710 lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2711 void *arg, uint32_t evt) 2712 { 2713 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 2714 2715 lpfc_rcv_padisc(vport, ndlp, cmdiocb); 2716 /* 2717 * Do not start discovery if discovery is about to start 2718 * or discovery in progress for this node. Starting discovery 2719 * here will affect the counting of discovery threads. 2720 */ 2721 if (!(ndlp->nlp_flag & NLP_DELAY_TMO) && 2722 !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) { 2723 if (ndlp->nlp_flag & NLP_NPR_ADISC) { 2724 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 2725 ndlp->nlp_prev_state = NLP_STE_NPR_NODE; 2726 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 2727 lpfc_issue_els_adisc(vport, ndlp, 0); 2728 } else { 2729 ndlp->nlp_prev_state = NLP_STE_NPR_NODE; 2730 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 2731 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 2732 } 2733 } 2734 return ndlp->nlp_state; 2735 } 2736 2737 static uint32_t 2738 lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2739 void *arg, uint32_t evt) 2740 { 2741 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 2742 2743 spin_lock_irq(&ndlp->lock); 2744 ndlp->nlp_flag |= NLP_LOGO_ACC; 2745 spin_unlock_irq(&ndlp->lock); 2746 2747 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 2748 2749 if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) { 2750 mod_timer(&ndlp->nlp_delayfunc, 2751 jiffies + msecs_to_jiffies(1000 * 1)); 2752 spin_lock_irq(&ndlp->lock); 2753 ndlp->nlp_flag |= NLP_DELAY_TMO; 2754 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 2755 spin_unlock_irq(&ndlp->lock); 2756 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; 2757 } else { 2758 spin_lock_irq(&ndlp->lock); 2759 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 2760 spin_unlock_irq(&ndlp->lock); 2761 } 2762 return ndlp->nlp_state; 2763 } 2764 2765 static uint32_t 2766 lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2767 void *arg, uint32_t evt) 2768 { 2769 struct lpfc_iocbq *cmdiocb, *rspiocb; 2770 IOCB_t *irsp; 2771 2772 cmdiocb = (struct lpfc_iocbq *) arg; 2773 rspiocb = cmdiocb->context_un.rsp_iocb; 2774 2775 irsp = &rspiocb->iocb; 2776 if (irsp->ulpStatus) { 2777 return NLP_STE_FREED_NODE; 2778 } 2779 return ndlp->nlp_state; 2780 } 2781 2782 static uint32_t 2783 lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2784 void *arg, uint32_t evt) 2785 { 2786 struct lpfc_iocbq *cmdiocb, *rspiocb; 2787 IOCB_t *irsp; 2788 2789 cmdiocb = (struct lpfc_iocbq *) arg; 2790 rspiocb = cmdiocb->context_un.rsp_iocb; 2791 2792 irsp = &rspiocb->iocb; 2793 if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) { 2794 lpfc_drop_node(vport, ndlp); 2795 return NLP_STE_FREED_NODE; 2796 } 2797 return ndlp->nlp_state; 2798 } 2799 2800 static uint32_t 2801 lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2802 void *arg, uint32_t evt) 2803 { 2804 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2805 2806 /* For the fabric port just clear the fc flags. */ 2807 if (ndlp->nlp_DID == Fabric_DID) { 2808 spin_lock_irq(shost->host_lock); 2809 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 2810 spin_unlock_irq(shost->host_lock); 2811 } 2812 lpfc_unreg_rpi(vport, ndlp); 2813 return ndlp->nlp_state; 2814 } 2815 2816 static uint32_t 2817 lpfc_cmpl_adisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2818 void *arg, uint32_t evt) 2819 { 2820 struct lpfc_iocbq *cmdiocb, *rspiocb; 2821 IOCB_t *irsp; 2822 2823 cmdiocb = (struct lpfc_iocbq *) arg; 2824 rspiocb = cmdiocb->context_un.rsp_iocb; 2825 2826 irsp = &rspiocb->iocb; 2827 if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) { 2828 lpfc_drop_node(vport, ndlp); 2829 return NLP_STE_FREED_NODE; 2830 } 2831 return ndlp->nlp_state; 2832 } 2833 2834 static uint32_t 2835 lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport, 2836 struct lpfc_nodelist *ndlp, 2837 void *arg, uint32_t evt) 2838 { 2839 LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg; 2840 MAILBOX_t *mb = &pmb->u.mb; 2841 2842 if (!mb->mbxStatus) { 2843 /* SLI4 ports have preallocated logical rpis. */ 2844 if (vport->phba->sli_rev < LPFC_SLI_REV4) 2845 ndlp->nlp_rpi = mb->un.varWords[0]; 2846 ndlp->nlp_flag |= NLP_RPI_REGISTERED; 2847 if (ndlp->nlp_flag & NLP_LOGO_ACC) { 2848 lpfc_unreg_rpi(vport, ndlp); 2849 } 2850 } else { 2851 if (ndlp->nlp_flag & NLP_NODEV_REMOVE) { 2852 lpfc_drop_node(vport, ndlp); 2853 return NLP_STE_FREED_NODE; 2854 } 2855 } 2856 return ndlp->nlp_state; 2857 } 2858 2859 static uint32_t 2860 lpfc_device_rm_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2861 void *arg, uint32_t evt) 2862 { 2863 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { 2864 spin_lock_irq(&ndlp->lock); 2865 ndlp->nlp_flag |= NLP_NODEV_REMOVE; 2866 spin_unlock_irq(&ndlp->lock); 2867 return ndlp->nlp_state; 2868 } 2869 lpfc_drop_node(vport, ndlp); 2870 return NLP_STE_FREED_NODE; 2871 } 2872 2873 static uint32_t 2874 lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2875 void *arg, uint32_t evt) 2876 { 2877 /* Don't do anything that will mess up processing of the 2878 * previous RSCN. 2879 */ 2880 if (vport->fc_flag & FC_RSCN_DEFERRED) 2881 return ndlp->nlp_state; 2882 2883 lpfc_cancel_retry_delay_tmo(vport, ndlp); 2884 spin_lock_irq(&ndlp->lock); 2885 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 2886 ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME); 2887 spin_unlock_irq(&ndlp->lock); 2888 return ndlp->nlp_state; 2889 } 2890 2891 2892 /* This next section defines the NPort Discovery State Machine */ 2893 2894 /* There are 4 different double linked lists nodelist entries can reside on. 2895 * The plogi list and adisc list are used when Link Up discovery or RSCN 2896 * processing is needed. Each list holds the nodes that we will send PLOGI 2897 * or ADISC on. These lists will keep track of what nodes will be effected 2898 * by an RSCN, or a Link Up (Typically, all nodes are effected on Link Up). 2899 * The unmapped_list will contain all nodes that we have successfully logged 2900 * into at the Fibre Channel level. The mapped_list will contain all nodes 2901 * that are mapped FCP targets. 2902 */ 2903 /* 2904 * The bind list is a list of undiscovered (potentially non-existent) nodes 2905 * that we have saved binding information on. This information is used when 2906 * nodes transition from the unmapped to the mapped list. 2907 */ 2908 /* For UNUSED_NODE state, the node has just been allocated . 2909 * For PLOGI_ISSUE and REG_LOGIN_ISSUE, the node is on 2910 * the PLOGI list. For REG_LOGIN_COMPL, the node is taken off the PLOGI list 2911 * and put on the unmapped list. For ADISC processing, the node is taken off 2912 * the ADISC list and placed on either the mapped or unmapped list (depending 2913 * on its previous state). Once on the unmapped list, a PRLI is issued and the 2914 * state changed to PRLI_ISSUE. When the PRLI completion occurs, the state is 2915 * changed to UNMAPPED_NODE. If the completion indicates a mapped 2916 * node, the node is taken off the unmapped list. The binding list is checked 2917 * for a valid binding, or a binding is automatically assigned. If binding 2918 * assignment is unsuccessful, the node is left on the unmapped list. If 2919 * binding assignment is successful, the associated binding list entry (if 2920 * any) is removed, and the node is placed on the mapped list. 2921 */ 2922 /* 2923 * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped 2924 * lists will receive a DEVICE_RECOVERY event. If the linkdown or devloss timers 2925 * expire, all effected nodes will receive a DEVICE_RM event. 2926 */ 2927 /* 2928 * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists 2929 * to either the ADISC or PLOGI list. After a Nameserver query or ALPA loopmap 2930 * check, additional nodes may be added or removed (via DEVICE_RM) to / from 2931 * the PLOGI or ADISC lists. Once the PLOGI and ADISC lists are populated, 2932 * we will first process the ADISC list. 32 entries are processed initially and 2933 * ADISC is initited for each one. Completions / Events for each node are 2934 * funnelled thru the state machine. As each node finishes ADISC processing, it 2935 * starts ADISC for any nodes waiting for ADISC processing. If no nodes are 2936 * waiting, and the ADISC list count is identically 0, then we are done. For 2937 * Link Up discovery, since all nodes on the PLOGI list are UNREG_LOGIN'ed, we 2938 * can issue a CLEAR_LA and reenable Link Events. Next we will process the PLOGI 2939 * list. 32 entries are processed initially and PLOGI is initited for each one. 2940 * Completions / Events for each node are funnelled thru the state machine. As 2941 * each node finishes PLOGI processing, it starts PLOGI for any nodes waiting 2942 * for PLOGI processing. If no nodes are waiting, and the PLOGI list count is 2943 * indentically 0, then we are done. We have now completed discovery / RSCN 2944 * handling. Upon completion, ALL nodes should be on either the mapped or 2945 * unmapped lists. 2946 */ 2947 2948 static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT]) 2949 (struct lpfc_vport *, struct lpfc_nodelist *, void *, uint32_t) = { 2950 /* Action routine Event Current State */ 2951 lpfc_rcv_plogi_unused_node, /* RCV_PLOGI UNUSED_NODE */ 2952 lpfc_rcv_els_unused_node, /* RCV_PRLI */ 2953 lpfc_rcv_logo_unused_node, /* RCV_LOGO */ 2954 lpfc_rcv_els_unused_node, /* RCV_ADISC */ 2955 lpfc_rcv_els_unused_node, /* RCV_PDISC */ 2956 lpfc_rcv_els_unused_node, /* RCV_PRLO */ 2957 lpfc_disc_illegal, /* CMPL_PLOGI */ 2958 lpfc_disc_illegal, /* CMPL_PRLI */ 2959 lpfc_cmpl_logo_unused_node, /* CMPL_LOGO */ 2960 lpfc_disc_illegal, /* CMPL_ADISC */ 2961 lpfc_disc_illegal, /* CMPL_REG_LOGIN */ 2962 lpfc_device_rm_unused_node, /* DEVICE_RM */ 2963 lpfc_device_recov_unused_node, /* DEVICE_RECOVERY */ 2964 2965 lpfc_rcv_plogi_plogi_issue, /* RCV_PLOGI PLOGI_ISSUE */ 2966 lpfc_rcv_prli_plogi_issue, /* RCV_PRLI */ 2967 lpfc_rcv_logo_plogi_issue, /* RCV_LOGO */ 2968 lpfc_rcv_els_plogi_issue, /* RCV_ADISC */ 2969 lpfc_rcv_els_plogi_issue, /* RCV_PDISC */ 2970 lpfc_rcv_els_plogi_issue, /* RCV_PRLO */ 2971 lpfc_cmpl_plogi_plogi_issue, /* CMPL_PLOGI */ 2972 lpfc_disc_illegal, /* CMPL_PRLI */ 2973 lpfc_cmpl_logo_plogi_issue, /* CMPL_LOGO */ 2974 lpfc_disc_illegal, /* CMPL_ADISC */ 2975 lpfc_cmpl_reglogin_plogi_issue,/* CMPL_REG_LOGIN */ 2976 lpfc_device_rm_plogi_issue, /* DEVICE_RM */ 2977 lpfc_device_recov_plogi_issue, /* DEVICE_RECOVERY */ 2978 2979 lpfc_rcv_plogi_adisc_issue, /* RCV_PLOGI ADISC_ISSUE */ 2980 lpfc_rcv_prli_adisc_issue, /* RCV_PRLI */ 2981 lpfc_rcv_logo_adisc_issue, /* RCV_LOGO */ 2982 lpfc_rcv_padisc_adisc_issue, /* RCV_ADISC */ 2983 lpfc_rcv_padisc_adisc_issue, /* RCV_PDISC */ 2984 lpfc_rcv_prlo_adisc_issue, /* RCV_PRLO */ 2985 lpfc_disc_illegal, /* CMPL_PLOGI */ 2986 lpfc_disc_illegal, /* CMPL_PRLI */ 2987 lpfc_disc_illegal, /* CMPL_LOGO */ 2988 lpfc_cmpl_adisc_adisc_issue, /* CMPL_ADISC */ 2989 lpfc_disc_illegal, /* CMPL_REG_LOGIN */ 2990 lpfc_device_rm_adisc_issue, /* DEVICE_RM */ 2991 lpfc_device_recov_adisc_issue, /* DEVICE_RECOVERY */ 2992 2993 lpfc_rcv_plogi_reglogin_issue, /* RCV_PLOGI REG_LOGIN_ISSUE */ 2994 lpfc_rcv_prli_reglogin_issue, /* RCV_PLOGI */ 2995 lpfc_rcv_logo_reglogin_issue, /* RCV_LOGO */ 2996 lpfc_rcv_padisc_reglogin_issue, /* RCV_ADISC */ 2997 lpfc_rcv_padisc_reglogin_issue, /* RCV_PDISC */ 2998 lpfc_rcv_prlo_reglogin_issue, /* RCV_PRLO */ 2999 lpfc_cmpl_plogi_illegal, /* CMPL_PLOGI */ 3000 lpfc_disc_illegal, /* CMPL_PRLI */ 3001 lpfc_disc_illegal, /* CMPL_LOGO */ 3002 lpfc_disc_illegal, /* CMPL_ADISC */ 3003 lpfc_cmpl_reglogin_reglogin_issue,/* CMPL_REG_LOGIN */ 3004 lpfc_device_rm_reglogin_issue, /* DEVICE_RM */ 3005 lpfc_device_recov_reglogin_issue,/* DEVICE_RECOVERY */ 3006 3007 lpfc_rcv_plogi_prli_issue, /* RCV_PLOGI PRLI_ISSUE */ 3008 lpfc_rcv_prli_prli_issue, /* RCV_PRLI */ 3009 lpfc_rcv_logo_prli_issue, /* RCV_LOGO */ 3010 lpfc_rcv_padisc_prli_issue, /* RCV_ADISC */ 3011 lpfc_rcv_padisc_prli_issue, /* RCV_PDISC */ 3012 lpfc_rcv_prlo_prli_issue, /* RCV_PRLO */ 3013 lpfc_cmpl_plogi_illegal, /* CMPL_PLOGI */ 3014 lpfc_cmpl_prli_prli_issue, /* CMPL_PRLI */ 3015 lpfc_disc_illegal, /* CMPL_LOGO */ 3016 lpfc_disc_illegal, /* CMPL_ADISC */ 3017 lpfc_disc_illegal, /* CMPL_REG_LOGIN */ 3018 lpfc_device_rm_prli_issue, /* DEVICE_RM */ 3019 lpfc_device_recov_prli_issue, /* DEVICE_RECOVERY */ 3020 3021 lpfc_rcv_plogi_logo_issue, /* RCV_PLOGI LOGO_ISSUE */ 3022 lpfc_rcv_prli_logo_issue, /* RCV_PRLI */ 3023 lpfc_rcv_logo_logo_issue, /* RCV_LOGO */ 3024 lpfc_rcv_padisc_logo_issue, /* RCV_ADISC */ 3025 lpfc_rcv_padisc_logo_issue, /* RCV_PDISC */ 3026 lpfc_rcv_prlo_logo_issue, /* RCV_PRLO */ 3027 lpfc_cmpl_plogi_illegal, /* CMPL_PLOGI */ 3028 lpfc_disc_illegal, /* CMPL_PRLI */ 3029 lpfc_cmpl_logo_logo_issue, /* CMPL_LOGO */ 3030 lpfc_disc_illegal, /* CMPL_ADISC */ 3031 lpfc_disc_illegal, /* CMPL_REG_LOGIN */ 3032 lpfc_device_rm_logo_issue, /* DEVICE_RM */ 3033 lpfc_device_recov_logo_issue, /* DEVICE_RECOVERY */ 3034 3035 lpfc_rcv_plogi_unmap_node, /* RCV_PLOGI UNMAPPED_NODE */ 3036 lpfc_rcv_prli_unmap_node, /* RCV_PRLI */ 3037 lpfc_rcv_logo_unmap_node, /* RCV_LOGO */ 3038 lpfc_rcv_padisc_unmap_node, /* RCV_ADISC */ 3039 lpfc_rcv_padisc_unmap_node, /* RCV_PDISC */ 3040 lpfc_rcv_prlo_unmap_node, /* RCV_PRLO */ 3041 lpfc_disc_illegal, /* CMPL_PLOGI */ 3042 lpfc_disc_illegal, /* CMPL_PRLI */ 3043 lpfc_disc_illegal, /* CMPL_LOGO */ 3044 lpfc_disc_illegal, /* CMPL_ADISC */ 3045 lpfc_disc_illegal, /* CMPL_REG_LOGIN */ 3046 lpfc_disc_illegal, /* DEVICE_RM */ 3047 lpfc_device_recov_unmap_node, /* DEVICE_RECOVERY */ 3048 3049 lpfc_rcv_plogi_mapped_node, /* RCV_PLOGI MAPPED_NODE */ 3050 lpfc_rcv_prli_mapped_node, /* RCV_PRLI */ 3051 lpfc_rcv_logo_mapped_node, /* RCV_LOGO */ 3052 lpfc_rcv_padisc_mapped_node, /* RCV_ADISC */ 3053 lpfc_rcv_padisc_mapped_node, /* RCV_PDISC */ 3054 lpfc_rcv_prlo_mapped_node, /* RCV_PRLO */ 3055 lpfc_disc_illegal, /* CMPL_PLOGI */ 3056 lpfc_disc_illegal, /* CMPL_PRLI */ 3057 lpfc_disc_illegal, /* CMPL_LOGO */ 3058 lpfc_disc_illegal, /* CMPL_ADISC */ 3059 lpfc_disc_illegal, /* CMPL_REG_LOGIN */ 3060 lpfc_disc_illegal, /* DEVICE_RM */ 3061 lpfc_device_recov_mapped_node, /* DEVICE_RECOVERY */ 3062 3063 lpfc_rcv_plogi_npr_node, /* RCV_PLOGI NPR_NODE */ 3064 lpfc_rcv_prli_npr_node, /* RCV_PRLI */ 3065 lpfc_rcv_logo_npr_node, /* RCV_LOGO */ 3066 lpfc_rcv_padisc_npr_node, /* RCV_ADISC */ 3067 lpfc_rcv_padisc_npr_node, /* RCV_PDISC */ 3068 lpfc_rcv_prlo_npr_node, /* RCV_PRLO */ 3069 lpfc_cmpl_plogi_npr_node, /* CMPL_PLOGI */ 3070 lpfc_cmpl_prli_npr_node, /* CMPL_PRLI */ 3071 lpfc_cmpl_logo_npr_node, /* CMPL_LOGO */ 3072 lpfc_cmpl_adisc_npr_node, /* CMPL_ADISC */ 3073 lpfc_cmpl_reglogin_npr_node, /* CMPL_REG_LOGIN */ 3074 lpfc_device_rm_npr_node, /* DEVICE_RM */ 3075 lpfc_device_recov_npr_node, /* DEVICE_RECOVERY */ 3076 }; 3077 3078 int 3079 lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 3080 void *arg, uint32_t evt) 3081 { 3082 uint32_t cur_state, rc; 3083 uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *, 3084 uint32_t); 3085 uint32_t got_ndlp = 0; 3086 uint32_t data1; 3087 3088 if (lpfc_nlp_get(ndlp)) 3089 got_ndlp = 1; 3090 3091 cur_state = ndlp->nlp_state; 3092 3093 data1 = (((uint32_t)ndlp->nlp_fc4_type << 16) | 3094 ((uint32_t)ndlp->nlp_type)); 3095 /* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */ 3096 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 3097 "0211 DSM in event x%x on NPort x%x in " 3098 "state %d rpi x%x Data: x%x x%x\n", 3099 evt, ndlp->nlp_DID, cur_state, ndlp->nlp_rpi, 3100 ndlp->nlp_flag, data1); 3101 3102 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM, 3103 "DSM in: evt:%d ste:%d did:x%x", 3104 evt, cur_state, ndlp->nlp_DID); 3105 3106 func = lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt]; 3107 rc = (func) (vport, ndlp, arg, evt); 3108 3109 /* DSM out state <rc> on NPort <nlp_DID> */ 3110 if (got_ndlp) { 3111 data1 = (((uint32_t)ndlp->nlp_fc4_type << 16) | 3112 ((uint32_t)ndlp->nlp_type)); 3113 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 3114 "0212 DSM out state %d on NPort x%x " 3115 "rpi x%x Data: x%x x%x\n", 3116 rc, ndlp->nlp_DID, ndlp->nlp_rpi, ndlp->nlp_flag, 3117 data1); 3118 3119 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM, 3120 "DSM out: ste:%d did:x%x flg:x%x", 3121 rc, ndlp->nlp_DID, ndlp->nlp_flag); 3122 /* Decrement the ndlp reference count held for this function */ 3123 lpfc_nlp_put(ndlp); 3124 } else { 3125 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 3126 "0213 DSM out state %d on NPort free\n", rc); 3127 3128 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM, 3129 "DSM out: ste:%d did:x%x flg:x%x", 3130 rc, 0, 0); 3131 } 3132 3133 return rc; 3134 } 3135