1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 8 * * 9 * This program is free software; you can redistribute it and/or * 10 * modify it under the terms of version 2 of the GNU General * 11 * Public License as published by the Free Software Foundation. * 12 * This program is distributed in the hope that it will be useful. * 13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 17 * TO BE LEGALLY INVALID. See the GNU General Public License for * 18 * more details, a copy of which can be found in the file COPYING * 19 * included with this package. * 20 *******************************************************************/ 21 22 #include <linux/blkdev.h> 23 #include <linux/pci.h> 24 #include <linux/interrupt.h> 25 26 #include <scsi/scsi.h> 27 #include <scsi/scsi_device.h> 28 #include <scsi/scsi_host.h> 29 #include <scsi/scsi_transport_fc.h> 30 31 #include "lpfc_hw.h" 32 #include "lpfc_sli.h" 33 #include "lpfc_nl.h" 34 #include "lpfc_disc.h" 35 #include "lpfc_scsi.h" 36 #include "lpfc.h" 37 #include "lpfc_logmsg.h" 38 #include "lpfc_crtn.h" 39 #include "lpfc_vport.h" 40 #include "lpfc_debugfs.h" 41 42 43 /* Called to verify a rcv'ed ADISC was intended for us. */ 44 static int 45 lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 46 struct lpfc_name *nn, struct lpfc_name *pn) 47 { 48 /* Compare the ADISC rsp WWNN / WWPN matches our internal node 49 * table entry for that node. 50 */ 51 if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name))) 52 return 0; 53 54 if (memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name))) 55 return 0; 56 57 /* we match, return success */ 58 return 1; 59 } 60 61 int 62 lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 63 struct serv_parm * sp, uint32_t class) 64 { 65 volatile struct serv_parm *hsp = &vport->fc_sparam; 66 uint16_t hsp_value, ssp_value = 0; 67 68 /* 69 * The receive data field size and buffer-to-buffer receive data field 70 * size entries are 16 bits but are represented as two 8-bit fields in 71 * the driver data structure to account for rsvd bits and other control 72 * bits. Reconstruct and compare the fields as a 16-bit values before 73 * correcting the byte values. 
74 */ 75 if (sp->cls1.classValid) { 76 hsp_value = (hsp->cls1.rcvDataSizeMsb << 8) | 77 hsp->cls1.rcvDataSizeLsb; 78 ssp_value = (sp->cls1.rcvDataSizeMsb << 8) | 79 sp->cls1.rcvDataSizeLsb; 80 if (!ssp_value) 81 goto bad_service_param; 82 if (ssp_value > hsp_value) { 83 sp->cls1.rcvDataSizeLsb = hsp->cls1.rcvDataSizeLsb; 84 sp->cls1.rcvDataSizeMsb = hsp->cls1.rcvDataSizeMsb; 85 } 86 } else if (class == CLASS1) { 87 goto bad_service_param; 88 } 89 90 if (sp->cls2.classValid) { 91 hsp_value = (hsp->cls2.rcvDataSizeMsb << 8) | 92 hsp->cls2.rcvDataSizeLsb; 93 ssp_value = (sp->cls2.rcvDataSizeMsb << 8) | 94 sp->cls2.rcvDataSizeLsb; 95 if (!ssp_value) 96 goto bad_service_param; 97 if (ssp_value > hsp_value) { 98 sp->cls2.rcvDataSizeLsb = hsp->cls2.rcvDataSizeLsb; 99 sp->cls2.rcvDataSizeMsb = hsp->cls2.rcvDataSizeMsb; 100 } 101 } else if (class == CLASS2) { 102 goto bad_service_param; 103 } 104 105 if (sp->cls3.classValid) { 106 hsp_value = (hsp->cls3.rcvDataSizeMsb << 8) | 107 hsp->cls3.rcvDataSizeLsb; 108 ssp_value = (sp->cls3.rcvDataSizeMsb << 8) | 109 sp->cls3.rcvDataSizeLsb; 110 if (!ssp_value) 111 goto bad_service_param; 112 if (ssp_value > hsp_value) { 113 sp->cls3.rcvDataSizeLsb = hsp->cls3.rcvDataSizeLsb; 114 sp->cls3.rcvDataSizeMsb = hsp->cls3.rcvDataSizeMsb; 115 } 116 } else if (class == CLASS3) { 117 goto bad_service_param; 118 } 119 120 /* 121 * Preserve the upper four bits of the MSB from the PLOGI response. 122 * These bits contain the Buffer-to-Buffer State Change Number 123 * from the target and need to be passed to the FW. 124 */ 125 hsp_value = (hsp->cmn.bbRcvSizeMsb << 8) | hsp->cmn.bbRcvSizeLsb; 126 ssp_value = (sp->cmn.bbRcvSizeMsb << 8) | sp->cmn.bbRcvSizeLsb; 127 if (ssp_value > hsp_value) { 128 sp->cmn.bbRcvSizeLsb = hsp->cmn.bbRcvSizeLsb; 129 sp->cmn.bbRcvSizeMsb = (sp->cmn.bbRcvSizeMsb & 0xF0) | 130 (hsp->cmn.bbRcvSizeMsb & 0x0F); 131 } 132 133 memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name)); 134 memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name)); 135 return 1; 136 bad_service_param: 137 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 138 "0207 Device %x " 139 "(%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x) sent " 140 "invalid service parameters. Ignoring device.\n", 141 ndlp->nlp_DID, 142 sp->nodeName.u.wwn[0], sp->nodeName.u.wwn[1], 143 sp->nodeName.u.wwn[2], sp->nodeName.u.wwn[3], 144 sp->nodeName.u.wwn[4], sp->nodeName.u.wwn[5], 145 sp->nodeName.u.wwn[6], sp->nodeName.u.wwn[7]); 146 return 0; 147 } 148 149 static void * 150 lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 151 struct lpfc_iocbq *rspiocb) 152 { 153 struct lpfc_dmabuf *pcmd, *prsp; 154 uint32_t *lp; 155 void *ptr = NULL; 156 IOCB_t *irsp; 157 158 irsp = &rspiocb->iocb; 159 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 160 161 /* For lpfc_els_abort, context2 could be zero'ed to delay 162 * freeing associated memory till after ABTS completes. 163 */ 164 if (pcmd) { 165 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, 166 list); 167 if (prsp) { 168 lp = (uint32_t *) prsp->virt; 169 ptr = (void *)((uint8_t *)lp + sizeof(uint32_t)); 170 } 171 } else { 172 /* Force ulpStatus error since we are returning NULL ptr */ 173 if (!(irsp->ulpStatus)) { 174 irsp->ulpStatus = IOSTAT_LOCAL_REJECT; 175 irsp->un.ulpWord[4] = IOERR_SLI_ABORTED; 176 } 177 ptr = NULL; 178 } 179 return ptr; 180 } 181 182 183 /* 184 * Free resources / clean up outstanding I/Os 185 * associated with a LPFC_NODELIST entry. 
This 186 * routine effectively results in a "software abort". 187 */ 188 int 189 lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) 190 { 191 LIST_HEAD(completions); 192 struct lpfc_sli *psli = &phba->sli; 193 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; 194 struct lpfc_iocbq *iocb, *next_iocb; 195 IOCB_t *cmd; 196 197 /* Abort outstanding I/O on NPort <nlp_DID> */ 198 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY, 199 "0205 Abort outstanding I/O on NPort x%x " 200 "Data: x%x x%x x%x\n", 201 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 202 ndlp->nlp_rpi); 203 204 lpfc_fabric_abort_nport(ndlp); 205 206 /* First check the txq */ 207 spin_lock_irq(&phba->hbalock); 208 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { 209 /* Check to see if iocb matches the nport we are looking for */ 210 if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) { 211 /* It matches, so deque and call compl with anp error */ 212 list_move_tail(&iocb->list, &completions); 213 pring->txq_cnt--; 214 } 215 } 216 217 /* Next check the txcmplq */ 218 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { 219 /* Check to see if iocb matches the nport we are looking for */ 220 if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) { 221 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 222 } 223 } 224 spin_unlock_irq(&phba->hbalock); 225 226 while (!list_empty(&completions)) { 227 iocb = list_get_first(&completions, struct lpfc_iocbq, list); 228 cmd = &iocb->iocb; 229 list_del_init(&iocb->list); 230 231 if (!iocb->iocb_cmpl) 232 lpfc_sli_release_iocbq(phba, iocb); 233 else { 234 cmd->ulpStatus = IOSTAT_LOCAL_REJECT; 235 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED; 236 (iocb->iocb_cmpl) (phba, iocb, iocb); 237 } 238 } 239 lpfc_cancel_retry_delay_tmo(phba->pport, ndlp); 240 return 0; 241 } 242 243 static int 244 lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 245 struct lpfc_iocbq *cmdiocb) 246 { 247 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 248 struct lpfc_hba *phba = vport->phba; 249 struct lpfc_dmabuf *pcmd; 250 uint32_t *lp; 251 IOCB_t *icmd; 252 struct serv_parm *sp; 253 LPFC_MBOXQ_t *mbox; 254 struct ls_rjt stat; 255 int rc; 256 257 memset(&stat, 0, sizeof (struct ls_rjt)); 258 if (vport->port_state <= LPFC_FLOGI) { 259 /* Before responding to PLOGI, check for pt2pt mode. 260 * If we are pt2pt, with an outstanding FLOGI, abort 261 * the FLOGI and resend it first. 262 */ 263 if (vport->fc_flag & FC_PT2PT) { 264 lpfc_els_abort_flogi(phba); 265 if (!(vport->fc_flag & FC_PT2PT_PLOGI)) { 266 /* If the other side is supposed to initiate 267 * the PLOGI anyway, just ACC it now and 268 * move on with discovery. 
269 */ 270 phba->fc_edtov = FF_DEF_EDTOV; 271 phba->fc_ratov = FF_DEF_RATOV; 272 /* Start discovery - this should just do 273 CLEAR_LA */ 274 lpfc_disc_start(vport); 275 } else 276 lpfc_initial_flogi(vport); 277 } else { 278 stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY; 279 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; 280 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, 281 ndlp, NULL); 282 return 0; 283 } 284 } 285 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 286 lp = (uint32_t *) pcmd->virt; 287 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t)); 288 if (wwn_to_u64(sp->portName.u.wwn) == 0) { 289 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 290 "0140 PLOGI Reject: invalid nname\n"); 291 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 292 stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_PNAME; 293 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 294 NULL); 295 return 0; 296 } 297 if (wwn_to_u64(sp->nodeName.u.wwn) == 0) { 298 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 299 "0141 PLOGI Reject: invalid pname\n"); 300 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 301 stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_NNAME; 302 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 303 NULL); 304 return 0; 305 } 306 if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3) == 0)) { 307 /* Reject this request because invalid parameters */ 308 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 309 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS; 310 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 311 NULL); 312 return 0; 313 } 314 icmd = &cmdiocb->iocb; 315 316 /* PLOGI chkparm OK */ 317 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 318 "0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n", 319 ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, 320 ndlp->nlp_rpi); 321 322 if (vport->cfg_fcp_class == 2 && sp->cls2.classValid) 323 ndlp->nlp_fcp_info |= CLASS2; 324 else 325 ndlp->nlp_fcp_info |= CLASS3; 326 327 ndlp->nlp_class_sup = 0; 328 if (sp->cls1.classValid) 329 ndlp->nlp_class_sup |= FC_COS_CLASS1; 330 if (sp->cls2.classValid) 331 ndlp->nlp_class_sup |= FC_COS_CLASS2; 332 if (sp->cls3.classValid) 333 ndlp->nlp_class_sup |= FC_COS_CLASS3; 334 if (sp->cls4.classValid) 335 ndlp->nlp_class_sup |= FC_COS_CLASS4; 336 ndlp->nlp_maxframe = 337 ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb; 338 339 /* no need to reg_login if we are already in one of these states */ 340 switch (ndlp->nlp_state) { 341 case NLP_STE_NPR_NODE: 342 if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) 343 break; 344 case NLP_STE_REG_LOGIN_ISSUE: 345 case NLP_STE_PRLI_ISSUE: 346 case NLP_STE_UNMAPPED_NODE: 347 case NLP_STE_MAPPED_NODE: 348 lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL); 349 return 1; 350 } 351 352 if ((vport->fc_flag & FC_PT2PT) && 353 !(vport->fc_flag & FC_PT2PT_PLOGI)) { 354 /* rcv'ed PLOGI decides what our NPortId will be */ 355 vport->fc_myDID = icmd->un.rcvels.parmRo; 356 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 357 if (mbox == NULL) 358 goto out; 359 lpfc_config_link(phba, mbox); 360 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 361 mbox->vport = vport; 362 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 363 if (rc == MBX_NOT_FINISHED) { 364 mempool_free(mbox, phba->mbox_mem_pool); 365 goto out; 366 } 367 368 lpfc_can_disctmo(vport); 369 } 370 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 371 if (!mbox) 372 goto out; 373 374 rc = lpfc_reg_login(phba, vport->vpi, icmd->un.rcvels.remoteID, 375 (uint8_t *) sp, mbox, 0); 376 if (rc) { 377 mempool_free(mbox, 
phba->mbox_mem_pool); 378 goto out; 379 } 380 381 /* ACC PLOGI rsp command needs to execute first, 382 * queue this mbox command to be processed later. 383 */ 384 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; 385 /* 386 * mbox->context2 = lpfc_nlp_get(ndlp) deferred until mailbox 387 * command issued in lpfc_cmpl_els_acc(). 388 */ 389 mbox->vport = vport; 390 spin_lock_irq(shost->host_lock); 391 ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI); 392 spin_unlock_irq(shost->host_lock); 393 394 /* 395 * If there is an outstanding PLOGI issued, abort it before 396 * sending ACC rsp for received PLOGI. If pending plogi 397 * is not canceled here, the plogi will be rejected by 398 * remote port and will be retried. On a configuration with 399 * single discovery thread, this will cause a huge delay in 400 * discovery. Also this will cause multiple state machines 401 * running in parallel for this node. 402 */ 403 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) { 404 /* software abort outstanding PLOGI */ 405 lpfc_els_abort(phba, ndlp); 406 } 407 408 if ((vport->port_type == LPFC_NPIV_PORT && 409 vport->cfg_restrict_login)) { 410 411 /* In order to preserve RPIs, we want to cleanup 412 * the default RPI the firmware created to rcv 413 * this ELS request. The only way to do this is 414 * to register, then unregister the RPI. 415 */ 416 spin_lock_irq(shost->host_lock); 417 ndlp->nlp_flag |= NLP_RM_DFLT_RPI; 418 spin_unlock_irq(shost->host_lock); 419 stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD; 420 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; 421 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, 422 ndlp, mbox); 423 return 1; 424 } 425 lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox); 426 return 1; 427 out: 428 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 429 stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE; 430 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 431 return 0; 432 } 433 434 static int 435 lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 436 struct lpfc_iocbq *cmdiocb) 437 { 438 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 439 struct lpfc_dmabuf *pcmd; 440 struct serv_parm *sp; 441 struct lpfc_name *pnn, *ppn; 442 struct ls_rjt stat; 443 ADISC *ap; 444 IOCB_t *icmd; 445 uint32_t *lp; 446 uint32_t cmd; 447 448 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 449 lp = (uint32_t *) pcmd->virt; 450 451 cmd = *lp++; 452 if (cmd == ELS_CMD_ADISC) { 453 ap = (ADISC *) lp; 454 pnn = (struct lpfc_name *) & ap->nodeName; 455 ppn = (struct lpfc_name *) & ap->portName; 456 } else { 457 sp = (struct serv_parm *) lp; 458 pnn = (struct lpfc_name *) & sp->nodeName; 459 ppn = (struct lpfc_name *) & sp->portName; 460 } 461 462 icmd = &cmdiocb->iocb; 463 if (icmd->ulpStatus == 0 && lpfc_check_adisc(vport, ndlp, pnn, ppn)) { 464 if (cmd == ELS_CMD_ADISC) { 465 lpfc_els_rsp_adisc_acc(vport, cmdiocb, ndlp); 466 } else { 467 lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, 468 NULL); 469 } 470 return 1; 471 } 472 /* Reject this request because invalid parameters */ 473 stat.un.b.lsRjtRsvd0 = 0; 474 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 475 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS; 476 stat.un.b.vendorUnique = 0; 477 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 478 479 /* 1 sec timeout */ 480 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); 481 482 spin_lock_irq(shost->host_lock); 483 ndlp->nlp_flag |= NLP_DELAY_TMO; 484 spin_unlock_irq(shost->host_lock); 485 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; 486 
ndlp->nlp_prev_state = ndlp->nlp_state; 487 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 488 return 0; 489 } 490 491 static int 492 lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 493 struct lpfc_iocbq *cmdiocb, uint32_t els_cmd) 494 { 495 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 496 497 /* Put ndlp in NPR state with 1 sec timeout for plogi, ACC logo */ 498 /* Only call LOGO ACC for first LOGO, this avoids sending unnecessary 499 * PLOGIs during LOGO storms from a device. 500 */ 501 spin_lock_irq(shost->host_lock); 502 ndlp->nlp_flag |= NLP_LOGO_ACC; 503 spin_unlock_irq(shost->host_lock); 504 if (els_cmd == ELS_CMD_PRLO) 505 lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL); 506 else 507 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 508 509 if ((!(ndlp->nlp_type & NLP_FABRIC) && 510 ((ndlp->nlp_type & NLP_FCP_TARGET) || 511 !(ndlp->nlp_type & NLP_FCP_INITIATOR))) || 512 (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) { 513 /* Only try to re-login if this is NOT a Fabric Node */ 514 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); 515 spin_lock_irq(shost->host_lock); 516 ndlp->nlp_flag |= NLP_DELAY_TMO; 517 spin_unlock_irq(shost->host_lock); 518 519 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; 520 } 521 ndlp->nlp_prev_state = ndlp->nlp_state; 522 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 523 524 spin_lock_irq(shost->host_lock); 525 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 526 spin_unlock_irq(shost->host_lock); 527 /* The driver has to wait until the ACC completes before it continues 528 * processing the LOGO. The action will resume in 529 * lpfc_cmpl_els_logo_acc routine. Since part of processing includes an 530 * unreg_login, the driver waits so the ACC does not get aborted. 531 */ 532 return 0; 533 } 534 535 static void 536 lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 537 struct lpfc_iocbq *cmdiocb) 538 { 539 struct lpfc_dmabuf *pcmd; 540 uint32_t *lp; 541 PRLI *npr; 542 struct fc_rport *rport = ndlp->rport; 543 u32 roles; 544 545 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 546 lp = (uint32_t *) pcmd->virt; 547 npr = (PRLI *) ((uint8_t *) lp + sizeof (uint32_t)); 548 549 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); 550 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 551 if (npr->prliType == PRLI_FCP_TYPE) { 552 if (npr->initiatorFunc) 553 ndlp->nlp_type |= NLP_FCP_INITIATOR; 554 if (npr->targetFunc) 555 ndlp->nlp_type |= NLP_FCP_TARGET; 556 if (npr->Retry) 557 ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE; 558 } 559 if (rport) { 560 /* We need to update the rport role values */ 561 roles = FC_RPORT_ROLE_UNKNOWN; 562 if (ndlp->nlp_type & NLP_FCP_INITIATOR) 563 roles |= FC_RPORT_ROLE_FCP_INITIATOR; 564 if (ndlp->nlp_type & NLP_FCP_TARGET) 565 roles |= FC_RPORT_ROLE_FCP_TARGET; 566 567 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, 568 "rport rolechg: role:x%x did:x%x flg:x%x", 569 roles, ndlp->nlp_DID, ndlp->nlp_flag); 570 571 fc_remote_port_rolechg(rport, roles); 572 } 573 } 574 575 static uint32_t 576 lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 577 { 578 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 579 580 if (!ndlp->nlp_rpi) { 581 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 582 return 0; 583 } 584 585 if (!(vport->fc_flag & FC_PT2PT)) { 586 /* Check config parameter use-adisc or FCP-2 */ 587 if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) || 588 ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) { 589 spin_lock_irq(shost->host_lock); 590 ndlp->nlp_flag |= NLP_NPR_ADISC; 
591 spin_unlock_irq(shost->host_lock); 592 return 1; 593 } 594 } 595 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 596 lpfc_unreg_rpi(vport, ndlp); 597 return 0; 598 } 599 600 static uint32_t 601 lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 602 void *arg, uint32_t evt) 603 { 604 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 605 "0271 Illegal State Transition: node x%x " 606 "event x%x, state x%x Data: x%x x%x\n", 607 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi, 608 ndlp->nlp_flag); 609 return ndlp->nlp_state; 610 } 611 612 static uint32_t 613 lpfc_cmpl_plogi_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 614 void *arg, uint32_t evt) 615 { 616 /* This transition is only legal if we previously 617 * rcv'ed a PLOGI. Since we don't want 2 discovery threads 618 * working on the same NPortID, do nothing for this thread 619 * to stop it. 620 */ 621 if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) { 622 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 623 "0272 Illegal State Transition: node x%x " 624 "event x%x, state x%x Data: x%x x%x\n", 625 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi, 626 ndlp->nlp_flag); 627 } 628 return ndlp->nlp_state; 629 } 630 631 /* Start of Discovery State Machine routines */ 632 633 static uint32_t 634 lpfc_rcv_plogi_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 635 void *arg, uint32_t evt) 636 { 637 struct lpfc_iocbq *cmdiocb; 638 639 cmdiocb = (struct lpfc_iocbq *) arg; 640 641 if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) { 642 return ndlp->nlp_state; 643 } 644 return NLP_STE_FREED_NODE; 645 } 646 647 static uint32_t 648 lpfc_rcv_els_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 649 void *arg, uint32_t evt) 650 { 651 lpfc_issue_els_logo(vport, ndlp, 0); 652 return ndlp->nlp_state; 653 } 654 655 static uint32_t 656 lpfc_rcv_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 657 void *arg, uint32_t evt) 658 { 659 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 660 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 661 662 spin_lock_irq(shost->host_lock); 663 ndlp->nlp_flag |= NLP_LOGO_ACC; 664 spin_unlock_irq(shost->host_lock); 665 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 666 667 return ndlp->nlp_state; 668 } 669 670 static uint32_t 671 lpfc_cmpl_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 672 void *arg, uint32_t evt) 673 { 674 return NLP_STE_FREED_NODE; 675 } 676 677 static uint32_t 678 lpfc_device_rm_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 679 void *arg, uint32_t evt) 680 { 681 return NLP_STE_FREED_NODE; 682 } 683 684 static uint32_t 685 lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 686 void *arg, uint32_t evt) 687 { 688 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 689 struct lpfc_hba *phba = vport->phba; 690 struct lpfc_iocbq *cmdiocb = arg; 691 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 692 uint32_t *lp = (uint32_t *) pcmd->virt; 693 struct serv_parm *sp = (struct serv_parm *) (lp + 1); 694 struct ls_rjt stat; 695 int port_cmp; 696 697 memset(&stat, 0, sizeof (struct ls_rjt)); 698 699 /* For a PLOGI, we only accept if our portname is less 700 * than the remote portname. 
701 */ 702 phba->fc_stat.elsLogiCol++; 703 port_cmp = memcmp(&vport->fc_portname, &sp->portName, 704 sizeof(struct lpfc_name)); 705 706 if (port_cmp >= 0) { 707 /* Reject this request because the remote node will accept 708 ours */ 709 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 710 stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS; 711 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 712 NULL); 713 } else { 714 if (lpfc_rcv_plogi(vport, ndlp, cmdiocb) && 715 (ndlp->nlp_flag & NLP_NPR_2B_DISC) && 716 (vport->num_disc_nodes)) { 717 spin_lock_irq(shost->host_lock); 718 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 719 spin_unlock_irq(shost->host_lock); 720 /* Check if there are more PLOGIs to be sent */ 721 lpfc_more_plogi(vport); 722 if (vport->num_disc_nodes == 0) { 723 spin_lock_irq(shost->host_lock); 724 vport->fc_flag &= ~FC_NDISC_ACTIVE; 725 spin_unlock_irq(shost->host_lock); 726 lpfc_can_disctmo(vport); 727 lpfc_end_rscn(vport); 728 } 729 } 730 } /* If our portname was less */ 731 732 return ndlp->nlp_state; 733 } 734 735 static uint32_t 736 lpfc_rcv_prli_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 737 void *arg, uint32_t evt) 738 { 739 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 740 struct ls_rjt stat; 741 742 memset(&stat, 0, sizeof (struct ls_rjt)); 743 stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY; 744 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; 745 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 746 return ndlp->nlp_state; 747 } 748 749 static uint32_t 750 lpfc_rcv_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 751 void *arg, uint32_t evt) 752 { 753 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 754 755 /* software abort outstanding PLOGI */ 756 lpfc_els_abort(vport->phba, ndlp); 757 758 lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO); 759 return ndlp->nlp_state; 760 } 761 762 static uint32_t 763 lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 764 void *arg, uint32_t evt) 765 { 766 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 767 struct lpfc_hba *phba = vport->phba; 768 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 769 770 /* software abort outstanding PLOGI */ 771 lpfc_els_abort(phba, ndlp); 772 773 if (evt == NLP_EVT_RCV_LOGO) { 774 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 775 } else { 776 lpfc_issue_els_logo(vport, ndlp, 0); 777 } 778 779 /* Put ndlp in npr state set plogi timer for 1 sec */ 780 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); 781 spin_lock_irq(shost->host_lock); 782 ndlp->nlp_flag |= NLP_DELAY_TMO; 783 spin_unlock_irq(shost->host_lock); 784 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; 785 ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE; 786 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 787 788 return ndlp->nlp_state; 789 } 790 791 static uint32_t 792 lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, 793 struct lpfc_nodelist *ndlp, 794 void *arg, 795 uint32_t evt) 796 { 797 struct lpfc_hba *phba = vport->phba; 798 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 799 struct lpfc_iocbq *cmdiocb, *rspiocb; 800 struct lpfc_dmabuf *pcmd, *prsp, *mp; 801 uint32_t *lp; 802 IOCB_t *irsp; 803 struct serv_parm *sp; 804 LPFC_MBOXQ_t *mbox; 805 806 cmdiocb = (struct lpfc_iocbq *) arg; 807 rspiocb = cmdiocb->context_un.rsp_iocb; 808 809 if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) { 810 /* Recovery from PLOGI collision logic */ 811 return ndlp->nlp_state; 812 } 813 814 irsp = &rspiocb->iocb; 815 816 if 
(irsp->ulpStatus) 817 goto out; 818 819 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 820 821 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 822 823 lp = (uint32_t *) prsp->virt; 824 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t)); 825 826 /* Some switches have FDMI servers returning 0 for WWN */ 827 if ((ndlp->nlp_DID != FDMI_DID) && 828 (wwn_to_u64(sp->portName.u.wwn) == 0 || 829 wwn_to_u64(sp->nodeName.u.wwn) == 0)) { 830 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 831 "0142 PLOGI RSP: Invalid WWN.\n"); 832 goto out; 833 } 834 if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3)) 835 goto out; 836 /* PLOGI chkparm OK */ 837 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 838 "0121 PLOGI chkparm OK Data: x%x x%x x%x x%x\n", 839 ndlp->nlp_DID, ndlp->nlp_state, 840 ndlp->nlp_flag, ndlp->nlp_rpi); 841 if (vport->cfg_fcp_class == 2 && (sp->cls2.classValid)) 842 ndlp->nlp_fcp_info |= CLASS2; 843 else 844 ndlp->nlp_fcp_info |= CLASS3; 845 846 ndlp->nlp_class_sup = 0; 847 if (sp->cls1.classValid) 848 ndlp->nlp_class_sup |= FC_COS_CLASS1; 849 if (sp->cls2.classValid) 850 ndlp->nlp_class_sup |= FC_COS_CLASS2; 851 if (sp->cls3.classValid) 852 ndlp->nlp_class_sup |= FC_COS_CLASS3; 853 if (sp->cls4.classValid) 854 ndlp->nlp_class_sup |= FC_COS_CLASS4; 855 ndlp->nlp_maxframe = 856 ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb; 857 858 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 859 if (!mbox) { 860 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 861 "0133 PLOGI: no memory for reg_login " 862 "Data: x%x x%x x%x x%x\n", 863 ndlp->nlp_DID, ndlp->nlp_state, 864 ndlp->nlp_flag, ndlp->nlp_rpi); 865 goto out; 866 } 867 868 lpfc_unreg_rpi(vport, ndlp); 869 870 if (lpfc_reg_login(phba, vport->vpi, irsp->un.elsreq64.remoteID, 871 (uint8_t *) sp, mbox, 0) == 0) { 872 switch (ndlp->nlp_DID) { 873 case NameServer_DID: 874 mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login; 875 break; 876 case FDMI_DID: 877 mbox->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login; 878 break; 879 default: 880 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; 881 } 882 mbox->context2 = lpfc_nlp_get(ndlp); 883 mbox->vport = vport; 884 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 885 != MBX_NOT_FINISHED) { 886 lpfc_nlp_set_state(vport, ndlp, 887 NLP_STE_REG_LOGIN_ISSUE); 888 return ndlp->nlp_state; 889 } 890 /* decrement node reference count to the failed mbox 891 * command 892 */ 893 lpfc_nlp_put(ndlp); 894 mp = (struct lpfc_dmabuf *) mbox->context1; 895 lpfc_mbuf_free(phba, mp->virt, mp->phys); 896 kfree(mp); 897 mempool_free(mbox, phba->mbox_mem_pool); 898 899 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 900 "0134 PLOGI: cannot issue reg_login " 901 "Data: x%x x%x x%x x%x\n", 902 ndlp->nlp_DID, ndlp->nlp_state, 903 ndlp->nlp_flag, ndlp->nlp_rpi); 904 } else { 905 mempool_free(mbox, phba->mbox_mem_pool); 906 907 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 908 "0135 PLOGI: cannot format reg_login " 909 "Data: x%x x%x x%x x%x\n", 910 ndlp->nlp_DID, ndlp->nlp_state, 911 ndlp->nlp_flag, ndlp->nlp_rpi); 912 } 913 914 915 out: 916 if (ndlp->nlp_DID == NameServer_DID) { 917 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 918 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 919 "0261 Cannot Register NameServer login\n"); 920 } 921 922 spin_lock_irq(shost->host_lock); 923 ndlp->nlp_flag |= NLP_DEFER_RM; 924 spin_unlock_irq(shost->host_lock); 925 return NLP_STE_FREED_NODE; 926 } 927 928 static uint32_t 929 lpfc_cmpl_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 930 void *arg, uint32_t evt) 931 { 932 return 
ndlp->nlp_state; 933 } 934 935 static uint32_t 936 lpfc_cmpl_reglogin_plogi_issue(struct lpfc_vport *vport, 937 struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) 938 { 939 return ndlp->nlp_state; 940 } 941 942 static uint32_t 943 lpfc_device_rm_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 944 void *arg, uint32_t evt) 945 { 946 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 947 948 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { 949 spin_lock_irq(shost->host_lock); 950 ndlp->nlp_flag |= NLP_NODEV_REMOVE; 951 spin_unlock_irq(shost->host_lock); 952 return ndlp->nlp_state; 953 } else { 954 /* software abort outstanding PLOGI */ 955 lpfc_els_abort(vport->phba, ndlp); 956 957 lpfc_drop_node(vport, ndlp); 958 return NLP_STE_FREED_NODE; 959 } 960 } 961 962 static uint32_t 963 lpfc_device_recov_plogi_issue(struct lpfc_vport *vport, 964 struct lpfc_nodelist *ndlp, 965 void *arg, 966 uint32_t evt) 967 { 968 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 969 struct lpfc_hba *phba = vport->phba; 970 971 /* Don't do anything that will mess up processing of the 972 * previous RSCN. 973 */ 974 if (vport->fc_flag & FC_RSCN_DEFERRED) 975 return ndlp->nlp_state; 976 977 /* software abort outstanding PLOGI */ 978 lpfc_els_abort(phba, ndlp); 979 980 ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE; 981 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 982 spin_lock_irq(shost->host_lock); 983 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 984 spin_unlock_irq(shost->host_lock); 985 986 return ndlp->nlp_state; 987 } 988 989 static uint32_t 990 lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 991 void *arg, uint32_t evt) 992 { 993 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 994 struct lpfc_hba *phba = vport->phba; 995 struct lpfc_iocbq *cmdiocb; 996 997 /* software abort outstanding ADISC */ 998 lpfc_els_abort(phba, ndlp); 999 1000 cmdiocb = (struct lpfc_iocbq *) arg; 1001 1002 if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) { 1003 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { 1004 spin_lock_irq(shost->host_lock); 1005 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 1006 spin_unlock_irq(shost->host_lock); 1007 if (vport->num_disc_nodes) 1008 lpfc_more_adisc(vport); 1009 } 1010 return ndlp->nlp_state; 1011 } 1012 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; 1013 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 1014 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 1015 1016 return ndlp->nlp_state; 1017 } 1018 1019 static uint32_t 1020 lpfc_rcv_prli_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1021 void *arg, uint32_t evt) 1022 { 1023 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 1024 1025 lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp); 1026 return ndlp->nlp_state; 1027 } 1028 1029 static uint32_t 1030 lpfc_rcv_logo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1031 void *arg, uint32_t evt) 1032 { 1033 struct lpfc_hba *phba = vport->phba; 1034 struct lpfc_iocbq *cmdiocb; 1035 1036 cmdiocb = (struct lpfc_iocbq *) arg; 1037 1038 /* software abort outstanding ADISC */ 1039 lpfc_els_abort(phba, ndlp); 1040 1041 lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO); 1042 return ndlp->nlp_state; 1043 } 1044 1045 static uint32_t 1046 lpfc_rcv_padisc_adisc_issue(struct lpfc_vport *vport, 1047 struct lpfc_nodelist *ndlp, 1048 void *arg, uint32_t evt) 1049 { 1050 struct lpfc_iocbq *cmdiocb; 1051 1052 cmdiocb = (struct lpfc_iocbq *) arg; 1053 1054 lpfc_rcv_padisc(vport, ndlp, cmdiocb); 1055 return ndlp->nlp_state; 1056 } 
1057 1058 static uint32_t 1059 lpfc_rcv_prlo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1060 void *arg, uint32_t evt) 1061 { 1062 struct lpfc_iocbq *cmdiocb; 1063 1064 cmdiocb = (struct lpfc_iocbq *) arg; 1065 1066 /* Treat like rcv logo */ 1067 lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO); 1068 return ndlp->nlp_state; 1069 } 1070 1071 static uint32_t 1072 lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport, 1073 struct lpfc_nodelist *ndlp, 1074 void *arg, uint32_t evt) 1075 { 1076 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1077 struct lpfc_hba *phba = vport->phba; 1078 struct lpfc_iocbq *cmdiocb, *rspiocb; 1079 IOCB_t *irsp; 1080 ADISC *ap; 1081 1082 cmdiocb = (struct lpfc_iocbq *) arg; 1083 rspiocb = cmdiocb->context_un.rsp_iocb; 1084 1085 ap = (ADISC *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb); 1086 irsp = &rspiocb->iocb; 1087 1088 if ((irsp->ulpStatus) || 1089 (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) { 1090 /* 1 sec timeout */ 1091 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); 1092 spin_lock_irq(shost->host_lock); 1093 ndlp->nlp_flag |= NLP_DELAY_TMO; 1094 spin_unlock_irq(shost->host_lock); 1095 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; 1096 1097 memset(&ndlp->nlp_nodename, 0, sizeof(struct lpfc_name)); 1098 memset(&ndlp->nlp_portname, 0, sizeof(struct lpfc_name)); 1099 1100 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; 1101 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 1102 lpfc_unreg_rpi(vport, ndlp); 1103 return ndlp->nlp_state; 1104 } 1105 1106 if (ndlp->nlp_type & NLP_FCP_TARGET) { 1107 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; 1108 lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE); 1109 } else { 1110 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; 1111 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 1112 } 1113 return ndlp->nlp_state; 1114 } 1115 1116 static uint32_t 1117 lpfc_device_rm_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1118 void *arg, uint32_t evt) 1119 { 1120 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1121 1122 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { 1123 spin_lock_irq(shost->host_lock); 1124 ndlp->nlp_flag |= NLP_NODEV_REMOVE; 1125 spin_unlock_irq(shost->host_lock); 1126 return ndlp->nlp_state; 1127 } else { 1128 /* software abort outstanding ADISC */ 1129 lpfc_els_abort(vport->phba, ndlp); 1130 1131 lpfc_drop_node(vport, ndlp); 1132 return NLP_STE_FREED_NODE; 1133 } 1134 } 1135 1136 static uint32_t 1137 lpfc_device_recov_adisc_issue(struct lpfc_vport *vport, 1138 struct lpfc_nodelist *ndlp, 1139 void *arg, 1140 uint32_t evt) 1141 { 1142 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1143 struct lpfc_hba *phba = vport->phba; 1144 1145 /* Don't do anything that will mess up processing of the 1146 * previous RSCN. 
1147 */ 1148 if (vport->fc_flag & FC_RSCN_DEFERRED) 1149 return ndlp->nlp_state; 1150 1151 /* software abort outstanding ADISC */ 1152 lpfc_els_abort(phba, ndlp); 1153 1154 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; 1155 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 1156 spin_lock_irq(shost->host_lock); 1157 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 1158 spin_unlock_irq(shost->host_lock); 1159 lpfc_disc_set_adisc(vport, ndlp); 1160 return ndlp->nlp_state; 1161 } 1162 1163 static uint32_t 1164 lpfc_rcv_plogi_reglogin_issue(struct lpfc_vport *vport, 1165 struct lpfc_nodelist *ndlp, 1166 void *arg, 1167 uint32_t evt) 1168 { 1169 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 1170 1171 lpfc_rcv_plogi(vport, ndlp, cmdiocb); 1172 return ndlp->nlp_state; 1173 } 1174 1175 static uint32_t 1176 lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport, 1177 struct lpfc_nodelist *ndlp, 1178 void *arg, 1179 uint32_t evt) 1180 { 1181 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 1182 1183 lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp); 1184 return ndlp->nlp_state; 1185 } 1186 1187 static uint32_t 1188 lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport, 1189 struct lpfc_nodelist *ndlp, 1190 void *arg, 1191 uint32_t evt) 1192 { 1193 struct lpfc_hba *phba = vport->phba; 1194 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 1195 LPFC_MBOXQ_t *mb; 1196 LPFC_MBOXQ_t *nextmb; 1197 struct lpfc_dmabuf *mp; 1198 1199 cmdiocb = (struct lpfc_iocbq *) arg; 1200 1201 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ 1202 if ((mb = phba->sli.mbox_active)) { 1203 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && 1204 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 1205 lpfc_nlp_put(ndlp); 1206 mb->context2 = NULL; 1207 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 1208 } 1209 } 1210 1211 spin_lock_irq(&phba->hbalock); 1212 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 1213 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && 1214 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 1215 mp = (struct lpfc_dmabuf *) (mb->context1); 1216 if (mp) { 1217 __lpfc_mbuf_free(phba, mp->virt, mp->phys); 1218 kfree(mp); 1219 } 1220 lpfc_nlp_put(ndlp); 1221 list_del(&mb->list); 1222 mempool_free(mb, phba->mbox_mem_pool); 1223 } 1224 } 1225 spin_unlock_irq(&phba->hbalock); 1226 1227 lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO); 1228 return ndlp->nlp_state; 1229 } 1230 1231 static uint32_t 1232 lpfc_rcv_padisc_reglogin_issue(struct lpfc_vport *vport, 1233 struct lpfc_nodelist *ndlp, 1234 void *arg, 1235 uint32_t evt) 1236 { 1237 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 1238 1239 lpfc_rcv_padisc(vport, ndlp, cmdiocb); 1240 return ndlp->nlp_state; 1241 } 1242 1243 static uint32_t 1244 lpfc_rcv_prlo_reglogin_issue(struct lpfc_vport *vport, 1245 struct lpfc_nodelist *ndlp, 1246 void *arg, 1247 uint32_t evt) 1248 { 1249 struct lpfc_iocbq *cmdiocb; 1250 1251 cmdiocb = (struct lpfc_iocbq *) arg; 1252 lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL); 1253 return ndlp->nlp_state; 1254 } 1255 1256 static uint32_t 1257 lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport, 1258 struct lpfc_nodelist *ndlp, 1259 void *arg, 1260 uint32_t evt) 1261 { 1262 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1263 LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg; 1264 MAILBOX_t *mb = &pmb->mb; 1265 uint32_t did = mb->un.varWords[1]; 1266 1267 if (mb->mbxStatus) { 1268 /* RegLogin failed */ 1269 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 1270 "0246 RegLogin 
failed Data: x%x x%x x%x\n", 1271 did, mb->mbxStatus, vport->port_state); 1272 /* 1273 * If RegLogin failed due to lack of HBA resources do not 1274 * retry discovery. 1275 */ 1276 if (mb->mbxStatus == MBXERR_RPI_FULL) { 1277 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; 1278 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 1279 return ndlp->nlp_state; 1280 } 1281 1282 /* Put ndlp in npr state set plogi timer for 1 sec */ 1283 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); 1284 spin_lock_irq(shost->host_lock); 1285 ndlp->nlp_flag |= NLP_DELAY_TMO; 1286 spin_unlock_irq(shost->host_lock); 1287 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; 1288 1289 lpfc_issue_els_logo(vport, ndlp, 0); 1290 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; 1291 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 1292 return ndlp->nlp_state; 1293 } 1294 1295 ndlp->nlp_rpi = mb->un.varWords[0]; 1296 1297 /* Only if we are not a fabric nport do we issue PRLI */ 1298 if (!(ndlp->nlp_type & NLP_FABRIC)) { 1299 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; 1300 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 1301 lpfc_issue_els_prli(vport, ndlp, 0); 1302 } else { 1303 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; 1304 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 1305 } 1306 return ndlp->nlp_state; 1307 } 1308 1309 static uint32_t 1310 lpfc_device_rm_reglogin_issue(struct lpfc_vport *vport, 1311 struct lpfc_nodelist *ndlp, 1312 void *arg, 1313 uint32_t evt) 1314 { 1315 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1316 1317 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { 1318 spin_lock_irq(shost->host_lock); 1319 ndlp->nlp_flag |= NLP_NODEV_REMOVE; 1320 spin_unlock_irq(shost->host_lock); 1321 return ndlp->nlp_state; 1322 } else { 1323 lpfc_drop_node(vport, ndlp); 1324 return NLP_STE_FREED_NODE; 1325 } 1326 } 1327 1328 static uint32_t 1329 lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport, 1330 struct lpfc_nodelist *ndlp, 1331 void *arg, 1332 uint32_t evt) 1333 { 1334 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1335 1336 /* Don't do anything that will mess up processing of the 1337 * previous RSCN. 
1338 */ 1339 if (vport->fc_flag & FC_RSCN_DEFERRED) 1340 return ndlp->nlp_state; 1341 1342 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; 1343 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 1344 spin_lock_irq(shost->host_lock); 1345 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 1346 spin_unlock_irq(shost->host_lock); 1347 lpfc_disc_set_adisc(vport, ndlp); 1348 return ndlp->nlp_state; 1349 } 1350 1351 static uint32_t 1352 lpfc_rcv_plogi_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1353 void *arg, uint32_t evt) 1354 { 1355 struct lpfc_iocbq *cmdiocb; 1356 1357 cmdiocb = (struct lpfc_iocbq *) arg; 1358 1359 lpfc_rcv_plogi(vport, ndlp, cmdiocb); 1360 return ndlp->nlp_state; 1361 } 1362 1363 static uint32_t 1364 lpfc_rcv_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1365 void *arg, uint32_t evt) 1366 { 1367 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 1368 1369 lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp); 1370 return ndlp->nlp_state; 1371 } 1372 1373 static uint32_t 1374 lpfc_rcv_logo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1375 void *arg, uint32_t evt) 1376 { 1377 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 1378 1379 /* Software abort outstanding PRLI before sending acc */ 1380 lpfc_els_abort(vport->phba, ndlp); 1381 1382 lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO); 1383 return ndlp->nlp_state; 1384 } 1385 1386 static uint32_t 1387 lpfc_rcv_padisc_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1388 void *arg, uint32_t evt) 1389 { 1390 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 1391 1392 lpfc_rcv_padisc(vport, ndlp, cmdiocb); 1393 return ndlp->nlp_state; 1394 } 1395 1396 /* This routine is envoked when we rcv a PRLO request from a nport 1397 * we are logged into. We should send back a PRLO rsp setting the 1398 * appropriate bits. 
1399 * NEXT STATE = PRLI_ISSUE 1400 */ 1401 static uint32_t 1402 lpfc_rcv_prlo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1403 void *arg, uint32_t evt) 1404 { 1405 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 1406 1407 lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL); 1408 return ndlp->nlp_state; 1409 } 1410 1411 static uint32_t 1412 lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1413 void *arg, uint32_t evt) 1414 { 1415 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1416 struct lpfc_iocbq *cmdiocb, *rspiocb; 1417 struct lpfc_hba *phba = vport->phba; 1418 IOCB_t *irsp; 1419 PRLI *npr; 1420 1421 cmdiocb = (struct lpfc_iocbq *) arg; 1422 rspiocb = cmdiocb->context_un.rsp_iocb; 1423 npr = (PRLI *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb); 1424 1425 irsp = &rspiocb->iocb; 1426 if (irsp->ulpStatus) { 1427 if ((vport->port_type == LPFC_NPIV_PORT) && 1428 vport->cfg_restrict_login) { 1429 goto out; 1430 } 1431 ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; 1432 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 1433 return ndlp->nlp_state; 1434 } 1435 1436 /* Check out PRLI rsp */ 1437 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); 1438 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 1439 if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) && 1440 (npr->prliType == PRLI_FCP_TYPE)) { 1441 if (npr->initiatorFunc) 1442 ndlp->nlp_type |= NLP_FCP_INITIATOR; 1443 if (npr->targetFunc) 1444 ndlp->nlp_type |= NLP_FCP_TARGET; 1445 if (npr->Retry) 1446 ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE; 1447 } 1448 if (!(ndlp->nlp_type & NLP_FCP_TARGET) && 1449 (vport->port_type == LPFC_NPIV_PORT) && 1450 vport->cfg_restrict_login) { 1451 out: 1452 spin_lock_irq(shost->host_lock); 1453 ndlp->nlp_flag |= NLP_TARGET_REMOVE; 1454 spin_unlock_irq(shost->host_lock); 1455 lpfc_issue_els_logo(vport, ndlp, 0); 1456 1457 ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; 1458 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 1459 return ndlp->nlp_state; 1460 } 1461 1462 ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; 1463 if (ndlp->nlp_type & NLP_FCP_TARGET) 1464 lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE); 1465 else 1466 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 1467 return ndlp->nlp_state; 1468 } 1469 1470 /*! lpfc_device_rm_prli_issue 1471 * 1472 * \pre 1473 * \post 1474 * \param phba 1475 * \param ndlp 1476 * \param arg 1477 * \param evt 1478 * \return uint32_t 1479 * 1480 * \b Description: 1481 * This routine is envoked when we a request to remove a nport we are in the 1482 * process of PRLIing. We should software abort outstanding prli, unreg 1483 * login, send a logout. We will change node state to UNUSED_NODE, put it 1484 * on plogi list so it can be freed when LOGO completes. 1485 * 1486 */ 1487 1488 static uint32_t 1489 lpfc_device_rm_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1490 void *arg, uint32_t evt) 1491 { 1492 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1493 1494 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { 1495 spin_lock_irq(shost->host_lock); 1496 ndlp->nlp_flag |= NLP_NODEV_REMOVE; 1497 spin_unlock_irq(shost->host_lock); 1498 return ndlp->nlp_state; 1499 } else { 1500 /* software abort outstanding PLOGI */ 1501 lpfc_els_abort(vport->phba, ndlp); 1502 1503 lpfc_drop_node(vport, ndlp); 1504 return NLP_STE_FREED_NODE; 1505 } 1506 } 1507 1508 1509 /*! 
lpfc_device_recov_prli_issue 1510 * 1511 * \pre 1512 * \post 1513 * \param phba 1514 * \param ndlp 1515 * \param arg 1516 * \param evt 1517 * \return uint32_t 1518 * 1519 * \b Description: 1520 * The routine is envoked when the state of a device is unknown, like 1521 * during a link down. We should remove the nodelist entry from the 1522 * unmapped list, issue a UNREG_LOGIN, do a software abort of the 1523 * outstanding PRLI command, then free the node entry. 1524 */ 1525 static uint32_t 1526 lpfc_device_recov_prli_issue(struct lpfc_vport *vport, 1527 struct lpfc_nodelist *ndlp, 1528 void *arg, 1529 uint32_t evt) 1530 { 1531 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1532 struct lpfc_hba *phba = vport->phba; 1533 1534 /* Don't do anything that will mess up processing of the 1535 * previous RSCN. 1536 */ 1537 if (vport->fc_flag & FC_RSCN_DEFERRED) 1538 return ndlp->nlp_state; 1539 1540 /* software abort outstanding PRLI */ 1541 lpfc_els_abort(phba, ndlp); 1542 1543 ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; 1544 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 1545 spin_lock_irq(shost->host_lock); 1546 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 1547 spin_unlock_irq(shost->host_lock); 1548 lpfc_disc_set_adisc(vport, ndlp); 1549 return ndlp->nlp_state; 1550 } 1551 1552 static uint32_t 1553 lpfc_rcv_plogi_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1554 void *arg, uint32_t evt) 1555 { 1556 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 1557 1558 lpfc_rcv_plogi(vport, ndlp, cmdiocb); 1559 return ndlp->nlp_state; 1560 } 1561 1562 static uint32_t 1563 lpfc_rcv_prli_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1564 void *arg, uint32_t evt) 1565 { 1566 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 1567 1568 lpfc_rcv_prli(vport, ndlp, cmdiocb); 1569 lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp); 1570 return ndlp->nlp_state; 1571 } 1572 1573 static uint32_t 1574 lpfc_rcv_logo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1575 void *arg, uint32_t evt) 1576 { 1577 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 1578 1579 lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO); 1580 return ndlp->nlp_state; 1581 } 1582 1583 static uint32_t 1584 lpfc_rcv_padisc_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1585 void *arg, uint32_t evt) 1586 { 1587 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 1588 1589 lpfc_rcv_padisc(vport, ndlp, cmdiocb); 1590 return ndlp->nlp_state; 1591 } 1592 1593 static uint32_t 1594 lpfc_rcv_prlo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1595 void *arg, uint32_t evt) 1596 { 1597 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 1598 1599 lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL); 1600 return ndlp->nlp_state; 1601 } 1602 1603 static uint32_t 1604 lpfc_device_recov_unmap_node(struct lpfc_vport *vport, 1605 struct lpfc_nodelist *ndlp, 1606 void *arg, 1607 uint32_t evt) 1608 { 1609 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1610 1611 ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE; 1612 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 1613 spin_lock_irq(shost->host_lock); 1614 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 1615 spin_unlock_irq(shost->host_lock); 1616 lpfc_disc_set_adisc(vport, ndlp); 1617 1618 return ndlp->nlp_state; 1619 } 1620 1621 static uint32_t 1622 lpfc_rcv_plogi_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1623 void *arg, uint32_t evt) 1624 
{ 1625 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 1626 1627 lpfc_rcv_plogi(vport, ndlp, cmdiocb); 1628 return ndlp->nlp_state; 1629 } 1630 1631 static uint32_t 1632 lpfc_rcv_prli_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1633 void *arg, uint32_t evt) 1634 { 1635 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 1636 1637 lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp); 1638 return ndlp->nlp_state; 1639 } 1640 1641 static uint32_t 1642 lpfc_rcv_logo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1643 void *arg, uint32_t evt) 1644 { 1645 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 1646 1647 lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO); 1648 return ndlp->nlp_state; 1649 } 1650 1651 static uint32_t 1652 lpfc_rcv_padisc_mapped_node(struct lpfc_vport *vport, 1653 struct lpfc_nodelist *ndlp, 1654 void *arg, uint32_t evt) 1655 { 1656 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 1657 1658 lpfc_rcv_padisc(vport, ndlp, cmdiocb); 1659 return ndlp->nlp_state; 1660 } 1661 1662 static uint32_t 1663 lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1664 void *arg, uint32_t evt) 1665 { 1666 struct lpfc_hba *phba = vport->phba; 1667 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 1668 1669 /* flush the target */ 1670 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring], 1671 ndlp->nlp_sid, 0, LPFC_CTX_TGT); 1672 1673 /* Treat like rcv logo */ 1674 lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO); 1675 return ndlp->nlp_state; 1676 } 1677 1678 static uint32_t 1679 lpfc_device_recov_mapped_node(struct lpfc_vport *vport, 1680 struct lpfc_nodelist *ndlp, 1681 void *arg, 1682 uint32_t evt) 1683 { 1684 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1685 1686 ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE; 1687 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 1688 spin_lock_irq(shost->host_lock); 1689 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 1690 spin_unlock_irq(shost->host_lock); 1691 lpfc_disc_set_adisc(vport, ndlp); 1692 return ndlp->nlp_state; 1693 } 1694 1695 static uint32_t 1696 lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1697 void *arg, uint32_t evt) 1698 { 1699 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1700 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 1701 1702 /* Ignore PLOGI if we have an outstanding LOGO */ 1703 if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC)) 1704 return ndlp->nlp_state; 1705 if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) { 1706 lpfc_cancel_retry_delay_tmo(vport, ndlp); 1707 spin_lock_irq(shost->host_lock); 1708 ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC); 1709 spin_unlock_irq(shost->host_lock); 1710 } else if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) { 1711 /* send PLOGI immediately, move to PLOGI issue state */ 1712 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { 1713 ndlp->nlp_prev_state = NLP_STE_NPR_NODE; 1714 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 1715 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 1716 } 1717 } 1718 return ndlp->nlp_state; 1719 } 1720 1721 static uint32_t 1722 lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1723 void *arg, uint32_t evt) 1724 { 1725 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1726 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 1727 struct ls_rjt stat; 1728 1729 memset(&stat, 0, sizeof (struct ls_rjt)); 1730 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 1731 
stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; 1732 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 1733 1734 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { 1735 if (ndlp->nlp_flag & NLP_NPR_ADISC) { 1736 spin_lock_irq(shost->host_lock); 1737 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 1738 ndlp->nlp_prev_state = NLP_STE_NPR_NODE; 1739 spin_unlock_irq(shost->host_lock); 1740 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 1741 lpfc_issue_els_adisc(vport, ndlp, 0); 1742 } else { 1743 ndlp->nlp_prev_state = NLP_STE_NPR_NODE; 1744 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 1745 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 1746 } 1747 } 1748 return ndlp->nlp_state; 1749 } 1750 1751 static uint32_t 1752 lpfc_rcv_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1753 void *arg, uint32_t evt) 1754 { 1755 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 1756 1757 lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO); 1758 return ndlp->nlp_state; 1759 } 1760 1761 static uint32_t 1762 lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1763 void *arg, uint32_t evt) 1764 { 1765 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 1766 1767 lpfc_rcv_padisc(vport, ndlp, cmdiocb); 1768 /* 1769 * Do not start discovery if discovery is about to start 1770 * or discovery in progress for this node. Starting discovery 1771 * here will affect the counting of discovery threads. 1772 */ 1773 if (!(ndlp->nlp_flag & NLP_DELAY_TMO) && 1774 !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) { 1775 if (ndlp->nlp_flag & NLP_NPR_ADISC) { 1776 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 1777 ndlp->nlp_prev_state = NLP_STE_NPR_NODE; 1778 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 1779 lpfc_issue_els_adisc(vport, ndlp, 0); 1780 } else { 1781 ndlp->nlp_prev_state = NLP_STE_NPR_NODE; 1782 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 1783 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 1784 } 1785 } 1786 return ndlp->nlp_state; 1787 } 1788 1789 static uint32_t 1790 lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1791 void *arg, uint32_t evt) 1792 { 1793 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1794 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 1795 1796 spin_lock_irq(shost->host_lock); 1797 ndlp->nlp_flag |= NLP_LOGO_ACC; 1798 spin_unlock_irq(shost->host_lock); 1799 1800 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 1801 1802 if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) { 1803 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); 1804 spin_lock_irq(shost->host_lock); 1805 ndlp->nlp_flag |= NLP_DELAY_TMO; 1806 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 1807 spin_unlock_irq(shost->host_lock); 1808 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; 1809 } else { 1810 spin_lock_irq(shost->host_lock); 1811 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 1812 spin_unlock_irq(shost->host_lock); 1813 } 1814 return ndlp->nlp_state; 1815 } 1816 1817 static uint32_t 1818 lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1819 void *arg, uint32_t evt) 1820 { 1821 struct lpfc_iocbq *cmdiocb, *rspiocb; 1822 IOCB_t *irsp; 1823 1824 cmdiocb = (struct lpfc_iocbq *) arg; 1825 rspiocb = cmdiocb->context_un.rsp_iocb; 1826 1827 irsp = &rspiocb->iocb; 1828 if (irsp->ulpStatus) { 1829 ndlp->nlp_flag |= NLP_DEFER_RM; 1830 return NLP_STE_FREED_NODE; 1831 } 1832 return ndlp->nlp_state; 1833 } 1834 1835 static uint32_t 1836 lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1837 void *arg, uint32_t 

static uint32_t
lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                         void *arg, uint32_t evt)
{
        struct lpfc_iocbq *cmdiocb, *rspiocb;
        IOCB_t *irsp;

        cmdiocb = (struct lpfc_iocbq *) arg;
        rspiocb = cmdiocb->context_un.rsp_iocb;

        irsp = &rspiocb->iocb;
        if (irsp->ulpStatus) {
                ndlp->nlp_flag |= NLP_DEFER_RM;
                return NLP_STE_FREED_NODE;
        }
        return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                        void *arg, uint32_t evt)
{
        struct lpfc_iocbq *cmdiocb, *rspiocb;
        IOCB_t *irsp;

        cmdiocb = (struct lpfc_iocbq *) arg;
        rspiocb = cmdiocb->context_un.rsp_iocb;

        irsp = &rspiocb->iocb;
        if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
                lpfc_drop_node(vport, ndlp);
                return NLP_STE_FREED_NODE;
        }
        return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                        void *arg, uint32_t evt)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

        if (ndlp->nlp_DID == Fabric_DID) {
                spin_lock_irq(shost->host_lock);
                vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
                spin_unlock_irq(shost->host_lock);
        }
        lpfc_unreg_rpi(vport, ndlp);
        return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_adisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                         void *arg, uint32_t evt)
{
        struct lpfc_iocbq *cmdiocb, *rspiocb;
        IOCB_t *irsp;

        cmdiocb = (struct lpfc_iocbq *) arg;
        rspiocb = cmdiocb->context_un.rsp_iocb;

        irsp = &rspiocb->iocb;
        if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
                lpfc_drop_node(vport, ndlp);
                return NLP_STE_FREED_NODE;
        }
        return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
                            struct lpfc_nodelist *ndlp,
                            void *arg, uint32_t evt)
{
        LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
        MAILBOX_t *mb = &pmb->mb;

        if (!mb->mbxStatus)
                ndlp->nlp_rpi = mb->un.varWords[0];
        else {
                if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
                        lpfc_drop_node(vport, ndlp);
                        return NLP_STE_FREED_NODE;
                }
        }
        return ndlp->nlp_state;
}

static uint32_t
lpfc_device_rm_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                        void *arg, uint32_t evt)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

        if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
                spin_lock_irq(shost->host_lock);
                ndlp->nlp_flag |= NLP_NODEV_REMOVE;
                spin_unlock_irq(shost->host_lock);
                return ndlp->nlp_state;
        }
        lpfc_drop_node(vport, ndlp);
        return NLP_STE_FREED_NODE;
}

static uint32_t
lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                           void *arg, uint32_t evt)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

        /* Don't do anything that will mess up processing of the
         * previous RSCN.
         */
        if (vport->fc_flag & FC_RSCN_DEFERRED)
                return ndlp->nlp_state;

        spin_lock_irq(shost->host_lock);
        ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
        spin_unlock_irq(shost->host_lock);
        lpfc_cancel_retry_delay_tmo(vport, ndlp);
        return ndlp->nlp_state;
}


/* This next section defines the NPort Discovery State Machine */

/* There are 4 different double linked lists nodelist entries can reside on.
 * The plogi list and adisc list are used when Link Up discovery or RSCN
 * processing is needed. Each list holds the nodes that we will send PLOGI
 * or ADISC on. These lists keep track of which nodes will be affected by an
 * RSCN or by a Link Up (typically, all nodes are affected on Link Up).
 * The unmapped_list will contain all nodes that we have successfully logged
 * into at the Fibre Channel level. The mapped_list will contain all nodes
 * that are mapped FCP targets.
 */
/*
 * The bind list is a list of undiscovered (potentially non-existent) nodes
 * that we have saved binding information on. This information is used when
 * nodes transition from the unmapped to the mapped list.
 */
/* For UNUSED_NODE state, the node has just been allocated.
 * For PLOGI_ISSUE and REG_LOGIN_ISSUE, the node is on
 * the PLOGI list. For REG_LOGIN_COMPL, the node is taken off the PLOGI list
 * and put on the unmapped list. For ADISC processing, the node is taken off
 * the ADISC list and placed on either the mapped or unmapped list (depending
 * on its previous state). Once on the unmapped list, a PRLI is issued and the
 * state changed to PRLI_ISSUE. When the PRLI completion occurs, the state is
 * changed to UNMAPPED_NODE. If the completion indicates a mapped
 * node, the node is taken off the unmapped list. The binding list is checked
 * for a valid binding, or a binding is automatically assigned. If binding
 * assignment is unsuccessful, the node is left on the unmapped list. If
 * binding assignment is successful, the associated binding list entry (if
 * any) is removed, and the node is placed on the mapped list.
 */
/*
 * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
 * lists will receive a DEVICE_RECOVERY event. If the linkdown or devloss
 * timers expire, all affected nodes will receive a DEVICE_RM event.
 */
/*
 * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists
 * to either the ADISC or PLOGI list. After a Nameserver query or ALPA loopmap
 * check, additional nodes may be added to or removed from (via DEVICE_RM)
 * the PLOGI or ADISC lists. Once the PLOGI and ADISC lists are populated,
 * we will first process the ADISC list. 32 entries are processed initially
 * and ADISC is initiated for each one. Completions / Events for each node
 * are funneled through the state machine. As each node finishes ADISC
 * processing, it starts ADISC for any nodes waiting for ADISC processing.
 * If no nodes are waiting, and the ADISC list count is identically 0, then
 * we are done. For Link Up discovery, since all nodes on the PLOGI list are
 * UNREG_LOGIN'ed, we can issue a CLEAR_LA and re-enable Link Events. Next
 * we will process the PLOGI list. 32 entries are processed initially and
 * PLOGI is initiated for each one. Completions / Events for each node are
 * funneled through the state machine. As each node finishes PLOGI processing,
 * it starts PLOGI for any nodes waiting for PLOGI processing. If no nodes are
 * waiting, and the PLOGI list count is identically 0, then we are done. We
 * have now completed discovery / RSCN handling. Upon completion, ALL nodes
 * should be on either the mapped or unmapped lists.
 */
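
/*
 * The table below is a flat, row-major array: NLP_STE_MAX_STATE rows (one per
 * node state) of NLP_EVT_MAX_EVENT entries (one per event, in event order).
 * As an illustrative sketch (the local names here are hypothetical), a lookup
 * for a received PLOGI would be:
 *
 *      uint32_t (*handler)(struct lpfc_vport *, struct lpfc_nodelist *,
 *                          void *, uint32_t);
 *      uint32_t new_state;
 *
 *      handler = lpfc_disc_action[(ndlp->nlp_state * NLP_EVT_MAX_EVENT) +
 *                                 NLP_EVT_RCV_PLOGI];
 *      new_state = handler(vport, ndlp, cmdiocb, NLP_EVT_RCV_PLOGI);
 *
 * Callers are expected to go through lpfc_disc_state_machine() below, which
 * performs this lookup and also handles node reference counting and tracing.
 */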

static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
     (struct lpfc_vport *, struct lpfc_nodelist *, void *, uint32_t) = {
        /* Action routine                  Event        Current State   */
        lpfc_rcv_plogi_unused_node,        /* RCV_PLOGI   UNUSED_NODE    */
        lpfc_rcv_els_unused_node,          /* RCV_PRLI        */
        lpfc_rcv_logo_unused_node,         /* RCV_LOGO        */
        lpfc_rcv_els_unused_node,          /* RCV_ADISC       */
        lpfc_rcv_els_unused_node,          /* RCV_PDISC       */
        lpfc_rcv_els_unused_node,          /* RCV_PRLO        */
        lpfc_disc_illegal,                 /* CMPL_PLOGI      */
        lpfc_disc_illegal,                 /* CMPL_PRLI       */
        lpfc_cmpl_logo_unused_node,        /* CMPL_LOGO       */
        lpfc_disc_illegal,                 /* CMPL_ADISC      */
        lpfc_disc_illegal,                 /* CMPL_REG_LOGIN  */
        lpfc_device_rm_unused_node,        /* DEVICE_RM       */
        lpfc_disc_illegal,                 /* DEVICE_RECOVERY */

        lpfc_rcv_plogi_plogi_issue,        /* RCV_PLOGI   PLOGI_ISSUE    */
        lpfc_rcv_prli_plogi_issue,         /* RCV_PRLI        */
        lpfc_rcv_logo_plogi_issue,         /* RCV_LOGO        */
        lpfc_rcv_els_plogi_issue,          /* RCV_ADISC       */
        lpfc_rcv_els_plogi_issue,          /* RCV_PDISC       */
        lpfc_rcv_els_plogi_issue,          /* RCV_PRLO        */
        lpfc_cmpl_plogi_plogi_issue,       /* CMPL_PLOGI      */
        lpfc_disc_illegal,                 /* CMPL_PRLI       */
        lpfc_cmpl_logo_plogi_issue,        /* CMPL_LOGO       */
        lpfc_disc_illegal,                 /* CMPL_ADISC      */
        lpfc_cmpl_reglogin_plogi_issue,    /* CMPL_REG_LOGIN  */
        lpfc_device_rm_plogi_issue,        /* DEVICE_RM       */
        lpfc_device_recov_plogi_issue,     /* DEVICE_RECOVERY */

        lpfc_rcv_plogi_adisc_issue,        /* RCV_PLOGI   ADISC_ISSUE    */
        lpfc_rcv_prli_adisc_issue,         /* RCV_PRLI        */
        lpfc_rcv_logo_adisc_issue,         /* RCV_LOGO        */
        lpfc_rcv_padisc_adisc_issue,       /* RCV_ADISC       */
        lpfc_rcv_padisc_adisc_issue,       /* RCV_PDISC       */
        lpfc_rcv_prlo_adisc_issue,         /* RCV_PRLO        */
        lpfc_disc_illegal,                 /* CMPL_PLOGI      */
        lpfc_disc_illegal,                 /* CMPL_PRLI       */
        lpfc_disc_illegal,                 /* CMPL_LOGO       */
        lpfc_cmpl_adisc_adisc_issue,       /* CMPL_ADISC      */
        lpfc_disc_illegal,                 /* CMPL_REG_LOGIN  */
        lpfc_device_rm_adisc_issue,        /* DEVICE_RM       */
        lpfc_device_recov_adisc_issue,     /* DEVICE_RECOVERY */

        lpfc_rcv_plogi_reglogin_issue,     /* RCV_PLOGI   REG_LOGIN_ISSUE */
        lpfc_rcv_prli_reglogin_issue,      /* RCV_PRLI        */
        lpfc_rcv_logo_reglogin_issue,      /* RCV_LOGO        */
        lpfc_rcv_padisc_reglogin_issue,    /* RCV_ADISC       */
        lpfc_rcv_padisc_reglogin_issue,    /* RCV_PDISC       */
        lpfc_rcv_prlo_reglogin_issue,      /* RCV_PRLO        */
        lpfc_cmpl_plogi_illegal,           /* CMPL_PLOGI      */
        lpfc_disc_illegal,                 /* CMPL_PRLI       */
        lpfc_disc_illegal,                 /* CMPL_LOGO       */
        lpfc_disc_illegal,                 /* CMPL_ADISC      */
        lpfc_cmpl_reglogin_reglogin_issue, /* CMPL_REG_LOGIN  */
        lpfc_device_rm_reglogin_issue,     /* DEVICE_RM       */
        lpfc_device_recov_reglogin_issue,  /* DEVICE_RECOVERY */

        lpfc_rcv_plogi_prli_issue,         /* RCV_PLOGI   PRLI_ISSUE     */
        lpfc_rcv_prli_prli_issue,          /* RCV_PRLI        */
        lpfc_rcv_logo_prli_issue,          /* RCV_LOGO        */
        lpfc_rcv_padisc_prli_issue,        /* RCV_ADISC       */
        lpfc_rcv_padisc_prli_issue,        /* RCV_PDISC       */
        lpfc_rcv_prlo_prli_issue,          /* RCV_PRLO        */
        lpfc_cmpl_plogi_illegal,           /* CMPL_PLOGI      */
        lpfc_cmpl_prli_prli_issue,         /* CMPL_PRLI       */
        lpfc_disc_illegal,                 /* CMPL_LOGO       */
        lpfc_disc_illegal,                 /* CMPL_ADISC      */
        lpfc_disc_illegal,                 /* CMPL_REG_LOGIN  */
        lpfc_device_rm_prli_issue,         /* DEVICE_RM       */
        lpfc_device_recov_prli_issue,      /* DEVICE_RECOVERY */

        lpfc_rcv_plogi_unmap_node,         /* RCV_PLOGI   UNMAPPED_NODE  */
        lpfc_rcv_prli_unmap_node,          /* RCV_PRLI        */
        lpfc_rcv_logo_unmap_node,          /* RCV_LOGO        */
        lpfc_rcv_padisc_unmap_node,        /* RCV_ADISC       */
        lpfc_rcv_padisc_unmap_node,        /* RCV_PDISC       */
        lpfc_rcv_prlo_unmap_node,          /* RCV_PRLO        */
        lpfc_disc_illegal,                 /* CMPL_PLOGI      */
        lpfc_disc_illegal,                 /* CMPL_PRLI       */
        lpfc_disc_illegal,                 /* CMPL_LOGO       */
        lpfc_disc_illegal,                 /* CMPL_ADISC      */
        lpfc_disc_illegal,                 /* CMPL_REG_LOGIN  */
        lpfc_disc_illegal,                 /* DEVICE_RM       */
        lpfc_device_recov_unmap_node,      /* DEVICE_RECOVERY */

        lpfc_rcv_plogi_mapped_node,        /* RCV_PLOGI   MAPPED_NODE    */
        lpfc_rcv_prli_mapped_node,         /* RCV_PRLI        */
        lpfc_rcv_logo_mapped_node,         /* RCV_LOGO        */
        lpfc_rcv_padisc_mapped_node,       /* RCV_ADISC       */
        lpfc_rcv_padisc_mapped_node,       /* RCV_PDISC       */
        lpfc_rcv_prlo_mapped_node,         /* RCV_PRLO        */
        lpfc_disc_illegal,                 /* CMPL_PLOGI      */
        lpfc_disc_illegal,                 /* CMPL_PRLI       */
        lpfc_disc_illegal,                 /* CMPL_LOGO       */
        lpfc_disc_illegal,                 /* CMPL_ADISC      */
        lpfc_disc_illegal,                 /* CMPL_REG_LOGIN  */
        lpfc_disc_illegal,                 /* DEVICE_RM       */
        lpfc_device_recov_mapped_node,     /* DEVICE_RECOVERY */

        lpfc_rcv_plogi_npr_node,           /* RCV_PLOGI   NPR_NODE       */
        lpfc_rcv_prli_npr_node,            /* RCV_PRLI        */
        lpfc_rcv_logo_npr_node,            /* RCV_LOGO        */
        lpfc_rcv_padisc_npr_node,          /* RCV_ADISC       */
        lpfc_rcv_padisc_npr_node,          /* RCV_PDISC       */
        lpfc_rcv_prlo_npr_node,            /* RCV_PRLO        */
        lpfc_cmpl_plogi_npr_node,          /* CMPL_PLOGI      */
        lpfc_cmpl_prli_npr_node,           /* CMPL_PRLI       */
        lpfc_cmpl_logo_npr_node,           /* CMPL_LOGO       */
        lpfc_cmpl_adisc_npr_node,          /* CMPL_ADISC      */
        lpfc_cmpl_reglogin_npr_node,       /* CMPL_REG_LOGIN  */
        lpfc_device_rm_npr_node,           /* DEVICE_RM       */
        lpfc_device_recov_npr_node,        /* DEVICE_RECOVERY */
};

int
lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                        void *arg, uint32_t evt)
{
        uint32_t cur_state, rc;
        uint32_t (*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *,
                          uint32_t);
        uint32_t got_ndlp = 0;

        if (lpfc_nlp_get(ndlp))
                got_ndlp = 1;

        cur_state = ndlp->nlp_state;

        /* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
                         "0211 DSM in event x%x on NPort x%x in "
                         "state %d Data: x%x\n",
                         evt, ndlp->nlp_DID, cur_state, ndlp->nlp_flag);

        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
                "DSM in: evt:%d ste:%d did:x%x",
                evt, cur_state, ndlp->nlp_DID);

        func = lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt];
        rc = (func) (vport, ndlp, arg, evt);

        /* DSM out state <rc> on NPort <nlp_DID> */
        if (got_ndlp) {
                lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
                        "0212 DSM out state %d on NPort x%x Data: x%x\n",
                        rc, ndlp->nlp_DID, ndlp->nlp_flag);

                lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
                        "DSM out: ste:%d did:x%x flg:x%x",
                        rc, ndlp->nlp_DID, ndlp->nlp_flag);
                /* Decrement the ndlp reference count held for this function */
                lpfc_nlp_put(ndlp);
        } else {
                lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
                        "0213 DSM out state %d on NPort free\n", rc);

                lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
                        "DSM out: ste:%d did:x%x flg:x%x",
                        rc, 0, 0);
        }

        return rc;
}
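
/*
 * Usage sketch for lpfc_disc_state_machine() (the local names here are
 * hypothetical): a discovery or ELS path that has matched an event to a
 * known node funnels it through the state machine rather than calling the
 * per-state routines directly, e.g. for an unsolicited PRLI:
 *
 *      uint32_t rc;
 *
 *      rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_RCV_PRLI);
 *      if (rc == NLP_STE_FREED_NODE)
 *              return;
 *
 * A return of NLP_STE_FREED_NODE means the handler dropped the node, so the
 * caller must not dereference ndlp afterwards; any other return value is the
 * node's resulting state. As shown above, the function takes and releases
 * its own ndlp reference around the dispatch.
 */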