/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 ******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"


/* Called to verify a rcv'ed ADISC was intended for us. */
static int
lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		 struct lpfc_name *nn, struct lpfc_name *pn)
{
	/* Check that the ADISC rsp WWNN / WWPN matches our internal node
	 * table entry for that node.
	 */
	if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name)))
		return 0;

	if (memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name)))
		return 0;

	/* we match, return success */
	return 1;
}
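
/* Check the service parameters in a PLOGI request or response payload
 * against this port's own service parameters: validate the requested
 * classes of service, clamp the advertised receive data field sizes to
 * what we support, and cache the remote node's WWNN / WWPN in the node
 * table entry.  Returns 1 if the service parameters are acceptable,
 * 0 if the device should be ignored.
 */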
int
lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		 struct serv_parm * sp, uint32_t class)
{
	volatile struct serv_parm *hsp = &vport->fc_sparam;
	uint16_t hsp_value, ssp_value = 0;

	/*
	 * The receive data field size and buffer-to-buffer receive data field
	 * size entries are 16 bits but are represented as two 8-bit fields in
	 * the driver data structure to account for rsvd bits and other control
	 * bits.  Reconstruct and compare the fields as 16-bit values before
	 * correcting the byte values.
	 */
	if (sp->cls1.classValid) {
		hsp_value = (hsp->cls1.rcvDataSizeMsb << 8) |
			     hsp->cls1.rcvDataSizeLsb;
		ssp_value = (sp->cls1.rcvDataSizeMsb << 8) |
			     sp->cls1.rcvDataSizeLsb;
		if (!ssp_value)
			goto bad_service_param;
		if (ssp_value > hsp_value) {
			sp->cls1.rcvDataSizeLsb = hsp->cls1.rcvDataSizeLsb;
			sp->cls1.rcvDataSizeMsb = hsp->cls1.rcvDataSizeMsb;
		}
	} else if (class == CLASS1) {
		goto bad_service_param;
	}

	if (sp->cls2.classValid) {
		hsp_value = (hsp->cls2.rcvDataSizeMsb << 8) |
			     hsp->cls2.rcvDataSizeLsb;
		ssp_value = (sp->cls2.rcvDataSizeMsb << 8) |
			     sp->cls2.rcvDataSizeLsb;
		if (!ssp_value)
			goto bad_service_param;
		if (ssp_value > hsp_value) {
			sp->cls2.rcvDataSizeLsb = hsp->cls2.rcvDataSizeLsb;
			sp->cls2.rcvDataSizeMsb = hsp->cls2.rcvDataSizeMsb;
		}
	} else if (class == CLASS2) {
		goto bad_service_param;
	}

	if (sp->cls3.classValid) {
		hsp_value = (hsp->cls3.rcvDataSizeMsb << 8) |
			     hsp->cls3.rcvDataSizeLsb;
		ssp_value = (sp->cls3.rcvDataSizeMsb << 8) |
			     sp->cls3.rcvDataSizeLsb;
		if (!ssp_value)
			goto bad_service_param;
		if (ssp_value > hsp_value) {
			sp->cls3.rcvDataSizeLsb = hsp->cls3.rcvDataSizeLsb;
			sp->cls3.rcvDataSizeMsb = hsp->cls3.rcvDataSizeMsb;
		}
	} else if (class == CLASS3) {
		goto bad_service_param;
	}

	/*
	 * Preserve the upper four bits of the MSB from the PLOGI response.
	 * These bits contain the Buffer-to-Buffer State Change Number
	 * from the target and need to be passed to the FW.
	 */
	hsp_value = (hsp->cmn.bbRcvSizeMsb << 8) | hsp->cmn.bbRcvSizeLsb;
	ssp_value = (sp->cmn.bbRcvSizeMsb << 8) | sp->cmn.bbRcvSizeLsb;
	if (ssp_value > hsp_value) {
		sp->cmn.bbRcvSizeLsb = hsp->cmn.bbRcvSizeLsb;
		sp->cmn.bbRcvSizeMsb = (sp->cmn.bbRcvSizeMsb & 0xF0) |
				       (hsp->cmn.bbRcvSizeMsb & 0x0F);
	}

	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name));
	return 1;
bad_service_param:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
			 "0207 Device %x "
			 "(%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x) sent "
			 "invalid service parameters.  Ignoring device.\n",
			 ndlp->nlp_DID,
			 sp->nodeName.u.wwn[0], sp->nodeName.u.wwn[1],
			 sp->nodeName.u.wwn[2], sp->nodeName.u.wwn[3],
			 sp->nodeName.u.wwn[4], sp->nodeName.u.wwn[5],
			 sp->nodeName.u.wwn[6], sp->nodeName.u.wwn[7]);
	return 0;
}

/* Return a pointer to the payload that follows the ELS command word in the
 * response buffer of a completed ELS command.  If the command buffer has
 * already been released (e.g. by lpfc_els_abort), force an aborted status
 * on the response and return NULL.
 */
static void *
lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	struct lpfc_dmabuf *pcmd, *prsp;
	uint32_t *lp;
	void *ptr = NULL;
	IOCB_t *irsp;

	irsp = &rspiocb->iocb;
	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;

	/* For lpfc_els_abort, context2 could be zero'ed to delay
	 * freeing associated memory till after ABTS completes.
	 */
	if (pcmd) {
		prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf,
				      list);
		if (prsp) {
			lp = (uint32_t *) prsp->virt;
			ptr = (void *)((uint8_t *)lp + sizeof(uint32_t));
		}
	} else {
		/* Force ulpStatus error since we are returning NULL ptr */
		if (!(irsp->ulpStatus)) {
			irsp->ulpStatus = IOSTAT_LOCAL_REJECT;
			irsp->un.ulpWord[4] = IOERR_SLI_ABORTED;
		}
		ptr = NULL;
	}
	return ptr;
}


/*
 * Free resources / clean up outstanding I/Os
 * associated with an LPFC_NODELIST entry.  This
 * routine effectively results in a "software abort".
 */
int
lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
	struct lpfc_iocbq *iocb, *next_iocb;

	/* Abort outstanding I/O on NPort <nlp_DID> */
	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY,
			 "0205 Abort outstanding I/O on NPort x%x "
			 "Data: x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
			 ndlp->nlp_rpi);

	lpfc_fabric_abort_nport(ndlp);

	/* First check the txq */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		/* Check to see if iocb matches the nport we are looking for */
		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
			/* It matches, so dequeue it and call compl with an
			 * error
			 */
			list_move_tail(&iocb->list, &completions);
			pring->txq_cnt--;
		}
	}

	/* Next check the txcmplq */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		/* Check to see if iocb matches the nport we are looking for */
		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		}
	}
	spin_unlock_irq(&phba->hbalock);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);

	lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
	return 0;
}

static int
lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	       struct lpfc_iocbq *cmdiocb)
{
	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba    *phba = vport->phba;
	struct lpfc_dmabuf *pcmd;
	uint32_t *lp;
	IOCB_t *icmd;
	struct serv_parm *sp;
	LPFC_MBOXQ_t *mbox;
	struct ls_rjt stat;
	int rc;

	memset(&stat, 0, sizeof (struct ls_rjt));
	if (vport->port_state <= LPFC_FLOGI) {
		/* Before responding to PLOGI, check for pt2pt mode.
		 * If we are pt2pt, with an outstanding FLOGI, abort
		 * the FLOGI and resend it first.
		 */
		if (vport->fc_flag & FC_PT2PT) {
			lpfc_els_abort_flogi(phba);
			if (!(vport->fc_flag & FC_PT2PT_PLOGI)) {
				/* If the other side is supposed to initiate
				 * the PLOGI anyway, just ACC it now and
				 * move on with discovery.
				 */
				phba->fc_edtov = FF_DEF_EDTOV;
				phba->fc_ratov = FF_DEF_RATOV;
				/* Start discovery - this should just do
				   CLEAR_LA */
				lpfc_disc_start(vport);
			} else
				lpfc_initial_flogi(vport);
		} else {
			stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
			stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
			lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
					    ndlp, NULL);
			return 0;
		}
	}
	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	lp = (uint32_t *) pcmd->virt;
	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
	if (wwn_to_u64(sp->portName.u.wwn) == 0) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0140 PLOGI Reject: invalid pname\n");
		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
		stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_PNAME;
		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
				    NULL);
		return 0;
	}
	if (wwn_to_u64(sp->nodeName.u.wwn) == 0) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0141 PLOGI Reject: invalid nname\n");
		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
		stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_NNAME;
		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
				    NULL);
		return 0;
	}
	if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3) == 0)) {
		/* Reject this request because of invalid parameters */
		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
		stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
				    NULL);
		return 0;
	}
	icmd = &cmdiocb->iocb;

	/* PLOGI chkparm OK */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
			 ndlp->nlp_rpi);

	if (vport->cfg_fcp_class == 2 && sp->cls2.classValid)
		ndlp->nlp_fcp_info |= CLASS2;
	else
		ndlp->nlp_fcp_info |= CLASS3;

	ndlp->nlp_class_sup = 0;
	if (sp->cls1.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS1;
	if (sp->cls2.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS2;
	if (sp->cls3.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS3;
	if (sp->cls4.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS4;
	ndlp->nlp_maxframe =
		((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;

	/* no need to reg_login if we are already in one of these states */
	switch (ndlp->nlp_state) {
	case NLP_STE_NPR_NODE:
		if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
			break;
	case NLP_STE_REG_LOGIN_ISSUE:
	case NLP_STE_PRLI_ISSUE:
	case NLP_STE_UNMAPPED_NODE:
	case NLP_STE_MAPPED_NODE:
		lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
		return 1;
	}

	if ((vport->fc_flag & FC_PT2PT) &&
	    !(vport->fc_flag & FC_PT2PT_PLOGI)) {
		/* rcv'ed PLOGI decides what our NPortId will be */
		vport->fc_myDID = icmd->un.rcvels.parmRo;
		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mbox == NULL)
			goto out;
		lpfc_config_link(phba, mbox);
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->vport = vport;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
			goto out;
		}

		lpfc_can_disctmo(vport);
	}
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		goto out;

	rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
			  (uint8_t *) sp, mbox, 0);
	if (rc) {
		mempool_free(mbox, phba->mbox_mem_pool);
		goto out;
	}

	/* ACC PLOGI rsp command needs to execute first,
	 * queue this mbox command to be processed later.
	 */
	mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
	/*
	 * mbox->context2 = lpfc_nlp_get(ndlp) deferred until mailbox
	 * command issued in lpfc_cmpl_els_acc().
	 */
	mbox->vport = vport;
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
	spin_unlock_irq(shost->host_lock);

	/*
	 * If there is an outstanding PLOGI issued, abort it before
	 * sending ACC rsp for received PLOGI.  If the pending plogi
	 * is not canceled here, it will be rejected by the
	 * remote port and retried.  On a configuration with a
	 * single discovery thread, this will cause a huge delay in
	 * discovery.  It will also cause multiple state machines
	 * to run in parallel for this node.
	 */
	if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
		/* software abort outstanding PLOGI */
		lpfc_els_abort(phba, ndlp);
	}

	if ((vport->port_type == LPFC_NPIV_PORT &&
	     vport->cfg_restrict_login)) {

		/* In order to preserve RPIs, we want to cleanup
		 * the default RPI the firmware created to rcv
		 * this ELS request.  The only way to do this is
		 * to register, then unregister the RPI.
		 */
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_RM_DFLT_RPI;
		spin_unlock_irq(shost->host_lock);
		stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
		stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
				    ndlp, mbox);
		return 1;
	}
	lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
	return 1;
out:
	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
	stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
	return 0;
}

static int
lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		struct lpfc_iocbq *cmdiocb)
{
	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
	struct lpfc_dmabuf *pcmd;
	struct serv_parm   *sp;
	struct lpfc_name   *pnn, *ppn;
	struct ls_rjt stat;
	ADISC *ap;
	IOCB_t *icmd;
	uint32_t *lp;
	uint32_t cmd;

	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	lp = (uint32_t *) pcmd->virt;

	cmd = *lp++;
	if (cmd == ELS_CMD_ADISC) {
		ap = (ADISC *) lp;
		pnn = (struct lpfc_name *) & ap->nodeName;
		ppn = (struct lpfc_name *) & ap->portName;
	} else {
		sp = (struct serv_parm *) lp;
		pnn = (struct lpfc_name *) & sp->nodeName;
		ppn = (struct lpfc_name *) & sp->portName;
	}

	icmd = &cmdiocb->iocb;
	if (icmd->ulpStatus == 0 && lpfc_check_adisc(vport, ndlp, pnn, ppn)) {
		if (cmd == ELS_CMD_ADISC) {
			lpfc_els_rsp_adisc_acc(vport, cmdiocb, ndlp);
		} else {
			lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp,
					 NULL);
		}
		return 1;
	}
	/* Reject this request because of invalid parameters */
	stat.un.b.lsRjtRsvd0 = 0;
	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
	stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
	stat.un.b.vendorUnique = 0;
	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);

	/* 1 sec timeout */
	mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_DELAY_TMO;
	spin_unlock_irq(shost->host_lock);
	ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
	ndlp->nlp_prev_state = ndlp->nlp_state;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	return 0;
}

static int
lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      struct lpfc_iocbq *cmdiocb, uint32_t els_cmd)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	/* Put ndlp in NPR state with 1 sec timeout for plogi, ACC logo */
	/* Only call LOGO ACC for the first LOGO; this avoids sending
	 * unnecessary PLOGIs during LOGO storms from a device.
	 */
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_LOGO_ACC;
	spin_unlock_irq(shost->host_lock);
	if (els_cmd == ELS_CMD_PRLO)
		lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
	else
		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
	if ((ndlp->nlp_DID == Fabric_DID) &&
	    vport->port_type == LPFC_NPIV_PORT) {
		lpfc_linkdown_port(vport);
		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_DELAY_TMO;
		spin_unlock_irq(shost->host_lock);

		ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
	} else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
		((ndlp->nlp_type & NLP_FCP_TARGET) ||
		!(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
		(ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
		/* Only try to re-login if this is NOT a Fabric Node */
		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_DELAY_TMO;
		spin_unlock_irq(shost->host_lock);

		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
	}
	ndlp->nlp_prev_state = ndlp->nlp_state;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~NLP_NPR_ADISC;
	spin_unlock_irq(shost->host_lock);
	/* The driver has to wait until the ACC completes before it continues
	 * processing the LOGO.  The action will resume in the
	 * lpfc_cmpl_els_logo_acc routine.  Since part of processing includes an
	 * unreg_login, the driver waits so the ACC does not get aborted.
	 */
	return 0;
}

static void
lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      struct lpfc_iocbq *cmdiocb)
{
	struct lpfc_dmabuf *pcmd;
	uint32_t *lp;
	PRLI *npr;
	struct fc_rport *rport = ndlp->rport;
	u32 roles;

	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	lp = (uint32_t *) pcmd->virt;
	npr = (PRLI *) ((uint8_t *) lp + sizeof (uint32_t));

	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
	if (npr->prliType == PRLI_FCP_TYPE) {
		if (npr->initiatorFunc)
			ndlp->nlp_type |= NLP_FCP_INITIATOR;
		if (npr->targetFunc)
			ndlp->nlp_type |= NLP_FCP_TARGET;
		if (npr->Retry)
			ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
	}
	if (rport) {
		/* We need to update the rport role values */
		roles = FC_RPORT_ROLE_UNKNOWN;
		if (ndlp->nlp_type & NLP_FCP_INITIATOR)
			roles |= FC_RPORT_ROLE_FCP_INITIATOR;
		if (ndlp->nlp_type & NLP_FCP_TARGET)
			roles |= FC_RPORT_ROLE_FCP_TARGET;

		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
			"rport rolechg: role:x%x did:x%x flg:x%x",
			roles, ndlp->nlp_DID, ndlp->nlp_flag);

		fc_remote_port_rolechg(rport, roles);
	}
}

static uint32_t
lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (!(ndlp->nlp_flag & NLP_RPI_VALID)) {
		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
		return 0;
	}

	if (!(vport->fc_flag & FC_PT2PT)) {
		/* Check config parameter use-adisc or FCP-2 */
		if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) ||
		    ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_NPR_ADISC;
			spin_unlock_irq(shost->host_lock);
			return 1;
		}
	}
	ndlp->nlp_flag &= ~NLP_NPR_ADISC;
	lpfc_unreg_rpi(vport, ndlp);
	return 0;
}

static uint32_t
lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		  void *arg, uint32_t evt)
{
	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
			 "0271 Illegal State Transition: node x%x "
			 "event x%x, state x%x Data: x%x x%x\n",
			 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
			 ndlp->nlp_flag);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_plogi_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			void *arg, uint32_t evt)
{
	/* This transition is only legal if we previously
	 * rcv'ed a PLOGI.  Since we don't want 2 discovery threads
	 * working on the same NPortID, do nothing for this thread
	 * to stop it.
	 */
	if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
			 "0272 Illegal State Transition: node x%x "
			 "event x%x, state x%x Data: x%x x%x\n",
			 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
			 ndlp->nlp_flag);
	}
	return ndlp->nlp_state;
}

/* Start of Discovery State Machine routines */

static uint32_t
lpfc_rcv_plogi_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
		return ndlp->nlp_state;
	}
	return NLP_STE_FREED_NODE;
}

static uint32_t
lpfc_rcv_els_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	lpfc_issue_els_logo(vport, ndlp, 0);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_LOGO_ACC;
	spin_unlock_irq(shost->host_lock);
	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);

	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	return NLP_STE_FREED_NODE;
}

static uint32_t
lpfc_device_rm_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	return NLP_STE_FREED_NODE;
}

static uint32_t
lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba    *phba = vport->phba;
	struct lpfc_iocbq  *cmdiocb = arg;
	struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	uint32_t *lp = (uint32_t *) pcmd->virt;
	struct serv_parm *sp = (struct serv_parm *) (lp + 1);
	struct ls_rjt stat;
	int port_cmp;

	memset(&stat, 0, sizeof (struct ls_rjt));

	/* For a PLOGI, we only accept if our portname is less
	 * than the remote portname.
	 */
	phba->fc_stat.elsLogiCol++;
	port_cmp = memcmp(&vport->fc_portname, &sp->portName,
			  sizeof(struct lpfc_name));

	if (port_cmp >= 0) {
		/* Reject this request because the remote node will accept
		   ours */
		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
		stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
				    NULL);
	} else {
		if (lpfc_rcv_plogi(vport, ndlp, cmdiocb) &&
		    (ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
		    (vport->num_disc_nodes)) {
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
			spin_unlock_irq(shost->host_lock);
			/* Check if there are more PLOGIs to be sent */
			lpfc_more_plogi(vport);
			if (vport->num_disc_nodes == 0) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_NDISC_ACTIVE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
				lpfc_end_rscn(vport);
			}
		}
	} /* If our portname was less */

	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prli_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
	struct ls_rjt     stat;

	memset(&stat, 0, sizeof (struct ls_rjt));
	stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	/* software abort outstanding PLOGI */
	lpfc_els_abort(vport->phba, ndlp);

	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	/* software abort outstanding PLOGI */
	lpfc_els_abort(phba, ndlp);

	if (evt == NLP_EVT_RCV_LOGO) {
		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
	} else {
		lpfc_issue_els_logo(vport, ndlp, 0);
	}

	/* Put ndlp in NPR state; set plogi timer for 1 sec */
	mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_DELAY_TMO;
	spin_unlock_irq(shost->host_lock);
	ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
	ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);

	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
			    struct lpfc_nodelist *ndlp,
			    void *arg,
			    uint32_t evt)
{
	struct lpfc_hba    *phba = vport->phba;
	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
	struct lpfc_iocbq  *cmdiocb, *rspiocb;
	struct lpfc_dmabuf *pcmd, *prsp, *mp;
	uint32_t *lp;
	IOCB_t *irsp;
	struct serv_parm *sp;
	LPFC_MBOXQ_t *mbox;

	cmdiocb = (struct lpfc_iocbq *) arg;
	rspiocb = cmdiocb->context_un.rsp_iocb;

	if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
		/* Recovery from PLOGI collision logic */
		return ndlp->nlp_state;
	}

	irsp = &rspiocb->iocb;

	if (irsp->ulpStatus)
		goto out;

	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;

	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);

	lp = (uint32_t *) prsp->virt;
	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));

	/* Some switches have FDMI servers returning 0 for WWN */
	if ((ndlp->nlp_DID != FDMI_DID) &&
		(wwn_to_u64(sp->portName.u.wwn) == 0 ||
		wwn_to_u64(sp->nodeName.u.wwn) == 0)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0142 PLOGI RSP: Invalid WWN.\n");
		goto out;
	}
	if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3))
		goto out;
	/* PLOGI chkparm OK */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0121 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_state,
			 ndlp->nlp_flag, ndlp->nlp_rpi);
	if (vport->cfg_fcp_class == 2 && (sp->cls2.classValid))
		ndlp->nlp_fcp_info |= CLASS2;
	else
		ndlp->nlp_fcp_info |= CLASS3;

	ndlp->nlp_class_sup = 0;
	if (sp->cls1.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS1;
	if (sp->cls2.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS2;
	if (sp->cls3.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS3;
	if (sp->cls4.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS4;
	ndlp->nlp_maxframe =
		((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0133 PLOGI: no memory for reg_login "
				 "Data: x%x x%x x%x x%x\n",
				 ndlp->nlp_DID, ndlp->nlp_state,
				 ndlp->nlp_flag, ndlp->nlp_rpi);
		goto out;
	}

	lpfc_unreg_rpi(vport, ndlp);

	if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID,
			 (uint8_t *) sp, mbox, 0) == 0) {
		switch (ndlp->nlp_DID) {
		case NameServer_DID:
			mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login;
			break;
		case FDMI_DID:
			mbox->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login;
			break;
		default:
			mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
		}
		mbox->context2 = lpfc_nlp_get(ndlp);
		mbox->vport = vport;
		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
		    != MBX_NOT_FINISHED) {
			lpfc_nlp_set_state(vport, ndlp,
					   NLP_STE_REG_LOGIN_ISSUE);
			return ndlp->nlp_state;
		}
		/* decrement node reference count to the failed mbox
		 * command
		 */
		lpfc_nlp_put(ndlp);
		mp = (struct lpfc_dmabuf *) mbox->context1;
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(mbox, phba->mbox_mem_pool);

		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0134 PLOGI: cannot issue reg_login "
				 "Data: x%x x%x x%x x%x\n",
				 ndlp->nlp_DID, ndlp->nlp_state,
				 ndlp->nlp_flag, ndlp->nlp_rpi);
	} else {
		mempool_free(mbox, phba->mbox_mem_pool);

		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0135 PLOGI: cannot format reg_login "
				 "Data: x%x x%x x%x x%x\n",
				 ndlp->nlp_DID, ndlp->nlp_state,
				 ndlp->nlp_flag, ndlp->nlp_rpi);
	}


out:
	if (ndlp->nlp_DID == NameServer_DID) {
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0261 Cannot Register NameServer login\n");
	}

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_DEFER_RM;
	spin_unlock_irq(shost->host_lock);
	return NLP_STE_FREED_NODE;
}

static uint32_t
lpfc_cmpl_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_reglogin_plogi_issue(struct lpfc_vport *vport,
	struct lpfc_nodelist *ndlp, void *arg, uint32_t evt)
{
	return ndlp->nlp_state;
}

static uint32_t
lpfc_device_rm_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
		spin_unlock_irq(shost->host_lock);
		return ndlp->nlp_state;
	} else {
		/* software abort outstanding PLOGI */
		lpfc_els_abort(vport->phba, ndlp);

		lpfc_drop_node(vport, ndlp);
		return NLP_STE_FREED_NODE;
	}
}

static uint32_t
lpfc_device_recov_plogi_issue(struct lpfc_vport *vport,
			      struct lpfc_nodelist *ndlp,
			      void *arg,
			      uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;

	/* Don't do anything that will mess up processing of the
	 * previous RSCN.
	 */
	if (vport->fc_flag & FC_RSCN_DEFERRED)
		return ndlp->nlp_state;

	/* software abort outstanding PLOGI */
	lpfc_els_abort(phba, ndlp);

	ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(shost->host_lock);

	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb;

	/* software abort outstanding ADISC */
	lpfc_els_abort(phba, ndlp);

	cmdiocb = (struct lpfc_iocbq *) arg;

	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
		if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
			spin_unlock_irq(shost->host_lock);
			if (vport->num_disc_nodes)
				lpfc_more_adisc(vport);
		}
		return ndlp->nlp_state;
	}
	ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
	lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);

	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prli_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_logo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	/* software abort outstanding ADISC */
	lpfc_els_abort(phba, ndlp);

	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_padisc_adisc_issue(struct lpfc_vport *vport,
			    struct lpfc_nodelist *ndlp,
			    void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prlo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	/* Treat like rcv logo */
	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
			    struct lpfc_nodelist *ndlp,
			    void *arg, uint32_t evt)
{
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb, *rspiocb;
	IOCB_t *irsp;
	ADISC *ap;
	int rc;

	cmdiocb = (struct lpfc_iocbq *) arg;
	rspiocb = cmdiocb->context_un.rsp_iocb;

	ap = (ADISC *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
	irsp = &rspiocb->iocb;

	if ((irsp->ulpStatus) ||
	    (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) {
		/* 1 sec timeout */
		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_DELAY_TMO;
		spin_unlock_irq(shost->host_lock);
		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;

		memset(&ndlp->nlp_nodename, 0, sizeof(struct lpfc_name));
		memset(&ndlp->nlp_portname, 0, sizeof(struct lpfc_name));

		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		lpfc_unreg_rpi(vport, ndlp);
		return ndlp->nlp_state;
	}

	if (phba->sli_rev == LPFC_SLI_REV4) {
		rc = lpfc_sli4_resume_rpi(ndlp);
		if (rc) {
			/* Stay in state and retry. */
			ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
			return ndlp->nlp_state;
		}
	}

	if (ndlp->nlp_type & NLP_FCP_TARGET) {
		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
	} else {
		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
	}

	return ndlp->nlp_state;
}

static uint32_t
lpfc_device_rm_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
		spin_unlock_irq(shost->host_lock);
		return ndlp->nlp_state;
	} else {
		/* software abort outstanding ADISC */
		lpfc_els_abort(vport->phba, ndlp);

		lpfc_drop_node(vport, ndlp);
		return NLP_STE_FREED_NODE;
	}
}

static uint32_t
lpfc_device_recov_adisc_issue(struct lpfc_vport *vport,
			      struct lpfc_nodelist *ndlp,
			      void *arg,
			      uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;

	/* Don't do anything that will mess up processing of the
	 * previous RSCN.
	 */
	if (vport->fc_flag & FC_RSCN_DEFERRED)
		return ndlp->nlp_state;

	/* software abort outstanding ADISC */
	lpfc_els_abort(phba, ndlp);

	ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(shost->host_lock);
	lpfc_disc_set_adisc(vport, ndlp);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_plogi_reglogin_issue(struct lpfc_vport *vport,
			      struct lpfc_nodelist *ndlp,
			      void *arg,
			      uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport,
			     struct lpfc_nodelist *ndlp,
			     void *arg,
			     uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
			     struct lpfc_nodelist *ndlp,
			     void *arg,
			     uint32_t evt)
{
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
	LPFC_MBOXQ_t	  *mb;
	LPFC_MBOXQ_t	  *nextmb;
	struct lpfc_dmabuf *mp;

	cmdiocb = (struct lpfc_iocbq *) arg;

	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
	if ((mb = phba->sli.mbox_active)) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
		    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			lpfc_nlp_put(ndlp);
			mb->context2 = NULL;
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
	}

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
		    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			if (phba->sli_rev == LPFC_SLI_REV4) {
				spin_unlock_irq(&phba->hbalock);
				lpfc_sli4_free_rpi(phba,
					mb->u.mb.un.varRegLogin.rpi);
				spin_lock_irq(&phba->hbalock);
			}
			mp = (struct lpfc_dmabuf *) (mb->context1);
			if (mp) {
				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			lpfc_nlp_put(ndlp);
			list_del(&mb->list);
			phba->sli.mboxq_cnt--;
			mempool_free(mb, phba->mbox_mem_pool);
		}
	}
	spin_unlock_irq(&phba->hbalock);

	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_padisc_reglogin_issue(struct lpfc_vport *vport,
			       struct lpfc_nodelist *ndlp,
			       void *arg,
			       uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prlo_reglogin_issue(struct lpfc_vport *vport,
			     struct lpfc_nodelist *ndlp,
			     void *arg,
			     uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;
	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
				  struct lpfc_nodelist *ndlp,
				  void *arg,
				  uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
	MAILBOX_t *mb = &pmb->u.mb;
	uint32_t did = mb->un.varWords[1];

	if (mb->mbxStatus) {
		/* RegLogin failed */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0246 RegLogin failed Data: x%x x%x x%x\n",
				 did, mb->mbxStatus, vport->port_state);
		/*
		 * If RegLogin failed due to lack of HBA resources do not
		 * retry discovery.
		 */
		if (mb->mbxStatus == MBXERR_RPI_FULL) {
			ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
			return ndlp->nlp_state;
		}

		/* Put ndlp in NPR state; set plogi timer for 1 sec */
		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_DELAY_TMO;
		spin_unlock_irq(shost->host_lock);
		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;

		lpfc_issue_els_logo(vport, ndlp, 0);
		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		return ndlp->nlp_state;
	}

	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_flag |= NLP_RPI_VALID;

	/* Only if we are not a fabric nport do we issue PRLI */
	if (!(ndlp->nlp_type & NLP_FABRIC)) {
		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
		lpfc_issue_els_prli(vport, ndlp, 0);
	} else {
		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
	}
	return ndlp->nlp_state;
}

static uint32_t
lpfc_device_rm_reglogin_issue(struct lpfc_vport *vport,
			      struct lpfc_nodelist *ndlp,
			      void *arg,
			      uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
		spin_unlock_irq(shost->host_lock);
		return ndlp->nlp_state;
	} else {
		lpfc_drop_node(vport, ndlp);
		return NLP_STE_FREED_NODE;
	}
}

static uint32_t
lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport,
				 struct lpfc_nodelist *ndlp,
				 void *arg,
				 uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	/* Don't do anything that will mess up processing of the
	 * previous RSCN.
1357 */ 1358 if (vport->fc_flag & FC_RSCN_DEFERRED) 1359 return ndlp->nlp_state; 1360 1361 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; 1362 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 1363 spin_lock_irq(shost->host_lock); 1364 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 1365 spin_unlock_irq(shost->host_lock); 1366 lpfc_disc_set_adisc(vport, ndlp); 1367 return ndlp->nlp_state; 1368 } 1369 1370 static uint32_t 1371 lpfc_rcv_plogi_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1372 void *arg, uint32_t evt) 1373 { 1374 struct lpfc_iocbq *cmdiocb; 1375 1376 cmdiocb = (struct lpfc_iocbq *) arg; 1377 1378 lpfc_rcv_plogi(vport, ndlp, cmdiocb); 1379 return ndlp->nlp_state; 1380 } 1381 1382 static uint32_t 1383 lpfc_rcv_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1384 void *arg, uint32_t evt) 1385 { 1386 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 1387 1388 lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp); 1389 return ndlp->nlp_state; 1390 } 1391 1392 static uint32_t 1393 lpfc_rcv_logo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1394 void *arg, uint32_t evt) 1395 { 1396 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 1397 1398 /* Software abort outstanding PRLI before sending acc */ 1399 lpfc_els_abort(vport->phba, ndlp); 1400 1401 lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO); 1402 return ndlp->nlp_state; 1403 } 1404 1405 static uint32_t 1406 lpfc_rcv_padisc_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1407 void *arg, uint32_t evt) 1408 { 1409 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 1410 1411 lpfc_rcv_padisc(vport, ndlp, cmdiocb); 1412 return ndlp->nlp_state; 1413 } 1414 1415 /* This routine is envoked when we rcv a PRLO request from a nport 1416 * we are logged into. We should send back a PRLO rsp setting the 1417 * appropriate bits. 
 * NEXT STATE = PRLI_ISSUE
 */
static uint32_t
lpfc_rcv_prlo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_iocbq *cmdiocb, *rspiocb;
	struct lpfc_hba   *phba = vport->phba;
	IOCB_t *irsp;
	PRLI *npr;

	cmdiocb = (struct lpfc_iocbq *) arg;
	rspiocb = cmdiocb->context_un.rsp_iocb;
	npr = (PRLI *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);

	irsp = &rspiocb->iocb;
	if (irsp->ulpStatus) {
		if ((vport->port_type == LPFC_NPIV_PORT) &&
		    vport->cfg_restrict_login) {
			goto out;
		}
		ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		return ndlp->nlp_state;
	}

	/* Check out PRLI rsp */
	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
	if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
	    (npr->prliType == PRLI_FCP_TYPE)) {
		if (npr->initiatorFunc)
			ndlp->nlp_type |= NLP_FCP_INITIATOR;
		if (npr->targetFunc)
			ndlp->nlp_type |= NLP_FCP_TARGET;
		if (npr->Retry)
			ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
	}
	if (!(ndlp->nlp_type & NLP_FCP_TARGET) &&
	    (vport->port_type == LPFC_NPIV_PORT) &&
	     vport->cfg_restrict_login) {
out:
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_TARGET_REMOVE;
		spin_unlock_irq(shost->host_lock);
		lpfc_issue_els_logo(vport, ndlp, 0);

		ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		return ndlp->nlp_state;
	}

	ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
	if (ndlp->nlp_type & NLP_FCP_TARGET)
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
	else
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
	return ndlp->nlp_state;
}

/*! lpfc_device_rm_prli_issue
 *
 * \pre
 * \post
 * \param   phba
 * \param   ndlp
 * \param   arg
 * \param   evt
 * \return  uint32_t
 *
 * \b Description:
 *    This routine is invoked when we receive a request to remove a nport
 *    we are in the process of PRLIing.  We should software abort the
 *    outstanding PRLI, unreg login, and send a LOGO.  We will change the
 *    node state to UNUSED_NODE and put it on the plogi list so it can be
 *    freed when the LOGO completes.
 *
 */

static uint32_t
lpfc_device_rm_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
		spin_unlock_irq(shost->host_lock);
		return ndlp->nlp_state;
	} else {
		/* software abort outstanding PLOGI */
		lpfc_els_abort(vport->phba, ndlp);

		lpfc_drop_node(vport, ndlp);
		return NLP_STE_FREED_NODE;
	}
}

/*! lpfc_device_recov_prli_issue
 *
 * \pre
 * \post
 * \param   phba
 * \param   ndlp
 * \param   arg
 * \param   evt
 * \return  uint32_t
 *
 * \b Description:
 *    The routine is invoked when the state of a device is unknown, like
 *    during a link down.  We should remove the nodelist entry from the
 *    unmapped list, issue a UNREG_LOGIN, do a software abort of the
 *    outstanding PRLI command, then free the node entry.
 */
static uint32_t
lpfc_device_recov_prli_issue(struct lpfc_vport *vport,
			     struct lpfc_nodelist *ndlp,
			     void *arg,
			     uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;

	/* Don't do anything that will mess up processing of the
	 * previous RSCN.
	 */
	if (vport->fc_flag & FC_RSCN_DEFERRED)
		return ndlp->nlp_state;

	/* software abort outstanding PRLI */
	lpfc_els_abort(phba, ndlp);

	ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(shost->host_lock);
	lpfc_disc_set_adisc(vport, ndlp);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_plogi_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prli_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_prli(vport, ndlp, cmdiocb);
	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_logo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_padisc_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prlo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_device_recov_unmap_node(struct lpfc_vport *vport,
			     struct lpfc_nodelist *ndlp,
			     void *arg,
			     uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(shost->host_lock);
	lpfc_disc_set_adisc(vport, ndlp);

	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_plogi_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prli_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_logo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_padisc_mapped_node(struct lpfc_vport *vport,
			    struct lpfc_nodelist *ndlp,
			    void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	/* flush the target */
	lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
			    ndlp->nlp_sid, 0, LPFC_CTX_TGT);

	/* Treat like rcv logo */
	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_device_recov_mapped_node(struct lpfc_vport *vport,
			      struct lpfc_nodelist *ndlp,
			      void *arg,
			      uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(shost->host_lock);
	lpfc_disc_set_adisc(vport, ndlp);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	/* Ignore PLOGI if we have an outstanding LOGO */
	if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC))
		return ndlp->nlp_state;
	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
		lpfc_cancel_retry_delay_tmo(vport, ndlp);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC);
		spin_unlock_irq(shost->host_lock);
	} else if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
		/* send PLOGI immediately, move to PLOGI issue state */
		if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
		}
	}
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		       void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
	struct ls_rjt     stat;

	memset(&stat, 0, sizeof (struct ls_rjt));
	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);

	if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
		if (ndlp->nlp_flag & NLP_NPR_ADISC) {
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
			spin_unlock_irq(shost->host_lock);
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
			lpfc_issue_els_adisc(vport, ndlp, 0);
		} else {
			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
		}
	}
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
	/*
	 * Do not start discovery if discovery is about to start
	 * or discovery in progress for this node.  Starting discovery
	 * here will affect the counting of discovery threads.
	 */
	if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
		if (ndlp->nlp_flag & NLP_NPR_ADISC) {
			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
			lpfc_issue_els_adisc(vport, ndlp, 0);
		} else {
			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
		}
	}
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		       void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_LOGO_ACC;
	spin_unlock_irq(shost->host_lock);

	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);

	if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) {
		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_DELAY_TMO;
		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
		spin_unlock_irq(shost->host_lock);
		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
	} else {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
		spin_unlock_irq(shost->host_lock);
	}
	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb, *rspiocb;
	IOCB_t *irsp;

	cmdiocb = (struct lpfc_iocbq *) arg;
	rspiocb = cmdiocb->context_un.rsp_iocb;

	irsp = &rspiocb->iocb;
	if (irsp->ulpStatus) {
		ndlp->nlp_flag |= NLP_DEFER_RM;
		return NLP_STE_FREED_NODE;
	}
	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			void *arg, uint32_t evt)
static uint32_t
lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb, *rspiocb;
	IOCB_t *irsp;

	cmdiocb = (struct lpfc_iocbq *) arg;
	rspiocb = cmdiocb->context_un.rsp_iocb;

	irsp = &rspiocb->iocb;
	if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
		lpfc_drop_node(vport, ndlp);
		return NLP_STE_FREED_NODE;
	}
	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (ndlp->nlp_DID == Fabric_DID) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
		spin_unlock_irq(shost->host_lock);
	}
	lpfc_unreg_rpi(vport, ndlp);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_adisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb, *rspiocb;
	IOCB_t *irsp;

	cmdiocb = (struct lpfc_iocbq *) arg;
	rspiocb = cmdiocb->context_un.rsp_iocb;

	irsp = &rspiocb->iocb;
	if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
		lpfc_drop_node(vport, ndlp);
		return NLP_STE_FREED_NODE;
	}
	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
			    struct lpfc_nodelist *ndlp,
			    void *arg, uint32_t evt)
{
	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
	MAILBOX_t    *mb = &pmb->u.mb;

	if (!mb->mbxStatus) {
		ndlp->nlp_rpi = mb->un.varWords[0];
		ndlp->nlp_flag |= NLP_RPI_VALID;
	} else {
		if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
			lpfc_drop_node(vport, ndlp);
			return NLP_STE_FREED_NODE;
		}
	}
	return ndlp->nlp_state;
}
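/* DEVICE_RM in NPR state: if the node is still queued for discovery
 * (NLP_NPR_2B_DISC), defer the removal by setting NLP_NODEV_REMOVE so it
 * can be dropped once discovery completes; otherwise drop the node now.
 */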
static uint32_t
lpfc_device_rm_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
		spin_unlock_irq(shost->host_lock);
		return ndlp->nlp_state;
	}
	lpfc_drop_node(vport, ndlp);
	return NLP_STE_FREED_NODE;
}

static uint32_t
lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	/* Don't do anything that will mess up processing of the
	 * previous RSCN.
	 */
	if (vport->fc_flag & FC_RSCN_DEFERRED)
		return ndlp->nlp_state;

	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(shost->host_lock);
	return ndlp->nlp_state;
}


/* This next section defines the NPort Discovery State Machine */

/* There are four doubly linked lists that nodelist entries can reside on.
 * The plogi list and adisc list are used when Link Up discovery or RSCN
 * processing is needed. Each list holds the nodes that we will send PLOGI
 * or ADISC on. These lists keep track of which nodes will be affected
 * by an RSCN, or a Link Up (typically, all nodes are affected on Link Up).
 * The unmapped_list contains all nodes that we have successfully logged
 * into at the Fibre Channel level. The mapped_list contains all nodes
 * that are mapped FCP targets.
 */
/*
 * The bind list is a list of undiscovered (potentially non-existent) nodes
 * that we have saved binding information on. This information is used when
 * nodes transition from the unmapped to the mapped list.
 */
/* For UNUSED_NODE state, the node has just been allocated.
 * For PLOGI_ISSUE and REG_LOGIN_ISSUE, the node is on
 * the PLOGI list. For REG_LOGIN_COMPL, the node is taken off the PLOGI list
 * and put on the unmapped list. For ADISC processing, the node is taken off
 * the ADISC list and placed on either the mapped or unmapped list (depending
 * on its previous state). Once on the unmapped list, a PRLI is issued and the
 * state is changed to PRLI_ISSUE. When the PRLI completion occurs, the state
 * is changed to UNMAPPED_NODE. If the completion indicates a mapped
 * node, the node is taken off the unmapped list. The binding list is checked
 * for a valid binding, or a binding is automatically assigned. If binding
 * assignment is unsuccessful, the node is left on the unmapped list. If
 * binding assignment is successful, the associated binding list entry (if
 * any) is removed, and the node is placed on the mapped list.
 */
/*
 * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
 * lists will receive a DEVICE_RECOVERY event. If the linkdown or devloss
 * timers expire, all affected nodes will receive a DEVICE_RM event.
 */
/*
 * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists
 * to either the ADISC or PLOGI list. After a Nameserver query or ALPA loopmap
 * check, additional nodes may be added to, or removed (via DEVICE_RM) from,
 * the PLOGI or ADISC lists. Once the PLOGI and ADISC lists are populated,
 * we first process the ADISC list. 32 entries are processed initially and
 * ADISC is initiated for each one. Completions / Events for each node are
 * funneled through the state machine. As each node finishes ADISC
 * processing, it starts ADISC for any nodes waiting for ADISC processing.
 * If no nodes are waiting, and the ADISC list count is identically 0, then
 * we are done. For Link Up discovery, since all nodes on the PLOGI list are
 * UNREG_LOGIN'ed, we can issue a CLEAR_LA and re-enable Link Events. Next we
 * process the PLOGI list. 32 entries are processed initially and PLOGI is
 * initiated for each one. Completions / Events for each node are funneled
 * through the state machine. As each node finishes PLOGI processing, it
 * starts PLOGI for any nodes waiting for PLOGI processing. If no nodes are
 * waiting, and the PLOGI list count is identically 0, then we are done. We
 * have now completed discovery / RSCN handling. Upon completion, ALL nodes
 * should be on either the mapped or unmapped lists.
 */
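/*
 * Illustrative dispatch sketch (commentary only, not driver code): the jump
 * table below is indexed by (state, event), using the state and event codes
 * defined in lpfc_disc.h. For example, a PLOGI received for a node in
 * NLP_STE_NPR_NODE is routed roughly as follows, mirroring the index math
 * in lpfc_disc_state_machine() further down:
 *
 *	uint32_t idx = (NLP_STE_NPR_NODE * NLP_EVT_MAX_EVENT) +
 *		       NLP_EVT_RCV_PLOGI;
 *	rc = lpfc_disc_action[idx](vport, ndlp, arg, evt);
 *
 * which resolves to lpfc_rcv_plogi_npr_node for that state/event pair.
 * The table is therefore laid out as NLP_STE_MAX_STATE groups of
 * NLP_EVT_MAX_EVENT entries, one group per node state, in event order.
 */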

static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
	(struct lpfc_vport *, struct lpfc_nodelist *, void *, uint32_t) = {
	/* Action routine		   Event	Current State   */
	lpfc_rcv_plogi_unused_node,	/* RCV_PLOGI   UNUSED_NODE    */
	lpfc_rcv_els_unused_node,	/* RCV_PRLI        */
	lpfc_rcv_logo_unused_node,	/* RCV_LOGO        */
	lpfc_rcv_els_unused_node,	/* RCV_ADISC       */
	lpfc_rcv_els_unused_node,	/* RCV_PDISC       */
	lpfc_rcv_els_unused_node,	/* RCV_PRLO        */
	lpfc_disc_illegal,		/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_cmpl_logo_unused_node,	/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_device_rm_unused_node,	/* DEVICE_RM       */
	lpfc_disc_illegal,		/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_plogi_issue,	/* RCV_PLOGI   PLOGI_ISSUE    */
	lpfc_rcv_prli_plogi_issue,	/* RCV_PRLI        */
	lpfc_rcv_logo_plogi_issue,	/* RCV_LOGO        */
	lpfc_rcv_els_plogi_issue,	/* RCV_ADISC       */
	lpfc_rcv_els_plogi_issue,	/* RCV_PDISC       */
	lpfc_rcv_els_plogi_issue,	/* RCV_PRLO        */
	lpfc_cmpl_plogi_plogi_issue,	/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_cmpl_logo_plogi_issue,	/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_cmpl_reglogin_plogi_issue,/* CMPL_REG_LOGIN  */
	lpfc_device_rm_plogi_issue,	/* DEVICE_RM       */
	lpfc_device_recov_plogi_issue,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_adisc_issue,	/* RCV_PLOGI   ADISC_ISSUE    */
	lpfc_rcv_prli_adisc_issue,	/* RCV_PRLI        */
	lpfc_rcv_logo_adisc_issue,	/* RCV_LOGO        */
	lpfc_rcv_padisc_adisc_issue,	/* RCV_ADISC       */
	lpfc_rcv_padisc_adisc_issue,	/* RCV_PDISC       */
	lpfc_rcv_prlo_adisc_issue,	/* RCV_PRLO        */
	lpfc_disc_illegal,		/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_disc_illegal,		/* CMPL_LOGO       */
	lpfc_cmpl_adisc_adisc_issue,	/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_device_rm_adisc_issue,	/* DEVICE_RM       */
	lpfc_device_recov_adisc_issue,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_reglogin_issue,	/* RCV_PLOGI  REG_LOGIN_ISSUE */
	lpfc_rcv_prli_reglogin_issue,	/* RCV_PRLI        */
	lpfc_rcv_logo_reglogin_issue,	/* RCV_LOGO        */
	lpfc_rcv_padisc_reglogin_issue,	/* RCV_ADISC       */
	lpfc_rcv_padisc_reglogin_issue,	/* RCV_PDISC       */
	lpfc_rcv_prlo_reglogin_issue,	/* RCV_PRLO        */
	lpfc_cmpl_plogi_illegal,	/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_disc_illegal,		/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_cmpl_reglogin_reglogin_issue,/* CMPL_REG_LOGIN */
	lpfc_device_rm_reglogin_issue,	/* DEVICE_RM       */
	lpfc_device_recov_reglogin_issue,/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_prli_issue,	/* RCV_PLOGI   PRLI_ISSUE     */
	lpfc_rcv_prli_prli_issue,	/* RCV_PRLI        */
	lpfc_rcv_logo_prli_issue,	/* RCV_LOGO        */
	lpfc_rcv_padisc_prli_issue,	/* RCV_ADISC       */
	lpfc_rcv_padisc_prli_issue,	/* RCV_PDISC       */
	lpfc_rcv_prlo_prli_issue,	/* RCV_PRLO        */
	lpfc_cmpl_plogi_illegal,	/* CMPL_PLOGI      */
	lpfc_cmpl_prli_prli_issue,	/* CMPL_PRLI       */
	lpfc_disc_illegal,		/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_device_rm_prli_issue,	/* DEVICE_RM       */
	lpfc_device_recov_prli_issue,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_unmap_node,	/* RCV_PLOGI   UNMAPPED_NODE  */
	lpfc_rcv_prli_unmap_node,	/* RCV_PRLI        */
	lpfc_rcv_logo_unmap_node,	/* RCV_LOGO        */
	lpfc_rcv_padisc_unmap_node,	/* RCV_ADISC       */
	lpfc_rcv_padisc_unmap_node,	/* RCV_PDISC       */
	lpfc_rcv_prlo_unmap_node,	/* RCV_PRLO        */
	lpfc_disc_illegal,		/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_disc_illegal,		/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_disc_illegal,		/* DEVICE_RM       */
	lpfc_device_recov_unmap_node,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_mapped_node,	/* RCV_PLOGI   MAPPED_NODE    */
	lpfc_rcv_prli_mapped_node,	/* RCV_PRLI        */
	lpfc_rcv_logo_mapped_node,	/* RCV_LOGO        */
	lpfc_rcv_padisc_mapped_node,	/* RCV_ADISC       */
	lpfc_rcv_padisc_mapped_node,	/* RCV_PDISC       */
	lpfc_rcv_prlo_mapped_node,	/* RCV_PRLO        */
	lpfc_disc_illegal,		/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_disc_illegal,		/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_disc_illegal,		/* DEVICE_RM       */
	lpfc_device_recov_mapped_node,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_npr_node,	/* RCV_PLOGI   NPR_NODE       */
	lpfc_rcv_prli_npr_node,		/* RCV_PRLI        */
	lpfc_rcv_logo_npr_node,		/* RCV_LOGO        */
	lpfc_rcv_padisc_npr_node,	/* RCV_ADISC       */
	lpfc_rcv_padisc_npr_node,	/* RCV_PDISC       */
	lpfc_rcv_prlo_npr_node,		/* RCV_PRLO        */
	lpfc_cmpl_plogi_npr_node,	/* CMPL_PLOGI      */
	lpfc_cmpl_prli_npr_node,	/* CMPL_PRLI       */
	lpfc_cmpl_logo_npr_node,	/* CMPL_LOGO       */
	lpfc_cmpl_adisc_npr_node,	/* CMPL_ADISC      */
	lpfc_cmpl_reglogin_npr_node,	/* CMPL_REG_LOGIN  */
	lpfc_device_rm_npr_node,	/* DEVICE_RM       */
	lpfc_device_recov_npr_node,	/* DEVICE_RECOVERY */
};

/* Main entry point for the discovery state machine: look up the handler for
 * the node's current state and the incoming event, invoke it, and return the
 * node's resulting state (NLP_STE_FREED_NODE if the node was freed).
 */
int
lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			void *arg, uint32_t evt)
{
	uint32_t cur_state, rc;
	uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *,
			 uint32_t);
	uint32_t got_ndlp = 0;

	if (lpfc_nlp_get(ndlp))
		got_ndlp = 1;

	cur_state = ndlp->nlp_state;

	/* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0211 DSM in event x%x on NPort x%x in "
			 "state %d Data: x%x\n",
			 evt, ndlp->nlp_DID, cur_state, ndlp->nlp_flag);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
			      "DSM in: evt:%d ste:%d did:x%x",
			      evt, cur_state, ndlp->nlp_DID);

	func = lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt];
	rc = (func) (vport, ndlp, arg, evt);

	/* DSM out state <rc> on NPort <nlp_DID> */
	if (got_ndlp) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0212 DSM out state %d on NPort x%x Data: x%x\n",
			 rc, ndlp->nlp_DID, ndlp->nlp_flag);

		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
				      "DSM out: ste:%d did:x%x flg:x%x",
				      rc, ndlp->nlp_DID, ndlp->nlp_flag);
		/* Decrement the ndlp reference count held for this function */
		lpfc_nlp_put(ndlp);
	} else {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			"0213 DSM out state %d on NPort free\n", rc);

		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
				      "DSM out: ste:%d did:x%x flg:x%x",
				      rc, 0, 0);
	}

	return rc;
}
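/*
 * Illustrative caller sketch (commentary only, not part of the driver): the
 * ELS receive and completion paths elsewhere in the driver are expected to
 * feed events into this state machine along these lines, e.g. on receipt of
 * a PLOGI for a known node:
 *
 *	rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_RCV_PLOGI);
 *
 * The return value is the node's resulting state, or NLP_STE_FREED_NODE if
 * the chosen handler dropped the node.
 */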