/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"


/* Called to verify a rcv'ed ADISC was intended for us. */
static int
lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		 struct lpfc_name *nn, struct lpfc_name *pn)
{
	/* Check that the ADISC rsp WWNN / WWPN matches our internal node
	 * table entry for that node.
	 */
	if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name)))
		return 0;

	if (memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name)))
		return 0;

	/* we match, return success */
	return 1;
}

int
lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		 struct serv_parm *sp, uint32_t class)
{
	volatile struct serv_parm *hsp = &vport->fc_sparam;
	uint16_t hsp_value, ssp_value = 0;

	/*
	 * The receive data field size and buffer-to-buffer receive data field
	 * size entries are 16 bits but are represented as two 8-bit fields in
	 * the driver data structure to account for rsvd bits and other control
	 * bits.  Reconstruct and compare the fields as 16-bit values before
	 * correcting the byte values.
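	 * For example, host values of rcvDataSizeMsb = 0x08 and
	 * rcvDataSizeLsb = 0x00 reconstruct to a 16-bit value of 0x0800,
	 * i.e. a 2048-byte receive data field size.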
	 */
	if (sp->cls1.classValid) {
		hsp_value = (hsp->cls1.rcvDataSizeMsb << 8) |
			     hsp->cls1.rcvDataSizeLsb;
		ssp_value = (sp->cls1.rcvDataSizeMsb << 8) |
			     sp->cls1.rcvDataSizeLsb;
		if (!ssp_value)
			goto bad_service_param;
		if (ssp_value > hsp_value) {
			sp->cls1.rcvDataSizeLsb = hsp->cls1.rcvDataSizeLsb;
			sp->cls1.rcvDataSizeMsb = hsp->cls1.rcvDataSizeMsb;
		}
	} else if (class == CLASS1) {
		goto bad_service_param;
	}

	if (sp->cls2.classValid) {
		hsp_value = (hsp->cls2.rcvDataSizeMsb << 8) |
			     hsp->cls2.rcvDataSizeLsb;
		ssp_value = (sp->cls2.rcvDataSizeMsb << 8) |
			     sp->cls2.rcvDataSizeLsb;
		if (!ssp_value)
			goto bad_service_param;
		if (ssp_value > hsp_value) {
			sp->cls2.rcvDataSizeLsb = hsp->cls2.rcvDataSizeLsb;
			sp->cls2.rcvDataSizeMsb = hsp->cls2.rcvDataSizeMsb;
		}
	} else if (class == CLASS2) {
		goto bad_service_param;
	}

	if (sp->cls3.classValid) {
		hsp_value = (hsp->cls3.rcvDataSizeMsb << 8) |
			     hsp->cls3.rcvDataSizeLsb;
		ssp_value = (sp->cls3.rcvDataSizeMsb << 8) |
			     sp->cls3.rcvDataSizeLsb;
		if (!ssp_value)
			goto bad_service_param;
		if (ssp_value > hsp_value) {
			sp->cls3.rcvDataSizeLsb = hsp->cls3.rcvDataSizeLsb;
			sp->cls3.rcvDataSizeMsb = hsp->cls3.rcvDataSizeMsb;
		}
	} else if (class == CLASS3) {
		goto bad_service_param;
	}

	/*
	 * Preserve the upper four bits of the MSB from the PLOGI response.
	 * These bits contain the Buffer-to-Buffer State Change Number
	 * from the target and need to be passed to the FW.
	 */
	hsp_value = (hsp->cmn.bbRcvSizeMsb << 8) | hsp->cmn.bbRcvSizeLsb;
	ssp_value = (sp->cmn.bbRcvSizeMsb << 8) | sp->cmn.bbRcvSizeLsb;
	if (ssp_value > hsp_value) {
		sp->cmn.bbRcvSizeLsb = hsp->cmn.bbRcvSizeLsb;
		sp->cmn.bbRcvSizeMsb = (sp->cmn.bbRcvSizeMsb & 0xF0) |
				       (hsp->cmn.bbRcvSizeMsb & 0x0F);
	}

	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name));
	return 1;
bad_service_param:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
			 "0207 Device %x "
			 "(%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x) sent "
			 "invalid service parameters.  Ignoring device.\n",
			 ndlp->nlp_DID,
			 sp->nodeName.u.wwn[0], sp->nodeName.u.wwn[1],
			 sp->nodeName.u.wwn[2], sp->nodeName.u.wwn[3],
			 sp->nodeName.u.wwn[4], sp->nodeName.u.wwn[5],
			 sp->nodeName.u.wwn[6], sp->nodeName.u.wwn[7]);
	return 0;
}

static void *
lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	struct lpfc_dmabuf *pcmd, *prsp;
	uint32_t *lp;
	void *ptr = NULL;
	IOCB_t *irsp;

	irsp = &rspiocb->iocb;
	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;

	/* For lpfc_els_abort, context2 could be zero'ed to delay
	 * freeing associated memory till after ABTS completes.
	 */
	if (pcmd) {
		prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf,
				      list);
		if (prsp) {
			lp = (uint32_t *) prsp->virt;
			ptr = (void *)((uint8_t *)lp + sizeof(uint32_t));
		}
	} else {
		/* Force ulpStatus error since we are returning NULL ptr */
		if (!(irsp->ulpStatus)) {
			irsp->ulpStatus = IOSTAT_LOCAL_REJECT;
			irsp->un.ulpWord[4] = IOERR_SLI_ABORTED;
		}
		ptr = NULL;
	}
	return ptr;
}


/*
 * Free resources / clean up outstanding I/Os
 * associated with a LPFC_NODELIST entry.  This
 * routine effectively results in a "software abort".
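 * Outstanding ELS commands that reference the node are pulled off the
 * ELS ring txq, or aborted on the txcmplq, and completed with
 * IOSTAT_LOCAL_REJECT / IOERR_SLI_ABORTED.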
 */
int
lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
	struct lpfc_iocbq *iocb, *next_iocb;
	IOCB_t *cmd;

	/* Abort outstanding I/O on NPort <nlp_DID> */
	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY,
			 "0205 Abort outstanding I/O on NPort x%x "
			 "Data: x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
			 ndlp->nlp_rpi);

	lpfc_fabric_abort_nport(ndlp);

	/* First check the txq */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		/* Check to see if iocb matches the nport we are looking for */
		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
			/* It matches, so dequeue it and complete it with
			   an error */
			list_move_tail(&iocb->list, &completions);
			pring->txq_cnt--;
		}
	}

	/* Next check the txcmplq */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		/* Check to see if iocb matches the nport we are looking for */
		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		}
	}
	spin_unlock_irq(&phba->hbalock);

	while (!list_empty(&completions)) {
		iocb = list_get_first(&completions, struct lpfc_iocbq, list);
		cmd = &iocb->iocb;
		list_del_init(&iocb->list);

		if (!iocb->iocb_cmpl)
			lpfc_sli_release_iocbq(phba, iocb);
		else {
			cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
			cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
			(iocb->iocb_cmpl) (phba, iocb, iocb);
		}
	}
	lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
	return 0;
}

static int
lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	       struct lpfc_iocbq *cmdiocb)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_dmabuf *pcmd;
	uint32_t *lp;
	IOCB_t *icmd;
	struct serv_parm *sp;
	LPFC_MBOXQ_t *mbox;
	struct ls_rjt stat;
	int rc;

	memset(&stat, 0, sizeof (struct ls_rjt));
	if (vport->port_state <= LPFC_FLOGI) {
		/* Before responding to PLOGI, check for pt2pt mode.
		 * If we are pt2pt, with an outstanding FLOGI, abort
		 * the FLOGI and resend it first.
		 */
		if (vport->fc_flag & FC_PT2PT) {
			lpfc_els_abort_flogi(phba);
			if (!(vport->fc_flag & FC_PT2PT_PLOGI)) {
				/* If the other side is supposed to initiate
				 * the PLOGI anyway, just ACC it now and
				 * move on with discovery.
				 */
				phba->fc_edtov = FF_DEF_EDTOV;
				phba->fc_ratov = FF_DEF_RATOV;
				/* Start discovery - this should just do
				   CLEAR_LA */
				lpfc_disc_start(vport);
			} else
				lpfc_initial_flogi(vport);
		} else {
			stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
			stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
			lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
					    ndlp, NULL);
			return 0;
		}
	}
	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	lp = (uint32_t *) pcmd->virt;
	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
	if (wwn_to_u64(sp->portName.u.wwn) == 0) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0140 PLOGI Reject: invalid pname\n");
		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
		stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_PNAME;
		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
				    NULL);
		return 0;
	}
	if (wwn_to_u64(sp->nodeName.u.wwn) == 0) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0141 PLOGI Reject: invalid nname\n");
		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
		stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_NNAME;
		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
				    NULL);
		return 0;
	}
	if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3) == 0)) {
		/* Reject this request because of invalid parameters */
		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
		stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
				    NULL);
		return 0;
	}
	icmd = &cmdiocb->iocb;

	/* PLOGI chkparm OK */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
			 ndlp->nlp_rpi);

	if (vport->cfg_fcp_class == 2 && sp->cls2.classValid)
		ndlp->nlp_fcp_info |= CLASS2;
	else
		ndlp->nlp_fcp_info |= CLASS3;

	ndlp->nlp_class_sup = 0;
	if (sp->cls1.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS1;
	if (sp->cls2.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS2;
	if (sp->cls3.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS3;
	if (sp->cls4.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS4;
	ndlp->nlp_maxframe =
		((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;

	/* no need to reg_login if we are already in one of these states */
	switch (ndlp->nlp_state) {
	case NLP_STE_NPR_NODE:
		if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
			break;
	case NLP_STE_REG_LOGIN_ISSUE:
	case NLP_STE_PRLI_ISSUE:
	case NLP_STE_UNMAPPED_NODE:
	case NLP_STE_MAPPED_NODE:
		lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
		return 1;
	}

	if ((vport->fc_flag & FC_PT2PT) &&
	    !(vport->fc_flag & FC_PT2PT_PLOGI)) {
		/* rcv'ed PLOGI decides what our NPortId will be */
		vport->fc_myDID = icmd->un.rcvels.parmRo;
		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mbox == NULL)
			goto out;
		lpfc_config_link(phba, mbox);
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->vport = vport;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
			goto out;
		}

		lpfc_can_disctmo(vport);
	}
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		goto out;

	rc = lpfc_reg_login(phba, vport->vpi, icmd->un.rcvels.remoteID,
			    (uint8_t *) sp, mbox, 0);
	if (rc) {
		mempool_free(mbox, phba->mbox_mem_pool);
		goto out;
	}

	/* ACC PLOGI rsp command needs to execute first,
	 * queue this mbox command to be processed later.
	 */
	mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
	/*
	 * mbox->context2 = lpfc_nlp_get(ndlp) deferred until mailbox
	 * command issued in lpfc_cmpl_els_acc().
	 */
	mbox->vport = vport;
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
	spin_unlock_irq(shost->host_lock);

	/*
	 * If there is an outstanding PLOGI issued, abort it before
	 * sending ACC rsp for received PLOGI.  If pending plogi
	 * is not canceled here, the plogi will be rejected by
	 * remote port and will be retried.  On a configuration with
	 * single discovery thread, this will cause a huge delay in
	 * discovery.  Also this will cause multiple state machines
	 * running in parallel for this node.
	 */
	if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
		/* software abort outstanding PLOGI */
		lpfc_els_abort(phba, ndlp);
	}

	if ((vport->port_type == LPFC_NPIV_PORT &&
	     vport->cfg_restrict_login)) {

		/* In order to preserve RPIs, we want to cleanup
		 * the default RPI the firmware created to rcv
		 * this ELS request.  The only way to do this is
		 * to register, then unregister the RPI.
		 */
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_RM_DFLT_RPI;
		spin_unlock_irq(shost->host_lock);
		stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
		stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
				    ndlp, mbox);
		return 1;
	}
	lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
	return 1;
out:
	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
	stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
	return 0;
}

static int
lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		struct lpfc_iocbq *cmdiocb)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_dmabuf *pcmd;
	struct serv_parm *sp;
	struct lpfc_name *pnn, *ppn;
	struct ls_rjt stat;
	ADISC *ap;
	IOCB_t *icmd;
	uint32_t *lp;
	uint32_t cmd;

	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	lp = (uint32_t *) pcmd->virt;

	cmd = *lp++;
	if (cmd == ELS_CMD_ADISC) {
		ap = (ADISC *) lp;
		pnn = (struct lpfc_name *) &ap->nodeName;
		ppn = (struct lpfc_name *) &ap->portName;
	} else {
		sp = (struct serv_parm *) lp;
		pnn = (struct lpfc_name *) &sp->nodeName;
		ppn = (struct lpfc_name *) &sp->portName;
	}

	icmd = &cmdiocb->iocb;
	if (icmd->ulpStatus == 0 && lpfc_check_adisc(vport, ndlp, pnn, ppn)) {
		if (cmd == ELS_CMD_ADISC) {
			lpfc_els_rsp_adisc_acc(vport, cmdiocb, ndlp);
		} else {
			lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp,
					 NULL);
		}
		return 1;
	}
	/* Reject this request because of invalid parameters */
	stat.un.b.lsRjtRsvd0 = 0;
	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
	stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
	stat.un.b.vendorUnique = 0;
	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);

	/* 1 sec timeout */
	mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_DELAY_TMO;
	spin_unlock_irq(shost->host_lock);
	ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
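	/* When the delay timer fires, the saved nlp_last_elscmd (a PLOGI
	 * here) is re-issued to retry login with this node.
	 */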
	ndlp->nlp_prev_state = ndlp->nlp_state;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	return 0;
}

static int
lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      struct lpfc_iocbq *cmdiocb, uint32_t els_cmd)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	/* Put ndlp in NPR state with 1 sec timeout for plogi, ACC logo */
	/* Only call LOGO ACC for first LOGO, this avoids sending unnecessary
	 * PLOGIs during LOGO storms from a device.
	 */
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_LOGO_ACC;
	spin_unlock_irq(shost->host_lock);
	if (els_cmd == ELS_CMD_PRLO)
		lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
	else
		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);

	if ((!(ndlp->nlp_type & NLP_FABRIC) &&
	     ((ndlp->nlp_type & NLP_FCP_TARGET) ||
	      !(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
	    (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
		/* Only try to re-login if this is NOT a Fabric Node */
		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_DELAY_TMO;
		spin_unlock_irq(shost->host_lock);

		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
	}
	ndlp->nlp_prev_state = ndlp->nlp_state;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~NLP_NPR_ADISC;
	spin_unlock_irq(shost->host_lock);
	/* The driver has to wait until the ACC completes before it continues
	 * processing the LOGO.  The action will resume in
	 * lpfc_cmpl_els_logo_acc routine.  Since part of processing includes an
	 * unreg_login, the driver waits so the ACC does not get aborted.
	 */
	return 0;
}

static void
lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      struct lpfc_iocbq *cmdiocb)
{
	struct lpfc_dmabuf *pcmd;
	uint32_t *lp;
	PRLI *npr;
	struct fc_rport *rport = ndlp->rport;
	u32 roles;

	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	lp = (uint32_t *) pcmd->virt;
	npr = (PRLI *) ((uint8_t *) lp + sizeof (uint32_t));

	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
	if (npr->prliType == PRLI_FCP_TYPE) {
		if (npr->initiatorFunc)
			ndlp->nlp_type |= NLP_FCP_INITIATOR;
		if (npr->targetFunc)
			ndlp->nlp_type |= NLP_FCP_TARGET;
		if (npr->Retry)
			ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
	}
	if (rport) {
		/* We need to update the rport role values */
		roles = FC_RPORT_ROLE_UNKNOWN;
		if (ndlp->nlp_type & NLP_FCP_INITIATOR)
			roles |= FC_RPORT_ROLE_FCP_INITIATOR;
		if (ndlp->nlp_type & NLP_FCP_TARGET)
			roles |= FC_RPORT_ROLE_FCP_TARGET;

		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
			"rport rolechg: role:x%x did:x%x flg:x%x",
			roles, ndlp->nlp_DID, ndlp->nlp_flag);

		fc_remote_port_rolechg(rport, roles);
	}
}

static uint32_t
lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (!ndlp->nlp_rpi) {
		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
		return 0;
	}

	if (!(vport->fc_flag & FC_PT2PT)) {
		/* Check config parameter use-adisc or FCP-2 */
		if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) ||
		    ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_NPR_ADISC;
			spin_unlock_irq(shost->host_lock);
			return 1;
		}
	}
	ndlp->nlp_flag &= ~NLP_NPR_ADISC;
	lpfc_unreg_rpi(vport, ndlp);
	return 0;
}

static uint32_t
lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		  void *arg, uint32_t evt)
{
	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
			 "0271 Illegal State Transition: node x%x "
			 "event x%x, state x%x Data: x%x x%x\n",
			 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
			 ndlp->nlp_flag);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_plogi_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			void *arg, uint32_t evt)
{
	/* This transition is only legal if we previously
	 * rcv'ed a PLOGI.  Since we don't want 2 discovery threads
	 * working on the same NPortID, do nothing for this thread
	 * to stop it.
	 */
	if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
			 "0272 Illegal State Transition: node x%x "
			 "event x%x, state x%x Data: x%x x%x\n",
			 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
			 ndlp->nlp_flag);
	}
	return ndlp->nlp_state;
}

/* Start of Discovery State Machine routines */

static uint32_t
lpfc_rcv_plogi_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
		return ndlp->nlp_state;
	}
	return NLP_STE_FREED_NODE;
}

static uint32_t
lpfc_rcv_els_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	lpfc_issue_els_logo(vport, ndlp, 0);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_LOGO_ACC;
	spin_unlock_irq(shost->host_lock);
	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);

	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	return NLP_STE_FREED_NODE;
}

static uint32_t
lpfc_device_rm_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	return NLP_STE_FREED_NODE;
}

static uint32_t
lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb = arg;
	struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	uint32_t *lp = (uint32_t *) pcmd->virt;
	struct serv_parm *sp = (struct serv_parm *) (lp + 1);
	struct ls_rjt stat;
	int port_cmp;

	memset(&stat, 0, sizeof (struct ls_rjt));

	/* For a PLOGI, we only accept if our portname is less
	 * than the remote portname.
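	 * For example, if our WWPN is 20:00:00:00:c9:00:00:01 and the remote
	 * WWPN is 20:00:00:00:c9:00:00:02, the memcmp() below is negative and
	 * we accept the incoming PLOGI; in the mirror-image case we reject it
	 * with LSEXP_CMD_IN_PROGRESS and let our own outstanding PLOGI win.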
	 */
	phba->fc_stat.elsLogiCol++;
	port_cmp = memcmp(&vport->fc_portname, &sp->portName,
			  sizeof(struct lpfc_name));

	if (port_cmp >= 0) {
		/* Reject this request because the remote node will accept
		   ours */
		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
		stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
				    NULL);
	} else {
		if (lpfc_rcv_plogi(vport, ndlp, cmdiocb) &&
		    (ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
		    (vport->num_disc_nodes)) {
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
			spin_unlock_irq(shost->host_lock);
			/* Check if there are more PLOGIs to be sent */
			lpfc_more_plogi(vport);
			if (vport->num_disc_nodes == 0) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_NDISC_ACTIVE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
				lpfc_end_rscn(vport);
			}
		}
	} /* If our portname was less */

	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prli_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
	struct ls_rjt stat;

	memset(&stat, 0, sizeof (struct ls_rjt));
	stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	/* software abort outstanding PLOGI */
	lpfc_els_abort(vport->phba, ndlp);

	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	/* software abort outstanding PLOGI */
	lpfc_els_abort(phba, ndlp);

	if (evt == NLP_EVT_RCV_LOGO) {
		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
	} else {
		lpfc_issue_els_logo(vport, ndlp, 0);
	}

	/* Put ndlp in npr state set plogi timer for 1 sec */
	mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_DELAY_TMO;
	spin_unlock_irq(shost->host_lock);
	ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
	ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);

	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
			    struct lpfc_nodelist *ndlp,
			    void *arg,
			    uint32_t evt)
{
	struct lpfc_hba *phba = vport->phba;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_iocbq *cmdiocb, *rspiocb;
	struct lpfc_dmabuf *pcmd, *prsp, *mp;
	uint32_t *lp;
	IOCB_t *irsp;
	struct serv_parm *sp;
	LPFC_MBOXQ_t *mbox;

	cmdiocb = (struct lpfc_iocbq *) arg;
	rspiocb = cmdiocb->context_un.rsp_iocb;

	if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
		/* Recovery from PLOGI collision logic */
		return ndlp->nlp_state;
	}

	irsp = &rspiocb->iocb;
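
	/* If the PLOGI itself failed there is nothing to register; fall
	 * through to the error path below, which marks the node for
	 * deferred removal.
	 */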
	if (irsp->ulpStatus)
		goto out;

	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;

	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);

	lp = (uint32_t *) prsp->virt;
	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));

	/* Some switches have FDMI servers returning 0 for WWN */
	if ((ndlp->nlp_DID != FDMI_DID) &&
	    (wwn_to_u64(sp->portName.u.wwn) == 0 ||
	     wwn_to_u64(sp->nodeName.u.wwn) == 0)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0142 PLOGI RSP: Invalid WWN.\n");
		goto out;
	}
	if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3))
		goto out;
	/* PLOGI chkparm OK */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0121 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_state,
			 ndlp->nlp_flag, ndlp->nlp_rpi);
	if (vport->cfg_fcp_class == 2 && (sp->cls2.classValid))
		ndlp->nlp_fcp_info |= CLASS2;
	else
		ndlp->nlp_fcp_info |= CLASS3;

	ndlp->nlp_class_sup = 0;
	if (sp->cls1.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS1;
	if (sp->cls2.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS2;
	if (sp->cls3.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS3;
	if (sp->cls4.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS4;
	ndlp->nlp_maxframe =
		((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0133 PLOGI: no memory for reg_login "
				 "Data: x%x x%x x%x x%x\n",
				 ndlp->nlp_DID, ndlp->nlp_state,
				 ndlp->nlp_flag, ndlp->nlp_rpi);
		goto out;
	}

	lpfc_unreg_rpi(vport, ndlp);

	if (lpfc_reg_login(phba, vport->vpi, irsp->un.elsreq64.remoteID,
			   (uint8_t *) sp, mbox, 0) == 0) {
		switch (ndlp->nlp_DID) {
		case NameServer_DID:
			mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login;
			break;
		case FDMI_DID:
			mbox->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login;
			break;
		default:
			mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
		}
		mbox->context2 = lpfc_nlp_get(ndlp);
		mbox->vport = vport;
		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
		    != MBX_NOT_FINISHED) {
			lpfc_nlp_set_state(vport, ndlp,
					   NLP_STE_REG_LOGIN_ISSUE);
			return ndlp->nlp_state;
		}
		/* decrement node reference count to the failed mbox
		 * command
		 */
		lpfc_nlp_put(ndlp);
		mp = (struct lpfc_dmabuf *) mbox->context1;
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(mbox, phba->mbox_mem_pool);

		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0134 PLOGI: cannot issue reg_login "
				 "Data: x%x x%x x%x x%x\n",
				 ndlp->nlp_DID, ndlp->nlp_state,
				 ndlp->nlp_flag, ndlp->nlp_rpi);
	} else {
		mempool_free(mbox, phba->mbox_mem_pool);

		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0135 PLOGI: cannot format reg_login "
				 "Data: x%x x%x x%x x%x\n",
				 ndlp->nlp_DID, ndlp->nlp_state,
				 ndlp->nlp_flag, ndlp->nlp_rpi);
	}


out:
	if (ndlp->nlp_DID == NameServer_DID) {
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0261 Cannot Register NameServer login\n");
	}

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_DEFER_RM;
	spin_unlock_irq(shost->host_lock);
	return NLP_STE_FREED_NODE;
}

static uint32_t
lpfc_cmpl_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_reglogin_plogi_issue(struct lpfc_vport *vport,
			       struct lpfc_nodelist *ndlp, void *arg,
			       uint32_t evt)
{
	return ndlp->nlp_state;
}

static uint32_t
lpfc_device_rm_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
		spin_unlock_irq(shost->host_lock);
		return ndlp->nlp_state;
	} else {
		/* software abort outstanding PLOGI */
		lpfc_els_abort(vport->phba, ndlp);

		lpfc_drop_node(vport, ndlp);
		return NLP_STE_FREED_NODE;
	}
}

static uint32_t
lpfc_device_recov_plogi_issue(struct lpfc_vport *vport,
			      struct lpfc_nodelist *ndlp,
			      void *arg,
			      uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;

	/* Don't do anything that will mess up processing of the
	 * previous RSCN.
	 */
	if (vport->fc_flag & FC_RSCN_DEFERRED)
		return ndlp->nlp_state;

	/* software abort outstanding PLOGI */
	lpfc_els_abort(phba, ndlp);

	ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(shost->host_lock);

	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb;

	/* software abort outstanding ADISC */
	lpfc_els_abort(phba, ndlp);

	cmdiocb = (struct lpfc_iocbq *) arg;

	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
		if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
			spin_unlock_irq(shost->host_lock);

			if (vport->num_disc_nodes) {
				lpfc_more_adisc(vport);
				if ((vport->num_disc_nodes == 0) &&
				    (vport->fc_npr_cnt))
					lpfc_els_disc_plogi(vport);
				if (vport->num_disc_nodes == 0) {
					spin_lock_irq(shost->host_lock);
					vport->fc_flag &= ~FC_NDISC_ACTIVE;
					spin_unlock_irq(shost->host_lock);
					lpfc_can_disctmo(vport);
					lpfc_end_rscn(vport);
				}
			}
		}
		return ndlp->nlp_state;
	}
	ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
	lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);

	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prli_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_logo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	/* software abort outstanding ADISC */
	lpfc_els_abort(phba, ndlp);

	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_padisc_adisc_issue(struct lpfc_vport *vport,
			    struct lpfc_nodelist *ndlp,
			    void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prlo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	/* Treat like rcv logo */
	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
			    struct lpfc_nodelist *ndlp,
			    void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb, *rspiocb;
	IOCB_t *irsp;
	ADISC *ap;

	cmdiocb = (struct lpfc_iocbq *) arg;
	rspiocb = cmdiocb->context_un.rsp_iocb;

	ap = (ADISC *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
	irsp = &rspiocb->iocb;

	if ((irsp->ulpStatus) ||
	    (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) {
		/* 1 sec timeout */
		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_DELAY_TMO;
		spin_unlock_irq(shost->host_lock);
		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;

		memset(&ndlp->nlp_nodename, 0, sizeof(struct lpfc_name));
		memset(&ndlp->nlp_portname, 0, sizeof(struct lpfc_name));

		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		lpfc_unreg_rpi(vport, ndlp);
		return ndlp->nlp_state;
	}

	if (ndlp->nlp_type & NLP_FCP_TARGET) {
		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
	} else {
		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
	}
	return ndlp->nlp_state;
}

static uint32_t
lpfc_device_rm_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
		spin_unlock_irq(shost->host_lock);
		return ndlp->nlp_state;
	} else {
		/* software abort outstanding ADISC */
		lpfc_els_abort(vport->phba, ndlp);

		lpfc_drop_node(vport, ndlp);
		return NLP_STE_FREED_NODE;
	}
}

static uint32_t
lpfc_device_recov_adisc_issue(struct lpfc_vport *vport,
			      struct lpfc_nodelist *ndlp,
			      void *arg,
			      uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;

	/* Don't do anything that will mess up processing of the
	 * previous RSCN.
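	 * (When FC_RSCN_DEFERRED is set, RSCN processing for this vport has
	 * been deferred, so the recovery action is skipped here rather than
	 * disturb it.)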
	 */
	if (vport->fc_flag & FC_RSCN_DEFERRED)
		return ndlp->nlp_state;

	/* software abort outstanding ADISC */
	lpfc_els_abort(phba, ndlp);

	ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(shost->host_lock);
	lpfc_disc_set_adisc(vport, ndlp);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_plogi_reglogin_issue(struct lpfc_vport *vport,
			      struct lpfc_nodelist *ndlp,
			      void *arg,
			      uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport,
			     struct lpfc_nodelist *ndlp,
			     void *arg,
			     uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
			     struct lpfc_nodelist *ndlp,
			     void *arg,
			     uint32_t evt)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
	LPFC_MBOXQ_t *mb;
	LPFC_MBOXQ_t *nextmb;
	struct lpfc_dmabuf *mp;

	cmdiocb = (struct lpfc_iocbq *) arg;

	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
	if ((mb = phba->sli.mbox_active)) {
		if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
		    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			lpfc_nlp_put(ndlp);
			mb->context2 = NULL;
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
	}

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
		    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			mp = (struct lpfc_dmabuf *) (mb->context1);
			if (mp) {
				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			lpfc_nlp_put(ndlp);
			list_del(&mb->list);
			mempool_free(mb, phba->mbox_mem_pool);
		}
	}
	spin_unlock_irq(&phba->hbalock);

	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_padisc_reglogin_issue(struct lpfc_vport *vport,
			       struct lpfc_nodelist *ndlp,
			       void *arg,
			       uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prlo_reglogin_issue(struct lpfc_vport *vport,
			     struct lpfc_nodelist *ndlp,
			     void *arg,
			     uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;
	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
				  struct lpfc_nodelist *ndlp,
				  void *arg,
				  uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
	MAILBOX_t *mb = &pmb->mb;
	uint32_t did = mb->un.varWords[1];

	if (mb->mbxStatus) {
		/* RegLogin failed */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0246 RegLogin failed Data: x%x x%x x%x\n",
				 did, mb->mbxStatus, vport->port_state);
		/*
		 * If RegLogin failed due to lack of HBA resources do not
		 * retry discovery.
		 */
		if (mb->mbxStatus == MBXERR_RPI_FULL) {
			ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
			return ndlp->nlp_state;
		}

		/* Put ndlp in npr state set plogi timer for 1 sec */
		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_DELAY_TMO;
		spin_unlock_irq(shost->host_lock);
		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;

		lpfc_issue_els_logo(vport, ndlp, 0);
		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		return ndlp->nlp_state;
	}

	ndlp->nlp_rpi = mb->un.varWords[0];

	/* Only if we are not a fabric nport do we issue PRLI */
	if (!(ndlp->nlp_type & NLP_FABRIC)) {
		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
		lpfc_issue_els_prli(vport, ndlp, 0);
	} else {
		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
	}
	return ndlp->nlp_state;
}

static uint32_t
lpfc_device_rm_reglogin_issue(struct lpfc_vport *vport,
			      struct lpfc_nodelist *ndlp,
			      void *arg,
			      uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
		spin_unlock_irq(shost->host_lock);
		return ndlp->nlp_state;
	} else {
		lpfc_drop_node(vport, ndlp);
		return NLP_STE_FREED_NODE;
	}
}

static uint32_t
lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport,
				 struct lpfc_nodelist *ndlp,
				 void *arg,
				 uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	/* Don't do anything that will mess up processing of the
	 * previous RSCN.
	 */
	if (vport->fc_flag & FC_RSCN_DEFERRED)
		return ndlp->nlp_state;

	ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(shost->host_lock);
	lpfc_disc_set_adisc(vport, ndlp);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_plogi_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_logo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	/* Software abort outstanding PRLI before sending acc */
	lpfc_els_abort(vport->phba, ndlp);

	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_padisc_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
	return ndlp->nlp_state;
}

/* This routine is invoked when we rcv a PRLO request from an nport
 * we are logged into.  We should send back a PRLO rsp setting the
 * appropriate bits.
 * NEXT STATE = PRLI_ISSUE
 */
static uint32_t
lpfc_rcv_prlo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_iocbq *cmdiocb, *rspiocb;
	struct lpfc_hba *phba = vport->phba;
	IOCB_t *irsp;
	PRLI *npr;

	cmdiocb = (struct lpfc_iocbq *) arg;
	rspiocb = cmdiocb->context_un.rsp_iocb;
	npr = (PRLI *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);

	irsp = &rspiocb->iocb;
	if (irsp->ulpStatus) {
		if ((vport->port_type == LPFC_NPIV_PORT) &&
		    vport->cfg_restrict_login) {
			goto out;
		}
		ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		return ndlp->nlp_state;
	}

	/* Check out PRLI rsp */
	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
	if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
	    (npr->prliType == PRLI_FCP_TYPE)) {
		if (npr->initiatorFunc)
			ndlp->nlp_type |= NLP_FCP_INITIATOR;
		if (npr->targetFunc)
			ndlp->nlp_type |= NLP_FCP_TARGET;
		if (npr->Retry)
			ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
	}
	if (!(ndlp->nlp_type & NLP_FCP_TARGET) &&
	    (vport->port_type == LPFC_NPIV_PORT) &&
	    vport->cfg_restrict_login) {
out:
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_TARGET_REMOVE;
		spin_unlock_irq(shost->host_lock);
		lpfc_issue_els_logo(vport, ndlp, 0);

		ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		return ndlp->nlp_state;
	}

	ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
	if (ndlp->nlp_type & NLP_FCP_TARGET)
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
	else
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
	return ndlp->nlp_state;
}

/*! lpfc_device_rm_prli_issue
 *
 * \pre
 * \post
 * \param phba
 * \param ndlp
 * \param arg
 * \param evt
 * \return uint32_t
 *
 * \b Description:
 * This routine is invoked when we receive a request to remove an nport we
 * are in the process of PRLIing.  We should software abort the outstanding
 * PRLI, unreg the login, and send a logout.  We will change the node state
 * to UNUSED_NODE and put it on the plogi list so it can be freed when the
 * LOGO completes.
 *
 */

static uint32_t
lpfc_device_rm_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
		spin_unlock_irq(shost->host_lock);
		return ndlp->nlp_state;
	} else {
		/* software abort outstanding PRLI */
		lpfc_els_abort(vport->phba, ndlp);

		lpfc_drop_node(vport, ndlp);
		return NLP_STE_FREED_NODE;
	}
}

/*! lpfc_device_recov_prli_issue
 *
 * \pre
 * \post
 * \param phba
 * \param ndlp
 * \param arg
 * \param evt
 * \return uint32_t
 *
 * \b Description:
 * This routine is invoked when the state of a device is unknown, such as
 * during a link down.  We should remove the nodelist entry from the
 * unmapped list, issue a UNREG_LOGIN, do a software abort of the
 * outstanding PRLI command, then free the node entry.
 */
static uint32_t
lpfc_device_recov_prli_issue(struct lpfc_vport *vport,
			     struct lpfc_nodelist *ndlp,
			     void *arg,
			     uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;

	/* Don't do anything that will mess up processing of the
	 * previous RSCN.
	 */
	if (vport->fc_flag & FC_RSCN_DEFERRED)
		return ndlp->nlp_state;

	/* software abort outstanding PRLI */
	lpfc_els_abort(phba, ndlp);

	ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(shost->host_lock);
	lpfc_disc_set_adisc(vport, ndlp);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_plogi_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prli_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_prli(vport, ndlp, cmdiocb);
	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_logo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_padisc_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prlo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_device_recov_unmap_node(struct lpfc_vport *vport,
			     struct lpfc_nodelist *ndlp,
			     void *arg,
			     uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(shost->host_lock);
	lpfc_disc_set_adisc(vport, ndlp);

	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_plogi_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prli_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_logo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_padisc_mapped_node(struct lpfc_vport *vport,
			    struct lpfc_nodelist *ndlp,
			    void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	/* flush the target */
	lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
			    ndlp->nlp_sid, 0, LPFC_CTX_TGT);

	/* Treat like rcv logo */
	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_device_recov_mapped_node(struct lpfc_vport *vport,
			      struct lpfc_nodelist *ndlp,
			      void *arg,
			      uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(shost->host_lock);
	lpfc_disc_set_adisc(vport, ndlp);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	/* Ignore PLOGI if we have an outstanding LOGO */
	if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC))
		return ndlp->nlp_state;
	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
		lpfc_cancel_retry_delay_tmo(vport, ndlp);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC);
		spin_unlock_irq(shost->host_lock);
	} else if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
		/* send PLOGI immediately, move to PLOGI issue state */
		if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
		}
	}
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		       void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
	struct ls_rjt stat;

	memset(&stat, 0, sizeof (struct ls_rjt));
	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);

	if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
		if (ndlp->nlp_flag & NLP_NPR_ADISC) {
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
			spin_unlock_irq(shost->host_lock);
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
			lpfc_issue_els_adisc(vport, ndlp, 0);
		} else {
			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
		}
	}
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		       void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
	/*
	 * Do not start discovery if discovery is about to start
	 * or discovery is in progress for this node.  Starting discovery
	 * here will affect the counting of discovery threads.
	 */
	if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
		if (ndlp->nlp_flag & NLP_NPR_ADISC) {
			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
			lpfc_issue_els_adisc(vport, ndlp, 0);
		} else {
			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
		}
	}
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		       void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_LOGO_ACC;
	spin_unlock_irq(shost->host_lock);

	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);

	if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) {
		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_DELAY_TMO;
		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
		spin_unlock_irq(shost->host_lock);
		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
	} else {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
		spin_unlock_irq(shost->host_lock);
	}
	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb, *rspiocb;
	IOCB_t *irsp;

	cmdiocb = (struct lpfc_iocbq *) arg;
	rspiocb = cmdiocb->context_un.rsp_iocb;

	irsp = &rspiocb->iocb;
	if (irsp->ulpStatus) {
		ndlp->nlp_flag |= NLP_DEFER_RM;
		return NLP_STE_FREED_NODE;
	}
	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
	struct lpfc_iocbq *cmdiocb, *rspiocb;
	IOCB_t *irsp;

	cmdiocb = (struct lpfc_iocbq *) arg;
	rspiocb = cmdiocb->context_un.rsp_iocb;

	irsp = &rspiocb->iocb;
	if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
		lpfc_drop_node(vport, ndlp);
		return NLP_STE_FREED_NODE;
	}
	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			void *arg, uint32_t evt)
{
	/* The LOGO completed; unregister the RPI and stay in this state */
	lpfc_unreg_rpi(vport, ndlp);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_adisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb, *rspiocb;
	IOCB_t *irsp;

	cmdiocb = (struct lpfc_iocbq *) arg;
	rspiocb = cmdiocb->context_un.rsp_iocb;

	irsp = &rspiocb->iocb;
	if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
		lpfc_drop_node(vport, ndlp);
		return NLP_STE_FREED_NODE;
	}
	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
			    struct lpfc_nodelist *ndlp,
			    void *arg, uint32_t evt)
{
	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
	MAILBOX_t *mb = &pmb->mb;

	if (!mb->mbxStatus)
		ndlp->nlp_rpi = mb->un.varWords[0];
	else {
		if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
			lpfc_drop_node(vport, ndlp);
			return NLP_STE_FREED_NODE;
		}
	}
	return ndlp->nlp_state;
}

static uint32_t
lpfc_device_rm_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
		spin_unlock_irq(shost->host_lock);
		return ndlp->nlp_state;
	}
	lpfc_drop_node(vport, ndlp);
	return NLP_STE_FREED_NODE;
}

static uint32_t
lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	/* Don't do anything that will mess up processing of the
	 * previous RSCN.
	 */
	if (vport->fc_flag & FC_RSCN_DEFERRED)
		return ndlp->nlp_state;

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(shost->host_lock);
	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	return ndlp->nlp_state;
}


/* This next section defines the NPort Discovery State Machine */

/* There are 4 different double linked lists nodelist entries can reside on.
 * The plogi list and adisc list are used when Link Up discovery or RSCN
 * processing is needed. Each list holds the nodes that we will send PLOGI
 * or ADISC on. These lists will keep track of what nodes will be affected
 * by an RSCN, or a Link Up (typically, all nodes are affected on Link Up).
 * The unmapped_list will contain all nodes that we have successfully logged
 * into at the Fibre Channel level. The mapped_list will contain all nodes
 * that are mapped FCP targets.
 */
/*
 * The bind list is a list of undiscovered (potentially non-existent) nodes
 * that we have saved binding information on. This information is used when
 * nodes transition from the unmapped to the mapped list.
 */
/* For UNUSED_NODE state, the node has just been allocated.
 * For PLOGI_ISSUE and REG_LOGIN_ISSUE, the node is on
 * the PLOGI list. For REG_LOGIN_COMPL, the node is taken off the PLOGI list
 * and put on the unmapped list. For ADISC processing, the node is taken off
 * the ADISC list and placed on either the mapped or unmapped list (depending
 * on its previous state). Once on the unmapped list, a PRLI is issued and the
 * state changed to PRLI_ISSUE. When the PRLI completion occurs, the state is
 * changed to UNMAPPED_NODE. If the completion indicates a mapped
 * node, the node is taken off the unmapped list. The binding list is checked
 * for a valid binding, or a binding is automatically assigned. If binding
 * assignment is unsuccessful, the node is left on the unmapped list. If
 * binding assignment is successful, the associated binding list entry (if
 * any) is removed, and the node is placed on the mapped list.
 */
/*
 * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped lists
 * will receive a DEVICE_RECOVERY event. If the linkdown or devloss timers
 * expire, all affected nodes will receive a DEVICE_RM event.
 */
/*
 * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists
 * to either the ADISC or PLOGI list. After a Nameserver query or ALPA loopmap
 * check, additional nodes may be added to, or removed (via DEVICE_RM) from,
 * the PLOGI or ADISC lists. Once the PLOGI and ADISC lists are populated,
 * we first process the ADISC list: 32 entries are processed initially and
 * ADISC is initiated for each one. Completions / events for each node are
 * funneled through the state machine. As each node finishes ADISC processing,
 * it starts ADISC for any nodes waiting for ADISC processing. If no nodes are
 * waiting, and the ADISC list count is identically 0, then we are done. For
 * Link Up discovery, since all nodes on the PLOGI list are UNREG_LOGIN'ed, we
 * can issue a CLEAR_LA and re-enable Link Events. Next we process the PLOGI
 * list: 32 entries are processed initially and PLOGI is initiated for each
 * one. Completions / events for each node are funneled through the state
 * machine. As each node finishes PLOGI processing, it starts PLOGI for any
 * nodes waiting for PLOGI processing. If no nodes are waiting, and the PLOGI
 * list count is identically 0, then we are done. We have now completed
 * discovery / RSCN handling. Upon completion, ALL nodes should be on either
 * the mapped or unmapped lists.
 */
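
/*
 * Illustrative sketch only (not part of the driver build): this shows
 * roughly how an event is funneled through the state machine described
 * above. A caller in the ELS receive or completion path looks up the
 * nodelist entry for the remote NPort and hands the IOCB plus an NLP_EVT_*
 * code to lpfc_disc_state_machine(), which picks a handler out of the jump
 * table below based on the node's current state. The wrapper name here is
 * invented for the example; the real call sites live elsewhere in the
 * driver (e.g. the ELS handling code). A return of NLP_STE_FREED_NODE
 * means the handler released the node.
 *
 *	static void
 *	example_handle_rcv_plogi(struct lpfc_vport *vport,
 *				 struct lpfc_nodelist *ndlp,
 *				 struct lpfc_iocbq *elsiocb)
 *	{
 *		uint32_t rc;
 *
 *		rc = lpfc_disc_state_machine(vport, ndlp, elsiocb,
 *					     NLP_EVT_RCV_PLOGI);
 *		if (rc == NLP_STE_FREED_NODE)
 *			return;
 *	}
 */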

static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
	(struct lpfc_vport *, struct lpfc_nodelist *, void *, uint32_t) = {
	/* Action routine		   Event       Current State   */
	lpfc_rcv_plogi_unused_node,	/* RCV_PLOGI   UNUSED_NODE     */
	lpfc_rcv_els_unused_node,	/* RCV_PRLI        */
	lpfc_rcv_logo_unused_node,	/* RCV_LOGO        */
	lpfc_rcv_els_unused_node,	/* RCV_ADISC       */
	lpfc_rcv_els_unused_node,	/* RCV_PDISC       */
	lpfc_rcv_els_unused_node,	/* RCV_PRLO        */
	lpfc_disc_illegal,		/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_cmpl_logo_unused_node,	/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_device_rm_unused_node,	/* DEVICE_RM       */
	lpfc_disc_illegal,		/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_plogi_issue,	/* RCV_PLOGI   PLOGI_ISSUE     */
	lpfc_rcv_prli_plogi_issue,	/* RCV_PRLI        */
	lpfc_rcv_logo_plogi_issue,	/* RCV_LOGO        */
	lpfc_rcv_els_plogi_issue,	/* RCV_ADISC       */
	lpfc_rcv_els_plogi_issue,	/* RCV_PDISC       */
	lpfc_rcv_els_plogi_issue,	/* RCV_PRLO        */
	lpfc_cmpl_plogi_plogi_issue,	/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_cmpl_logo_plogi_issue,	/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_cmpl_reglogin_plogi_issue,	/* CMPL_REG_LOGIN  */
	lpfc_device_rm_plogi_issue,	/* DEVICE_RM       */
	lpfc_device_recov_plogi_issue,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_adisc_issue,	/* RCV_PLOGI   ADISC_ISSUE     */
	lpfc_rcv_prli_adisc_issue,	/* RCV_PRLI        */
	lpfc_rcv_logo_adisc_issue,	/* RCV_LOGO        */
	lpfc_rcv_padisc_adisc_issue,	/* RCV_ADISC       */
	lpfc_rcv_padisc_adisc_issue,	/* RCV_PDISC       */
	lpfc_rcv_prlo_adisc_issue,	/* RCV_PRLO        */
	lpfc_disc_illegal,		/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_disc_illegal,		/* CMPL_LOGO       */
	lpfc_cmpl_adisc_adisc_issue,	/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_device_rm_adisc_issue,	/* DEVICE_RM       */
	lpfc_device_recov_adisc_issue,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_reglogin_issue,	/* RCV_PLOGI   REG_LOGIN_ISSUE */
	lpfc_rcv_prli_reglogin_issue,	/* RCV_PRLI        */
	lpfc_rcv_logo_reglogin_issue,	/* RCV_LOGO        */
	lpfc_rcv_padisc_reglogin_issue,	/* RCV_ADISC       */
	lpfc_rcv_padisc_reglogin_issue,	/* RCV_PDISC       */
	lpfc_rcv_prlo_reglogin_issue,	/* RCV_PRLO        */
	lpfc_cmpl_plogi_illegal,	/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_disc_illegal,		/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_cmpl_reglogin_reglogin_issue, /* CMPL_REG_LOGIN */
	lpfc_device_rm_reglogin_issue,	/* DEVICE_RM       */
	lpfc_device_recov_reglogin_issue, /* DEVICE_RECOVERY */

	lpfc_rcv_plogi_prli_issue,	/* RCV_PLOGI   PRLI_ISSUE      */
	lpfc_rcv_prli_prli_issue,	/* RCV_PRLI        */
	lpfc_rcv_logo_prli_issue,	/* RCV_LOGO        */
	lpfc_rcv_padisc_prli_issue,	/* RCV_ADISC       */
	lpfc_rcv_padisc_prli_issue,	/* RCV_PDISC       */
	lpfc_rcv_prlo_prli_issue,	/* RCV_PRLO        */
	lpfc_cmpl_plogi_illegal,	/* CMPL_PLOGI      */
	lpfc_cmpl_prli_prli_issue,	/* CMPL_PRLI       */
	lpfc_disc_illegal,		/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_device_rm_prli_issue,	/* DEVICE_RM       */
	lpfc_device_recov_prli_issue,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_unmap_node,	/* RCV_PLOGI   UNMAPPED_NODE   */
	lpfc_rcv_prli_unmap_node,	/* RCV_PRLI        */
	lpfc_rcv_logo_unmap_node,	/* RCV_LOGO        */
	lpfc_rcv_padisc_unmap_node,	/* RCV_ADISC       */
	lpfc_rcv_padisc_unmap_node,	/* RCV_PDISC       */
	lpfc_rcv_prlo_unmap_node,	/* RCV_PRLO        */
	lpfc_disc_illegal,		/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_disc_illegal,		/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_disc_illegal,		/* DEVICE_RM       */
	lpfc_device_recov_unmap_node,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_mapped_node,	/* RCV_PLOGI   MAPPED_NODE     */
	lpfc_rcv_prli_mapped_node,	/* RCV_PRLI        */
	lpfc_rcv_logo_mapped_node,	/* RCV_LOGO        */
	lpfc_rcv_padisc_mapped_node,	/* RCV_ADISC       */
	lpfc_rcv_padisc_mapped_node,	/* RCV_PDISC       */
	lpfc_rcv_prlo_mapped_node,	/* RCV_PRLO        */
	lpfc_disc_illegal,		/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_disc_illegal,		/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_disc_illegal,		/* DEVICE_RM       */
	lpfc_device_recov_mapped_node,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_npr_node,	/* RCV_PLOGI   NPR_NODE        */
	lpfc_rcv_prli_npr_node,		/* RCV_PRLI        */
	lpfc_rcv_logo_npr_node,		/* RCV_LOGO        */
	lpfc_rcv_padisc_npr_node,	/* RCV_ADISC       */
	lpfc_rcv_padisc_npr_node,	/* RCV_PDISC       */
	lpfc_rcv_prlo_npr_node,		/* RCV_PRLO        */
	lpfc_cmpl_plogi_npr_node,	/* CMPL_PLOGI      */
	lpfc_cmpl_prli_npr_node,	/* CMPL_PRLI       */
	lpfc_cmpl_logo_npr_node,	/* CMPL_LOGO       */
	lpfc_cmpl_adisc_npr_node,	/* CMPL_ADISC      */
	lpfc_cmpl_reglogin_npr_node,	/* CMPL_REG_LOGIN  */
	lpfc_device_rm_npr_node,	/* DEVICE_RM       */
	lpfc_device_recov_npr_node,	/* DEVICE_RECOVERY */
};

int
lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			void *arg, uint32_t evt)
{
	uint32_t cur_state, rc;
	uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *,
			 uint32_t);
	uint32_t got_ndlp = 0;

	if (lpfc_nlp_get(ndlp))
		got_ndlp = 1;

	cur_state = ndlp->nlp_state;

	/* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0211 DSM in event x%x on NPort x%x in "
			 "state %d Data: x%x\n",
			 evt, ndlp->nlp_DID, cur_state, ndlp->nlp_flag);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
			      "DSM in: evt:%d ste:%d did:x%x",
			      evt, cur_state, ndlp->nlp_DID);

	func = lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt];
	rc = (func) (vport, ndlp, arg, evt);

	/* DSM out state <rc> on NPort <nlp_DID> */
	if (got_ndlp) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0212 DSM out state %d on NPort x%x Data: x%x\n",
			 rc, ndlp->nlp_DID, ndlp->nlp_flag);

		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
				      "DSM out: ste:%d did:x%x flg:x%x",
				      rc, ndlp->nlp_DID, ndlp->nlp_flag);
		/* Decrement the ndlp reference count held for this function */
		lpfc_nlp_put(ndlp);
	} else {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0212 DSM out state %d on NPort free\n", rc);

		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
				      "DSM out: ste:%d did:x%x flg:x%x",
				      rc, 0, 0);
	}

	return rc;
}
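
/*
 * Illustrative note (not part of the driver build): lpfc_disc_action[] above
 * is a one-dimensional array that flattens a [state][event] matrix, which is
 * why lpfc_disc_state_machine() indexes it as
 * (cur_state * NLP_EVT_MAX_EVENT) + evt. For example, a PLOGI completion
 * arriving while a node is in PLOGI_ISSUE state selects
 * lpfc_cmpl_plogi_plogi_issue. A hedged sketch of a bounds check built on
 * that layout, with the helper name invented for the example, might look
 * like this; the BUG_ON calls exist only to illustrate the valid index
 * ranges of the flattened table.
 *
 *	static uint32_t
 *	example_disc_action_index(uint32_t cur_state, uint32_t evt)
 *	{
 *		BUG_ON(cur_state >= NLP_STE_MAX_STATE);
 *		BUG_ON(evt >= NLP_EVT_MAX_EVENT);
 *		return (cur_state * NLP_EVT_MAX_EVENT) + evt;
 *	}
 */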