/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2007 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"


/* Called to verify a rcv'ed ADISC was intended for us. */
static int
lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		 struct lpfc_name *nn, struct lpfc_name *pn)
{
	/* Check that the WWNN / WWPN in the ADISC rsp matches our
	 * internal node table entry for that node.
	 */
	if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name)))
		return 0;

	if (memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name)))
		return 0;

	/* we match, return success */
	return 1;
}

int
lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		 struct serv_parm * sp, uint32_t class)
{
	volatile struct serv_parm *hsp = &vport->fc_sparam;
	uint16_t hsp_value, ssp_value = 0;

	/*
	 * The receive data field size and buffer-to-buffer receive data field
	 * size entries are 16 bits but are represented as two 8-bit fields in
	 * the driver data structure to account for rsvd bits and other control
	 * bits.  Reconstruct and compare the fields as 16-bit values before
	 * correcting the byte values.
	 */
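	/* For example, rcvDataSizeMsb = 0x08 and rcvDataSizeLsb = 0x00
	 * reassemble to a 16-bit receive data field size of 0x0800
	 * (2048 bytes); the same MSB/LSB split applies to the
	 * bbRcvSize fields handled further below.
	 */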
	if (sp->cls1.classValid) {
		hsp_value = (hsp->cls1.rcvDataSizeMsb << 8) |
			     hsp->cls1.rcvDataSizeLsb;
		ssp_value = (sp->cls1.rcvDataSizeMsb << 8) |
			     sp->cls1.rcvDataSizeLsb;
		if (!ssp_value)
			goto bad_service_param;
		if (ssp_value > hsp_value) {
			sp->cls1.rcvDataSizeLsb = hsp->cls1.rcvDataSizeLsb;
			sp->cls1.rcvDataSizeMsb = hsp->cls1.rcvDataSizeMsb;
		}
	} else if (class == CLASS1) {
		goto bad_service_param;
	}

	if (sp->cls2.classValid) {
		hsp_value = (hsp->cls2.rcvDataSizeMsb << 8) |
			     hsp->cls2.rcvDataSizeLsb;
		ssp_value = (sp->cls2.rcvDataSizeMsb << 8) |
			     sp->cls2.rcvDataSizeLsb;
		if (!ssp_value)
			goto bad_service_param;
		if (ssp_value > hsp_value) {
			sp->cls2.rcvDataSizeLsb = hsp->cls2.rcvDataSizeLsb;
			sp->cls2.rcvDataSizeMsb = hsp->cls2.rcvDataSizeMsb;
		}
	} else if (class == CLASS2) {
		goto bad_service_param;
	}

	if (sp->cls3.classValid) {
		hsp_value = (hsp->cls3.rcvDataSizeMsb << 8) |
			     hsp->cls3.rcvDataSizeLsb;
		ssp_value = (sp->cls3.rcvDataSizeMsb << 8) |
			     sp->cls3.rcvDataSizeLsb;
		if (!ssp_value)
			goto bad_service_param;
		if (ssp_value > hsp_value) {
			sp->cls3.rcvDataSizeLsb = hsp->cls3.rcvDataSizeLsb;
			sp->cls3.rcvDataSizeMsb = hsp->cls3.rcvDataSizeMsb;
		}
	} else if (class == CLASS3) {
		goto bad_service_param;
	}

	/*
	 * Preserve the upper four bits of the MSB from the PLOGI response.
	 * These bits contain the Buffer-to-Buffer State Change Number
	 * from the target and need to be passed to the FW.
	 */
	hsp_value = (hsp->cmn.bbRcvSizeMsb << 8) | hsp->cmn.bbRcvSizeLsb;
	ssp_value = (sp->cmn.bbRcvSizeMsb << 8) | sp->cmn.bbRcvSizeLsb;
	if (ssp_value > hsp_value) {
		sp->cmn.bbRcvSizeLsb = hsp->cmn.bbRcvSizeLsb;
		sp->cmn.bbRcvSizeMsb = (sp->cmn.bbRcvSizeMsb & 0xF0) |
				       (hsp->cmn.bbRcvSizeMsb & 0x0F);
	}

	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name));
	return 1;
bad_service_param:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
			 "0207 Device %x "
			 "(%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x) sent "
			 "invalid service parameters.  Ignoring device.\n",
			 ndlp->nlp_DID,
			 sp->nodeName.u.wwn[0], sp->nodeName.u.wwn[1],
			 sp->nodeName.u.wwn[2], sp->nodeName.u.wwn[3],
			 sp->nodeName.u.wwn[4], sp->nodeName.u.wwn[5],
			 sp->nodeName.u.wwn[6], sp->nodeName.u.wwn[7]);
	return 0;
}

static void *
lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	struct lpfc_dmabuf *pcmd, *prsp;
	uint32_t *lp;
	void     *ptr = NULL;
	IOCB_t   *irsp;

	irsp = &rspiocb->iocb;
	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;

	/* For lpfc_els_abort, context2 could be zero'ed to delay
	 * freeing associated memory till after ABTS completes.
	 */
	if (pcmd) {
		prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf,
				      list);
		if (prsp) {
			lp = (uint32_t *) prsp->virt;
			ptr = (void *)((uint8_t *)lp + sizeof(uint32_t));
		}
	} else {
		/* Force ulpStatus error since we are returning NULL ptr */
		if (!(irsp->ulpStatus)) {
			irsp->ulpStatus = IOSTAT_LOCAL_REJECT;
			irsp->un.ulpWord[4] = IOERR_SLI_ABORTED;
		}
		ptr = NULL;
	}
	return ptr;
}


/*
 * Free resources / clean up outstanding I/Os
 * associated with a LPFC_NODELIST entry.
 * This routine effectively results in a "software abort".
 */
int
lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_sli  *psli = &phba->sli;
	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
	struct lpfc_iocbq *iocb, *next_iocb;
	IOCB_t *cmd;

	/* Abort outstanding I/O on NPort <nlp_DID> */
	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY,
			 "0205 Abort outstanding I/O on NPort x%x "
			 "Data: x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
			 ndlp->nlp_rpi);

	lpfc_fabric_abort_nport(ndlp);

	/* First check the txq */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		/* Check to see if iocb matches the nport we are looking for */
		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
			/* It matches, so dequeue it and call compl with an
			   error */
			list_move_tail(&iocb->list, &completions);
			pring->txq_cnt--;
		}
	}

	/* Next check the txcmplq */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		/* Check to see if iocb matches the nport we are looking for */
		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		}
	}
	spin_unlock_irq(&phba->hbalock);

	while (!list_empty(&completions)) {
		iocb = list_get_first(&completions, struct lpfc_iocbq, list);
		cmd = &iocb->iocb;
		list_del_init(&iocb->list);

		if (!iocb->iocb_cmpl)
			lpfc_sli_release_iocbq(phba, iocb);
		else {
			cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
			cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
			(iocb->iocb_cmpl) (phba, iocb, iocb);
		}
	}

	/* If we are delaying issuing an ELS command, cancel it */
	if (ndlp->nlp_flag & NLP_DELAY_TMO)
		lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
	return 0;
}

static int
lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	       struct lpfc_iocbq *cmdiocb)
{
	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba    *phba = vport->phba;
	struct lpfc_dmabuf *pcmd;
	uint32_t *lp;
	IOCB_t *icmd;
	struct serv_parm *sp;
	LPFC_MBOXQ_t *mbox;
	struct ls_rjt stat;
	int rc;

	memset(&stat, 0, sizeof (struct ls_rjt));
	if (vport->port_state <= LPFC_FLOGI) {
		/* Before responding to PLOGI, check for pt2pt mode.
		 * If we are pt2pt, with an outstanding FLOGI, abort
		 * the FLOGI and resend it first.
		 */
		if (vport->fc_flag & FC_PT2PT) {
			lpfc_els_abort_flogi(phba);
			if (!(vport->fc_flag & FC_PT2PT_PLOGI)) {
				/* If the other side is supposed to initiate
				 * the PLOGI anyway, just ACC it now and
				 * move on with discovery.
				 */
				phba->fc_edtov = FF_DEF_EDTOV;
				phba->fc_ratov = FF_DEF_RATOV;
				/* Start discovery - this should just do
				   CLEAR_LA */
				lpfc_disc_start(vport);
			} else
				lpfc_initial_flogi(vport);
		} else {
			stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
			stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
			lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
					    ndlp, NULL);
			return 0;
		}
	}
	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	lp = (uint32_t *) pcmd->virt;
	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
	if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3) == 0)) {
		/* Reject this request because of invalid parameters */
		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
		stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
				    NULL);
		return 0;
	}
	icmd = &cmdiocb->iocb;

	/* PLOGI chkparm OK */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
			 ndlp->nlp_rpi);

	if (vport->cfg_fcp_class == 2 && sp->cls2.classValid)
		ndlp->nlp_fcp_info |= CLASS2;
	else
		ndlp->nlp_fcp_info |= CLASS3;

	ndlp->nlp_class_sup = 0;
	if (sp->cls1.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS1;
	if (sp->cls2.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS2;
	if (sp->cls3.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS3;
	if (sp->cls4.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS4;
	ndlp->nlp_maxframe =
		((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;

	/* no need to reg_login if we are already in one of these states */
	switch (ndlp->nlp_state) {
	case NLP_STE_NPR_NODE:
		if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
			break;
	case NLP_STE_REG_LOGIN_ISSUE:
	case NLP_STE_PRLI_ISSUE:
	case NLP_STE_UNMAPPED_NODE:
	case NLP_STE_MAPPED_NODE:
		lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
		return 1;
	}

	if ((vport->fc_flag & FC_PT2PT) &&
	    !(vport->fc_flag & FC_PT2PT_PLOGI)) {
		/* rcv'ed PLOGI decides what our NPortId will be */
		vport->fc_myDID = icmd->un.rcvels.parmRo;
		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mbox == NULL)
			goto out;
		lpfc_config_link(phba, mbox);
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->vport = vport;
		rc = lpfc_sli_issue_mbox
			(phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
			goto out;
		}

		lpfc_can_disctmo(vport);
	}
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		goto out;

	rc = lpfc_reg_login(phba, vport->vpi, icmd->un.rcvels.remoteID,
			    (uint8_t *) sp, mbox, 0);
	if (rc) {
		mempool_free(mbox, phba->mbox_mem_pool);
		goto out;
	}

	/* ACC PLOGI rsp command needs to execute first,
	 * queue this mbox command to be processed later.
	 */
	mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
	/*
	 * mbox->context2 = lpfc_nlp_get(ndlp) deferred until mailbox
	 * command issued in lpfc_cmpl_els_acc().
	 */
	mbox->vport = vport;
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
	spin_unlock_irq(shost->host_lock);

	/*
	 * If there is an outstanding PLOGI issued, abort it before
	 * sending the ACC rsp for the received PLOGI.
	 * If the pending PLOGI is not canceled here, it will be rejected
	 * by the remote port and retried. On a configuration with a single
	 * discovery thread, this will cause a huge delay in discovery.
	 * It will also cause multiple state machines to run in parallel
	 * for this node.
	 */
	if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
		/* software abort outstanding PLOGI */
		lpfc_els_abort(phba, ndlp);
	}

	if ((vport->port_type == LPFC_NPIV_PORT &&
	     vport->cfg_restrict_login)) {

		/* In order to preserve RPIs, we want to clean up
		 * the default RPI the firmware created to rcv
		 * this ELS request. The only way to do this is
		 * to register, then unregister the RPI.
		 */
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_RM_DFLT_RPI;
		spin_unlock_irq(shost->host_lock);
		stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
		stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
				    ndlp, mbox);
		return 1;
	}
	lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
	return 1;

out:
	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
	stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
	return 0;
}

static int
lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		struct lpfc_iocbq *cmdiocb)
{
	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
	struct lpfc_dmabuf *pcmd;
	struct serv_parm   *sp;
	struct lpfc_name   *pnn, *ppn;
	struct ls_rjt stat;
	ADISC *ap;
	IOCB_t *icmd;
	uint32_t *lp;
	uint32_t cmd;

	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	lp = (uint32_t *) pcmd->virt;

	cmd = *lp++;
	if (cmd == ELS_CMD_ADISC) {
		ap = (ADISC *) lp;
		pnn = (struct lpfc_name *) & ap->nodeName;
		ppn = (struct lpfc_name *) & ap->portName;
	} else {
		sp = (struct serv_parm *) lp;
		pnn = (struct lpfc_name *) & sp->nodeName;
		ppn = (struct lpfc_name *) & sp->portName;
	}

	icmd = &cmdiocb->iocb;
	if (icmd->ulpStatus == 0 && lpfc_check_adisc(vport, ndlp, pnn, ppn)) {
		if (cmd == ELS_CMD_ADISC) {
			lpfc_els_rsp_adisc_acc(vport, cmdiocb, ndlp);
		} else {
			lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp,
					 NULL);
		}
		return 1;
	}
	/* Reject this request because of invalid parameters */
	stat.un.b.lsRjtRsvd0 = 0;
	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
	stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
	stat.un.b.vendorUnique = 0;
	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);

	/* 1 sec timeout */
	mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_DELAY_TMO;
	spin_unlock_irq(shost->host_lock);
	ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
	ndlp->nlp_prev_state = ndlp->nlp_state;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	return 0;
}

static int
lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      struct lpfc_iocbq *cmdiocb, uint32_t els_cmd)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	/* Put ndlp in NPR state with 1 sec timeout for plogi, ACC logo */
	/* Only call LOGO ACC for first LOGO, this avoids sending unnecessary
	 * PLOGIs during LOGO storms from a device.
	 */
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_LOGO_ACC;
	spin_unlock_irq(shost->host_lock);
	if (els_cmd == ELS_CMD_PRLO)
		lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
	else
		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);

	if (!(ndlp->nlp_type & NLP_FABRIC) ||
	    (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
		/* Only try to re-login if this is NOT a Fabric Node */
		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_DELAY_TMO;
		spin_unlock_irq(shost->host_lock);

		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
		ndlp->nlp_prev_state = ndlp->nlp_state;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	} else {
		ndlp->nlp_prev_state = ndlp->nlp_state;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
	}

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~NLP_NPR_ADISC;
	spin_unlock_irq(shost->host_lock);
	/* The driver has to wait until the ACC completes before it continues
	 * processing the LOGO.  The action will resume in
	 * lpfc_cmpl_els_logo_acc routine. Since part of processing includes an
	 * unreg_login, the driver waits so the ACC does not get aborted.
	 */
	return 0;
}

static void
lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      struct lpfc_iocbq *cmdiocb)
{
	struct lpfc_dmabuf *pcmd;
	uint32_t *lp;
	PRLI *npr;
	struct fc_rport *rport = ndlp->rport;
	u32 roles;

	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	lp = (uint32_t *) pcmd->virt;
	npr = (PRLI *) ((uint8_t *) lp + sizeof (uint32_t));

	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
	if (npr->prliType == PRLI_FCP_TYPE) {
		if (npr->initiatorFunc)
			ndlp->nlp_type |= NLP_FCP_INITIATOR;
		if (npr->targetFunc)
			ndlp->nlp_type |= NLP_FCP_TARGET;
		if (npr->Retry)
			ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
	}
	if (rport) {
		/* We need to update the rport role values */
		roles = FC_RPORT_ROLE_UNKNOWN;
		if (ndlp->nlp_type & NLP_FCP_INITIATOR)
			roles |= FC_RPORT_ROLE_FCP_INITIATOR;
		if (ndlp->nlp_type & NLP_FCP_TARGET)
			roles |= FC_RPORT_ROLE_FCP_TARGET;

		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
			"rport rolechg:   role:x%x did:x%x flg:x%x",
			roles, ndlp->nlp_DID, ndlp->nlp_flag);

		fc_remote_port_rolechg(rport, roles);
	}
}

static uint32_t
lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (!ndlp->nlp_rpi) {
		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
		return 0;
	}

	/* Check config parameter use-adisc or FCP-2 */
	if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) ||
	    ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_ADISC;
		spin_unlock_irq(shost->host_lock);
		return 1;
	}
	ndlp->nlp_flag &= ~NLP_NPR_ADISC;
	lpfc_unreg_rpi(vport, ndlp);
	return 0;
}

static uint32_t
lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		  void *arg, uint32_t evt)
{
	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
			 "0253 Illegal State Transition: node x%x "
			 "event x%x, state x%x Data: x%x x%x\n",
			 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
			 ndlp->nlp_flag);
	return ndlp->nlp_state;
}

/* Start of Discovery State Machine routines */

static uint32_t
lpfc_rcv_plogi_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
		ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
		return ndlp->nlp_state;
	}
	lpfc_drop_node(vport, ndlp);
	return NLP_STE_FREED_NODE;
}

static uint32_t
lpfc_rcv_els_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	lpfc_issue_els_logo(vport, ndlp, 0);
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_LOGO_ACC;
	spin_unlock_irq(shost->host_lock);
	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);

	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	lpfc_drop_node(vport, ndlp);
	return NLP_STE_FREED_NODE;
}

static uint32_t
lpfc_device_rm_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	lpfc_drop_node(vport, ndlp);
	return NLP_STE_FREED_NODE;
}

static uint32_t
lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct lpfc_hba    *phba = vport->phba;
	struct lpfc_iocbq  *cmdiocb = arg;
	struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	uint32_t *lp = (uint32_t *) pcmd->virt;
	struct serv_parm *sp = (struct serv_parm *) (lp + 1);
	struct ls_rjt stat;
	int port_cmp;

	memset(&stat, 0, sizeof (struct ls_rjt));

	/* For a PLOGI, we only accept if our portname is less
	 * than the remote portname.
	 */
	phba->fc_stat.elsLogiCol++;
	port_cmp = memcmp(&vport->fc_portname, &sp->portName,
			  sizeof(struct lpfc_name));

	if (port_cmp >= 0) {
		/* Reject this request because the remote node will accept
		   ours */
		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
		stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
				    NULL);
	} else {
		lpfc_rcv_plogi(vport, ndlp, cmdiocb);
	} /* If our portname was less */

	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prli_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
	struct ls_rjt     stat;

	memset(&stat, 0, sizeof (struct ls_rjt));
	stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	/* software abort outstanding PLOGI */
	lpfc_els_abort(vport->phba, ndlp);

	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	/* software abort outstanding PLOGI */
	lpfc_els_abort(phba, ndlp);

	if (evt == NLP_EVT_RCV_LOGO) {
		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
	} else {
		lpfc_issue_els_logo(vport, ndlp, 0);
	}

	/* Put ndlp in npr state set plogi timer for 1 sec */
	mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_DELAY_TMO;
	spin_unlock_irq(shost->host_lock);
	ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
	ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);

	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
			    struct lpfc_nodelist *ndlp,
			    void *arg,
			    uint32_t evt)
{
	struct lpfc_hba    *phba = vport->phba;
	struct lpfc_iocbq  *cmdiocb, *rspiocb;
	struct lpfc_dmabuf *pcmd, *prsp, *mp;
	uint32_t *lp;
	IOCB_t *irsp;
	struct serv_parm *sp;
	LPFC_MBOXQ_t *mbox;

	cmdiocb = (struct lpfc_iocbq *) arg;
	rspiocb = cmdiocb->context_un.rsp_iocb;

	if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
		/* Recovery from PLOGI collision logic */
		return ndlp->nlp_state;
	}

	irsp = &rspiocb->iocb;

	if (irsp->ulpStatus)
		goto out;

	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;

	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);

	lp = (uint32_t *) prsp->virt;
	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
	if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3))
		goto out;
	/* PLOGI chkparm OK */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0121 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_state,
			 ndlp->nlp_flag, ndlp->nlp_rpi);
	if (vport->cfg_fcp_class == 2 && (sp->cls2.classValid))
		ndlp->nlp_fcp_info |= CLASS2;
	else
		ndlp->nlp_fcp_info |= CLASS3;

	ndlp->nlp_class_sup = 0;
	if (sp->cls1.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS1;
	if (sp->cls2.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS2;
	if (sp->cls3.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS3;
	if (sp->cls4.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS4;
	ndlp->nlp_maxframe =
		((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0133 PLOGI: no memory for reg_login "
				 "Data: x%x x%x x%x x%x\n",
				 ndlp->nlp_DID, ndlp->nlp_state,
				 ndlp->nlp_flag, ndlp->nlp_rpi);
		goto out;
	}

	lpfc_unreg_rpi(vport, ndlp);

	if (lpfc_reg_login(phba, vport->vpi, irsp->un.elsreq64.remoteID,
			   (uint8_t *) sp, mbox, 0) == 0) {
		switch (ndlp->nlp_DID) {
		case NameServer_DID:
			mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login;
			break;
		case FDMI_DID:
			mbox->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login;
			break;
		default:
			mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
		}
		mbox->context2 = lpfc_nlp_get(ndlp);
		mbox->vport = vport;
		if (lpfc_sli_issue_mbox(phba, mbox,
					(MBX_NOWAIT | MBX_STOP_IOCB))
		    != MBX_NOT_FINISHED) {
			lpfc_nlp_set_state(vport, ndlp,
					   NLP_STE_REG_LOGIN_ISSUE);
			return ndlp->nlp_state;
		}
		lpfc_nlp_put(ndlp);
		mp = (struct lpfc_dmabuf *) mbox->context1;
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(mbox, phba->mbox_mem_pool);

		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0134 PLOGI: cannot issue reg_login "
				 "Data: x%x x%x x%x x%x\n",
				 ndlp->nlp_DID, ndlp->nlp_state,
				 ndlp->nlp_flag, ndlp->nlp_rpi);
	} else {
		mempool_free(mbox, phba->mbox_mem_pool);

		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0135 PLOGI: cannot format reg_login "
				 "Data: x%x x%x x%x x%x\n",
				 ndlp->nlp_DID, ndlp->nlp_state,
				 ndlp->nlp_flag, ndlp->nlp_rpi);
	}


out:
	if (ndlp->nlp_DID == NameServer_DID) {
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0261 Cannot Register NameServer login\n");
	}

	/* Free this node since the driver cannot login or has the wrong
	   sparm */
	lpfc_drop_node(vport, ndlp);
	return NLP_STE_FREED_NODE;
}

static uint32_t
lpfc_device_rm_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
		spin_unlock_irq(shost->host_lock);
		return ndlp->nlp_state;
	} else {
		/* software abort outstanding PLOGI */
		lpfc_els_abort(vport->phba, ndlp);

		lpfc_drop_node(vport, ndlp);
		return NLP_STE_FREED_NODE;
	}
}

static uint32_t
lpfc_device_recov_plogi_issue(struct lpfc_vport *vport,
			      struct lpfc_nodelist *ndlp,
			      void *arg,
			      uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;

	/* Don't do anything that will mess up processing of the
	 * previous RSCN.
	 */
	if (vport->fc_flag & FC_RSCN_DEFERRED)
		return ndlp->nlp_state;

	/* software abort outstanding PLOGI */
	lpfc_els_abort(phba, ndlp);

	ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(shost->host_lock);

	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb;

	/* software abort outstanding ADISC */
	lpfc_els_abort(phba, ndlp);

	cmdiocb = (struct lpfc_iocbq *) arg;

	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb))
		return ndlp->nlp_state;

	ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
	lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);

	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prli_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_logo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	/* software abort outstanding ADISC */
	lpfc_els_abort(phba, ndlp);

	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_padisc_adisc_issue(struct lpfc_vport *vport,
			    struct lpfc_nodelist *ndlp,
			    void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prlo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	/* Treat like rcv logo */
	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
			    struct lpfc_nodelist *ndlp,
			    void *arg, uint32_t evt)
{
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb, *rspiocb;
	IOCB_t *irsp;
	ADISC *ap;

	cmdiocb = (struct lpfc_iocbq *) arg;
	rspiocb = cmdiocb->context_un.rsp_iocb;

	ap = (ADISC *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
	irsp = &rspiocb->iocb;

	if ((irsp->ulpStatus) ||
	    (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) {
		/* 1 sec timeout */
		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_DELAY_TMO;
		spin_unlock_irq(shost->host_lock);
		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;

		memset(&ndlp->nlp_nodename, 0, sizeof(struct lpfc_name));
		memset(&ndlp->nlp_portname, 0, sizeof(struct lpfc_name));

		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		lpfc_unreg_rpi(vport, ndlp);
		return ndlp->nlp_state;
	}

	if (ndlp->nlp_type & NLP_FCP_TARGET) {
		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
	} else {
		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
	}
	return ndlp->nlp_state;
}

static uint32_t
lpfc_device_rm_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
		spin_unlock_irq(shost->host_lock);
		return ndlp->nlp_state;
	} else {
		/* software abort outstanding ADISC */
		lpfc_els_abort(vport->phba, ndlp);

		lpfc_drop_node(vport, ndlp);
		return NLP_STE_FREED_NODE;
	}
}

static uint32_t
lpfc_device_recov_adisc_issue(struct lpfc_vport *vport,
			      struct lpfc_nodelist *ndlp,
			      void *arg,
			      uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;

	/* Don't do anything that will mess up processing of the
	 * previous RSCN.
	 */
	if (vport->fc_flag & FC_RSCN_DEFERRED)
		return ndlp->nlp_state;

	/* software abort outstanding ADISC */
	lpfc_els_abort(phba, ndlp);

	ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(shost->host_lock);
	lpfc_disc_set_adisc(vport, ndlp);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_plogi_reglogin_issue(struct lpfc_vport *vport,
			      struct lpfc_nodelist *ndlp,
			      void *arg,
			      uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport,
			     struct lpfc_nodelist *ndlp,
			     void *arg,
			     uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
			     struct lpfc_nodelist *ndlp,
			     void *arg,
			     uint32_t evt)
{
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
	LPFC_MBOXQ_t	  *mb;
	LPFC_MBOXQ_t	  *nextmb;
	struct lpfc_dmabuf *mp;

	cmdiocb = (struct lpfc_iocbq *) arg;

	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
	if ((mb = phba->sli.mbox_active)) {
		if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
		    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			lpfc_nlp_put(ndlp);
			mb->context2 = NULL;
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
	}

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
		    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			mp = (struct lpfc_dmabuf *) (mb->context1);
			if (mp) {
				lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			lpfc_nlp_put(ndlp);
			list_del(&mb->list);
			mempool_free(mb, phba->mbox_mem_pool);
		}
	}
	spin_unlock_irq(&phba->hbalock);

	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_padisc_reglogin_issue(struct lpfc_vport *vport,
			       struct lpfc_nodelist *ndlp,
			       void *arg,
			       uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prlo_reglogin_issue(struct lpfc_vport *vport,
			     struct lpfc_nodelist *ndlp,
			     void *arg,
			     uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;
	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
				  struct lpfc_nodelist *ndlp,
				  void *arg,
				  uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
	MAILBOX_t *mb = &pmb->mb;
	uint32_t did = mb->un.varWords[1];

	if (mb->mbxStatus) {
		/* RegLogin failed */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0246 RegLogin failed Data: x%x x%x x%x\n",
				 did, mb->mbxStatus, vport->port_state);
		/*
		 * If RegLogin failed due to lack of HBA resources do not
		 * retry discovery.
		 */
		if (mb->mbxStatus == MBXERR_RPI_FULL) {
			ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
			return ndlp->nlp_state;
		}

		/* Put ndlp in npr state set plogi timer for 1 sec */
		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_DELAY_TMO;
		spin_unlock_irq(shost->host_lock);
		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;

		lpfc_issue_els_logo(vport, ndlp, 0);
		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		return ndlp->nlp_state;
	}

	ndlp->nlp_rpi = mb->un.varWords[0];

	/* Only if we are not a fabric nport do we issue PRLI */
	if (!(ndlp->nlp_type & NLP_FABRIC)) {
		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
		lpfc_issue_els_prli(vport, ndlp, 0);
	} else {
		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
	}
	return ndlp->nlp_state;
}

static uint32_t
lpfc_device_rm_reglogin_issue(struct lpfc_vport *vport,
			      struct lpfc_nodelist *ndlp,
			      void *arg,
			      uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
		spin_unlock_irq(shost->host_lock);
		return ndlp->nlp_state;
	} else {
		lpfc_drop_node(vport, ndlp);
		return NLP_STE_FREED_NODE;
	}
}

static uint32_t
lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport,
				 struct lpfc_nodelist *ndlp,
				 void *arg,
				 uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	/* Don't do anything that will mess up processing of the
	 * previous RSCN.
	 */
	if (vport->fc_flag & FC_RSCN_DEFERRED)
		return ndlp->nlp_state;

	ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(shost->host_lock);
	lpfc_disc_set_adisc(vport, ndlp);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_plogi_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb;

	cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_logo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	/* Software abort outstanding PRLI before sending acc */
	lpfc_els_abort(vport->phba, ndlp);

	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_padisc_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
	return ndlp->nlp_state;
}

/* This routine is invoked when we rcv a PRLO request from a nport
 * we are logged into.  We should send back a PRLO rsp setting the
 * appropriate bits.
 * NEXT STATE = PRLI_ISSUE
 */
static uint32_t
lpfc_rcv_prlo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_iocbq *cmdiocb, *rspiocb;
	struct lpfc_hba   *phba = vport->phba;
	IOCB_t *irsp;
	PRLI *npr;

	cmdiocb = (struct lpfc_iocbq *) arg;
	rspiocb = cmdiocb->context_un.rsp_iocb;
	npr = (PRLI *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);

	irsp = &rspiocb->iocb;
	if (irsp->ulpStatus) {
		if ((vport->port_type == LPFC_NPIV_PORT) &&
		    vport->cfg_restrict_login) {
			goto out;
		}
		ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		return ndlp->nlp_state;
	}

	/* Check out PRLI rsp */
	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
	if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
	    (npr->prliType == PRLI_FCP_TYPE)) {
		if (npr->initiatorFunc)
			ndlp->nlp_type |= NLP_FCP_INITIATOR;
		if (npr->targetFunc)
			ndlp->nlp_type |= NLP_FCP_TARGET;
		if (npr->Retry)
			ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
	}
	if (!(ndlp->nlp_type & NLP_FCP_TARGET) &&
	    (vport->port_type == LPFC_NPIV_PORT) &&
	    vport->cfg_restrict_login) {
out:
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_TARGET_REMOVE;
		spin_unlock_irq(shost->host_lock);
		lpfc_issue_els_logo(vport, ndlp, 0);

		ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
		return ndlp->nlp_state;
	}

	ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
	if (ndlp->nlp_type & NLP_FCP_TARGET)
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
	else
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
	return ndlp->nlp_state;
}

/*! lpfc_device_rm_prli_issue
 *
 * \pre
 * \post
 * \param   phba
 * \param   ndlp
 * \param   arg
 * \param   evt
 * \return  uint32_t
 *
 * \b Description:
 *    This routine is invoked when we receive a request to remove a nport
 *    we are in the process of PRLIing. We should software abort the
 *    outstanding prli, unreg login, and send a logout. We will change the
 *    node state to UNUSED_NODE and put it on the plogi list so it can be
 *    freed when the LOGO completes.
 *
 */

static uint32_t
lpfc_device_rm_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
		spin_unlock_irq(shost->host_lock);
		return ndlp->nlp_state;
	} else {
		/* software abort outstanding PLOGI */
		lpfc_els_abort(vport->phba, ndlp);

		lpfc_drop_node(vport, ndlp);
		return NLP_STE_FREED_NODE;
	}
}


/*! lpfc_device_recov_prli_issue
 *
 * \pre
 * \post
 * \param   phba
 * \param   ndlp
 * \param   arg
 * \param   evt
 * \return  uint32_t
 *
 * \b Description:
 *    The routine is invoked when the state of a device is unknown, like
 *    during a link down.  We should remove the nodelist entry from the
 *    unmapped list, issue a UNREG_LOGIN, do a software abort of the
 *    outstanding PRLI command, then free the node entry.
 */
static uint32_t
lpfc_device_recov_prli_issue(struct lpfc_vport *vport,
			     struct lpfc_nodelist *ndlp,
			     void *arg,
			     uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;

	/* Don't do anything that will mess up processing of the
	 * previous RSCN.
	 */
	if (vport->fc_flag & FC_RSCN_DEFERRED)
		return ndlp->nlp_state;

	/* software abort outstanding PRLI */
	lpfc_els_abort(phba, ndlp);

	ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(shost->host_lock);
	lpfc_disc_set_adisc(vport, ndlp);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_plogi_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prli_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_prli(vport, ndlp, cmdiocb);
	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_logo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_padisc_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prlo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_device_recov_unmap_node(struct lpfc_vport *vport,
			     struct lpfc_nodelist *ndlp,
			     void *arg,
			     uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(shost->host_lock);
	lpfc_disc_set_adisc(vport, ndlp);

	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_plogi_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prli_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_logo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_padisc_mapped_node(struct lpfc_vport *vport,
			    struct lpfc_nodelist *ndlp,
			    void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	/* flush the target */
	lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
			    ndlp->nlp_sid, 0, LPFC_CTX_TGT);

	/* Treat like rcv logo */
	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_device_recov_mapped_node(struct lpfc_vport *vport,
			      struct lpfc_nodelist *ndlp,
			      void *arg,
			      uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(shost->host_lock);
	lpfc_disc_set_adisc(vport, ndlp);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			void *arg, uint32_t evt)
{
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	/* Ignore PLOGI if we have an outstanding LOGO */
	if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC)) {
		return ndlp->nlp_state;
	}

	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
		spin_unlock_irq(shost->host_lock);
		return ndlp->nlp_state;
	}

	/* send PLOGI immediately, move to PLOGI issue state */
	if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
		ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
		lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
	}

	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		       void *arg, uint32_t evt)
{
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
	struct ls_rjt     stat;

	memset(&stat, 0, sizeof (struct ls_rjt));
	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);

	if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
		if (ndlp->nlp_flag & NLP_NPR_ADISC) {
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
			spin_unlock_irq(shost->host_lock);
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
			lpfc_issue_els_adisc(vport, ndlp, 0);
		} else {
			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
		}
	}
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		       void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_padisc(vport, ndlp, cmdiocb);

	/*
	 * Do not start discovery if discovery is about to start
	 * or discovery in progress for this node. Starting discovery
	 * here will affect the counting of discovery threads.
	 */
	if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
		if (ndlp->nlp_flag & NLP_NPR_ADISC) {
			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
			lpfc_issue_els_adisc(vport, ndlp, 0);
		} else {
			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
		}
	}
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		       void *arg, uint32_t evt)
{
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_LOGO_ACC;
	spin_unlock_irq(shost->host_lock);

	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);

	if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) {
		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_DELAY_TMO;
		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
		spin_unlock_irq(shost->host_lock);
		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
	} else {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
		spin_unlock_irq(shost->host_lock);
	}
	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb, *rspiocb;
	IOCB_t *irsp;

	cmdiocb = (struct lpfc_iocbq *) arg;
	rspiocb = cmdiocb->context_un.rsp_iocb;

	irsp = &rspiocb->iocb;
	if (irsp->ulpStatus) {
		lpfc_drop_node(vport, ndlp);
		return NLP_STE_FREED_NODE;
	}
	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb, *rspiocb;
	IOCB_t *irsp;

	cmdiocb = (struct lpfc_iocbq *) arg;
	rspiocb = cmdiocb->context_un.rsp_iocb;

	irsp = &rspiocb->iocb;
	if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
		lpfc_drop_node(vport, ndlp);
		return NLP_STE_FREED_NODE;
	}
	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			void *arg, uint32_t evt)
{
	lpfc_unreg_rpi(vport, ndlp);
	/* This routine does nothing, just return the current state */
	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_adisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb, *rspiocb;
	IOCB_t *irsp;

	cmdiocb = (struct lpfc_iocbq *) arg;
	rspiocb = cmdiocb->context_un.rsp_iocb;

	irsp = &rspiocb->iocb;
	if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
		lpfc_drop_node(vport, ndlp);
		return NLP_STE_FREED_NODE;
	}
	return ndlp->nlp_state;
}

static uint32_t
lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
			    struct lpfc_nodelist *ndlp,
			    void *arg, uint32_t evt)
{
	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
	MAILBOX_t    *mb = &pmb->mb;

	if (!mb->mbxStatus)
		ndlp->nlp_rpi = mb->un.varWords[0];
	else {
		if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
			lpfc_drop_node(vport, ndlp);
			return NLP_STE_FREED_NODE;
		}
	}
	return ndlp->nlp_state;
}

static uint32_t
lpfc_device_rm_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
		spin_unlock_irq(shost->host_lock);
		return ndlp->nlp_state;
	}
	lpfc_drop_node(vport, ndlp);
	return NLP_STE_FREED_NODE;
}

static uint32_t
lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	/* Don't do anything that will mess up processing of the
	 * previous RSCN.
	 */
	if (vport->fc_flag & FC_RSCN_DEFERRED)
		return ndlp->nlp_state;

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(shost->host_lock);
	if (ndlp->nlp_flag & NLP_DELAY_TMO) {
		lpfc_cancel_retry_delay_tmo(vport, ndlp);
	}
	return ndlp->nlp_state;
}


/* This next section defines the NPort Discovery State Machine */

/* There are 4 different double linked lists nodelist entries can reside on.
 * The plogi list and adisc list are used when Link Up discovery or RSCN
 * processing is needed. Each list holds the nodes that we will send PLOGI
 * or ADISC on. These lists will keep track of what nodes will be affected
 * by an RSCN, or a Link Up (typically, all nodes are affected on Link Up).
 * The unmapped_list will contain all nodes that we have successfully logged
 * into at the Fibre Channel level. The mapped_list will contain all nodes
 * that are mapped FCP targets.
 */
/*
 * The bind list is a list of undiscovered (potentially non-existent) nodes
 * that we have saved binding information on.
 * nodes transition from the unmapped to the mapped list.
 */
/* For UNUSED_NODE state, the node has just been allocated.
 * For PLOGI_ISSUE and REG_LOGIN_ISSUE, the node is on
 * the PLOGI list. For REG_LOGIN_COMPL, the node is taken off the PLOGI list
 * and put on the unmapped list. For ADISC processing, the node is taken off
 * the ADISC list and placed on either the mapped or unmapped list (depending
 * on its previous state). Once on the unmapped list, a PRLI is issued and the
 * state changed to PRLI_ISSUE. When the PRLI completion occurs, the state is
 * changed to UNMAPPED_NODE. If the completion indicates a mapped
 * node, the node is taken off the unmapped list. The binding list is checked
 * for a valid binding, or a binding is automatically assigned. If binding
 * assignment is unsuccessful, the node is left on the unmapped list. If
 * binding assignment is successful, the associated binding list entry (if
 * any) is removed, and the node is placed on the mapped list.
 */
/*
 * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
 * lists will receive a DEVICE_RECOVERY event. If the linkdown or devloss
 * timers expire, all affected nodes will receive a DEVICE_RM event.
 */
/*
 * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists
 * to either the ADISC or PLOGI list. After a Nameserver query or ALPA loopmap
 * check, additional nodes may be added to, or removed (via DEVICE_RM) from,
 * the PLOGI or ADISC lists. Once the PLOGI and ADISC lists are populated,
 * we first process the ADISC list. 32 entries are processed initially and
 * ADISC is initiated for each one. Completions / Events for each node are
 * funneled through the state machine. As each node finishes ADISC processing,
 * it starts ADISC for any nodes waiting for ADISC processing. If no nodes are
 * waiting, and the ADISC list count is identically 0, then we are done. For
 * Link Up discovery, since all nodes on the PLOGI list are UNREG_LOGIN'ed, we
 * can issue a CLEAR_LA and re-enable Link Events. Next we process the PLOGI
 * list. 32 entries are processed initially and PLOGI is initiated for each
 * one. Completions / Events for each node are funneled through the state
 * machine. As each node finishes PLOGI processing, it starts PLOGI for any
 * nodes waiting for PLOGI processing. If no nodes are waiting, and the PLOGI
 * list count is identically 0, then we are done. We have now completed
 * discovery / RSCN handling. Upon completion, ALL nodes should be on either
 * the mapped or unmapped lists.
 */
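/*
 * The jump table below holds one action routine per (state, event) pair:
 * NLP_STE_MAX_STATE rows of NLP_EVT_MAX_EVENT entries each, indexed in
 * lpfc_disc_state_machine() as (current state * NLP_EVT_MAX_EVENT) + event.
 * Row and column order must therefore match the NLP_STE_* and NLP_EVT_*
 * numbering; the comment beside each entry notes the event (and, on the first
 * entry of a row, the state). lpfc_disc_illegal fills the slots for events
 * that are not expected in a given state.
 */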
static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
	(struct lpfc_vport *, struct lpfc_nodelist *, void *, uint32_t) = {
	/* Action routine		Event		Current State */
	lpfc_rcv_plogi_unused_node,	/* RCV_PLOGI	UNUSED_NODE */
	lpfc_rcv_els_unused_node,	/* RCV_PRLI */
	lpfc_rcv_logo_unused_node,	/* RCV_LOGO */
	lpfc_rcv_els_unused_node,	/* RCV_ADISC */
	lpfc_rcv_els_unused_node,	/* RCV_PDISC */
	lpfc_rcv_els_unused_node,	/* RCV_PRLO */
	lpfc_disc_illegal,		/* CMPL_PLOGI */
	lpfc_disc_illegal,		/* CMPL_PRLI */
	lpfc_cmpl_logo_unused_node,	/* CMPL_LOGO */
	lpfc_disc_illegal,		/* CMPL_ADISC */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN */
	lpfc_device_rm_unused_node,	/* DEVICE_RM */
	lpfc_disc_illegal,		/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_plogi_issue,	/* RCV_PLOGI	PLOGI_ISSUE */
	lpfc_rcv_prli_plogi_issue,	/* RCV_PRLI */
	lpfc_rcv_logo_plogi_issue,	/* RCV_LOGO */
	lpfc_rcv_els_plogi_issue,	/* RCV_ADISC */
	lpfc_rcv_els_plogi_issue,	/* RCV_PDISC */
	lpfc_rcv_els_plogi_issue,	/* RCV_PRLO */
	lpfc_cmpl_plogi_plogi_issue,	/* CMPL_PLOGI */
	lpfc_disc_illegal,		/* CMPL_PRLI */
	lpfc_disc_illegal,		/* CMPL_LOGO */
	lpfc_disc_illegal,		/* CMPL_ADISC */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN */
	lpfc_device_rm_plogi_issue,	/* DEVICE_RM */
	lpfc_device_recov_plogi_issue,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_adisc_issue,	/* RCV_PLOGI	ADISC_ISSUE */
	lpfc_rcv_prli_adisc_issue,	/* RCV_PRLI */
	lpfc_rcv_logo_adisc_issue,	/* RCV_LOGO */
	lpfc_rcv_padisc_adisc_issue,	/* RCV_ADISC */
	lpfc_rcv_padisc_adisc_issue,	/* RCV_PDISC */
	lpfc_rcv_prlo_adisc_issue,	/* RCV_PRLO */
	lpfc_disc_illegal,		/* CMPL_PLOGI */
	lpfc_disc_illegal,		/* CMPL_PRLI */
	lpfc_disc_illegal,		/* CMPL_LOGO */
	lpfc_cmpl_adisc_adisc_issue,	/* CMPL_ADISC */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN */
	lpfc_device_rm_adisc_issue,	/* DEVICE_RM */
	lpfc_device_recov_adisc_issue,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_reglogin_issue,	/* RCV_PLOGI	REG_LOGIN_ISSUE */
	lpfc_rcv_prli_reglogin_issue,	/* RCV_PRLI */
	lpfc_rcv_logo_reglogin_issue,	/* RCV_LOGO */
	lpfc_rcv_padisc_reglogin_issue,	/* RCV_ADISC */
	lpfc_rcv_padisc_reglogin_issue,	/* RCV_PDISC */
	lpfc_rcv_prlo_reglogin_issue,	/* RCV_PRLO */
	lpfc_disc_illegal,		/* CMPL_PLOGI */
	lpfc_disc_illegal,		/* CMPL_PRLI */
	lpfc_disc_illegal,		/* CMPL_LOGO */
	lpfc_disc_illegal,		/* CMPL_ADISC */
	lpfc_cmpl_reglogin_reglogin_issue, /* CMPL_REG_LOGIN */
	lpfc_device_rm_reglogin_issue,	/* DEVICE_RM */
	lpfc_device_recov_reglogin_issue, /* DEVICE_RECOVERY */

	lpfc_rcv_plogi_prli_issue,	/* RCV_PLOGI	PRLI_ISSUE */
	lpfc_rcv_prli_prli_issue,	/* RCV_PRLI */
	lpfc_rcv_logo_prli_issue,	/* RCV_LOGO */
	lpfc_rcv_padisc_prli_issue,	/* RCV_ADISC */
	lpfc_rcv_padisc_prli_issue,	/* RCV_PDISC */
	lpfc_rcv_prlo_prli_issue,	/* RCV_PRLO */
	lpfc_disc_illegal,		/* CMPL_PLOGI */
	lpfc_cmpl_prli_prli_issue,	/* CMPL_PRLI */
	lpfc_disc_illegal,		/* CMPL_LOGO */
	lpfc_disc_illegal,		/* CMPL_ADISC */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN */
	lpfc_device_rm_prli_issue,	/* DEVICE_RM */
	lpfc_device_recov_prli_issue,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_unmap_node,	/* RCV_PLOGI	UNMAPPED_NODE */
	lpfc_rcv_prli_unmap_node,	/* RCV_PRLI */
	lpfc_rcv_logo_unmap_node,	/* RCV_LOGO */
	lpfc_rcv_padisc_unmap_node,	/* RCV_ADISC */
	lpfc_rcv_padisc_unmap_node,	/* RCV_PDISC */
	lpfc_rcv_prlo_unmap_node,	/* RCV_PRLO */
	lpfc_disc_illegal,		/* CMPL_PLOGI */
	lpfc_disc_illegal,		/* CMPL_PRLI */
	lpfc_disc_illegal,		/* CMPL_LOGO */
	lpfc_disc_illegal,		/* CMPL_ADISC */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN */
	lpfc_disc_illegal,		/* DEVICE_RM */
	lpfc_device_recov_unmap_node,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_mapped_node,	/* RCV_PLOGI	MAPPED_NODE */
	lpfc_rcv_prli_mapped_node,	/* RCV_PRLI */
	lpfc_rcv_logo_mapped_node,	/* RCV_LOGO */
	lpfc_rcv_padisc_mapped_node,	/* RCV_ADISC */
	lpfc_rcv_padisc_mapped_node,	/* RCV_PDISC */
	lpfc_rcv_prlo_mapped_node,	/* RCV_PRLO */
	lpfc_disc_illegal,		/* CMPL_PLOGI */
	lpfc_disc_illegal,		/* CMPL_PRLI */
	lpfc_disc_illegal,		/* CMPL_LOGO */
	lpfc_disc_illegal,		/* CMPL_ADISC */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN */
	lpfc_disc_illegal,		/* DEVICE_RM */
	lpfc_device_recov_mapped_node,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_npr_node,	/* RCV_PLOGI	NPR_NODE */
	lpfc_rcv_prli_npr_node,		/* RCV_PRLI */
	lpfc_rcv_logo_npr_node,		/* RCV_LOGO */
	lpfc_rcv_padisc_npr_node,	/* RCV_ADISC */
	lpfc_rcv_padisc_npr_node,	/* RCV_PDISC */
	lpfc_rcv_prlo_npr_node,		/* RCV_PRLO */
	lpfc_cmpl_plogi_npr_node,	/* CMPL_PLOGI */
	lpfc_cmpl_prli_npr_node,	/* CMPL_PRLI */
	lpfc_cmpl_logo_npr_node,	/* CMPL_LOGO */
	lpfc_cmpl_adisc_npr_node,	/* CMPL_ADISC */
	lpfc_cmpl_reglogin_npr_node,	/* CMPL_REG_LOGIN */
	lpfc_device_rm_npr_node,	/* DEVICE_RM */
	lpfc_device_recov_npr_node,	/* DEVICE_RECOVERY */
};
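/*
 * lpfc_disc_state_machine: run a single discovery event for @ndlp through the
 * lpfc_disc_action jump table. A reference is taken on the node for the
 * duration of the call (lpfc_nlp_get / lpfc_nlp_put). The routine logs and
 * traces the event, dispatches to the action routine selected by the node's
 * current state and @evt, and returns the resulting node state; action
 * routines that drop the node return NLP_STE_FREED_NODE instead.
 *
 * Illustrative call from an ELS receive path (event macro name assumed from
 * the NLP_EVT_* naming noted above):
 *
 *	lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_RCV_PLOGI);
 */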
int
lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			void *arg, uint32_t evt)
{
	uint32_t cur_state, rc;
	uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *,
			 uint32_t);

	lpfc_nlp_get(ndlp);
	cur_state = ndlp->nlp_state;

	/* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0211 DSM in event x%x on NPort x%x in "
			 "state %d Data: x%x\n",
			 evt, ndlp->nlp_DID, cur_state, ndlp->nlp_flag);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
		"DSM in: evt:%d ste:%d did:x%x",
		evt, cur_state, ndlp->nlp_DID);

	func = lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt];
	rc = (func) (vport, ndlp, arg, evt);

	/* DSM out state <rc> on NPort <nlp_DID> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0212 DSM out state %d on NPort x%x Data: x%x\n",
			 rc, ndlp->nlp_DID, ndlp->nlp_flag);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
		"DSM out: ste:%d did:x%x flg:x%x",
		rc, ndlp->nlp_DID, ndlp->nlp_flag);

	lpfc_nlp_put(ndlp);

	return rc;
}