1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2004-2007 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 8 * * 9 * This program is free software; you can redistribute it and/or * 10 * modify it under the terms of version 2 of the GNU General * 11 * Public License as published by the Free Software Foundation. * 12 * This program is distributed in the hope that it will be useful. * 13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 17 * TO BE LEGALLY INVALID. See the GNU General Public License for * 18 * more details, a copy of which can be found in the file COPYING * 19 * included with this package. * 20 *******************************************************************/ 21 /* See Fibre Channel protocol T11 FC-LS for details */ 22 #include <linux/blkdev.h> 23 #include <linux/pci.h> 24 #include <linux/interrupt.h> 25 26 #include <scsi/scsi.h> 27 #include <scsi/scsi_device.h> 28 #include <scsi/scsi_host.h> 29 #include <scsi/scsi_transport_fc.h> 30 31 #include "lpfc_hw.h" 32 #include "lpfc_sli.h" 33 #include "lpfc_disc.h" 34 #include "lpfc_scsi.h" 35 #include "lpfc.h" 36 #include "lpfc_logmsg.h" 37 #include "lpfc_crtn.h" 38 #include "lpfc_vport.h" 39 #include "lpfc_debugfs.h" 40 41 static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *, 42 struct lpfc_iocbq *); 43 static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *, 44 struct lpfc_iocbq *); 45 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport); 46 static int lpfc_issue_els_fdisc(struct lpfc_vport *vport, 47 struct lpfc_nodelist *ndlp, uint8_t retry); 48 static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba, 49 struct lpfc_iocbq *iocb); 50 static void lpfc_register_new_vport(struct lpfc_hba *phba, 51 struct lpfc_vport *vport, 52 struct lpfc_nodelist *ndlp); 53 54 static int lpfc_max_els_tries = 3; 55 56 int 57 lpfc_els_chk_latt(struct lpfc_vport *vport) 58 { 59 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 60 struct lpfc_hba *phba = vport->phba; 61 uint32_t ha_copy; 62 63 if (vport->port_state >= LPFC_VPORT_READY || 64 phba->link_state == LPFC_LINK_DOWN) 65 return 0; 66 67 /* Read the HBA Host Attention Register */ 68 ha_copy = readl(phba->HAregaddr); 69 70 if (!(ha_copy & HA_LATT)) 71 return 0; 72 73 /* Pending Link Event during Discovery */ 74 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 75 "0237 Pending Link Event during " 76 "Discovery: State x%x\n", 77 phba->pport->port_state); 78 79 /* CLEAR_LA should re-enable link attention events and 80 * we should then imediately take a LATT event. The 81 * LATT processing should call lpfc_linkdown() which 82 * will cleanup any left over in-progress discovery 83 * events. 
84 */ 85 spin_lock_irq(shost->host_lock); 86 vport->fc_flag |= FC_ABORT_DISCOVERY; 87 spin_unlock_irq(shost->host_lock); 88 89 if (phba->link_state != LPFC_CLEAR_LA) 90 lpfc_issue_clear_la(phba, vport); 91 92 return 1; 93 } 94 95 static struct lpfc_iocbq * 96 lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, 97 uint16_t cmdSize, uint8_t retry, 98 struct lpfc_nodelist *ndlp, uint32_t did, 99 uint32_t elscmd) 100 { 101 struct lpfc_hba *phba = vport->phba; 102 struct lpfc_iocbq *elsiocb; 103 struct lpfc_dmabuf *pcmd, *prsp, *pbuflist; 104 struct ulp_bde64 *bpl; 105 IOCB_t *icmd; 106 107 108 if (!lpfc_is_link_up(phba)) 109 return NULL; 110 111 /* Allocate buffer for command iocb */ 112 elsiocb = lpfc_sli_get_iocbq(phba); 113 114 if (elsiocb == NULL) 115 return NULL; 116 icmd = &elsiocb->iocb; 117 118 /* fill in BDEs for command */ 119 /* Allocate buffer for command payload */ 120 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 121 if (pcmd) 122 pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys); 123 if (!pcmd || !pcmd->virt) 124 goto els_iocb_free_pcmb_exit; 125 126 INIT_LIST_HEAD(&pcmd->list); 127 128 /* Allocate buffer for response payload */ 129 if (expectRsp) { 130 prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 131 if (prsp) 132 prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 133 &prsp->phys); 134 if (!prsp || !prsp->virt) 135 goto els_iocb_free_prsp_exit; 136 INIT_LIST_HEAD(&prsp->list); 137 } else { 138 prsp = NULL; 139 } 140 141 /* Allocate buffer for Buffer ptr list */ 142 pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 143 if (pbuflist) 144 pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 145 &pbuflist->phys); 146 if (!pbuflist || !pbuflist->virt) 147 goto els_iocb_free_pbuf_exit; 148 149 INIT_LIST_HEAD(&pbuflist->list); 150 151 icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys); 152 icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys); 153 icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BDL; 154 icmd->un.elsreq64.remoteID = did; /* DID */ 155 if (expectRsp) { 156 icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64)); 157 icmd->ulpCommand = CMD_ELS_REQUEST64_CR; 158 icmd->ulpTimeout = phba->fc_ratov * 2; 159 } else { 160 icmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64); 161 icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX; 162 } 163 icmd->ulpBdeCount = 1; 164 icmd->ulpLe = 1; 165 icmd->ulpClass = CLASS3; 166 167 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { 168 icmd->un.elsreq64.myID = vport->fc_myDID; 169 170 /* For ELS_REQUEST64_CR, use the VPI by default */ 171 icmd->ulpContext = vport->vpi; 172 icmd->ulpCt_h = 0; 173 icmd->ulpCt_l = 1; 174 } 175 176 bpl = (struct ulp_bde64 *) pbuflist->virt; 177 bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys)); 178 bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys)); 179 bpl->tus.f.bdeSize = cmdSize; 180 bpl->tus.f.bdeFlags = 0; 181 bpl->tus.w = le32_to_cpu(bpl->tus.w); 182 183 if (expectRsp) { 184 bpl++; 185 bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys)); 186 bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys)); 187 bpl->tus.f.bdeSize = FCELSSIZE; 188 bpl->tus.f.bdeFlags = BUFF_USE_RCV; 189 bpl->tus.w = le32_to_cpu(bpl->tus.w); 190 } 191 192 /* prevent preparing iocb with NULL ndlp reference */ 193 elsiocb->context1 = lpfc_nlp_get(ndlp); 194 if (!elsiocb->context1) 195 goto els_iocb_free_pbuf_exit; 196 elsiocb->context2 = pcmd; 197 elsiocb->context3 = pbuflist; 198 elsiocb->retry = retry; 199 elsiocb->vport = vport; 200 elsiocb->drvrTimeout = (phba->fc_ratov << 1) + 
LPFC_DRVR_TIMEOUT; 201 202 if (prsp) { 203 list_add(&prsp->list, &pcmd->list); 204 } 205 if (expectRsp) { 206 /* Xmit ELS command <elsCmd> to remote NPORT <did> */ 207 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 208 "0116 Xmit ELS command x%x to remote " 209 "NPORT x%x I/O tag: x%x, port state: x%x\n", 210 elscmd, did, elsiocb->iotag, 211 vport->port_state); 212 } else { 213 /* Xmit ELS response <elsCmd> to remote NPORT <did> */ 214 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 215 "0117 Xmit ELS response x%x to remote " 216 "NPORT x%x I/O tag: x%x, size: x%x\n", 217 elscmd, ndlp->nlp_DID, elsiocb->iotag, 218 cmdSize); 219 } 220 return elsiocb; 221 222 els_iocb_free_pbuf_exit: 223 lpfc_mbuf_free(phba, prsp->virt, prsp->phys); 224 kfree(pbuflist); 225 226 els_iocb_free_prsp_exit: 227 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys); 228 kfree(prsp); 229 230 els_iocb_free_pcmb_exit: 231 kfree(pcmd); 232 lpfc_sli_release_iocbq(phba, elsiocb); 233 return NULL; 234 } 235 236 static int 237 lpfc_issue_fabric_reglogin(struct lpfc_vport *vport) 238 { 239 struct lpfc_hba *phba = vport->phba; 240 LPFC_MBOXQ_t *mbox; 241 struct lpfc_dmabuf *mp; 242 struct lpfc_nodelist *ndlp; 243 struct serv_parm *sp; 244 int rc; 245 int err = 0; 246 247 sp = &phba->fc_fabparam; 248 ndlp = lpfc_findnode_did(vport, Fabric_DID); 249 if (!ndlp) { 250 err = 1; 251 goto fail; 252 } 253 254 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 255 if (!mbox) { 256 err = 2; 257 goto fail; 258 } 259 260 vport->port_state = LPFC_FABRIC_CFG_LINK; 261 lpfc_config_link(phba, mbox); 262 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 263 mbox->vport = vport; 264 265 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 266 if (rc == MBX_NOT_FINISHED) { 267 err = 3; 268 goto fail_free_mbox; 269 } 270 271 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 272 if (!mbox) { 273 err = 4; 274 goto fail; 275 } 276 rc = lpfc_reg_login(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, 277 0); 278 if (rc) { 279 err = 5; 280 goto fail_free_mbox; 281 } 282 283 mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login; 284 mbox->vport = vport; 285 mbox->context2 = lpfc_nlp_get(ndlp); 286 287 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 288 if (rc == MBX_NOT_FINISHED) { 289 err = 6; 290 goto fail_issue_reg_login; 291 } 292 293 return 0; 294 295 fail_issue_reg_login: 296 lpfc_nlp_put(ndlp); 297 mp = (struct lpfc_dmabuf *) mbox->context1; 298 lpfc_mbuf_free(phba, mp->virt, mp->phys); 299 kfree(mp); 300 fail_free_mbox: 301 mempool_free(mbox, phba->mbox_mem_pool); 302 303 fail: 304 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 305 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 306 "0249 Cannot issue Register Fabric login: Err %d\n", err); 307 return -ENXIO; 308 } 309 310 static int 311 lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 312 struct serv_parm *sp, IOCB_t *irsp) 313 { 314 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 315 struct lpfc_hba *phba = vport->phba; 316 struct lpfc_nodelist *np; 317 struct lpfc_nodelist *next_np; 318 319 spin_lock_irq(shost->host_lock); 320 vport->fc_flag |= FC_FABRIC; 321 spin_unlock_irq(shost->host_lock); 322 323 phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov); 324 if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */ 325 phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000; 326 327 phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000; 328 329 if (phba->fc_topology == TOPOLOGY_LOOP) { 330 spin_lock_irq(shost->host_lock); 331 vport->fc_flag |= FC_PUBLIC_LOOP; 332 
spin_unlock_irq(shost->host_lock); 333 } else { 334 /* 335 * If we are a N-port connected to a Fabric, fixup sparam's so 336 * logins to devices on remote loops work. 337 */ 338 vport->fc_sparam.cmn.altBbCredit = 1; 339 } 340 341 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID; 342 memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name)); 343 memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name)); 344 ndlp->nlp_class_sup = 0; 345 if (sp->cls1.classValid) 346 ndlp->nlp_class_sup |= FC_COS_CLASS1; 347 if (sp->cls2.classValid) 348 ndlp->nlp_class_sup |= FC_COS_CLASS2; 349 if (sp->cls3.classValid) 350 ndlp->nlp_class_sup |= FC_COS_CLASS3; 351 if (sp->cls4.classValid) 352 ndlp->nlp_class_sup |= FC_COS_CLASS4; 353 ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | 354 sp->cmn.bbRcvSizeLsb; 355 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); 356 357 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { 358 if (sp->cmn.response_multiple_NPort) { 359 lpfc_printf_vlog(vport, KERN_WARNING, 360 LOG_ELS | LOG_VPORT, 361 "1816 FLOGI NPIV supported, " 362 "response data 0x%x\n", 363 sp->cmn.response_multiple_NPort); 364 phba->link_flag |= LS_NPIV_FAB_SUPPORTED; 365 } else { 366 /* Because we asked f/w for NPIV it still expects us 367 to call reg_vnpid atleast for the physcial host */ 368 lpfc_printf_vlog(vport, KERN_WARNING, 369 LOG_ELS | LOG_VPORT, 370 "1817 Fabric does not support NPIV " 371 "- configuring single port mode.\n"); 372 phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED; 373 } 374 } 375 376 if ((vport->fc_prevDID != vport->fc_myDID) && 377 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 378 379 /* If our NportID changed, we need to ensure all 380 * remaining NPORTs get unreg_login'ed. 381 */ 382 list_for_each_entry_safe(np, next_np, 383 &vport->fc_nodes, nlp_listp) { 384 if ((np->nlp_state != NLP_STE_NPR_NODE) || 385 !(np->nlp_flag & NLP_NPR_ADISC)) 386 continue; 387 spin_lock_irq(shost->host_lock); 388 np->nlp_flag &= ~NLP_NPR_ADISC; 389 spin_unlock_irq(shost->host_lock); 390 lpfc_unreg_rpi(vport, np); 391 } 392 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { 393 lpfc_mbx_unreg_vpi(vport); 394 spin_lock_irq(shost->host_lock); 395 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 396 spin_unlock_irq(shost->host_lock); 397 } 398 } 399 400 lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE); 401 402 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED && 403 vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) { 404 lpfc_register_new_vport(phba, vport, ndlp); 405 return 0; 406 } 407 lpfc_issue_fabric_reglogin(vport); 408 return 0; 409 } 410 411 /* 412 * We FLOGIed into an NPort, initiate pt2pt protocol 413 */ 414 static int 415 lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 416 struct serv_parm *sp) 417 { 418 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 419 struct lpfc_hba *phba = vport->phba; 420 LPFC_MBOXQ_t *mbox; 421 int rc; 422 423 spin_lock_irq(shost->host_lock); 424 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 425 spin_unlock_irq(shost->host_lock); 426 427 phba->fc_edtov = FF_DEF_EDTOV; 428 phba->fc_ratov = FF_DEF_RATOV; 429 rc = memcmp(&vport->fc_portname, &sp->portName, 430 sizeof(vport->fc_portname)); 431 if (rc >= 0) { 432 /* This side will initiate the PLOGI */ 433 spin_lock_irq(shost->host_lock); 434 vport->fc_flag |= FC_PT2PT_PLOGI; 435 spin_unlock_irq(shost->host_lock); 436 437 /* 438 * N_Port ID cannot be 0, set our to LocalID the other 439 * side will be RemoteID. 
440 */ 441 442 /* not equal */ 443 if (rc) 444 vport->fc_myDID = PT2PT_LocalID; 445 446 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 447 if (!mbox) 448 goto fail; 449 450 lpfc_config_link(phba, mbox); 451 452 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 453 mbox->vport = vport; 454 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 455 if (rc == MBX_NOT_FINISHED) { 456 mempool_free(mbox, phba->mbox_mem_pool); 457 goto fail; 458 } 459 lpfc_nlp_put(ndlp); 460 461 ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID); 462 if (!ndlp) { 463 /* 464 * Cannot find existing Fabric ndlp, so allocate a 465 * new one 466 */ 467 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 468 if (!ndlp) 469 goto fail; 470 471 lpfc_nlp_init(vport, ndlp, PT2PT_RemoteID); 472 } 473 474 memcpy(&ndlp->nlp_portname, &sp->portName, 475 sizeof(struct lpfc_name)); 476 memcpy(&ndlp->nlp_nodename, &sp->nodeName, 477 sizeof(struct lpfc_name)); 478 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 479 spin_lock_irq(shost->host_lock); 480 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 481 spin_unlock_irq(shost->host_lock); 482 } else { 483 /* This side will wait for the PLOGI */ 484 lpfc_nlp_put(ndlp); 485 } 486 487 /* If we are pt2pt with another NPort, force NPIV off! */ 488 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED; 489 490 spin_lock_irq(shost->host_lock); 491 vport->fc_flag |= FC_PT2PT; 492 spin_unlock_irq(shost->host_lock); 493 494 /* Start discovery - this should just do CLEAR_LA */ 495 lpfc_disc_start(vport); 496 return 0; 497 fail: 498 return -ENXIO; 499 } 500 501 static void 502 lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 503 struct lpfc_iocbq *rspiocb) 504 { 505 struct lpfc_vport *vport = cmdiocb->vport; 506 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 507 IOCB_t *irsp = &rspiocb->iocb; 508 struct lpfc_nodelist *ndlp = cmdiocb->context1; 509 struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp; 510 struct serv_parm *sp; 511 int rc; 512 513 /* Check to see if link went down during discovery */ 514 if (lpfc_els_chk_latt(vport)) { 515 /* One additional decrement on node reference count to 516 * trigger the release of the node 517 */ 518 lpfc_nlp_put(ndlp); 519 goto out; 520 } 521 522 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 523 "FLOGI cmpl: status:x%x/x%x state:x%x", 524 irsp->ulpStatus, irsp->un.ulpWord[4], 525 vport->port_state); 526 527 if (irsp->ulpStatus) { 528 /* Check for retry */ 529 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) 530 goto out; 531 532 /* FLOGI failed, so there is no fabric */ 533 spin_lock_irq(shost->host_lock); 534 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 535 spin_unlock_irq(shost->host_lock); 536 537 /* If private loop, then allow max outstanding els to be 538 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no 539 * alpa map would take too long otherwise. 540 */ 541 if (phba->alpa_map[0] == 0) { 542 vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS; 543 } 544 545 /* FLOGI failure */ 546 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 547 "0100 FLOGI failure Data: x%x x%x " 548 "x%x\n", 549 irsp->ulpStatus, irsp->un.ulpWord[4], 550 irsp->ulpTimeout); 551 goto flogifail; 552 } 553 554 /* 555 * The FLogI succeeded. Sync the data for the CPU before 556 * accessing it. 
557 */ 558 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 559 560 sp = prsp->virt + sizeof(uint32_t); 561 562 /* FLOGI completes successfully */ 563 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 564 "0101 FLOGI completes sucessfully " 565 "Data: x%x x%x x%x x%x\n", 566 irsp->un.ulpWord[4], sp->cmn.e_d_tov, 567 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution); 568 569 if (vport->port_state == LPFC_FLOGI) { 570 /* 571 * If Common Service Parameters indicate Nport 572 * we are point to point, if Fport we are Fabric. 573 */ 574 if (sp->cmn.fPort) 575 rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp); 576 else 577 rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp); 578 579 if (!rc) 580 goto out; 581 } 582 583 flogifail: 584 lpfc_nlp_put(ndlp); 585 586 if (!lpfc_error_lost_link(irsp)) { 587 /* FLOGI failed, so just use loop map to make discovery list */ 588 lpfc_disc_list_loopmap(vport); 589 590 /* Start discovery */ 591 lpfc_disc_start(vport); 592 } else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) || 593 ((irsp->un.ulpWord[4] != IOERR_SLI_ABORTED) && 594 (irsp->un.ulpWord[4] != IOERR_SLI_DOWN))) && 595 (phba->link_state != LPFC_CLEAR_LA)) { 596 /* If FLOGI failed enable link interrupt. */ 597 lpfc_issue_clear_la(phba, vport); 598 } 599 out: 600 lpfc_els_free_iocb(phba, cmdiocb); 601 } 602 603 static int 604 lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 605 uint8_t retry) 606 { 607 struct lpfc_hba *phba = vport->phba; 608 struct serv_parm *sp; 609 IOCB_t *icmd; 610 struct lpfc_iocbq *elsiocb; 611 struct lpfc_sli_ring *pring; 612 uint8_t *pcmd; 613 uint16_t cmdsize; 614 uint32_t tmo; 615 int rc; 616 617 pring = &phba->sli.ring[LPFC_ELS_RING]; 618 619 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 620 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 621 ndlp->nlp_DID, ELS_CMD_FLOGI); 622 623 if (!elsiocb) 624 return 1; 625 626 icmd = &elsiocb->iocb; 627 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 628 629 /* For FLOGI request, remainder of payload is service parameters */ 630 *((uint32_t *) (pcmd)) = ELS_CMD_FLOGI; 631 pcmd += sizeof(uint32_t); 632 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); 633 sp = (struct serv_parm *) pcmd; 634 635 /* Setup CSPs accordingly for Fabric */ 636 sp->cmn.e_d_tov = 0; 637 sp->cmn.w2.r_a_tov = 0; 638 sp->cls1.classValid = 0; 639 sp->cls2.seqDelivery = 1; 640 sp->cls3.seqDelivery = 1; 641 if (sp->cmn.fcphLow < FC_PH3) 642 sp->cmn.fcphLow = FC_PH3; 643 if (sp->cmn.fcphHigh < FC_PH3) 644 sp->cmn.fcphHigh = FC_PH3; 645 646 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { 647 sp->cmn.request_multiple_Nport = 1; 648 649 /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */ 650 icmd->ulpCt_h = 1; 651 icmd->ulpCt_l = 0; 652 } 653 654 if (phba->fc_topology != TOPOLOGY_LOOP) { 655 icmd->un.elsreq64.myID = 0; 656 icmd->un.elsreq64.fl = 1; 657 } 658 659 tmo = phba->fc_ratov; 660 phba->fc_ratov = LPFC_DISC_FLOGI_TMO; 661 lpfc_set_disctmo(vport); 662 phba->fc_ratov = tmo; 663 664 phba->fc_stat.elsXmitFLOGI++; 665 elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi; 666 667 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 668 "Issue FLOGI: opt:x%x", 669 phba->sli3_options, 0, 0); 670 671 rc = lpfc_issue_fabric_iocb(phba, elsiocb); 672 if (rc == IOCB_ERROR) { 673 lpfc_els_free_iocb(phba, elsiocb); 674 return 1; 675 } 676 return 0; 677 } 678 679 int 680 lpfc_els_abort_flogi(struct lpfc_hba *phba) 681 { 682 struct lpfc_sli_ring *pring; 683 struct lpfc_iocbq *iocb, *next_iocb; 684 
struct lpfc_nodelist *ndlp; 685 IOCB_t *icmd; 686 687 /* Abort outstanding I/O on NPort <nlp_DID> */ 688 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 689 "0201 Abort outstanding I/O on NPort x%x\n", 690 Fabric_DID); 691 692 pring = &phba->sli.ring[LPFC_ELS_RING]; 693 694 /* 695 * Check the txcmplq for an iocb that matches the nport the driver is 696 * searching for. 697 */ 698 spin_lock_irq(&phba->hbalock); 699 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { 700 icmd = &iocb->iocb; 701 if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR && 702 icmd->un.elsreq64.bdl.ulpIoTag32) { 703 ndlp = (struct lpfc_nodelist *)(iocb->context1); 704 if (ndlp && (ndlp->nlp_DID == Fabric_DID)) { 705 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 706 } 707 } 708 } 709 spin_unlock_irq(&phba->hbalock); 710 711 return 0; 712 } 713 714 int 715 lpfc_initial_flogi(struct lpfc_vport *vport) 716 { 717 struct lpfc_hba *phba = vport->phba; 718 struct lpfc_nodelist *ndlp; 719 720 vport->port_state = LPFC_FLOGI; 721 lpfc_set_disctmo(vport); 722 723 /* First look for the Fabric ndlp */ 724 ndlp = lpfc_findnode_did(vport, Fabric_DID); 725 if (!ndlp) { 726 /* Cannot find existing Fabric ndlp, so allocate a new one */ 727 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 728 if (!ndlp) 729 return 0; 730 lpfc_nlp_init(vport, ndlp, Fabric_DID); 731 } else { 732 lpfc_dequeue_node(vport, ndlp); 733 } 734 735 if (lpfc_issue_els_flogi(vport, ndlp, 0)) { 736 /* This decrement of reference count to node shall kick off 737 * the release of the node. 738 */ 739 lpfc_nlp_put(ndlp); 740 } 741 return 1; 742 } 743 744 int 745 lpfc_initial_fdisc(struct lpfc_vport *vport) 746 { 747 struct lpfc_hba *phba = vport->phba; 748 struct lpfc_nodelist *ndlp; 749 750 /* First look for the Fabric ndlp */ 751 ndlp = lpfc_findnode_did(vport, Fabric_DID); 752 if (!ndlp) { 753 /* Cannot find existing Fabric ndlp, so allocate a new one */ 754 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 755 if (!ndlp) 756 return 0; 757 lpfc_nlp_init(vport, ndlp, Fabric_DID); 758 } else { 759 lpfc_dequeue_node(vport, ndlp); 760 } 761 if (lpfc_issue_els_fdisc(vport, ndlp, 0)) { 762 /* decrement node reference count to trigger the release of 763 * the node. 764 */ 765 lpfc_nlp_put(ndlp); 766 return 0; 767 } 768 return 1; 769 } 770 771 void 772 lpfc_more_plogi(struct lpfc_vport *vport) 773 { 774 int sentplogi; 775 776 if (vport->num_disc_nodes) 777 vport->num_disc_nodes--; 778 779 /* Continue discovery with <num_disc_nodes> PLOGIs to go */ 780 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 781 "0232 Continue discovery with %d PLOGIs to go " 782 "Data: x%x x%x x%x\n", 783 vport->num_disc_nodes, vport->fc_plogi_cnt, 784 vport->fc_flag, vport->port_state); 785 /* Check to see if there are more PLOGIs to be sent */ 786 if (vport->fc_flag & FC_NLP_MORE) 787 /* go thru NPR nodes and issue any remaining ELS PLOGIs */ 788 sentplogi = lpfc_els_disc_plogi(vport); 789 790 return; 791 } 792 793 static struct lpfc_nodelist * 794 lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, 795 struct lpfc_nodelist *ndlp) 796 { 797 struct lpfc_vport *vport = ndlp->vport; 798 struct lpfc_nodelist *new_ndlp; 799 struct lpfc_rport_data *rdata; 800 struct fc_rport *rport; 801 struct serv_parm *sp; 802 uint8_t name[sizeof(struct lpfc_name)]; 803 uint32_t rc; 804 805 /* Fabric nodes can have the same WWPN so we don't bother searching 806 * by WWPN. Just return the ndlp that was given to us. 
807 */ 808 if (ndlp->nlp_type & NLP_FABRIC) 809 return ndlp; 810 811 sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t)); 812 memset(name, 0, sizeof(struct lpfc_name)); 813 814 /* Now we find out if the NPort we are logging into, matches the WWPN 815 * we have for that ndlp. If not, we have some work to do. 816 */ 817 new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName); 818 819 if (new_ndlp == ndlp) 820 return ndlp; 821 822 if (!new_ndlp) { 823 rc = memcmp(&ndlp->nlp_portname, name, 824 sizeof(struct lpfc_name)); 825 if (!rc) 826 return ndlp; 827 new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC); 828 if (!new_ndlp) 829 return ndlp; 830 831 lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID); 832 } 833 834 lpfc_unreg_rpi(vport, new_ndlp); 835 new_ndlp->nlp_DID = ndlp->nlp_DID; 836 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state; 837 838 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) 839 new_ndlp->nlp_flag |= NLP_NPR_2B_DISC; 840 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 841 842 lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state); 843 844 /* Move this back to NPR state */ 845 if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) { 846 /* The new_ndlp is replacing ndlp totally, so we need 847 * to put ndlp on UNUSED list and try to free it. 848 */ 849 850 /* Fix up the rport accordingly */ 851 rport = ndlp->rport; 852 if (rport) { 853 rdata = rport->dd_data; 854 if (rdata->pnode == ndlp) { 855 lpfc_nlp_put(ndlp); 856 ndlp->rport = NULL; 857 rdata->pnode = lpfc_nlp_get(new_ndlp); 858 new_ndlp->rport = rport; 859 } 860 new_ndlp->nlp_type = ndlp->nlp_type; 861 } 862 863 lpfc_drop_node(vport, ndlp); 864 } 865 else { 866 lpfc_unreg_rpi(vport, ndlp); 867 ndlp->nlp_DID = 0; /* Two ndlps cannot have the same did */ 868 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 869 } 870 return new_ndlp; 871 } 872 873 void 874 lpfc_end_rscn(struct lpfc_vport *vport) 875 { 876 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 877 878 if (vport->fc_flag & FC_RSCN_MODE) { 879 /* 880 * Check to see if more RSCNs came in while we were 881 * processing this one. 882 */ 883 if (vport->fc_rscn_id_cnt || 884 (vport->fc_flag & FC_RSCN_DISCOVERY) != 0) 885 lpfc_els_handle_rscn(vport); 886 else { 887 spin_lock_irq(shost->host_lock); 888 vport->fc_flag &= ~FC_RSCN_MODE; 889 spin_unlock_irq(shost->host_lock); 890 } 891 } 892 } 893 894 static void 895 lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 896 struct lpfc_iocbq *rspiocb) 897 { 898 struct lpfc_vport *vport = cmdiocb->vport; 899 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 900 IOCB_t *irsp; 901 struct lpfc_nodelist *ndlp; 902 struct lpfc_dmabuf *prsp; 903 int disc, rc, did, type; 904 905 /* we pass cmdiocb to state machine which needs rspiocb as well */ 906 cmdiocb->context_un.rsp_iocb = rspiocb; 907 908 irsp = &rspiocb->iocb; 909 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 910 "PLOGI cmpl: status:x%x/x%x did:x%x", 911 irsp->ulpStatus, irsp->un.ulpWord[4], 912 irsp->un.elsreq64.remoteID); 913 914 ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID); 915 if (!ndlp) { 916 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 917 "0136 PLOGI completes to NPort x%x " 918 "with no ndlp. Data: x%x x%x x%x\n", 919 irsp->un.elsreq64.remoteID, 920 irsp->ulpStatus, irsp->un.ulpWord[4], 921 irsp->ulpIoTag); 922 goto out; 923 } 924 925 /* Since ndlp can be freed in the disc state machine, note if this node 926 * is being used during discovery. 
927 */ 928 spin_lock_irq(shost->host_lock); 929 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 930 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 931 spin_unlock_irq(shost->host_lock); 932 rc = 0; 933 934 /* PLOGI completes to NPort <nlp_DID> */ 935 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 936 "0102 PLOGI completes to NPort x%x " 937 "Data: x%x x%x x%x x%x x%x\n", 938 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 939 irsp->ulpTimeout, disc, vport->num_disc_nodes); 940 /* Check to see if link went down during discovery */ 941 if (lpfc_els_chk_latt(vport)) { 942 spin_lock_irq(shost->host_lock); 943 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 944 spin_unlock_irq(shost->host_lock); 945 goto out; 946 } 947 948 /* ndlp could be freed in DSM, save these values now */ 949 type = ndlp->nlp_type; 950 did = ndlp->nlp_DID; 951 952 if (irsp->ulpStatus) { 953 /* Check for retry */ 954 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 955 /* ELS command is being retried */ 956 if (disc) { 957 spin_lock_irq(shost->host_lock); 958 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 959 spin_unlock_irq(shost->host_lock); 960 } 961 goto out; 962 } 963 /* PLOGI failed */ 964 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 965 if (lpfc_error_lost_link(irsp)) { 966 rc = NLP_STE_FREED_NODE; 967 } else { 968 rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb, 969 NLP_EVT_CMPL_PLOGI); 970 } 971 } else { 972 /* Good status, call state machine */ 973 prsp = list_entry(((struct lpfc_dmabuf *) 974 cmdiocb->context2)->list.next, 975 struct lpfc_dmabuf, list); 976 ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp); 977 rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb, 978 NLP_EVT_CMPL_PLOGI); 979 } 980 981 if (disc && vport->num_disc_nodes) { 982 /* Check to see if there are more PLOGIs to be sent */ 983 lpfc_more_plogi(vport); 984 985 if (vport->num_disc_nodes == 0) { 986 spin_lock_irq(shost->host_lock); 987 vport->fc_flag &= ~FC_NDISC_ACTIVE; 988 spin_unlock_irq(shost->host_lock); 989 990 lpfc_can_disctmo(vport); 991 lpfc_end_rscn(vport); 992 } 993 } 994 995 out: 996 lpfc_els_free_iocb(phba, cmdiocb); 997 return; 998 } 999 1000 int 1001 lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) 1002 { 1003 struct lpfc_hba *phba = vport->phba; 1004 struct serv_parm *sp; 1005 IOCB_t *icmd; 1006 struct lpfc_nodelist *ndlp; 1007 struct lpfc_iocbq *elsiocb; 1008 struct lpfc_sli_ring *pring; 1009 struct lpfc_sli *psli; 1010 uint8_t *pcmd; 1011 uint16_t cmdsize; 1012 int ret; 1013 1014 psli = &phba->sli; 1015 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ 1016 1017 ndlp = lpfc_findnode_did(vport, did); 1018 /* If ndlp if not NULL, we will bump the reference count on it */ 1019 1020 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 1021 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 1022 ELS_CMD_PLOGI); 1023 if (!elsiocb) 1024 return 1; 1025 1026 icmd = &elsiocb->iocb; 1027 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 1028 1029 /* For PLOGI request, remainder of payload is service parameters */ 1030 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI; 1031 pcmd += sizeof(uint32_t); 1032 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); 1033 sp = (struct serv_parm *) pcmd; 1034 1035 if (sp->cmn.fcphLow < FC_PH_4_3) 1036 sp->cmn.fcphLow = FC_PH_4_3; 1037 1038 if (sp->cmn.fcphHigh < FC_PH3) 1039 sp->cmn.fcphHigh = FC_PH3; 1040 1041 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1042 "Issue PLOGI: did:x%x", 1043 did, 0, 0); 1044 1045 phba->fc_stat.elsXmitPLOGI++; 1046 
elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi; 1047 ret = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 1048 1049 if (ret == IOCB_ERROR) { 1050 lpfc_els_free_iocb(phba, elsiocb); 1051 return 1; 1052 } 1053 return 0; 1054 } 1055 1056 static void 1057 lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1058 struct lpfc_iocbq *rspiocb) 1059 { 1060 struct lpfc_vport *vport = cmdiocb->vport; 1061 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1062 IOCB_t *irsp; 1063 struct lpfc_sli *psli; 1064 struct lpfc_nodelist *ndlp; 1065 1066 psli = &phba->sli; 1067 /* we pass cmdiocb to state machine which needs rspiocb as well */ 1068 cmdiocb->context_un.rsp_iocb = rspiocb; 1069 1070 irsp = &(rspiocb->iocb); 1071 ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 1072 spin_lock_irq(shost->host_lock); 1073 ndlp->nlp_flag &= ~NLP_PRLI_SND; 1074 spin_unlock_irq(shost->host_lock); 1075 1076 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1077 "PRLI cmpl: status:x%x/x%x did:x%x", 1078 irsp->ulpStatus, irsp->un.ulpWord[4], 1079 ndlp->nlp_DID); 1080 /* PRLI completes to NPort <nlp_DID> */ 1081 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1082 "0103 PRLI completes to NPort x%x " 1083 "Data: x%x x%x x%x x%x\n", 1084 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 1085 irsp->ulpTimeout, vport->num_disc_nodes); 1086 1087 vport->fc_prli_sent--; 1088 /* Check to see if link went down during discovery */ 1089 if (lpfc_els_chk_latt(vport)) 1090 goto out; 1091 1092 if (irsp->ulpStatus) { 1093 /* Check for retry */ 1094 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 1095 /* ELS command is being retried */ 1096 goto out; 1097 } 1098 /* PRLI failed */ 1099 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 1100 if (lpfc_error_lost_link(irsp)) { 1101 goto out; 1102 } else { 1103 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1104 NLP_EVT_CMPL_PRLI); 1105 } 1106 } else { 1107 /* Good status, call state machine */ 1108 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1109 NLP_EVT_CMPL_PRLI); 1110 } 1111 1112 out: 1113 lpfc_els_free_iocb(phba, cmdiocb); 1114 return; 1115 } 1116 1117 int 1118 lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1119 uint8_t retry) 1120 { 1121 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1122 struct lpfc_hba *phba = vport->phba; 1123 PRLI *npr; 1124 IOCB_t *icmd; 1125 struct lpfc_iocbq *elsiocb; 1126 struct lpfc_sli_ring *pring; 1127 struct lpfc_sli *psli; 1128 uint8_t *pcmd; 1129 uint16_t cmdsize; 1130 1131 psli = &phba->sli; 1132 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ 1133 1134 cmdsize = (sizeof(uint32_t) + sizeof(PRLI)); 1135 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 1136 ndlp->nlp_DID, ELS_CMD_PRLI); 1137 if (!elsiocb) 1138 return 1; 1139 1140 icmd = &elsiocb->iocb; 1141 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 1142 1143 /* For PRLI request, remainder of payload is service parameters */ 1144 memset(pcmd, 0, (sizeof(PRLI) + sizeof(uint32_t))); 1145 *((uint32_t *) (pcmd)) = ELS_CMD_PRLI; 1146 pcmd += sizeof(uint32_t); 1147 1148 /* For PRLI, remainder of payload is PRLI parameter page */ 1149 npr = (PRLI *) pcmd; 1150 /* 1151 * If our firmware version is 3.20 or later, 1152 * set the following bits for FC-TAPE support. 
1153 */ 1154 if (phba->vpd.rev.feaLevelHigh >= 0x02) { 1155 npr->ConfmComplAllowed = 1; 1156 npr->Retry = 1; 1157 npr->TaskRetryIdReq = 1; 1158 } 1159 npr->estabImagePair = 1; 1160 npr->readXferRdyDis = 1; 1161 1162 /* For FCP support */ 1163 npr->prliType = PRLI_FCP_TYPE; 1164 npr->initiatorFunc = 1; 1165 1166 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1167 "Issue PRLI: did:x%x", 1168 ndlp->nlp_DID, 0, 0); 1169 1170 phba->fc_stat.elsXmitPRLI++; 1171 elsiocb->iocb_cmpl = lpfc_cmpl_els_prli; 1172 spin_lock_irq(shost->host_lock); 1173 ndlp->nlp_flag |= NLP_PRLI_SND; 1174 spin_unlock_irq(shost->host_lock); 1175 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 1176 spin_lock_irq(shost->host_lock); 1177 ndlp->nlp_flag &= ~NLP_PRLI_SND; 1178 spin_unlock_irq(shost->host_lock); 1179 lpfc_els_free_iocb(phba, elsiocb); 1180 return 1; 1181 } 1182 vport->fc_prli_sent++; 1183 return 0; 1184 } 1185 1186 void 1187 lpfc_more_adisc(struct lpfc_vport *vport) 1188 { 1189 int sentadisc; 1190 1191 if (vport->num_disc_nodes) 1192 vport->num_disc_nodes--; 1193 /* Continue discovery with <num_disc_nodes> ADISCs to go */ 1194 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 1195 "0210 Continue discovery with %d ADISCs to go " 1196 "Data: x%x x%x x%x\n", 1197 vport->num_disc_nodes, vport->fc_adisc_cnt, 1198 vport->fc_flag, vport->port_state); 1199 /* Check to see if there are more ADISCs to be sent */ 1200 if (vport->fc_flag & FC_NLP_MORE) { 1201 lpfc_set_disctmo(vport); 1202 /* go thru NPR nodes and issue any remaining ELS ADISCs */ 1203 sentadisc = lpfc_els_disc_adisc(vport); 1204 } 1205 return; 1206 } 1207 1208 static void 1209 lpfc_rscn_disc(struct lpfc_vport *vport) 1210 { 1211 lpfc_can_disctmo(vport); 1212 1213 /* RSCN discovery */ 1214 /* go thru NPR nodes and issue ELS PLOGIs */ 1215 if (vport->fc_npr_cnt) 1216 if (lpfc_els_disc_plogi(vport)) 1217 return; 1218 1219 lpfc_end_rscn(vport); 1220 } 1221 1222 static void 1223 lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1224 struct lpfc_iocbq *rspiocb) 1225 { 1226 struct lpfc_vport *vport = cmdiocb->vport; 1227 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1228 IOCB_t *irsp; 1229 struct lpfc_nodelist *ndlp; 1230 int disc; 1231 1232 /* we pass cmdiocb to state machine which needs rspiocb as well */ 1233 cmdiocb->context_un.rsp_iocb = rspiocb; 1234 1235 irsp = &(rspiocb->iocb); 1236 ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 1237 1238 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1239 "ADISC cmpl: status:x%x/x%x did:x%x", 1240 irsp->ulpStatus, irsp->un.ulpWord[4], 1241 ndlp->nlp_DID); 1242 1243 /* Since ndlp can be freed in the disc state machine, note if this node 1244 * is being used during discovery. 
1245 */ 1246 spin_lock_irq(shost->host_lock); 1247 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 1248 ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC); 1249 spin_unlock_irq(shost->host_lock); 1250 /* ADISC completes to NPort <nlp_DID> */ 1251 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1252 "0104 ADISC completes to NPort x%x " 1253 "Data: x%x x%x x%x x%x x%x\n", 1254 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 1255 irsp->ulpTimeout, disc, vport->num_disc_nodes); 1256 /* Check to see if link went down during discovery */ 1257 if (lpfc_els_chk_latt(vport)) { 1258 spin_lock_irq(shost->host_lock); 1259 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 1260 spin_unlock_irq(shost->host_lock); 1261 goto out; 1262 } 1263 1264 if (irsp->ulpStatus) { 1265 /* Check for retry */ 1266 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 1267 /* ELS command is being retried */ 1268 if (disc) { 1269 spin_lock_irq(shost->host_lock); 1270 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 1271 spin_unlock_irq(shost->host_lock); 1272 lpfc_set_disctmo(vport); 1273 } 1274 goto out; 1275 } 1276 /* ADISC failed */ 1277 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 1278 if (!lpfc_error_lost_link(irsp)) { 1279 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1280 NLP_EVT_CMPL_ADISC); 1281 } 1282 } else { 1283 /* Good status, call state machine */ 1284 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1285 NLP_EVT_CMPL_ADISC); 1286 } 1287 1288 if (disc && vport->num_disc_nodes) { 1289 /* Check to see if there are more ADISCs to be sent */ 1290 lpfc_more_adisc(vport); 1291 1292 /* Check to see if we are done with ADISC authentication */ 1293 if (vport->num_disc_nodes == 0) { 1294 /* If we get here, there is nothing left to ADISC */ 1295 /* 1296 * For NPIV, cmpl_reg_vpi will set port_state to READY, 1297 * and continue discovery. 1298 */ 1299 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 1300 !(vport->fc_flag & FC_RSCN_MODE)) { 1301 lpfc_issue_reg_vpi(phba, vport); 1302 goto out; 1303 } 1304 /* 1305 * For SLI2, we need to set port_state to READY 1306 * and continue discovery. 
1307 */ 1308 if (vport->port_state < LPFC_VPORT_READY) { 1309 /* If we get here, there is nothing to ADISC */ 1310 if (vport->port_type == LPFC_PHYSICAL_PORT) 1311 lpfc_issue_clear_la(phba, vport); 1312 1313 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) { 1314 vport->num_disc_nodes = 0; 1315 /* go thru NPR list, issue ELS PLOGIs */ 1316 if (vport->fc_npr_cnt) 1317 lpfc_els_disc_plogi(vport); 1318 1319 if (!vport->num_disc_nodes) { 1320 spin_lock_irq(shost->host_lock); 1321 vport->fc_flag &= 1322 ~FC_NDISC_ACTIVE; 1323 spin_unlock_irq( 1324 shost->host_lock); 1325 lpfc_can_disctmo(vport); 1326 } 1327 } 1328 vport->port_state = LPFC_VPORT_READY; 1329 } else { 1330 lpfc_rscn_disc(vport); 1331 } 1332 } 1333 } 1334 out: 1335 lpfc_els_free_iocb(phba, cmdiocb); 1336 return; 1337 } 1338 1339 int 1340 lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1341 uint8_t retry) 1342 { 1343 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1344 struct lpfc_hba *phba = vport->phba; 1345 ADISC *ap; 1346 IOCB_t *icmd; 1347 struct lpfc_iocbq *elsiocb; 1348 struct lpfc_sli *psli = &phba->sli; 1349 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; 1350 uint8_t *pcmd; 1351 uint16_t cmdsize; 1352 1353 cmdsize = (sizeof(uint32_t) + sizeof(ADISC)); 1354 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 1355 ndlp->nlp_DID, ELS_CMD_ADISC); 1356 if (!elsiocb) 1357 return 1; 1358 1359 icmd = &elsiocb->iocb; 1360 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 1361 1362 /* For ADISC request, remainder of payload is service parameters */ 1363 *((uint32_t *) (pcmd)) = ELS_CMD_ADISC; 1364 pcmd += sizeof(uint32_t); 1365 1366 /* Fill in ADISC payload */ 1367 ap = (ADISC *) pcmd; 1368 ap->hardAL_PA = phba->fc_pref_ALPA; 1369 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 1370 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 1371 ap->DID = be32_to_cpu(vport->fc_myDID); 1372 1373 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1374 "Issue ADISC: did:x%x", 1375 ndlp->nlp_DID, 0, 0); 1376 1377 phba->fc_stat.elsXmitADISC++; 1378 elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc; 1379 spin_lock_irq(shost->host_lock); 1380 ndlp->nlp_flag |= NLP_ADISC_SND; 1381 spin_unlock_irq(shost->host_lock); 1382 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 1383 spin_lock_irq(shost->host_lock); 1384 ndlp->nlp_flag &= ~NLP_ADISC_SND; 1385 spin_unlock_irq(shost->host_lock); 1386 lpfc_els_free_iocb(phba, elsiocb); 1387 return 1; 1388 } 1389 return 0; 1390 } 1391 1392 static void 1393 lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1394 struct lpfc_iocbq *rspiocb) 1395 { 1396 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 1397 struct lpfc_vport *vport = ndlp->vport; 1398 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1399 IOCB_t *irsp; 1400 struct lpfc_sli *psli; 1401 1402 psli = &phba->sli; 1403 /* we pass cmdiocb to state machine which needs rspiocb as well */ 1404 cmdiocb->context_un.rsp_iocb = rspiocb; 1405 1406 irsp = &(rspiocb->iocb); 1407 spin_lock_irq(shost->host_lock); 1408 ndlp->nlp_flag &= ~NLP_LOGO_SND; 1409 spin_unlock_irq(shost->host_lock); 1410 1411 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1412 "LOGO cmpl: status:x%x/x%x did:x%x", 1413 irsp->ulpStatus, irsp->un.ulpWord[4], 1414 ndlp->nlp_DID); 1415 /* LOGO completes to NPort <nlp_DID> */ 1416 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1417 "0105 LOGO completes to NPort x%x " 1418 "Data: x%x 
x%x x%x x%x\n", 1419 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 1420 irsp->ulpTimeout, vport->num_disc_nodes); 1421 /* Check to see if link went down during discovery */ 1422 if (lpfc_els_chk_latt(vport)) 1423 goto out; 1424 1425 if (ndlp->nlp_flag & NLP_TARGET_REMOVE) { 1426 /* NLP_EVT_DEVICE_RM should unregister the RPI 1427 * which should abort all outstanding IOs. 1428 */ 1429 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1430 NLP_EVT_DEVICE_RM); 1431 goto out; 1432 } 1433 1434 if (irsp->ulpStatus) { 1435 /* Check for retry */ 1436 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) 1437 /* ELS command is being retried */ 1438 goto out; 1439 /* LOGO failed */ 1440 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 1441 if (lpfc_error_lost_link(irsp)) 1442 goto out; 1443 else 1444 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1445 NLP_EVT_CMPL_LOGO); 1446 } else { 1447 /* Good status, call state machine. 1448 * This will unregister the rpi if needed. 1449 */ 1450 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1451 NLP_EVT_CMPL_LOGO); 1452 } 1453 1454 out: 1455 lpfc_els_free_iocb(phba, cmdiocb); 1456 return; 1457 } 1458 1459 int 1460 lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1461 uint8_t retry) 1462 { 1463 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1464 struct lpfc_hba *phba = vport->phba; 1465 IOCB_t *icmd; 1466 struct lpfc_iocbq *elsiocb; 1467 struct lpfc_sli_ring *pring; 1468 struct lpfc_sli *psli; 1469 uint8_t *pcmd; 1470 uint16_t cmdsize; 1471 int rc; 1472 1473 psli = &phba->sli; 1474 pring = &psli->ring[LPFC_ELS_RING]; 1475 1476 spin_lock_irq(shost->host_lock); 1477 if (ndlp->nlp_flag & NLP_LOGO_SND) { 1478 spin_unlock_irq(shost->host_lock); 1479 return 0; 1480 } 1481 spin_unlock_irq(shost->host_lock); 1482 1483 cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name); 1484 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 1485 ndlp->nlp_DID, ELS_CMD_LOGO); 1486 if (!elsiocb) 1487 return 1; 1488 1489 icmd = &elsiocb->iocb; 1490 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 1491 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 1492 pcmd += sizeof(uint32_t); 1493 1494 /* Fill in LOGO payload */ 1495 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 1496 pcmd += sizeof(uint32_t); 1497 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 1498 1499 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1500 "Issue LOGO: did:x%x", 1501 ndlp->nlp_DID, 0, 0); 1502 1503 phba->fc_stat.elsXmitLOGO++; 1504 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo; 1505 spin_lock_irq(shost->host_lock); 1506 ndlp->nlp_flag |= NLP_LOGO_SND; 1507 spin_unlock_irq(shost->host_lock); 1508 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 1509 1510 if (rc == IOCB_ERROR) { 1511 spin_lock_irq(shost->host_lock); 1512 ndlp->nlp_flag &= ~NLP_LOGO_SND; 1513 spin_unlock_irq(shost->host_lock); 1514 lpfc_els_free_iocb(phba, elsiocb); 1515 return 1; 1516 } 1517 return 0; 1518 } 1519 1520 static void 1521 lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1522 struct lpfc_iocbq *rspiocb) 1523 { 1524 struct lpfc_vport *vport = cmdiocb->vport; 1525 IOCB_t *irsp; 1526 1527 irsp = &rspiocb->iocb; 1528 1529 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1530 "ELS cmd cmpl: status:x%x/x%x did:x%x", 1531 irsp->ulpStatus, irsp->un.ulpWord[4], 1532 irsp->un.elsreq64.remoteID); 1533 /* ELS cmd tag <ulpIoTag> completes */ 1534 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1535 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n", 
1536 irsp->ulpIoTag, irsp->ulpStatus, 1537 irsp->un.ulpWord[4], irsp->ulpTimeout); 1538 /* Check to see if link went down during discovery */ 1539 lpfc_els_chk_latt(vport); 1540 lpfc_els_free_iocb(phba, cmdiocb); 1541 return; 1542 } 1543 1544 int 1545 lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) 1546 { 1547 struct lpfc_hba *phba = vport->phba; 1548 IOCB_t *icmd; 1549 struct lpfc_iocbq *elsiocb; 1550 struct lpfc_sli_ring *pring; 1551 struct lpfc_sli *psli; 1552 uint8_t *pcmd; 1553 uint16_t cmdsize; 1554 struct lpfc_nodelist *ndlp; 1555 1556 psli = &phba->sli; 1557 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ 1558 cmdsize = (sizeof(uint32_t) + sizeof(SCR)); 1559 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 1560 if (!ndlp) 1561 return 1; 1562 1563 lpfc_nlp_init(vport, ndlp, nportid); 1564 1565 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 1566 ndlp->nlp_DID, ELS_CMD_SCR); 1567 1568 if (!elsiocb) { 1569 /* This will trigger the release of the node just 1570 * allocated 1571 */ 1572 lpfc_nlp_put(ndlp); 1573 return 1; 1574 } 1575 1576 icmd = &elsiocb->iocb; 1577 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 1578 1579 *((uint32_t *) (pcmd)) = ELS_CMD_SCR; 1580 pcmd += sizeof(uint32_t); 1581 1582 /* For SCR, remainder of payload is SCR parameter page */ 1583 memset(pcmd, 0, sizeof(SCR)); 1584 ((SCR *) pcmd)->Function = SCR_FUNC_FULL; 1585 1586 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1587 "Issue SCR: did:x%x", 1588 ndlp->nlp_DID, 0, 0); 1589 1590 phba->fc_stat.elsXmitSCR++; 1591 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; 1592 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 1593 /* The additional lpfc_nlp_put will cause the following 1594 * lpfc_els_free_iocb routine to trigger the rlease of 1595 * the node. 1596 */ 1597 lpfc_nlp_put(ndlp); 1598 lpfc_els_free_iocb(phba, elsiocb); 1599 return 1; 1600 } 1601 /* This will cause the callback-function lpfc_cmpl_els_cmd to 1602 * trigger the release of node. 
1603 */ 1604 lpfc_nlp_put(ndlp); 1605 return 0; 1606 } 1607 1608 static int 1609 lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) 1610 { 1611 struct lpfc_hba *phba = vport->phba; 1612 IOCB_t *icmd; 1613 struct lpfc_iocbq *elsiocb; 1614 struct lpfc_sli_ring *pring; 1615 struct lpfc_sli *psli; 1616 FARP *fp; 1617 uint8_t *pcmd; 1618 uint32_t *lp; 1619 uint16_t cmdsize; 1620 struct lpfc_nodelist *ondlp; 1621 struct lpfc_nodelist *ndlp; 1622 1623 psli = &phba->sli; 1624 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ 1625 cmdsize = (sizeof(uint32_t) + sizeof(FARP)); 1626 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 1627 if (!ndlp) 1628 return 1; 1629 1630 lpfc_nlp_init(vport, ndlp, nportid); 1631 1632 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 1633 ndlp->nlp_DID, ELS_CMD_RNID); 1634 if (!elsiocb) { 1635 /* This will trigger the release of the node just 1636 * allocated 1637 */ 1638 lpfc_nlp_put(ndlp); 1639 return 1; 1640 } 1641 1642 icmd = &elsiocb->iocb; 1643 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 1644 1645 *((uint32_t *) (pcmd)) = ELS_CMD_FARPR; 1646 pcmd += sizeof(uint32_t); 1647 1648 /* Fill in FARPR payload */ 1649 fp = (FARP *) (pcmd); 1650 memset(fp, 0, sizeof(FARP)); 1651 lp = (uint32_t *) pcmd; 1652 *lp++ = be32_to_cpu(nportid); 1653 *lp++ = be32_to_cpu(vport->fc_myDID); 1654 fp->Rflags = 0; 1655 fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE); 1656 1657 memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name)); 1658 memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 1659 ondlp = lpfc_findnode_did(vport, nportid); 1660 if (ondlp) { 1661 memcpy(&fp->OportName, &ondlp->nlp_portname, 1662 sizeof(struct lpfc_name)); 1663 memcpy(&fp->OnodeName, &ondlp->nlp_nodename, 1664 sizeof(struct lpfc_name)); 1665 } 1666 1667 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1668 "Issue FARPR: did:x%x", 1669 ndlp->nlp_DID, 0, 0); 1670 1671 phba->fc_stat.elsXmitFARPR++; 1672 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; 1673 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 1674 /* The additional lpfc_nlp_put will cause the following 1675 * lpfc_els_free_iocb routine to trigger the release of 1676 * the node. 1677 */ 1678 lpfc_nlp_put(ndlp); 1679 lpfc_els_free_iocb(phba, elsiocb); 1680 return 1; 1681 } 1682 /* This will cause the callback-function lpfc_cmpl_els_cmd to 1683 * trigger the release of the node. 
1684 */ 1685 lpfc_nlp_put(ndlp); 1686 return 0; 1687 } 1688 1689 void 1690 lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) 1691 { 1692 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1693 1694 spin_lock_irq(shost->host_lock); 1695 nlp->nlp_flag &= ~NLP_DELAY_TMO; 1696 spin_unlock_irq(shost->host_lock); 1697 del_timer_sync(&nlp->nlp_delayfunc); 1698 nlp->nlp_last_elscmd = 0; 1699 1700 if (!list_empty(&nlp->els_retry_evt.evt_listp)) 1701 list_del_init(&nlp->els_retry_evt.evt_listp); 1702 1703 if (nlp->nlp_flag & NLP_NPR_2B_DISC) { 1704 spin_lock_irq(shost->host_lock); 1705 nlp->nlp_flag &= ~NLP_NPR_2B_DISC; 1706 spin_unlock_irq(shost->host_lock); 1707 if (vport->num_disc_nodes) { 1708 /* Check to see if there are more 1709 * PLOGIs to be sent 1710 */ 1711 lpfc_more_plogi(vport); 1712 1713 if (vport->num_disc_nodes == 0) { 1714 spin_lock_irq(shost->host_lock); 1715 vport->fc_flag &= ~FC_NDISC_ACTIVE; 1716 spin_unlock_irq(shost->host_lock); 1717 lpfc_can_disctmo(vport); 1718 lpfc_end_rscn(vport); 1719 } 1720 } 1721 } 1722 return; 1723 } 1724 1725 void 1726 lpfc_els_retry_delay(unsigned long ptr) 1727 { 1728 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr; 1729 struct lpfc_vport *vport = ndlp->vport; 1730 struct lpfc_hba *phba = vport->phba; 1731 unsigned long flags; 1732 struct lpfc_work_evt *evtp = &ndlp->els_retry_evt; 1733 1734 ndlp = (struct lpfc_nodelist *) ptr; 1735 phba = ndlp->vport->phba; 1736 evtp = &ndlp->els_retry_evt; 1737 1738 spin_lock_irqsave(&phba->hbalock, flags); 1739 if (!list_empty(&evtp->evt_listp)) { 1740 spin_unlock_irqrestore(&phba->hbalock, flags); 1741 return; 1742 } 1743 1744 /* We need to hold the node by incrementing the reference 1745 * count until the queued work is done 1746 */ 1747 evtp->evt_arg1 = lpfc_nlp_get(ndlp); 1748 evtp->evt = LPFC_EVT_ELS_RETRY; 1749 list_add_tail(&evtp->evt_listp, &phba->work_list); 1750 if (phba->work_wait) 1751 lpfc_worker_wake_up(phba); 1752 1753 spin_unlock_irqrestore(&phba->hbalock, flags); 1754 return; 1755 } 1756 1757 void 1758 lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) 1759 { 1760 struct lpfc_vport *vport = ndlp->vport; 1761 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1762 uint32_t cmd, did, retry; 1763 1764 spin_lock_irq(shost->host_lock); 1765 did = ndlp->nlp_DID; 1766 cmd = ndlp->nlp_last_elscmd; 1767 ndlp->nlp_last_elscmd = 0; 1768 1769 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { 1770 spin_unlock_irq(shost->host_lock); 1771 return; 1772 } 1773 1774 ndlp->nlp_flag &= ~NLP_DELAY_TMO; 1775 spin_unlock_irq(shost->host_lock); 1776 /* 1777 * If a discovery event readded nlp_delayfunc after timer 1778 * firing and before processing the timer, cancel the 1779 * nlp_delayfunc. 
1780 */ 1781 del_timer_sync(&ndlp->nlp_delayfunc); 1782 retry = ndlp->nlp_retry; 1783 1784 switch (cmd) { 1785 case ELS_CMD_FLOGI: 1786 lpfc_issue_els_flogi(vport, ndlp, retry); 1787 break; 1788 case ELS_CMD_PLOGI: 1789 if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) { 1790 ndlp->nlp_prev_state = ndlp->nlp_state; 1791 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 1792 } 1793 break; 1794 case ELS_CMD_ADISC: 1795 if (!lpfc_issue_els_adisc(vport, ndlp, retry)) { 1796 ndlp->nlp_prev_state = ndlp->nlp_state; 1797 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 1798 } 1799 break; 1800 case ELS_CMD_PRLI: 1801 if (!lpfc_issue_els_prli(vport, ndlp, retry)) { 1802 ndlp->nlp_prev_state = ndlp->nlp_state; 1803 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 1804 } 1805 break; 1806 case ELS_CMD_LOGO: 1807 if (!lpfc_issue_els_logo(vport, ndlp, retry)) { 1808 ndlp->nlp_prev_state = ndlp->nlp_state; 1809 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 1810 } 1811 break; 1812 case ELS_CMD_FDISC: 1813 lpfc_issue_els_fdisc(vport, ndlp, retry); 1814 break; 1815 } 1816 return; 1817 } 1818 1819 static int 1820 lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1821 struct lpfc_iocbq *rspiocb) 1822 { 1823 struct lpfc_vport *vport = cmdiocb->vport; 1824 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1825 IOCB_t *irsp = &rspiocb->iocb; 1826 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 1827 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 1828 uint32_t *elscmd; 1829 struct ls_rjt stat; 1830 int retry = 0, maxretry = lpfc_max_els_tries, delay = 0; 1831 int logerr = 0; 1832 uint32_t cmd = 0; 1833 uint32_t did; 1834 1835 1836 /* Note: context2 may be 0 for internal driver abort 1837 * of delays ELS command. 
1838 */ 1839 1840 if (pcmd && pcmd->virt) { 1841 elscmd = (uint32_t *) (pcmd->virt); 1842 cmd = *elscmd++; 1843 } 1844 1845 if (ndlp) 1846 did = ndlp->nlp_DID; 1847 else { 1848 /* We should only hit this case for retrying PLOGI */ 1849 did = irsp->un.elsreq64.remoteID; 1850 ndlp = lpfc_findnode_did(vport, did); 1851 if (!ndlp && (cmd != ELS_CMD_PLOGI)) 1852 return 1; 1853 } 1854 1855 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1856 "Retry ELS: wd7:x%x wd4:x%x did:x%x", 1857 *(((uint32_t *) irsp) + 7), irsp->un.ulpWord[4], ndlp->nlp_DID); 1858 1859 switch (irsp->ulpStatus) { 1860 case IOSTAT_FCP_RSP_ERROR: 1861 case IOSTAT_REMOTE_STOP: 1862 break; 1863 1864 case IOSTAT_LOCAL_REJECT: 1865 switch ((irsp->un.ulpWord[4] & 0xff)) { 1866 case IOERR_LOOP_OPEN_FAILURE: 1867 if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0) 1868 delay = 1000; 1869 retry = 1; 1870 break; 1871 1872 case IOERR_ILLEGAL_COMMAND: 1873 if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) && 1874 (cmd == ELS_CMD_FDISC)) { 1875 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 1876 "0124 FDISC failed (3/6) " 1877 "retrying...\n"); 1878 lpfc_mbx_unreg_vpi(vport); 1879 retry = 1; 1880 /* FDISC retry policy */ 1881 maxretry = 48; 1882 if (cmdiocb->retry >= 32) 1883 delay = 1000; 1884 } 1885 break; 1886 1887 case IOERR_NO_RESOURCES: 1888 logerr = 1; /* HBA out of resources */ 1889 retry = 1; 1890 if (cmdiocb->retry > 100) 1891 delay = 100; 1892 maxretry = 250; 1893 break; 1894 1895 case IOERR_ILLEGAL_FRAME: 1896 delay = 100; 1897 retry = 1; 1898 break; 1899 1900 case IOERR_SEQUENCE_TIMEOUT: 1901 case IOERR_INVALID_RPI: 1902 retry = 1; 1903 break; 1904 } 1905 break; 1906 1907 case IOSTAT_NPORT_RJT: 1908 case IOSTAT_FABRIC_RJT: 1909 if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) { 1910 retry = 1; 1911 break; 1912 } 1913 break; 1914 1915 case IOSTAT_NPORT_BSY: 1916 case IOSTAT_FABRIC_BSY: 1917 logerr = 1; /* Fabric / Remote NPort out of resources */ 1918 retry = 1; 1919 break; 1920 1921 case IOSTAT_LS_RJT: 1922 stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]); 1923 /* Added for Vendor specifc support 1924 * Just keep retrying for these Rsn / Exp codes 1925 */ 1926 switch (stat.un.b.lsRjtRsnCode) { 1927 case LSRJT_UNABLE_TPC: 1928 if (stat.un.b.lsRjtRsnCodeExp == 1929 LSEXP_CMD_IN_PROGRESS) { 1930 if (cmd == ELS_CMD_PLOGI) { 1931 delay = 1000; 1932 maxretry = 48; 1933 } 1934 retry = 1; 1935 break; 1936 } 1937 if (cmd == ELS_CMD_PLOGI) { 1938 delay = 1000; 1939 maxretry = lpfc_max_els_tries + 1; 1940 retry = 1; 1941 break; 1942 } 1943 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 1944 (cmd == ELS_CMD_FDISC) && 1945 (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){ 1946 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 1947 "0125 FDISC Failed (x%x). 
" 1948 "Fabric out of resources\n", 1949 stat.un.lsRjtError); 1950 lpfc_vport_set_state(vport, 1951 FC_VPORT_NO_FABRIC_RSCS); 1952 } 1953 break; 1954 1955 case LSRJT_LOGICAL_BSY: 1956 if ((cmd == ELS_CMD_PLOGI) || 1957 (cmd == ELS_CMD_PRLI)) { 1958 delay = 1000; 1959 maxretry = 48; 1960 } else if (cmd == ELS_CMD_FDISC) { 1961 /* FDISC retry policy */ 1962 maxretry = 48; 1963 if (cmdiocb->retry >= 32) 1964 delay = 1000; 1965 } 1966 retry = 1; 1967 break; 1968 1969 case LSRJT_LOGICAL_ERR: 1970 case LSRJT_PROTOCOL_ERR: 1971 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 1972 (cmd == ELS_CMD_FDISC) && 1973 ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) || 1974 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID)) 1975 ) { 1976 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 1977 "0123 FDISC Failed (x%x). " 1978 "Fabric Detected Bad WWN\n", 1979 stat.un.lsRjtError); 1980 lpfc_vport_set_state(vport, 1981 FC_VPORT_FABRIC_REJ_WWN); 1982 } 1983 break; 1984 } 1985 break; 1986 1987 case IOSTAT_INTERMED_RSP: 1988 case IOSTAT_BA_RJT: 1989 break; 1990 1991 default: 1992 break; 1993 } 1994 1995 if (did == FDMI_DID) 1996 retry = 1; 1997 1998 if ((cmd == ELS_CMD_FLOGI) && 1999 (phba->fc_topology != TOPOLOGY_LOOP)) { 2000 /* FLOGI retry policy */ 2001 retry = 1; 2002 maxretry = 48; 2003 if (cmdiocb->retry >= 32) 2004 delay = 1000; 2005 } 2006 2007 if ((++cmdiocb->retry) >= maxretry) { 2008 phba->fc_stat.elsRetryExceeded++; 2009 retry = 0; 2010 } 2011 2012 if ((vport->load_flag & FC_UNLOADING) != 0) 2013 retry = 0; 2014 2015 if (retry) { 2016 2017 /* Retry ELS command <elsCmd> to remote NPORT <did> */ 2018 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2019 "0107 Retry ELS command x%x to remote " 2020 "NPORT x%x Data: x%x x%x\n", 2021 cmd, did, cmdiocb->retry, delay); 2022 2023 if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) && 2024 ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) || 2025 ((irsp->un.ulpWord[4] & 0xff) != IOERR_NO_RESOURCES))) { 2026 /* Don't reset timer for no resources */ 2027 2028 /* If discovery / RSCN timer is running, reset it */ 2029 if (timer_pending(&vport->fc_disctmo) || 2030 (vport->fc_flag & FC_RSCN_MODE)) 2031 lpfc_set_disctmo(vport); 2032 } 2033 2034 phba->fc_stat.elsXmitRetry++; 2035 if (ndlp && delay) { 2036 phba->fc_stat.elsDelayRetry++; 2037 ndlp->nlp_retry = cmdiocb->retry; 2038 2039 /* delay is specified in milliseconds */ 2040 mod_timer(&ndlp->nlp_delayfunc, 2041 jiffies + msecs_to_jiffies(delay)); 2042 spin_lock_irq(shost->host_lock); 2043 ndlp->nlp_flag |= NLP_DELAY_TMO; 2044 spin_unlock_irq(shost->host_lock); 2045 2046 ndlp->nlp_prev_state = ndlp->nlp_state; 2047 if (cmd == ELS_CMD_PRLI) 2048 lpfc_nlp_set_state(vport, ndlp, 2049 NLP_STE_REG_LOGIN_ISSUE); 2050 else 2051 lpfc_nlp_set_state(vport, ndlp, 2052 NLP_STE_NPR_NODE); 2053 ndlp->nlp_last_elscmd = cmd; 2054 2055 return 1; 2056 } 2057 switch (cmd) { 2058 case ELS_CMD_FLOGI: 2059 lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry); 2060 return 1; 2061 case ELS_CMD_FDISC: 2062 lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry); 2063 return 1; 2064 case ELS_CMD_PLOGI: 2065 if (ndlp) { 2066 ndlp->nlp_prev_state = ndlp->nlp_state; 2067 lpfc_nlp_set_state(vport, ndlp, 2068 NLP_STE_PLOGI_ISSUE); 2069 } 2070 lpfc_issue_els_plogi(vport, did, cmdiocb->retry); 2071 return 1; 2072 case ELS_CMD_ADISC: 2073 ndlp->nlp_prev_state = ndlp->nlp_state; 2074 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 2075 lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry); 2076 return 1; 2077 case ELS_CMD_PRLI: 2078 ndlp->nlp_prev_state = 
ndlp->nlp_state; 2079 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 2080 lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry); 2081 return 1; 2082 case ELS_CMD_LOGO: 2083 ndlp->nlp_prev_state = ndlp->nlp_state; 2084 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 2085 lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry); 2086 return 1; 2087 } 2088 } 2089 /* No retry ELS command <elsCmd> to remote NPORT <did> */ 2090 if (logerr) { 2091 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 2092 "0137 No retry ELS command x%x to remote " 2093 "NPORT x%x: Out of Resources: Error:x%x/%x\n", 2094 cmd, did, irsp->ulpStatus, 2095 irsp->un.ulpWord[4]); 2096 } 2097 else { 2098 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2099 "0108 No retry ELS command x%x to remote " 2100 "NPORT x%x Retried:%d Error:x%x/%x\n", 2101 cmd, did, cmdiocb->retry, irsp->ulpStatus, 2102 irsp->un.ulpWord[4]); 2103 } 2104 return 0; 2105 } 2106 2107 static int 2108 lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1) 2109 { 2110 struct lpfc_dmabuf *buf_ptr; 2111 2112 /* Free the response before processing the command. */ 2113 if (!list_empty(&buf_ptr1->list)) { 2114 list_remove_head(&buf_ptr1->list, buf_ptr, 2115 struct lpfc_dmabuf, 2116 list); 2117 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 2118 kfree(buf_ptr); 2119 } 2120 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys); 2121 kfree(buf_ptr1); 2122 return 0; 2123 } 2124 2125 static int 2126 lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr) 2127 { 2128 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 2129 kfree(buf_ptr); 2130 return 0; 2131 } 2132 2133 int 2134 lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb) 2135 { 2136 struct lpfc_dmabuf *buf_ptr, *buf_ptr1; 2137 struct lpfc_nodelist *ndlp; 2138 2139 ndlp = (struct lpfc_nodelist *)elsiocb->context1; 2140 if (ndlp) { 2141 if (ndlp->nlp_flag & NLP_DEFER_RM) { 2142 lpfc_nlp_put(ndlp); 2143 2144 /* If the ndlp is not being used by another discovery 2145 * thread, free it. 2146 */ 2147 if (!lpfc_nlp_not_used(ndlp)) { 2148 /* If ndlp is being used by another discovery 2149 * thread, just clear NLP_DEFER_RM 2150 */ 2151 ndlp->nlp_flag &= ~NLP_DEFER_RM; 2152 } 2153 } 2154 else 2155 lpfc_nlp_put(ndlp); 2156 elsiocb->context1 = NULL; 2157 } 2158 /* context2 = cmd, context2->next = rsp, context3 = bpl */ 2159 if (elsiocb->context2) { 2160 if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) { 2161 /* Firmware could still be in progress of DMAing 2162 * payload, so don't free data buffer till after 2163 * a hbeat. 
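 * The cmd/rsp buffers are parked on phba->elsbuf below; the
 * heartbeat handler is assumed to reclaim them once it is safe
 * to do so.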
2164 */ 2165 elsiocb->iocb_flag &= ~LPFC_DELAY_MEM_FREE; 2166 buf_ptr = elsiocb->context2; 2167 elsiocb->context2 = NULL; 2168 if (buf_ptr) { 2169 buf_ptr1 = NULL; 2170 spin_lock_irq(&phba->hbalock); 2171 if (!list_empty(&buf_ptr->list)) { 2172 list_remove_head(&buf_ptr->list, 2173 buf_ptr1, struct lpfc_dmabuf, 2174 list); 2175 INIT_LIST_HEAD(&buf_ptr1->list); 2176 list_add_tail(&buf_ptr1->list, 2177 &phba->elsbuf); 2178 phba->elsbuf_cnt++; 2179 } 2180 INIT_LIST_HEAD(&buf_ptr->list); 2181 list_add_tail(&buf_ptr->list, &phba->elsbuf); 2182 phba->elsbuf_cnt++; 2183 spin_unlock_irq(&phba->hbalock); 2184 } 2185 } else { 2186 buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2; 2187 lpfc_els_free_data(phba, buf_ptr1); 2188 } 2189 } 2190 2191 if (elsiocb->context3) { 2192 buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3; 2193 lpfc_els_free_bpl(phba, buf_ptr); 2194 } 2195 lpfc_sli_release_iocbq(phba, elsiocb); 2196 return 0; 2197 } 2198 2199 static void 2200 lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2201 struct lpfc_iocbq *rspiocb) 2202 { 2203 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 2204 struct lpfc_vport *vport = cmdiocb->vport; 2205 IOCB_t *irsp; 2206 2207 irsp = &rspiocb->iocb; 2208 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 2209 "ACC LOGO cmpl: status:x%x/x%x did:x%x", 2210 irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID); 2211 /* ACC to LOGO completes to NPort <nlp_DID> */ 2212 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2213 "0109 ACC to LOGO completes to NPort x%x " 2214 "Data: x%x x%x x%x\n", 2215 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 2216 ndlp->nlp_rpi); 2217 2218 if (ndlp->nlp_state == NLP_STE_NPR_NODE) { 2219 /* NPort Recovery mode or node is just allocated */ 2220 if (!lpfc_nlp_not_used(ndlp)) { 2221 /* If the ndlp is being used by another discovery 2222 * thread, just unregister the RPI. 2223 */ 2224 lpfc_unreg_rpi(vport, ndlp); 2225 } else { 2226 /* Indicate the node has already released, should 2227 * not reference to it from within lpfc_els_free_iocb. 2228 */ 2229 cmdiocb->context1 = NULL; 2230 } 2231 } 2232 lpfc_els_free_iocb(phba, cmdiocb); 2233 return; 2234 } 2235 2236 void 2237 lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 2238 { 2239 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 2240 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 2241 2242 pmb->context1 = NULL; 2243 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2244 kfree(mp); 2245 mempool_free(pmb, phba->mbox_mem_pool); 2246 if (ndlp) { 2247 lpfc_nlp_put(ndlp); 2248 /* This is the end of the default RPI cleanup logic for this 2249 * ndlp. If no other discovery threads are using this ndlp. 2250 * we should free all resources associated with it. 2251 */ 2252 lpfc_nlp_not_used(ndlp); 2253 } 2254 return; 2255 } 2256 2257 static void 2258 lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2259 struct lpfc_iocbq *rspiocb) 2260 { 2261 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 2262 struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL; 2263 struct Scsi_Host *shost = vport ? lpfc_shost_from_vport(vport) : NULL; 2264 IOCB_t *irsp; 2265 uint8_t *pcmd; 2266 LPFC_MBOXQ_t *mbox = NULL; 2267 struct lpfc_dmabuf *mp = NULL; 2268 uint32_t ls_rjt = 0; 2269 2270 irsp = &rspiocb->iocb; 2271 2272 if (cmdiocb->context_un.mbox) 2273 mbox = cmdiocb->context_un.mbox; 2274 2275 /* First determine if this is a LS_RJT cmpl. 
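 * (The first word of the context2 payload is compared against
 * ELS_CMD_LS_RJT just below.)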
Note, this callback 2276 * function can have cmdiocb->context1 (ndlp) field set to NULL. 2277 */ 2278 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt); 2279 if (ndlp && (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) { 2280 /* A LS_RJT associated with Default RPI cleanup has its own 2281 * separate code path. 2282 */ 2283 if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI)) 2284 ls_rjt = 1; 2285 } 2286 2287 /* Check to see if link went down during discovery */ 2288 if (!ndlp || lpfc_els_chk_latt(vport)) { 2289 if (mbox) { 2290 mp = (struct lpfc_dmabuf *) mbox->context1; 2291 if (mp) { 2292 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2293 kfree(mp); 2294 } 2295 mempool_free(mbox, phba->mbox_mem_pool); 2296 } 2297 if (ndlp && (ndlp->nlp_flag & NLP_RM_DFLT_RPI)) 2298 if (lpfc_nlp_not_used(ndlp)) { 2299 ndlp = NULL; 2300 /* Indicate the node has already been released, 2301 * and should not be referenced from within 2302 * the routine lpfc_els_free_iocb. 2303 */ 2304 cmdiocb->context1 = NULL; 2305 } 2306 goto out; 2307 } 2308 2309 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 2310 "ELS rsp cmpl: status:x%x/x%x did:x%x", 2311 irsp->ulpStatus, irsp->un.ulpWord[4], 2312 cmdiocb->iocb.un.elsreq64.remoteID); 2313 /* ELS response tag <ulpIoTag> completes */ 2314 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2315 "0110 ELS response tag x%x completes " 2316 "Data: x%x x%x x%x x%x x%x x%x x%x\n", 2317 cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus, 2318 rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout, 2319 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 2320 ndlp->nlp_rpi); 2321 if (mbox) { 2322 if ((rspiocb->iocb.ulpStatus == 0) 2323 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { 2324 lpfc_unreg_rpi(vport, ndlp); 2325 mbox->context2 = lpfc_nlp_get(ndlp); 2326 mbox->vport = vport; 2327 if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) { 2328 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG; 2329 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 2330 } 2331 else { 2332 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; 2333 ndlp->nlp_prev_state = ndlp->nlp_state; 2334 lpfc_nlp_set_state(vport, ndlp, 2335 NLP_STE_REG_LOGIN_ISSUE); 2336 } 2337 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 2338 != MBX_NOT_FINISHED) { 2339 goto out; 2340 } 2341 2342 /* ELS rsp: Cannot issue reg_login for <NPortid> */ 2343 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 2344 "0138 ELS rsp: Cannot issue reg_login for x%x " 2345 "Data: x%x x%x x%x\n", 2346 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 2347 ndlp->nlp_rpi); 2348 2349 if (lpfc_nlp_not_used(ndlp)) { 2350 ndlp = NULL; 2351 /* Indicate node has already been released, 2352 * and should not be referenced from within 2353 * the routine lpfc_els_free_iocb. 2354 */ 2355 cmdiocb->context1 = NULL; 2356 } 2357 } else { 2358 /* Do not drop node for lpfc_els_abort'ed ELS cmds */ 2359 if (!lpfc_error_lost_link(irsp) && 2360 ndlp->nlp_flag & NLP_ACC_REGLOGIN) { 2361 if (lpfc_nlp_not_used(ndlp)) { 2362 ndlp = NULL; 2363 /* Indicate node has already been 2364 * released, and should not be 2365 * referenced from within the routine 2366 * lpfc_els_free_iocb.
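 * Clearing context1 below keeps lpfc_els_free_iocb from
 * touching the released node again.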
2367 */ 2368 cmdiocb->context1 = NULL; 2369 } 2370 } 2371 } 2372 mp = (struct lpfc_dmabuf *) mbox->context1; 2373 if (mp) { 2374 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2375 kfree(mp); 2376 } 2377 mempool_free(mbox, phba->mbox_mem_pool); 2378 } 2379 out: 2380 if (ndlp) { 2381 spin_lock_irq(shost->host_lock); 2382 ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI); 2383 spin_unlock_irq(shost->host_lock); 2384 2385 /* If the node is not being used by another discovery thread, 2386 * and we are sending a reject, we are done with it. 2387 * Release driver reference count here and free associated 2388 * resources. 2389 */ 2390 if (ls_rjt) 2391 if (lpfc_nlp_not_used(ndlp)) 2392 /* Indicate node has already been released, 2393 * should not reference to it from within 2394 * the routine lpfc_els_free_iocb. 2395 */ 2396 cmdiocb->context1 = NULL; 2397 } 2398 2399 lpfc_els_free_iocb(phba, cmdiocb); 2400 return; 2401 } 2402 2403 int 2404 lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, 2405 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 2406 LPFC_MBOXQ_t *mbox) 2407 { 2408 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2409 struct lpfc_hba *phba = vport->phba; 2410 IOCB_t *icmd; 2411 IOCB_t *oldcmd; 2412 struct lpfc_iocbq *elsiocb; 2413 struct lpfc_sli_ring *pring; 2414 struct lpfc_sli *psli; 2415 uint8_t *pcmd; 2416 uint16_t cmdsize; 2417 int rc; 2418 ELS_PKT *els_pkt_ptr; 2419 2420 psli = &phba->sli; 2421 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ 2422 oldcmd = &oldiocb->iocb; 2423 2424 switch (flag) { 2425 case ELS_CMD_ACC: 2426 cmdsize = sizeof(uint32_t); 2427 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 2428 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 2429 if (!elsiocb) { 2430 spin_lock_irq(shost->host_lock); 2431 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 2432 spin_unlock_irq(shost->host_lock); 2433 return 1; 2434 } 2435 2436 icmd = &elsiocb->iocb; 2437 icmd->ulpContext = oldcmd->ulpContext; /* Xri */ 2438 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2439 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 2440 pcmd += sizeof(uint32_t); 2441 2442 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 2443 "Issue ACC: did:x%x flg:x%x", 2444 ndlp->nlp_DID, ndlp->nlp_flag, 0); 2445 break; 2446 case ELS_CMD_PLOGI: 2447 cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t)); 2448 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 2449 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 2450 if (!elsiocb) 2451 return 1; 2452 2453 icmd = &elsiocb->iocb; 2454 icmd->ulpContext = oldcmd->ulpContext; /* Xri */ 2455 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2456 2457 if (mbox) 2458 elsiocb->context_un.mbox = mbox; 2459 2460 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 2461 pcmd += sizeof(uint32_t); 2462 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); 2463 2464 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 2465 "Issue ACC PLOGI: did:x%x flg:x%x", 2466 ndlp->nlp_DID, ndlp->nlp_flag, 0); 2467 break; 2468 case ELS_CMD_PRLO: 2469 cmdsize = sizeof(uint32_t) + sizeof(PRLO); 2470 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 2471 ndlp, ndlp->nlp_DID, ELS_CMD_PRLO); 2472 if (!elsiocb) 2473 return 1; 2474 2475 icmd = &elsiocb->iocb; 2476 icmd->ulpContext = oldcmd->ulpContext; /* Xri */ 2477 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2478 2479 memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt, 2480 sizeof(uint32_t) + sizeof(PRLO)); 2481 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC; 2482 els_pkt_ptr = (ELS_PKT *) 
pcmd; 2483 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED; 2484 2485 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 2486 "Issue ACC PRLO: did:x%x flg:x%x", 2487 ndlp->nlp_DID, ndlp->nlp_flag, 0); 2488 break; 2489 default: 2490 return 1; 2491 } 2492 /* Xmit ELS ACC response tag <ulpIoTag> */ 2493 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2494 "0128 Xmit ELS ACC response tag x%x, XRI: x%x, " 2495 "DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x\n", 2496 elsiocb->iotag, elsiocb->iocb.ulpContext, 2497 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 2498 ndlp->nlp_rpi); 2499 if (ndlp->nlp_flag & NLP_LOGO_ACC) { 2500 spin_lock_irq(shost->host_lock); 2501 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 2502 spin_unlock_irq(shost->host_lock); 2503 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc; 2504 } else { 2505 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 2506 } 2507 2508 phba->fc_stat.elsXmitACC++; 2509 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 2510 if (rc == IOCB_ERROR) { 2511 lpfc_els_free_iocb(phba, elsiocb); 2512 return 1; 2513 } 2514 return 0; 2515 } 2516 2517 int 2518 lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, 2519 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 2520 LPFC_MBOXQ_t *mbox) 2521 { 2522 struct lpfc_hba *phba = vport->phba; 2523 IOCB_t *icmd; 2524 IOCB_t *oldcmd; 2525 struct lpfc_iocbq *elsiocb; 2526 struct lpfc_sli_ring *pring; 2527 struct lpfc_sli *psli; 2528 uint8_t *pcmd; 2529 uint16_t cmdsize; 2530 int rc; 2531 2532 psli = &phba->sli; 2533 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ 2534 2535 cmdsize = 2 * sizeof(uint32_t); 2536 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 2537 ndlp->nlp_DID, ELS_CMD_LS_RJT); 2538 if (!elsiocb) 2539 return 1; 2540 2541 icmd = &elsiocb->iocb; 2542 oldcmd = &oldiocb->iocb; 2543 icmd->ulpContext = oldcmd->ulpContext; /* Xri */ 2544 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2545 2546 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; 2547 pcmd += sizeof(uint32_t); 2548 *((uint32_t *) (pcmd)) = rejectError; 2549 2550 if (mbox) 2551 elsiocb->context_un.mbox = mbox; 2552 2553 /* Xmit ELS RJT <err> response tag <ulpIoTag> */ 2554 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2555 "0129 Xmit ELS RJT x%x response tag x%x " 2556 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " 2557 "rpi x%x\n", 2558 rejectError, elsiocb->iotag, 2559 elsiocb->iocb.ulpContext, ndlp->nlp_DID, 2560 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); 2561 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 2562 "Issue LS_RJT: did:x%x flg:x%x err:x%x", 2563 ndlp->nlp_DID, ndlp->nlp_flag, rejectError); 2564 2565 phba->fc_stat.elsXmitLSRJT++; 2566 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 2567 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 2568 2569 if (rc == IOCB_ERROR) { 2570 lpfc_els_free_iocb(phba, elsiocb); 2571 return 1; 2572 } 2573 return 0; 2574 } 2575 2576 int 2577 lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 2578 struct lpfc_nodelist *ndlp) 2579 { 2580 struct lpfc_hba *phba = vport->phba; 2581 struct lpfc_sli *psli = &phba->sli; 2582 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; 2583 ADISC *ap; 2584 IOCB_t *icmd, *oldcmd; 2585 struct lpfc_iocbq *elsiocb; 2586 uint8_t *pcmd; 2587 uint16_t cmdsize; 2588 int rc; 2589 2590 cmdsize = sizeof(uint32_t) + sizeof(ADISC); 2591 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 2592 ndlp->nlp_DID, ELS_CMD_ACC); 2593 if (!elsiocb) 2594 return 1; 2595 2596 icmd = &elsiocb->iocb; 2597 
oldcmd = &oldiocb->iocb; 2598 icmd->ulpContext = oldcmd->ulpContext; /* Xri */ 2599 2600 /* Xmit ADISC ACC response tag <ulpIoTag> */ 2601 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2602 "0130 Xmit ADISC ACC response iotag x%x xri: " 2603 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n", 2604 elsiocb->iotag, elsiocb->iocb.ulpContext, 2605 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 2606 ndlp->nlp_rpi); 2607 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2608 2609 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 2610 pcmd += sizeof(uint32_t); 2611 2612 ap = (ADISC *) (pcmd); 2613 ap->hardAL_PA = phba->fc_pref_ALPA; 2614 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 2615 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 2616 ap->DID = be32_to_cpu(vport->fc_myDID); 2617 2618 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 2619 "Issue ACC ADISC: did:x%x flg:x%x", 2620 ndlp->nlp_DID, ndlp->nlp_flag, 0); 2621 2622 phba->fc_stat.elsXmitACC++; 2623 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 2624 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 2625 if (rc == IOCB_ERROR) { 2626 lpfc_els_free_iocb(phba, elsiocb); 2627 return 1; 2628 } 2629 return 0; 2630 } 2631 2632 int 2633 lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 2634 struct lpfc_nodelist *ndlp) 2635 { 2636 struct lpfc_hba *phba = vport->phba; 2637 PRLI *npr; 2638 lpfc_vpd_t *vpd; 2639 IOCB_t *icmd; 2640 IOCB_t *oldcmd; 2641 struct lpfc_iocbq *elsiocb; 2642 struct lpfc_sli_ring *pring; 2643 struct lpfc_sli *psli; 2644 uint8_t *pcmd; 2645 uint16_t cmdsize; 2646 int rc; 2647 2648 psli = &phba->sli; 2649 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ 2650 2651 cmdsize = sizeof(uint32_t) + sizeof(PRLI); 2652 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 2653 ndlp->nlp_DID, (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK))); 2654 if (!elsiocb) 2655 return 1; 2656 2657 icmd = &elsiocb->iocb; 2658 oldcmd = &oldiocb->iocb; 2659 icmd->ulpContext = oldcmd->ulpContext; /* Xri */ 2660 /* Xmit PRLI ACC response tag <ulpIoTag> */ 2661 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2662 "0131 Xmit PRLI ACC response tag x%x xri x%x, " 2663 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 2664 elsiocb->iotag, elsiocb->iocb.ulpContext, 2665 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 2666 ndlp->nlp_rpi); 2667 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2668 2669 *((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)); 2670 pcmd += sizeof(uint32_t); 2671 2672 /* For PRLI, remainder of payload is PRLI parameter page */ 2673 memset(pcmd, 0, sizeof(PRLI)); 2674 2675 npr = (PRLI *) pcmd; 2676 vpd = &phba->vpd; 2677 /* 2678 * If our firmware version is 3.20 or later, 2679 * set the following bits for FC-TAPE support. 
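 * The bits in question are ConfmComplAllowed, Retry and
 * TaskRetryIdReq; feaLevelHigh >= 0x02 is used as the firmware
 * level check.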
2680 */ 2681 if (vpd->rev.feaLevelHigh >= 0x02) { 2682 npr->ConfmComplAllowed = 1; 2683 npr->Retry = 1; 2684 npr->TaskRetryIdReq = 1; 2685 } 2686 2687 npr->acceptRspCode = PRLI_REQ_EXECUTED; 2688 npr->estabImagePair = 1; 2689 npr->readXferRdyDis = 1; 2690 npr->ConfmComplAllowed = 1; 2691 2692 npr->prliType = PRLI_FCP_TYPE; 2693 npr->initiatorFunc = 1; 2694 2695 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 2696 "Issue ACC PRLI: did:x%x flg:x%x", 2697 ndlp->nlp_DID, ndlp->nlp_flag, 0); 2698 2699 phba->fc_stat.elsXmitACC++; 2700 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 2701 2702 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 2703 if (rc == IOCB_ERROR) { 2704 lpfc_els_free_iocb(phba, elsiocb); 2705 return 1; 2706 } 2707 return 0; 2708 } 2709 2710 static int 2711 lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, 2712 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 2713 { 2714 struct lpfc_hba *phba = vport->phba; 2715 RNID *rn; 2716 IOCB_t *icmd, *oldcmd; 2717 struct lpfc_iocbq *elsiocb; 2718 struct lpfc_sli_ring *pring; 2719 struct lpfc_sli *psli; 2720 uint8_t *pcmd; 2721 uint16_t cmdsize; 2722 int rc; 2723 2724 psli = &phba->sli; 2725 pring = &psli->ring[LPFC_ELS_RING]; 2726 2727 cmdsize = sizeof(uint32_t) + sizeof(uint32_t) 2728 + (2 * sizeof(struct lpfc_name)); 2729 if (format) 2730 cmdsize += sizeof(RNID_TOP_DISC); 2731 2732 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 2733 ndlp->nlp_DID, ELS_CMD_ACC); 2734 if (!elsiocb) 2735 return 1; 2736 2737 icmd = &elsiocb->iocb; 2738 oldcmd = &oldiocb->iocb; 2739 icmd->ulpContext = oldcmd->ulpContext; /* Xri */ 2740 /* Xmit RNID ACC response tag <ulpIoTag> */ 2741 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2742 "0132 Xmit RNID ACC response tag x%x xri x%x\n", 2743 elsiocb->iotag, elsiocb->iocb.ulpContext); 2744 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2745 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 2746 pcmd += sizeof(uint32_t); 2747 2748 memset(pcmd, 0, sizeof(RNID)); 2749 rn = (RNID *) (pcmd); 2750 rn->Format = format; 2751 rn->CommonLen = (2 * sizeof(struct lpfc_name)); 2752 memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 2753 memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 2754 switch (format) { 2755 case 0: 2756 rn->SpecificLen = 0; 2757 break; 2758 case RNID_TOPOLOGY_DISC: 2759 rn->SpecificLen = sizeof(RNID_TOP_DISC); 2760 memcpy(&rn->un.topologyDisc.portName, 2761 &vport->fc_portname, sizeof(struct lpfc_name)); 2762 rn->un.topologyDisc.unitType = RNID_HBA; 2763 rn->un.topologyDisc.physPort = 0; 2764 rn->un.topologyDisc.attachedNodes = 0; 2765 break; 2766 default: 2767 rn->CommonLen = 0; 2768 rn->SpecificLen = 0; 2769 break; 2770 } 2771 2772 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 2773 "Issue ACC RNID: did:x%x flg:x%x", 2774 ndlp->nlp_DID, ndlp->nlp_flag, 0); 2775 2776 phba->fc_stat.elsXmitACC++; 2777 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 2778 lpfc_nlp_put(ndlp); 2779 elsiocb->context1 = NULL; /* Don't need ndlp for cmpl, 2780 * it could be freed */ 2781 2782 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 2783 if (rc == IOCB_ERROR) { 2784 lpfc_els_free_iocb(phba, elsiocb); 2785 return 1; 2786 } 2787 return 0; 2788 } 2789 2790 int 2791 lpfc_els_disc_adisc(struct lpfc_vport *vport) 2792 { 2793 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2794 struct lpfc_nodelist *ndlp, *next_ndlp; 2795 int sentadisc = 0; 2796 2797 /* go thru NPR nodes and issue any remaining ELS ADISCs */ 2798 
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 2799 if (ndlp->nlp_state == NLP_STE_NPR_NODE && 2800 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && 2801 (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) { 2802 spin_lock_irq(shost->host_lock); 2803 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 2804 spin_unlock_irq(shost->host_lock); 2805 ndlp->nlp_prev_state = ndlp->nlp_state; 2806 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 2807 lpfc_issue_els_adisc(vport, ndlp, 0); 2808 sentadisc++; 2809 vport->num_disc_nodes++; 2810 if (vport->num_disc_nodes >= 2811 vport->cfg_discovery_threads) { 2812 spin_lock_irq(shost->host_lock); 2813 vport->fc_flag |= FC_NLP_MORE; 2814 spin_unlock_irq(shost->host_lock); 2815 break; 2816 } 2817 } 2818 } 2819 if (sentadisc == 0) { 2820 spin_lock_irq(shost->host_lock); 2821 vport->fc_flag &= ~FC_NLP_MORE; 2822 spin_unlock_irq(shost->host_lock); 2823 } 2824 return sentadisc; 2825 } 2826 2827 int 2828 lpfc_els_disc_plogi(struct lpfc_vport *vport) 2829 { 2830 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2831 struct lpfc_nodelist *ndlp, *next_ndlp; 2832 int sentplogi = 0; 2833 2834 /* go thru NPR nodes and issue any remaining ELS PLOGIs */ 2835 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 2836 if (ndlp->nlp_state == NLP_STE_NPR_NODE && 2837 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && 2838 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 && 2839 (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) { 2840 ndlp->nlp_prev_state = ndlp->nlp_state; 2841 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 2842 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 2843 sentplogi++; 2844 vport->num_disc_nodes++; 2845 if (vport->num_disc_nodes >= 2846 vport->cfg_discovery_threads) { 2847 spin_lock_irq(shost->host_lock); 2848 vport->fc_flag |= FC_NLP_MORE; 2849 spin_unlock_irq(shost->host_lock); 2850 break; 2851 } 2852 } 2853 } 2854 if (sentplogi) { 2855 lpfc_set_disctmo(vport); 2856 } 2857 else { 2858 spin_lock_irq(shost->host_lock); 2859 vport->fc_flag &= ~FC_NLP_MORE; 2860 spin_unlock_irq(shost->host_lock); 2861 } 2862 return sentplogi; 2863 } 2864 2865 void 2866 lpfc_els_flush_rscn(struct lpfc_vport *vport) 2867 { 2868 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2869 struct lpfc_hba *phba = vport->phba; 2870 int i; 2871 2872 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 2873 lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]); 2874 vport->fc_rscn_id_list[i] = NULL; 2875 } 2876 spin_lock_irq(shost->host_lock); 2877 vport->fc_rscn_id_cnt = 0; 2878 vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY); 2879 spin_unlock_irq(shost->host_lock); 2880 lpfc_can_disctmo(vport); 2881 } 2882 2883 int 2884 lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did) 2885 { 2886 D_ID ns_did; 2887 D_ID rscn_did; 2888 uint32_t *lp; 2889 uint32_t payload_len, i; 2890 2891 ns_did.un.word = did; 2892 2893 /* Never match fabric nodes for RSCNs */ 2894 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 2895 return 0; 2896 2897 /* If we are doing a FULL RSCN rediscovery, match everything */ 2898 if (vport->fc_flag & FC_RSCN_DISCOVERY) 2899 return did; 2900 2901 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 2902 lp = vport->fc_rscn_id_list[i]->virt; 2903 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); 2904 payload_len -= sizeof(uint32_t); /* take off word 0 */ 2905 while (payload_len) { 2906 rscn_did.un.word = be32_to_cpu(*lp++); 2907 payload_len -= sizeof(uint32_t); 2908 switch (rscn_did.un.b.resv) { 2909 case 0: /* Single N_Port ID effected */ 2910 if (ns_did.un.word == 
rscn_did.un.word) 2911 return did; 2912 break; 2913 case 1: /* Whole N_Port Area effected */ 2914 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 2915 && (ns_did.un.b.area == rscn_did.un.b.area)) 2916 return did; 2917 break; 2918 case 2: /* Whole N_Port Domain effected */ 2919 if (ns_did.un.b.domain == rscn_did.un.b.domain) 2920 return did; 2921 break; 2922 default: 2923 /* Unknown Identifier in RSCN node */ 2924 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 2925 "0217 Unknown Identifier in " 2926 "RSCN payload Data: x%x\n", 2927 rscn_did.un.word); 2928 case 3: /* Whole Fabric effected */ 2929 return did; 2930 } 2931 } 2932 } 2933 return 0; 2934 } 2935 2936 static int 2937 lpfc_rscn_recovery_check(struct lpfc_vport *vport) 2938 { 2939 struct lpfc_nodelist *ndlp = NULL; 2940 2941 /* Look at all nodes effected by pending RSCNs and move 2942 * them to NPR state. 2943 */ 2944 2945 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 2946 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE || 2947 lpfc_rscn_payload_check(vport, ndlp->nlp_DID) == 0) 2948 continue; 2949 2950 lpfc_disc_state_machine(vport, ndlp, NULL, 2951 NLP_EVT_DEVICE_RECOVERY); 2952 2953 /* 2954 * Make sure NLP_DELAY_TMO is NOT running after a device 2955 * recovery event. 2956 */ 2957 if (ndlp->nlp_flag & NLP_DELAY_TMO) 2958 lpfc_cancel_retry_delay_tmo(vport, ndlp); 2959 } 2960 2961 return 0; 2962 } 2963 2964 static int 2965 lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 2966 struct lpfc_nodelist *ndlp) 2967 { 2968 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2969 struct lpfc_hba *phba = vport->phba; 2970 struct lpfc_dmabuf *pcmd; 2971 uint32_t *lp, *datap; 2972 IOCB_t *icmd; 2973 uint32_t payload_len, length, nportid, *cmd; 2974 int rscn_cnt = vport->fc_rscn_id_cnt; 2975 int rscn_id = 0, hba_id = 0; 2976 int i; 2977 2978 icmd = &cmdiocb->iocb; 2979 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 2980 lp = (uint32_t *) pcmd->virt; 2981 2982 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); 2983 payload_len -= sizeof(uint32_t); /* take off word 0 */ 2984 /* RSCN received */ 2985 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2986 "0214 RSCN received Data: x%x x%x x%x x%x\n", 2987 vport->fc_flag, payload_len, *lp, rscn_cnt); 2988 for (i = 0; i < payload_len/sizeof(uint32_t); i++) 2989 fc_host_post_event(shost, fc_get_event_number(), 2990 FCH_EVT_RSCN, lp[i]); 2991 2992 /* If we are about to begin discovery, just ACC the RSCN. 2993 * Discovery processing will satisfy it. 2994 */ 2995 if (vport->port_state <= LPFC_NS_QRY) { 2996 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 2997 "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x", 2998 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 2999 3000 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 3001 return 0; 3002 } 3003 3004 /* If this RSCN just contains NPortIDs for other vports on this HBA, 3005 * just ACC and ignore it. 
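 * (Each NPortID in the payload is looked up with
 * lpfc_find_vport_by_did; only if every one maps to a local vport
 * is the RSCN ACC'ed and dropped.)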
3006 */ 3007 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 3008 !(vport->cfg_peer_port_login)) { 3009 i = payload_len; 3010 datap = lp; 3011 while (i > 0) { 3012 nportid = *datap++; 3013 nportid = ((be32_to_cpu(nportid)) & Mask_DID); 3014 i -= sizeof(uint32_t); 3015 rscn_id++; 3016 if (lpfc_find_vport_by_did(phba, nportid)) 3017 hba_id++; 3018 } 3019 if (rscn_id == hba_id) { 3020 /* ALL NPortIDs in RSCN are on HBA */ 3021 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 3022 "0214 Ignore RSCN " 3023 "Data: x%x x%x x%x x%x\n", 3024 vport->fc_flag, payload_len, 3025 *lp, rscn_cnt); 3026 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 3027 "RCV RSCN vport: did:x%x/ste:x%x flg:x%x", 3028 ndlp->nlp_DID, vport->port_state, 3029 ndlp->nlp_flag); 3030 3031 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, 3032 ndlp, NULL); 3033 return 0; 3034 } 3035 } 3036 3037 /* If we are already processing an RSCN, save the received 3038 * RSCN payload buffer, cmdiocb->context2 to process later. 3039 */ 3040 if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) { 3041 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 3042 "RCV RSCN defer: did:x%x/ste:x%x flg:x%x", 3043 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 3044 3045 spin_lock_irq(shost->host_lock); 3046 vport->fc_flag |= FC_RSCN_DEFERRED; 3047 if ((rscn_cnt < FC_MAX_HOLD_RSCN) && 3048 !(vport->fc_flag & FC_RSCN_DISCOVERY)) { 3049 vport->fc_flag |= FC_RSCN_MODE; 3050 spin_unlock_irq(shost->host_lock); 3051 if (rscn_cnt) { 3052 cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt; 3053 length = be32_to_cpu(*cmd & ~ELS_CMD_MASK); 3054 } 3055 if ((rscn_cnt) && 3056 (payload_len + length <= LPFC_BPL_SIZE)) { 3057 *cmd &= ELS_CMD_MASK; 3058 *cmd |= be32_to_cpu(payload_len + length); 3059 memcpy(((uint8_t *)cmd) + length, lp, 3060 payload_len); 3061 } else { 3062 vport->fc_rscn_id_list[rscn_cnt] = pcmd; 3063 vport->fc_rscn_id_cnt++; 3064 /* If we zero, cmdiocb->context2, the calling 3065 * routine will not try to free it. 3066 */ 3067 cmdiocb->context2 = NULL; 3068 } 3069 3070 /* Deferred RSCN */ 3071 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 3072 "0235 Deferred RSCN " 3073 "Data: x%x x%x x%x\n", 3074 vport->fc_rscn_id_cnt, vport->fc_flag, 3075 vport->port_state); 3076 } else { 3077 vport->fc_flag |= FC_RSCN_DISCOVERY; 3078 spin_unlock_irq(shost->host_lock); 3079 /* ReDiscovery RSCN */ 3080 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 3081 "0234 ReDiscovery RSCN " 3082 "Data: x%x x%x x%x\n", 3083 vport->fc_rscn_id_cnt, vport->fc_flag, 3084 vport->port_state); 3085 } 3086 /* Send back ACC */ 3087 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 3088 3089 /* send RECOVERY event for ALL nodes that match RSCN payload */ 3090 lpfc_rscn_recovery_check(vport); 3091 spin_lock_irq(shost->host_lock); 3092 vport->fc_flag &= ~FC_RSCN_DEFERRED; 3093 spin_unlock_irq(shost->host_lock); 3094 return 0; 3095 } 3096 3097 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 3098 "RCV RSCN: did:x%x/ste:x%x flg:x%x", 3099 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 3100 3101 spin_lock_irq(shost->host_lock); 3102 vport->fc_flag |= FC_RSCN_MODE; 3103 spin_unlock_irq(shost->host_lock); 3104 vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd; 3105 /* 3106 * If we zero, cmdiocb->context2, the calling routine will 3107 * not try to free it. 
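 * (Ownership of the payload buffer has passed to fc_rscn_id_list
 * above, which is why the calling routine must not free it.)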
3108 */ 3109 cmdiocb->context2 = NULL; 3110 3111 lpfc_set_disctmo(vport); 3112 3113 /* Send back ACC */ 3114 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 3115 3116 /* send RECOVERY event for ALL nodes that match RSCN payload */ 3117 lpfc_rscn_recovery_check(vport); 3118 3119 return lpfc_els_handle_rscn(vport); 3120 } 3121 3122 int 3123 lpfc_els_handle_rscn(struct lpfc_vport *vport) 3124 { 3125 struct lpfc_nodelist *ndlp; 3126 struct lpfc_hba *phba = vport->phba; 3127 3128 /* Ignore RSCN if the port is being torn down. */ 3129 if (vport->load_flag & FC_UNLOADING) { 3130 lpfc_els_flush_rscn(vport); 3131 return 0; 3132 } 3133 3134 /* Start timer for RSCN processing */ 3135 lpfc_set_disctmo(vport); 3136 3137 /* RSCN processed */ 3138 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 3139 "0215 RSCN processed Data: x%x x%x x%x x%x\n", 3140 vport->fc_flag, 0, vport->fc_rscn_id_cnt, 3141 vport->port_state); 3142 3143 /* To process RSCN, first compare RSCN data with NameServer */ 3144 vport->fc_ns_retry = 0; 3145 vport->num_disc_nodes = 0; 3146 3147 ndlp = lpfc_findnode_did(vport, NameServer_DID); 3148 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { 3149 /* Good ndlp, issue CT Request to NameServer */ 3150 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0) == 0) 3151 /* Wait for NameServer query cmpl before we can 3152 continue */ 3153 return 1; 3154 } else { 3155 /* If login to NameServer does not exist, issue one */ 3156 /* Good status, issue PLOGI to NameServer */ 3157 ndlp = lpfc_findnode_did(vport, NameServer_DID); 3158 if (ndlp) 3159 /* Wait for NameServer login cmpl before we can 3160 continue */ 3161 return 1; 3162 3163 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 3164 if (!ndlp) { 3165 lpfc_els_flush_rscn(vport); 3166 return 0; 3167 } else { 3168 lpfc_nlp_init(vport, ndlp, NameServer_DID); 3169 ndlp->nlp_type |= NLP_FABRIC; 3170 ndlp->nlp_prev_state = ndlp->nlp_state; 3171 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 3172 lpfc_issue_els_plogi(vport, NameServer_DID, 0); 3173 /* Wait for NameServer login cmpl before we can 3174 continue */ 3175 return 1; 3176 } 3177 } 3178 3179 lpfc_els_flush_rscn(vport); 3180 return 0; 3181 } 3182 3183 static int 3184 lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 3185 struct lpfc_nodelist *ndlp) 3186 { 3187 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3188 struct lpfc_hba *phba = vport->phba; 3189 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 3190 uint32_t *lp = (uint32_t *) pcmd->virt; 3191 IOCB_t *icmd = &cmdiocb->iocb; 3192 struct serv_parm *sp; 3193 LPFC_MBOXQ_t *mbox; 3194 struct ls_rjt stat; 3195 uint32_t cmd, did; 3196 int rc; 3197 3198 cmd = *lp++; 3199 sp = (struct serv_parm *) lp; 3200 3201 /* FLOGI received */ 3202 3203 lpfc_set_disctmo(vport); 3204 3205 if (phba->fc_topology == TOPOLOGY_LOOP) { 3206 /* We should never receive a FLOGI in loop mode, ignore it */ 3207 did = icmd->un.elsreq64.remoteID; 3208 3209 /* An FLOGI ELS command <elsCmd> was received from DID <did> in 3210 Loop Mode */ 3211 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 3212 "0113 An FLOGI ELS command x%x was " 3213 "received from DID x%x in Loop Mode\n", 3214 cmd, did); 3215 return 1; 3216 } 3217 3218 did = Fabric_DID; 3219 3220 if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3))) { 3221 /* For a FLOGI we accept, then if our portname is greater 3222 * then the remote portname we initiate Nport login. 
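 * (memcmp of the two WWPNs decides the point-to-point roles: equal
 * names force a link re-init, and a greater local name sets
 * FC_PT2PT_PLOGI so that this side initiates the N_Port login.)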
3223 */ 3224 3225 rc = memcmp(&vport->fc_portname, &sp->portName, 3226 sizeof(struct lpfc_name)); 3227 3228 if (!rc) { 3229 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3230 if (!mbox) 3231 return 1; 3232 3233 lpfc_linkdown(phba); 3234 lpfc_init_link(phba, mbox, 3235 phba->cfg_topology, 3236 phba->cfg_link_speed); 3237 mbox->mb.un.varInitLnk.lipsr_AL_PA = 0; 3238 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 3239 mbox->vport = vport; 3240 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 3241 lpfc_set_loopback_flag(phba); 3242 if (rc == MBX_NOT_FINISHED) { 3243 mempool_free(mbox, phba->mbox_mem_pool); 3244 } 3245 return 1; 3246 } else if (rc > 0) { /* greater than */ 3247 spin_lock_irq(shost->host_lock); 3248 vport->fc_flag |= FC_PT2PT_PLOGI; 3249 spin_unlock_irq(shost->host_lock); 3250 } 3251 spin_lock_irq(shost->host_lock); 3252 vport->fc_flag |= FC_PT2PT; 3253 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 3254 spin_unlock_irq(shost->host_lock); 3255 } else { 3256 /* Reject this request because invalid parameters */ 3257 stat.un.b.lsRjtRsvd0 = 0; 3258 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 3259 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS; 3260 stat.un.b.vendorUnique = 0; 3261 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 3262 NULL); 3263 return 1; 3264 } 3265 3266 /* Send back ACC */ 3267 lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL); 3268 3269 return 0; 3270 } 3271 3272 static int 3273 lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 3274 struct lpfc_nodelist *ndlp) 3275 { 3276 struct lpfc_dmabuf *pcmd; 3277 uint32_t *lp; 3278 IOCB_t *icmd; 3279 RNID *rn; 3280 struct ls_rjt stat; 3281 uint32_t cmd, did; 3282 3283 icmd = &cmdiocb->iocb; 3284 did = icmd->un.elsreq64.remoteID; 3285 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 3286 lp = (uint32_t *) pcmd->virt; 3287 3288 cmd = *lp++; 3289 rn = (RNID *) lp; 3290 3291 /* RNID received */ 3292 3293 switch (rn->Format) { 3294 case 0: 3295 case RNID_TOPOLOGY_DISC: 3296 /* Send back ACC */ 3297 lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp); 3298 break; 3299 default: 3300 /* Reject this request because format not supported */ 3301 stat.un.b.lsRjtRsvd0 = 0; 3302 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 3303 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 3304 stat.un.b.vendorUnique = 0; 3305 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 3306 NULL); 3307 } 3308 return 0; 3309 } 3310 3311 static int 3312 lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 3313 struct lpfc_nodelist *ndlp) 3314 { 3315 struct ls_rjt stat; 3316 3317 /* For now, unconditionally reject this command */ 3318 stat.un.b.lsRjtRsvd0 = 0; 3319 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 3320 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 3321 stat.un.b.vendorUnique = 0; 3322 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 3323 return 0; 3324 } 3325 3326 static void 3327 lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 3328 { 3329 struct lpfc_sli *psli = &phba->sli; 3330 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; 3331 MAILBOX_t *mb; 3332 IOCB_t *icmd; 3333 RPS_RSP *rps_rsp; 3334 uint8_t *pcmd; 3335 struct lpfc_iocbq *elsiocb; 3336 struct lpfc_nodelist *ndlp; 3337 uint16_t xri, status; 3338 uint32_t cmdsize; 3339 3340 mb = &pmb->mb; 3341 3342 ndlp = (struct lpfc_nodelist *) pmb->context2; 3343 xri = (uint16_t) ((unsigned long)(pmb->context1)); 3344 pmb->context1 = NULL; 3345 pmb->context2 = NULL; 3346 3347 if 
(mb->mbxStatus) { 3348 mempool_free(pmb, phba->mbox_mem_pool); 3349 return; 3350 } 3351 3352 cmdsize = sizeof(RPS_RSP) + sizeof(uint32_t); 3353 mempool_free(pmb, phba->mbox_mem_pool); 3354 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 3355 lpfc_max_els_tries, ndlp, 3356 ndlp->nlp_DID, ELS_CMD_ACC); 3357 3358 /* Decrement the ndlp reference count from previous mbox command */ 3359 lpfc_nlp_put(ndlp); 3360 3361 if (!elsiocb) 3362 return; 3363 3364 icmd = &elsiocb->iocb; 3365 icmd->ulpContext = xri; 3366 3367 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3368 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 3369 pcmd += sizeof(uint32_t); /* Skip past command */ 3370 rps_rsp = (RPS_RSP *)pcmd; 3371 3372 if (phba->fc_topology != TOPOLOGY_LOOP) 3373 status = 0x10; 3374 else 3375 status = 0x8; 3376 if (phba->pport->fc_flag & FC_FABRIC) 3377 status |= 0x4; 3378 3379 rps_rsp->rsvd1 = 0; 3380 rps_rsp->portStatus = cpu_to_be16(status); 3381 rps_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt); 3382 rps_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt); 3383 rps_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt); 3384 rps_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt); 3385 rps_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord); 3386 rps_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt); 3387 /* Xmit ELS RPS ACC response tag <ulpIoTag> */ 3388 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 3389 "0118 Xmit ELS RPS ACC response tag x%x xri x%x, " 3390 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 3391 elsiocb->iotag, elsiocb->iocb.ulpContext, 3392 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 3393 ndlp->nlp_rpi); 3394 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 3395 phba->fc_stat.elsXmitACC++; 3396 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) 3397 lpfc_els_free_iocb(phba, elsiocb); 3398 return; 3399 } 3400 3401 static int 3402 lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 3403 struct lpfc_nodelist *ndlp) 3404 { 3405 struct lpfc_hba *phba = vport->phba; 3406 uint32_t *lp; 3407 uint8_t flag; 3408 LPFC_MBOXQ_t *mbox; 3409 struct lpfc_dmabuf *pcmd; 3410 RPS *rps; 3411 struct ls_rjt stat; 3412 3413 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 3414 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { 3415 stat.un.b.lsRjtRsvd0 = 0; 3416 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 3417 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 3418 stat.un.b.vendorUnique = 0; 3419 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 3420 NULL); 3421 } 3422 3423 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 3424 lp = (uint32_t *) pcmd->virt; 3425 flag = (be32_to_cpu(*lp++) & 0xf); 3426 rps = (RPS *) lp; 3427 3428 if ((flag == 0) || 3429 ((flag == 1) && (be32_to_cpu(rps->un.portNum) == 0)) || 3430 ((flag == 2) && (memcmp(&rps->un.portName, &vport->fc_portname, 3431 sizeof(struct lpfc_name)) == 0))) { 3432 3433 printk("Fix me....\n"); 3434 dump_stack(); 3435 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC); 3436 if (mbox) { 3437 lpfc_read_lnk_stat(phba, mbox); 3438 mbox->context1 = 3439 (void *)((unsigned long) cmdiocb->iocb.ulpContext); 3440 mbox->context2 = lpfc_nlp_get(ndlp); 3441 mbox->vport = vport; 3442 mbox->mbox_cmpl = lpfc_els_rsp_rps_acc; 3443 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 3444 != MBX_NOT_FINISHED) 3445 /* Mbox completion will send ELS Response */ 3446 return 0; 3447 /* Decrement reference count used for the failed mbox 3448 * command. 
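 * (lpfc_nlp_get() was taken for mbox->context2 above; undo it here
 * because the mailbox completion handler will never run.)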
3449 */ 3450 lpfc_nlp_put(ndlp); 3451 mempool_free(mbox, phba->mbox_mem_pool); 3452 } 3453 } 3454 stat.un.b.lsRjtRsvd0 = 0; 3455 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 3456 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 3457 stat.un.b.vendorUnique = 0; 3458 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 3459 return 0; 3460 } 3461 3462 static int 3463 lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize, 3464 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 3465 { 3466 struct lpfc_hba *phba = vport->phba; 3467 IOCB_t *icmd, *oldcmd; 3468 RPL_RSP rpl_rsp; 3469 struct lpfc_iocbq *elsiocb; 3470 struct lpfc_sli *psli = &phba->sli; 3471 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; 3472 uint8_t *pcmd; 3473 3474 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 3475 ndlp->nlp_DID, ELS_CMD_ACC); 3476 3477 if (!elsiocb) 3478 return 1; 3479 3480 icmd = &elsiocb->iocb; 3481 oldcmd = &oldiocb->iocb; 3482 icmd->ulpContext = oldcmd->ulpContext; /* Xri */ 3483 3484 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3485 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 3486 pcmd += sizeof(uint16_t); 3487 *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize); 3488 pcmd += sizeof(uint16_t); 3489 3490 /* Setup the RPL ACC payload */ 3491 rpl_rsp.listLen = be32_to_cpu(1); 3492 rpl_rsp.index = 0; 3493 rpl_rsp.port_num_blk.portNum = 0; 3494 rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID); 3495 memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname, 3496 sizeof(struct lpfc_name)); 3497 memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t)); 3498 /* Xmit ELS RPL ACC response tag <ulpIoTag> */ 3499 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3500 "0120 Xmit ELS RPL ACC response tag x%x " 3501 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " 3502 "rpi x%x\n", 3503 elsiocb->iotag, elsiocb->iocb.ulpContext, 3504 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 3505 ndlp->nlp_rpi); 3506 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 3507 phba->fc_stat.elsXmitACC++; 3508 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 3509 lpfc_els_free_iocb(phba, elsiocb); 3510 return 1; 3511 } 3512 return 0; 3513 } 3514 3515 static int 3516 lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 3517 struct lpfc_nodelist *ndlp) 3518 { 3519 struct lpfc_dmabuf *pcmd; 3520 uint32_t *lp; 3521 uint32_t maxsize; 3522 uint16_t cmdsize; 3523 RPL *rpl; 3524 struct ls_rjt stat; 3525 3526 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 3527 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { 3528 stat.un.b.lsRjtRsvd0 = 0; 3529 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 3530 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 3531 stat.un.b.vendorUnique = 0; 3532 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 3533 NULL); 3534 } 3535 3536 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 3537 lp = (uint32_t *) pcmd->virt; 3538 rpl = (RPL *) (lp + 1); 3539 3540 maxsize = be32_to_cpu(rpl->maxsize); 3541 3542 /* We support only one port */ 3543 if ((rpl->index == 0) && 3544 ((maxsize == 0) || 3545 ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) { 3546 cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP); 3547 } else { 3548 cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t); 3549 } 3550 lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp); 3551 3552 return 0; 3553 } 3554 3555 static int 3556 lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 3557 struct lpfc_nodelist *ndlp) 3558 { 3559 struct lpfc_dmabuf *pcmd; 3560 
uint32_t *lp; 3561 IOCB_t *icmd; 3562 FARP *fp; 3563 uint32_t cmd, cnt, did; 3564 3565 icmd = &cmdiocb->iocb; 3566 did = icmd->un.elsreq64.remoteID; 3567 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 3568 lp = (uint32_t *) pcmd->virt; 3569 3570 cmd = *lp++; 3571 fp = (FARP *) lp; 3572 /* FARP-REQ received from DID <did> */ 3573 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3574 "0601 FARP-REQ received from DID x%x\n", did); 3575 /* We will only support match on WWPN or WWNN */ 3576 if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) { 3577 return 0; 3578 } 3579 3580 cnt = 0; 3581 /* If this FARP command is searching for my portname */ 3582 if (fp->Mflags & FARP_MATCH_PORT) { 3583 if (memcmp(&fp->RportName, &vport->fc_portname, 3584 sizeof(struct lpfc_name)) == 0) 3585 cnt = 1; 3586 } 3587 3588 /* If this FARP command is searching for my nodename */ 3589 if (fp->Mflags & FARP_MATCH_NODE) { 3590 if (memcmp(&fp->RnodeName, &vport->fc_nodename, 3591 sizeof(struct lpfc_name)) == 0) 3592 cnt = 1; 3593 } 3594 3595 if (cnt) { 3596 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || 3597 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) { 3598 /* Log back into the node before sending the FARP. */ 3599 if (fp->Rflags & FARP_REQUEST_PLOGI) { 3600 ndlp->nlp_prev_state = ndlp->nlp_state; 3601 lpfc_nlp_set_state(vport, ndlp, 3602 NLP_STE_PLOGI_ISSUE); 3603 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 3604 } 3605 3606 /* Send a FARP response to that node */ 3607 if (fp->Rflags & FARP_REQUEST_FARPR) 3608 lpfc_issue_els_farpr(vport, did, 0); 3609 } 3610 } 3611 return 0; 3612 } 3613 3614 static int 3615 lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 3616 struct lpfc_nodelist *ndlp) 3617 { 3618 struct lpfc_dmabuf *pcmd; 3619 uint32_t *lp; 3620 IOCB_t *icmd; 3621 uint32_t cmd, did; 3622 3623 icmd = &cmdiocb->iocb; 3624 did = icmd->un.elsreq64.remoteID; 3625 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 3626 lp = (uint32_t *) pcmd->virt; 3627 3628 cmd = *lp++; 3629 /* FARP-RSP received from DID <did> */ 3630 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3631 "0600 FARP-RSP received from DID x%x\n", did); 3632 /* ACCEPT the Farp resp request */ 3633 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 3634 3635 return 0; 3636 } 3637 3638 static int 3639 lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 3640 struct lpfc_nodelist *fan_ndlp) 3641 { 3642 struct lpfc_dmabuf *pcmd; 3643 uint32_t *lp; 3644 IOCB_t *icmd; 3645 uint32_t cmd, did; 3646 FAN *fp; 3647 struct lpfc_nodelist *ndlp, *next_ndlp; 3648 struct lpfc_hba *phba = vport->phba; 3649 3650 /* FAN received */ 3651 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3652 "0265 FAN received\n"); 3653 icmd = &cmdiocb->iocb; 3654 did = icmd->un.elsreq64.remoteID; 3655 pcmd = (struct lpfc_dmabuf *)cmdiocb->context2; 3656 lp = (uint32_t *)pcmd->virt; 3657 3658 cmd = *lp++; 3659 fp = (FAN *) lp; 3660 3661 /* FAN received; Fan does not have a reply sequence */ 3662 3663 if (phba->pport->port_state == LPFC_LOCAL_CFG_LINK) { 3664 if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName, 3665 sizeof(struct lpfc_name)) != 0) || 3666 (memcmp(&phba->fc_fabparam.portName, &fp->FportName, 3667 sizeof(struct lpfc_name)) != 0)) { 3668 /* 3669 * This node has switched fabrics. 
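 * (the FAN's fabric WWNN/WWPN no longer match the cached
 * fc_fabparam)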
FLOGI is required 3670 * Clean up the old rpi's 3671 */ 3672 3673 list_for_each_entry_safe(ndlp, next_ndlp, 3674 &vport->fc_nodes, nlp_listp) { 3675 if (ndlp->nlp_state != NLP_STE_NPR_NODE) 3676 continue; 3677 if (ndlp->nlp_type & NLP_FABRIC) { 3678 /* 3679 * Clean up old Fabric, Nameserver and 3680 * other NLP_FABRIC logins 3681 */ 3682 lpfc_drop_node(vport, ndlp); 3683 3684 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { 3685 /* Fail outstanding I/O now since this 3686 * device is marked for PLOGI 3687 */ 3688 lpfc_unreg_rpi(vport, ndlp); 3689 } 3690 } 3691 3692 lpfc_initial_flogi(vport); 3693 return 0; 3694 } 3695 /* Discovery not needed, 3696 * move the nodes to their original state. 3697 */ 3698 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, 3699 nlp_listp) { 3700 if (ndlp->nlp_state != NLP_STE_NPR_NODE) 3701 continue; 3702 3703 switch (ndlp->nlp_prev_state) { 3704 case NLP_STE_UNMAPPED_NODE: 3705 ndlp->nlp_prev_state = NLP_STE_NPR_NODE; 3706 lpfc_nlp_set_state(vport, ndlp, 3707 NLP_STE_UNMAPPED_NODE); 3708 break; 3709 3710 case NLP_STE_MAPPED_NODE: 3711 ndlp->nlp_prev_state = NLP_STE_NPR_NODE; 3712 lpfc_nlp_set_state(vport, ndlp, 3713 NLP_STE_MAPPED_NODE); 3714 break; 3715 3716 default: 3717 break; 3718 } 3719 } 3720 3721 /* Start discovery - this should just do CLEAR_LA */ 3722 lpfc_disc_start(vport); 3723 } 3724 return 0; 3725 } 3726 3727 void 3728 lpfc_els_timeout(unsigned long ptr) 3729 { 3730 struct lpfc_vport *vport = (struct lpfc_vport *) ptr; 3731 struct lpfc_hba *phba = vport->phba; 3732 unsigned long iflag; 3733 3734 spin_lock_irqsave(&vport->work_port_lock, iflag); 3735 if ((vport->work_port_events & WORKER_ELS_TMO) == 0) { 3736 vport->work_port_events |= WORKER_ELS_TMO; 3737 spin_unlock_irqrestore(&vport->work_port_lock, iflag); 3738 3739 spin_lock_irqsave(&phba->hbalock, iflag); 3740 if (phba->work_wait) 3741 lpfc_worker_wake_up(phba); 3742 spin_unlock_irqrestore(&phba->hbalock, iflag); 3743 } 3744 else 3745 spin_unlock_irqrestore(&vport->work_port_lock, iflag); 3746 return; 3747 } 3748 3749 void 3750 lpfc_els_timeout_handler(struct lpfc_vport *vport) 3751 { 3752 struct lpfc_hba *phba = vport->phba; 3753 struct lpfc_sli_ring *pring; 3754 struct lpfc_iocbq *tmp_iocb, *piocb; 3755 IOCB_t *cmd = NULL; 3756 struct lpfc_dmabuf *pcmd; 3757 uint32_t els_command = 0; 3758 uint32_t timeout; 3759 uint32_t remote_ID = 0xffffffff; 3760 3761 /* If the timer is already canceled do nothing */ 3762 if ((vport->work_port_events & WORKER_ELS_TMO) == 0) { 3763 return; 3764 } 3765 spin_lock_irq(&phba->hbalock); 3766 timeout = (uint32_t)(phba->fc_ratov << 1); 3767 3768 pring = &phba->sli.ring[LPFC_ELS_RING]; 3769 3770 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 3771 cmd = &piocb->iocb; 3772 3773 if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 || 3774 piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || 3775 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) 3776 continue; 3777 3778 if (piocb->vport != vport) 3779 continue; 3780 3781 pcmd = (struct lpfc_dmabuf *) piocb->context2; 3782 if (pcmd) 3783 els_command = *(uint32_t *) (pcmd->virt); 3784 3785 if (els_command == ELS_CMD_FARP || 3786 els_command == ELS_CMD_FARPR || 3787 els_command == ELS_CMD_FDISC) 3788 continue; 3789 3790 if (vport != piocb->vport) 3791 continue; 3792 3793 if (piocb->drvrTimeout > 0) { 3794 if (piocb->drvrTimeout >= timeout) 3795 piocb->drvrTimeout -= timeout; 3796 else 3797 piocb->drvrTimeout = 0; 3798 continue; 3799 } 3800 3801 remote_ID = 0xffffffff; 3802 if (cmd->ulpCommand != 
CMD_GEN_REQUEST64_CR) 3803 remote_ID = cmd->un.elsreq64.remoteID; 3804 else { 3805 struct lpfc_nodelist *ndlp; 3806 ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext); 3807 if (ndlp) 3808 remote_ID = ndlp->nlp_DID; 3809 } 3810 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 3811 "0127 ELS timeout Data: x%x x%x x%x " 3812 "x%x\n", els_command, 3813 remote_ID, cmd->ulpCommand, cmd->ulpIoTag); 3814 lpfc_sli_issue_abort_iotag(phba, pring, piocb); 3815 } 3816 spin_unlock_irq(&phba->hbalock); 3817 3818 if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt) 3819 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout); 3820 } 3821 3822 void 3823 lpfc_els_flush_cmd(struct lpfc_vport *vport) 3824 { 3825 LIST_HEAD(completions); 3826 struct lpfc_hba *phba = vport->phba; 3827 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 3828 struct lpfc_iocbq *tmp_iocb, *piocb; 3829 IOCB_t *cmd = NULL; 3830 3831 lpfc_fabric_abort_vport(vport); 3832 3833 spin_lock_irq(&phba->hbalock); 3834 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) { 3835 cmd = &piocb->iocb; 3836 3837 if (piocb->iocb_flag & LPFC_IO_LIBDFC) { 3838 continue; 3839 } 3840 3841 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */ 3842 if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN || 3843 cmd->ulpCommand == CMD_QUE_RING_BUF64_CN || 3844 cmd->ulpCommand == CMD_CLOSE_XRI_CN || 3845 cmd->ulpCommand == CMD_ABORT_XRI_CN) 3846 continue; 3847 3848 if (piocb->vport != vport) 3849 continue; 3850 3851 list_move_tail(&piocb->list, &completions); 3852 pring->txq_cnt--; 3853 } 3854 3855 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 3856 if (piocb->iocb_flag & LPFC_IO_LIBDFC) { 3857 continue; 3858 } 3859 3860 if (piocb->vport != vport) 3861 continue; 3862 3863 lpfc_sli_issue_abort_iotag(phba, pring, piocb); 3864 } 3865 spin_unlock_irq(&phba->hbalock); 3866 3867 while (!list_empty(&completions)) { 3868 piocb = list_get_first(&completions, struct lpfc_iocbq, list); 3869 cmd = &piocb->iocb; 3870 list_del_init(&piocb->list); 3871 3872 if (!piocb->iocb_cmpl) 3873 lpfc_sli_release_iocbq(phba, piocb); 3874 else { 3875 cmd->ulpStatus = IOSTAT_LOCAL_REJECT; 3876 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED; 3877 (piocb->iocb_cmpl) (phba, piocb, piocb); 3878 } 3879 } 3880 3881 return; 3882 } 3883 3884 void 3885 lpfc_els_flush_all_cmd(struct lpfc_hba *phba) 3886 { 3887 LIST_HEAD(completions); 3888 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 3889 struct lpfc_iocbq *tmp_iocb, *piocb; 3890 IOCB_t *cmd = NULL; 3891 3892 lpfc_fabric_abort_hba(phba); 3893 spin_lock_irq(&phba->hbalock); 3894 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) { 3895 cmd = &piocb->iocb; 3896 if (piocb->iocb_flag & LPFC_IO_LIBDFC) 3897 continue; 3898 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */ 3899 if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN || 3900 cmd->ulpCommand == CMD_QUE_RING_BUF64_CN || 3901 cmd->ulpCommand == CMD_CLOSE_XRI_CN || 3902 cmd->ulpCommand == CMD_ABORT_XRI_CN) 3903 continue; 3904 list_move_tail(&piocb->list, &completions); 3905 pring->txq_cnt--; 3906 } 3907 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 3908 if (piocb->iocb_flag & LPFC_IO_LIBDFC) 3909 continue; 3910 lpfc_sli_issue_abort_iotag(phba, pring, piocb); 3911 } 3912 spin_unlock_irq(&phba->hbalock); 3913 while (!list_empty(&completions)) { 3914 piocb = list_get_first(&completions, struct lpfc_iocbq, list); 3915 cmd = &piocb->iocb; 3916 list_del_init(&piocb->list); 3917 if (!piocb->iocb_cmpl) 3918 lpfc_sli_release_iocbq(phba, piocb); 3919 
else {
3920 			cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
3921 			cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
3922 			(piocb->iocb_cmpl) (phba, piocb, piocb);
3923 		}
3924 	}
3925 	return;
3926 }
3927
/*
 * Process one unsolicited ELS frame: validate it, look up or create the
 * node for the remote DID, then hand the command to the discovery state
 * machine or the matching ELS receive handler.
 */
3928 static void
3929 lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3930 		      struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb)
3931 {
3932 	struct Scsi_Host *shost;
3933 	struct lpfc_nodelist *ndlp;
3934 	struct ls_rjt stat;
3935 	uint32_t *payload;
3936 	uint32_t cmd, did, newnode, rjt_err = 0;
3937 	IOCB_t *icmd = &elsiocb->iocb;
3938
3939 	if (vport == NULL || elsiocb->context2 == NULL)
3940 		goto dropit;
3941
3942 	newnode = 0;
3943 	payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
3944 	cmd = *payload;
3945 	if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0)
3946 		lpfc_post_buffer(phba, pring, 1, 1);
3947
3948 	did = icmd->un.rcvels.remoteID;
3949 	if (icmd->ulpStatus) {
3950 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
3951 			"RCV Unsol ELS: status:x%x/x%x did:x%x",
3952 			icmd->ulpStatus, icmd->un.ulpWord[4], did);
3953 		goto dropit;
3954 	}
3955
3956 	/* Check to see if link went down during discovery */
3957 	if (lpfc_els_chk_latt(vport))
3958 		goto dropit;
3959
3960 	/* Ignore traffic received during vport shutdown. */
3961 	if (vport->load_flag & FC_UNLOADING)
3962 		goto dropit;
3963
3964 	ndlp = lpfc_findnode_did(vport, did);
3965 	if (!ndlp) {
3966 		/* Cannot find existing Fabric ndlp, so allocate a new one */
3967 		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
3968 		if (!ndlp)
3969 			goto dropit;
3970
3971 		lpfc_nlp_init(vport, ndlp, did);
3972 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
3973 		newnode = 1;
3974 		if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) {
3975 			ndlp->nlp_type |= NLP_FABRIC;
3976 		}
3977 	}
3978 	else {
3979 		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
3980 			/* This is similar to the new node path */
3981 			lpfc_nlp_get(ndlp);
3982 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
3983 			newnode = 1;
3984 		}
3985 	}
3986
3987 	phba->fc_stat.elsRcvFrame++;
3988 	if (elsiocb->context1)
3989 		lpfc_nlp_put(elsiocb->context1);
3990 	elsiocb->context1 = lpfc_nlp_get(ndlp);
3991 	elsiocb->vport = vport;
3992
3993 	if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) {
3994 		cmd &= ELS_CMD_MASK;
3995 	}
3996 	/* ELS command <elsCmd> received from NPORT <did> */
3997 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3998 			 "0112 ELS command x%x received from NPORT x%x "
3999 			 "Data: x%x\n", cmd, did, vport->port_state);
4000 	switch (cmd) {
4001 	case ELS_CMD_PLOGI:
4002 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4003 			"RCV PLOGI: did:x%x/ste:x%x flg:x%x",
4004 			did, vport->port_state, ndlp->nlp_flag);
4005
4006 		phba->fc_stat.elsRcvPLOGI++;
4007 		ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
4008
4009 		if (vport->port_state < LPFC_DISC_AUTH) {
4010 			rjt_err = LSRJT_UNABLE_TPC;
4011 			break;
4012 		}
4013
4014 		shost = lpfc_shost_from_vport(vport);
4015 		spin_lock_irq(shost->host_lock);
4016 		ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
4017 		spin_unlock_irq(shost->host_lock);
4018
4019 		lpfc_disc_state_machine(vport, ndlp, elsiocb,
4020 					NLP_EVT_RCV_PLOGI);
4021
4022 		break;
4023 	case ELS_CMD_FLOGI:
4024 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4025 			"RCV FLOGI: did:x%x/ste:x%x flg:x%x",
4026 			did, vport->port_state, ndlp->nlp_flag);
4027
4028 		phba->fc_stat.elsRcvFLOGI++;
4029 		lpfc_els_rcv_flogi(vport, elsiocb, ndlp);
4030 		if (newnode)
4031 			lpfc_nlp_put(ndlp);
4032 		break;
4033 	case ELS_CMD_LOGO:
4034 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4035 			"RCV LOGO: did:x%x/ste:x%x 
flg:x%x", 4036 did, vport->port_state, ndlp->nlp_flag); 4037 4038 phba->fc_stat.elsRcvLOGO++; 4039 if (vport->port_state < LPFC_DISC_AUTH) { 4040 rjt_err = LSRJT_UNABLE_TPC; 4041 break; 4042 } 4043 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO); 4044 break; 4045 case ELS_CMD_PRLO: 4046 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 4047 "RCV PRLO: did:x%x/ste:x%x flg:x%x", 4048 did, vport->port_state, ndlp->nlp_flag); 4049 4050 phba->fc_stat.elsRcvPRLO++; 4051 if (vport->port_state < LPFC_DISC_AUTH) { 4052 rjt_err = LSRJT_UNABLE_TPC; 4053 break; 4054 } 4055 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO); 4056 break; 4057 case ELS_CMD_RSCN: 4058 phba->fc_stat.elsRcvRSCN++; 4059 lpfc_els_rcv_rscn(vport, elsiocb, ndlp); 4060 if (newnode) 4061 lpfc_nlp_put(ndlp); 4062 break; 4063 case ELS_CMD_ADISC: 4064 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 4065 "RCV ADISC: did:x%x/ste:x%x flg:x%x", 4066 did, vport->port_state, ndlp->nlp_flag); 4067 4068 phba->fc_stat.elsRcvADISC++; 4069 if (vport->port_state < LPFC_DISC_AUTH) { 4070 rjt_err = LSRJT_UNABLE_TPC; 4071 break; 4072 } 4073 lpfc_disc_state_machine(vport, ndlp, elsiocb, 4074 NLP_EVT_RCV_ADISC); 4075 break; 4076 case ELS_CMD_PDISC: 4077 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 4078 "RCV PDISC: did:x%x/ste:x%x flg:x%x", 4079 did, vport->port_state, ndlp->nlp_flag); 4080 4081 phba->fc_stat.elsRcvPDISC++; 4082 if (vport->port_state < LPFC_DISC_AUTH) { 4083 rjt_err = LSRJT_UNABLE_TPC; 4084 break; 4085 } 4086 lpfc_disc_state_machine(vport, ndlp, elsiocb, 4087 NLP_EVT_RCV_PDISC); 4088 break; 4089 case ELS_CMD_FARPR: 4090 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 4091 "RCV FARPR: did:x%x/ste:x%x flg:x%x", 4092 did, vport->port_state, ndlp->nlp_flag); 4093 4094 phba->fc_stat.elsRcvFARPR++; 4095 lpfc_els_rcv_farpr(vport, elsiocb, ndlp); 4096 break; 4097 case ELS_CMD_FARP: 4098 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 4099 "RCV FARP: did:x%x/ste:x%x flg:x%x", 4100 did, vport->port_state, ndlp->nlp_flag); 4101 4102 phba->fc_stat.elsRcvFARP++; 4103 lpfc_els_rcv_farp(vport, elsiocb, ndlp); 4104 break; 4105 case ELS_CMD_FAN: 4106 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 4107 "RCV FAN: did:x%x/ste:x%x flg:x%x", 4108 did, vport->port_state, ndlp->nlp_flag); 4109 4110 phba->fc_stat.elsRcvFAN++; 4111 lpfc_els_rcv_fan(vport, elsiocb, ndlp); 4112 break; 4113 case ELS_CMD_PRLI: 4114 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 4115 "RCV PRLI: did:x%x/ste:x%x flg:x%x", 4116 did, vport->port_state, ndlp->nlp_flag); 4117 4118 phba->fc_stat.elsRcvPRLI++; 4119 if (vport->port_state < LPFC_DISC_AUTH) { 4120 rjt_err = LSRJT_UNABLE_TPC; 4121 break; 4122 } 4123 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI); 4124 break; 4125 case ELS_CMD_LIRR: 4126 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 4127 "RCV LIRR: did:x%x/ste:x%x flg:x%x", 4128 did, vport->port_state, ndlp->nlp_flag); 4129 4130 phba->fc_stat.elsRcvLIRR++; 4131 lpfc_els_rcv_lirr(vport, elsiocb, ndlp); 4132 if (newnode) 4133 lpfc_nlp_put(ndlp); 4134 break; 4135 case ELS_CMD_RPS: 4136 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 4137 "RCV RPS: did:x%x/ste:x%x flg:x%x", 4138 did, vport->port_state, ndlp->nlp_flag); 4139 4140 phba->fc_stat.elsRcvRPS++; 4141 lpfc_els_rcv_rps(vport, elsiocb, ndlp); 4142 if (newnode) 4143 lpfc_nlp_put(ndlp); 4144 break; 4145 case ELS_CMD_RPL: 4146 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 4147 "RCV RPL: did:x%x/ste:x%x flg:x%x", 
4148 did, vport->port_state, ndlp->nlp_flag); 4149 4150 phba->fc_stat.elsRcvRPL++; 4151 lpfc_els_rcv_rpl(vport, elsiocb, ndlp); 4152 if (newnode) 4153 lpfc_nlp_put(ndlp); 4154 break; 4155 case ELS_CMD_RNID: 4156 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 4157 "RCV RNID: did:x%x/ste:x%x flg:x%x", 4158 did, vport->port_state, ndlp->nlp_flag); 4159 4160 phba->fc_stat.elsRcvRNID++; 4161 lpfc_els_rcv_rnid(vport, elsiocb, ndlp); 4162 if (newnode) 4163 lpfc_nlp_put(ndlp); 4164 break; 4165 default: 4166 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 4167 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x", 4168 cmd, did, vport->port_state); 4169 4170 /* Unsupported ELS command, reject */ 4171 rjt_err = LSRJT_INVALID_CMD; 4172 4173 /* Unknown ELS command <elsCmd> received from NPORT <did> */ 4174 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 4175 "0115 Unknown ELS command x%x " 4176 "received from NPORT x%x\n", cmd, did); 4177 if (newnode) 4178 lpfc_nlp_put(ndlp); 4179 break; 4180 } 4181 4182 /* check if need to LS_RJT received ELS cmd */ 4183 if (rjt_err) { 4184 memset(&stat, 0, sizeof(stat)); 4185 stat.un.b.lsRjtRsnCode = rjt_err; 4186 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; 4187 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp, 4188 NULL); 4189 } 4190 4191 return; 4192 4193 dropit: 4194 if (vport && !(vport->load_flag & FC_UNLOADING)) 4195 lpfc_printf_log(phba, KERN_ERR, LOG_ELS, 4196 "(%d):0111 Dropping received ELS cmd " 4197 "Data: x%x x%x x%x\n", 4198 vport->vpi, icmd->ulpStatus, 4199 icmd->un.ulpWord[4], icmd->ulpTimeout); 4200 phba->fc_stat.elsRcvDrop++; 4201 } 4202 4203 static struct lpfc_vport * 4204 lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi) 4205 { 4206 struct lpfc_vport *vport; 4207 unsigned long flags; 4208 4209 spin_lock_irqsave(&phba->hbalock, flags); 4210 list_for_each_entry(vport, &phba->port_list, listentry) { 4211 if (vport->vpi == vpi) { 4212 spin_unlock_irqrestore(&phba->hbalock, flags); 4213 return vport; 4214 } 4215 } 4216 spin_unlock_irqrestore(&phba->hbalock, flags); 4217 return NULL; 4218 } 4219 4220 void 4221 lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 4222 struct lpfc_iocbq *elsiocb) 4223 { 4224 struct lpfc_vport *vport = phba->pport; 4225 IOCB_t *icmd = &elsiocb->iocb; 4226 dma_addr_t paddr; 4227 struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2; 4228 struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3; 4229 4230 elsiocb->context2 = NULL; 4231 elsiocb->context3 = NULL; 4232 4233 if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) { 4234 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); 4235 } else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT && 4236 (icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING) { 4237 phba->fc_stat.NoRcvBuf++; 4238 /* Not enough posted buffers; Try posting more buffers */ 4239 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) 4240 lpfc_post_buffer(phba, pring, 0, 1); 4241 return; 4242 } 4243 4244 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 4245 (icmd->ulpCommand == CMD_IOCB_RCV_ELS64_CX || 4246 icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 4247 if (icmd->unsli3.rcvsli3.vpi == 0xffff) 4248 vport = phba->pport; 4249 else { 4250 uint16_t vpi = icmd->unsli3.rcvsli3.vpi; 4251 vport = lpfc_find_vport_by_vpid(phba, vpi); 4252 } 4253 } 4254 /* If there are no BDEs associated 4255 * with this IOCB, there is nothing to do. 
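* Note that a received ELS IOCB can carry up to two BDEs; the first
* buffer is handled here and the second, if present, is processed
* further below.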
4256 */ 4257 if (icmd->ulpBdeCount == 0) 4258 return; 4259 4260 /* type of ELS cmd is first 32bit word 4261 * in packet 4262 */ 4263 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 4264 elsiocb->context2 = bdeBuf1; 4265 } else { 4266 paddr = getPaddr(icmd->un.cont64[0].addrHigh, 4267 icmd->un.cont64[0].addrLow); 4268 elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring, 4269 paddr); 4270 } 4271 4272 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb); 4273 /* 4274 * The different unsolicited event handlers would tell us 4275 * if they are done with "mp" by setting context2 to NULL. 4276 */ 4277 lpfc_nlp_put(elsiocb->context1); 4278 elsiocb->context1 = NULL; 4279 if (elsiocb->context2) { 4280 lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2); 4281 elsiocb->context2 = NULL; 4282 } 4283 4284 /* RCV_ELS64_CX provide for 2 BDEs - process 2nd if included */ 4285 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) && 4286 icmd->ulpBdeCount == 2) { 4287 elsiocb->context2 = bdeBuf2; 4288 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb); 4289 /* free mp if we are done with it */ 4290 if (elsiocb->context2) { 4291 lpfc_in_buf_free(phba, elsiocb->context2); 4292 elsiocb->context2 = NULL; 4293 } 4294 } 4295 } 4296 4297 void 4298 lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) 4299 { 4300 struct lpfc_nodelist *ndlp, *ndlp_fdmi; 4301 4302 ndlp = lpfc_findnode_did(vport, NameServer_DID); 4303 if (!ndlp) { 4304 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 4305 if (!ndlp) { 4306 if (phba->fc_topology == TOPOLOGY_LOOP) { 4307 lpfc_disc_start(vport); 4308 return; 4309 } 4310 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 4311 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 4312 "0251 NameServer login: no memory\n"); 4313 return; 4314 } 4315 lpfc_nlp_init(vport, ndlp, NameServer_DID); 4316 ndlp->nlp_type |= NLP_FABRIC; 4317 } 4318 4319 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 4320 4321 if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) { 4322 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 4323 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 4324 "0252 Cannot issue NameServer login\n"); 4325 return; 4326 } 4327 4328 if (vport->cfg_fdmi_on) { 4329 ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool, 4330 GFP_KERNEL); 4331 if (ndlp_fdmi) { 4332 lpfc_nlp_init(vport, ndlp_fdmi, FDMI_DID); 4333 ndlp_fdmi->nlp_type |= NLP_FABRIC; 4334 ndlp_fdmi->nlp_state = 4335 NLP_STE_PLOGI_ISSUE; 4336 lpfc_issue_els_plogi(vport, ndlp_fdmi->nlp_DID, 4337 0); 4338 } 4339 } 4340 return; 4341 } 4342 4343 static void 4344 lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 4345 { 4346 struct lpfc_vport *vport = pmb->vport; 4347 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4348 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 4349 MAILBOX_t *mb = &pmb->mb; 4350 4351 spin_lock_irq(shost->host_lock); 4352 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 4353 spin_unlock_irq(shost->host_lock); 4354 4355 if (mb->mbxStatus) { 4356 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 4357 "0915 Register VPI failed: 0x%x\n", 4358 mb->mbxStatus); 4359 4360 switch (mb->mbxStatus) { 4361 case 0x11: /* unsupported feature */ 4362 case 0x9603: /* max_vpi exceeded */ 4363 /* giving up on vport registration */ 4364 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 4365 spin_lock_irq(shost->host_lock); 4366 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 4367 spin_unlock_irq(shost->host_lock); 4368 lpfc_can_disctmo(vport); 4369 break; 4370 default: 4371 /* Try to recover from this 
error */ 4372 lpfc_mbx_unreg_vpi(vport); 4373 spin_lock_irq(shost->host_lock); 4374 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 4375 spin_unlock_irq(shost->host_lock); 4376 lpfc_initial_fdisc(vport); 4377 break; 4378 } 4379 4380 } else { 4381 if (vport == phba->pport) 4382 lpfc_issue_fabric_reglogin(vport); 4383 else 4384 lpfc_do_scr_ns_plogi(phba, vport); 4385 } 4386 4387 /* Now, we decrement the ndlp reference count held for this 4388 * callback function 4389 */ 4390 lpfc_nlp_put(ndlp); 4391 4392 mempool_free(pmb, phba->mbox_mem_pool); 4393 return; 4394 } 4395 4396 static void 4397 lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport, 4398 struct lpfc_nodelist *ndlp) 4399 { 4400 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4401 LPFC_MBOXQ_t *mbox; 4402 4403 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4404 if (mbox) { 4405 lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, mbox); 4406 mbox->vport = vport; 4407 mbox->context2 = lpfc_nlp_get(ndlp); 4408 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport; 4409 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 4410 == MBX_NOT_FINISHED) { 4411 /* mailbox command not success, decrement ndlp 4412 * reference count for this command 4413 */ 4414 lpfc_nlp_put(ndlp); 4415 mempool_free(mbox, phba->mbox_mem_pool); 4416 4417 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 4418 "0253 Register VPI: Can't send mbox\n"); 4419 goto mbox_err_exit; 4420 } 4421 } else { 4422 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 4423 "0254 Register VPI: no memory\n"); 4424 goto mbox_err_exit; 4425 } 4426 return; 4427 4428 mbox_err_exit: 4429 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 4430 spin_lock_irq(shost->host_lock); 4431 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 4432 spin_unlock_irq(shost->host_lock); 4433 return; 4434 } 4435 4436 static void 4437 lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 4438 struct lpfc_iocbq *rspiocb) 4439 { 4440 struct lpfc_vport *vport = cmdiocb->vport; 4441 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4442 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 4443 struct lpfc_nodelist *np; 4444 struct lpfc_nodelist *next_np; 4445 IOCB_t *irsp = &rspiocb->iocb; 4446 struct lpfc_iocbq *piocb; 4447 4448 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4449 "0123 FDISC completes. x%x/x%x prevDID: x%x\n", 4450 irsp->ulpStatus, irsp->un.ulpWord[4], 4451 vport->fc_prevDID); 4452 /* Since all FDISCs are being single threaded, we 4453 * must reset the discovery timer for ALL vports 4454 * waiting to send FDISC when one completes. 4455 */ 4456 list_for_each_entry(piocb, &phba->fabric_iocb_list, list) { 4457 lpfc_set_disctmo(piocb->vport); 4458 } 4459 4460 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 4461 "FDISC cmpl: status:x%x/x%x prevdid:x%x", 4462 irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID); 4463 4464 if (irsp->ulpStatus) { 4465 /* Check for retry */ 4466 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) 4467 goto out; 4468 /* FDISC failed */ 4469 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 4470 "0124 FDISC failed. (%d/%d)\n", 4471 irsp->ulpStatus, irsp->un.ulpWord[4]); 4472 if (vport->fc_vport->vport_state == FC_VPORT_INITIALIZING) 4473 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 4474 4475 lpfc_nlp_put(ndlp); 4476 /* giving up on FDISC. 
Cancel discovery timer */ 4477 lpfc_can_disctmo(vport); 4478 } else { 4479 spin_lock_irq(shost->host_lock); 4480 vport->fc_flag |= FC_FABRIC; 4481 if (vport->phba->fc_topology == TOPOLOGY_LOOP) 4482 vport->fc_flag |= FC_PUBLIC_LOOP; 4483 spin_unlock_irq(shost->host_lock); 4484 4485 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID; 4486 lpfc_vport_set_state(vport, FC_VPORT_ACTIVE); 4487 if ((vport->fc_prevDID != vport->fc_myDID) && 4488 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 4489 /* If our NportID changed, we need to ensure all 4490 * remaining NPORTs get unreg_login'ed so we can 4491 * issue unreg_vpi. 4492 */ 4493 list_for_each_entry_safe(np, next_np, 4494 &vport->fc_nodes, nlp_listp) { 4495 if (np->nlp_state != NLP_STE_NPR_NODE 4496 || !(np->nlp_flag & NLP_NPR_ADISC)) 4497 continue; 4498 spin_lock_irq(shost->host_lock); 4499 np->nlp_flag &= ~NLP_NPR_ADISC; 4500 spin_unlock_irq(shost->host_lock); 4501 lpfc_unreg_rpi(vport, np); 4502 } 4503 lpfc_mbx_unreg_vpi(vport); 4504 spin_lock_irq(shost->host_lock); 4505 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 4506 spin_unlock_irq(shost->host_lock); 4507 } 4508 4509 if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) 4510 lpfc_register_new_vport(phba, vport, ndlp); 4511 else 4512 lpfc_do_scr_ns_plogi(phba, vport); 4513 4514 /* Unconditionaly kick off releasing fabric node for vports */ 4515 lpfc_nlp_put(ndlp); 4516 } 4517 4518 out: 4519 lpfc_els_free_iocb(phba, cmdiocb); 4520 } 4521 4522 static int 4523 lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 4524 uint8_t retry) 4525 { 4526 struct lpfc_hba *phba = vport->phba; 4527 IOCB_t *icmd; 4528 struct lpfc_iocbq *elsiocb; 4529 struct serv_parm *sp; 4530 uint8_t *pcmd; 4531 uint16_t cmdsize; 4532 int did = ndlp->nlp_DID; 4533 int rc; 4534 4535 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 4536 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 4537 ELS_CMD_FDISC); 4538 if (!elsiocb) { 4539 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 4540 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 4541 "0255 Issue FDISC: no IOCB\n"); 4542 return 1; 4543 } 4544 4545 icmd = &elsiocb->iocb; 4546 icmd->un.elsreq64.myID = 0; 4547 icmd->un.elsreq64.fl = 1; 4548 4549 /* For FDISC, Let FDISC rsp set the NPortID for this VPI */ 4550 icmd->ulpCt_h = 1; 4551 icmd->ulpCt_l = 0; 4552 4553 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 4554 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC; 4555 pcmd += sizeof(uint32_t); /* CSP Word 1 */ 4556 memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm)); 4557 sp = (struct serv_parm *) pcmd; 4558 /* Setup CSPs accordingly for Fabric */ 4559 sp->cmn.e_d_tov = 0; 4560 sp->cmn.w2.r_a_tov = 0; 4561 sp->cls1.classValid = 0; 4562 sp->cls2.seqDelivery = 1; 4563 sp->cls3.seqDelivery = 1; 4564 4565 pcmd += sizeof(uint32_t); /* CSP Word 2 */ 4566 pcmd += sizeof(uint32_t); /* CSP Word 3 */ 4567 pcmd += sizeof(uint32_t); /* CSP Word 4 */ 4568 pcmd += sizeof(uint32_t); /* Port Name */ 4569 memcpy(pcmd, &vport->fc_portname, 8); 4570 pcmd += sizeof(uint32_t); /* Node Name */ 4571 pcmd += sizeof(uint32_t); /* Node Name */ 4572 memcpy(pcmd, &vport->fc_nodename, 8); 4573 4574 lpfc_set_disctmo(vport); 4575 4576 phba->fc_stat.elsXmitFDISC++; 4577 elsiocb->iocb_cmpl = lpfc_cmpl_els_fdisc; 4578 4579 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 4580 "Issue FDISC: did:x%x", 4581 did, 0, 0); 4582 4583 rc = lpfc_issue_fabric_iocb(phba, elsiocb); 4584 if (rc == IOCB_ERROR) { 4585 lpfc_els_free_iocb(phba, elsiocb); 4586 
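/* The FDISC was not queued by lpfc_issue_fabric_iocb, so mark the
 * vport failed and report the error.
 */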
lpfc_vport_set_state(vport, FC_VPORT_FAILED); 4587 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 4588 "0256 Issue FDISC: Cannot send IOCB\n"); 4589 return 1; 4590 } 4591 lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING); 4592 vport->port_state = LPFC_FDISC; 4593 return 0; 4594 } 4595 4596 static void 4597 lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 4598 struct lpfc_iocbq *rspiocb) 4599 { 4600 struct lpfc_vport *vport = cmdiocb->vport; 4601 IOCB_t *irsp; 4602 4603 irsp = &rspiocb->iocb; 4604 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 4605 "LOGO npiv cmpl: status:x%x/x%x did:x%x", 4606 irsp->ulpStatus, irsp->un.ulpWord[4], irsp->un.rcvels.remoteID); 4607 4608 lpfc_els_free_iocb(phba, cmdiocb); 4609 vport->unreg_vpi_cmpl = VPORT_ERROR; 4610 } 4611 4612 int 4613 lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 4614 { 4615 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4616 struct lpfc_hba *phba = vport->phba; 4617 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 4618 IOCB_t *icmd; 4619 struct lpfc_iocbq *elsiocb; 4620 uint8_t *pcmd; 4621 uint16_t cmdsize; 4622 4623 cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name); 4624 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID, 4625 ELS_CMD_LOGO); 4626 if (!elsiocb) 4627 return 1; 4628 4629 icmd = &elsiocb->iocb; 4630 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 4631 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 4632 pcmd += sizeof(uint32_t); 4633 4634 /* Fill in LOGO payload */ 4635 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 4636 pcmd += sizeof(uint32_t); 4637 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 4638 4639 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 4640 "Issue LOGO npiv did:x%x flg:x%x", 4641 ndlp->nlp_DID, ndlp->nlp_flag, 0); 4642 4643 elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo; 4644 spin_lock_irq(shost->host_lock); 4645 ndlp->nlp_flag |= NLP_LOGO_SND; 4646 spin_unlock_irq(shost->host_lock); 4647 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 4648 spin_lock_irq(shost->host_lock); 4649 ndlp->nlp_flag &= ~NLP_LOGO_SND; 4650 spin_unlock_irq(shost->host_lock); 4651 lpfc_els_free_iocb(phba, elsiocb); 4652 return 1; 4653 } 4654 return 0; 4655 } 4656 4657 void 4658 lpfc_fabric_block_timeout(unsigned long ptr) 4659 { 4660 struct lpfc_hba *phba = (struct lpfc_hba *) ptr; 4661 unsigned long iflags; 4662 uint32_t tmo_posted; 4663 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 4664 tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO; 4665 if (!tmo_posted) 4666 phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO; 4667 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); 4668 4669 if (!tmo_posted) { 4670 spin_lock_irqsave(&phba->hbalock, iflags); 4671 if (phba->work_wait) 4672 lpfc_worker_wake_up(phba); 4673 spin_unlock_irqrestore(&phba->hbalock, iflags); 4674 } 4675 } 4676 4677 static void 4678 lpfc_resume_fabric_iocbs(struct lpfc_hba *phba) 4679 { 4680 struct lpfc_iocbq *iocb; 4681 unsigned long iflags; 4682 int ret; 4683 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 4684 IOCB_t *cmd; 4685 4686 repeat: 4687 iocb = NULL; 4688 spin_lock_irqsave(&phba->hbalock, iflags); 4689 /* Post any pending iocb to the SLI layer */ 4690 if (atomic_read(&phba->fabric_iocb_count) == 0) { 4691 list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb), 4692 list); 4693 if (iocb) 4694 atomic_inc(&phba->fabric_iocb_count); 
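			/* Only one fabric IOCB may be outstanding at a
			 * time; bumping the count here claims that slot
			 * for the IOCB just removed from the list.
			 */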
4695 } 4696 spin_unlock_irqrestore(&phba->hbalock, iflags); 4697 if (iocb) { 4698 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl; 4699 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb; 4700 iocb->iocb_flag |= LPFC_IO_FABRIC; 4701 4702 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD, 4703 "Fabric sched1: ste:x%x", 4704 iocb->vport->port_state, 0, 0); 4705 4706 ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0); 4707 4708 if (ret == IOCB_ERROR) { 4709 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl; 4710 iocb->fabric_iocb_cmpl = NULL; 4711 iocb->iocb_flag &= ~LPFC_IO_FABRIC; 4712 cmd = &iocb->iocb; 4713 cmd->ulpStatus = IOSTAT_LOCAL_REJECT; 4714 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED; 4715 iocb->iocb_cmpl(phba, iocb, iocb); 4716 4717 atomic_dec(&phba->fabric_iocb_count); 4718 goto repeat; 4719 } 4720 } 4721 4722 return; 4723 } 4724 4725 void 4726 lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba) 4727 { 4728 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 4729 4730 lpfc_resume_fabric_iocbs(phba); 4731 return; 4732 } 4733 4734 static void 4735 lpfc_block_fabric_iocbs(struct lpfc_hba *phba) 4736 { 4737 int blocked; 4738 4739 blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 4740 /* Start a timer to unblock fabric 4741 * iocbs after 100ms 4742 */ 4743 if (!blocked) 4744 mod_timer(&phba->fabric_block_timer, jiffies + HZ/10 ); 4745 4746 return; 4747 } 4748 4749 static void 4750 lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 4751 struct lpfc_iocbq *rspiocb) 4752 { 4753 struct ls_rjt stat; 4754 4755 if ((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC) 4756 BUG(); 4757 4758 switch (rspiocb->iocb.ulpStatus) { 4759 case IOSTAT_NPORT_RJT: 4760 case IOSTAT_FABRIC_RJT: 4761 if (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) { 4762 lpfc_block_fabric_iocbs(phba); 4763 } 4764 break; 4765 4766 case IOSTAT_NPORT_BSY: 4767 case IOSTAT_FABRIC_BSY: 4768 lpfc_block_fabric_iocbs(phba); 4769 break; 4770 4771 case IOSTAT_LS_RJT: 4772 stat.un.lsRjtError = 4773 be32_to_cpu(rspiocb->iocb.un.ulpWord[4]); 4774 if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) || 4775 (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY)) 4776 lpfc_block_fabric_iocbs(phba); 4777 break; 4778 } 4779 4780 if (atomic_read(&phba->fabric_iocb_count) == 0) 4781 BUG(); 4782 4783 cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl; 4784 cmdiocb->fabric_iocb_cmpl = NULL; 4785 cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC; 4786 cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb); 4787 4788 atomic_dec(&phba->fabric_iocb_count); 4789 if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) { 4790 /* Post any pending iocbs to HBA */ 4791 lpfc_resume_fabric_iocbs(phba); 4792 } 4793 } 4794 4795 static int 4796 lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb) 4797 { 4798 unsigned long iflags; 4799 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 4800 int ready; 4801 int ret; 4802 4803 if (atomic_read(&phba->fabric_iocb_count) > 1) 4804 BUG(); 4805 4806 spin_lock_irqsave(&phba->hbalock, iflags); 4807 ready = atomic_read(&phba->fabric_iocb_count) == 0 && 4808 !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 4809 4810 spin_unlock_irqrestore(&phba->hbalock, iflags); 4811 if (ready) { 4812 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl; 4813 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb; 4814 iocb->iocb_flag |= LPFC_IO_FABRIC; 4815 4816 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD, 4817 "Fabric sched2: ste:x%x", 4818 iocb->vport->port_state, 0, 0); 4819 4820 atomic_inc(&phba->fabric_iocb_count); 4821 ret = 
lpfc_sli_issue_iocb(phba, pring, iocb, 0); 4822 4823 if (ret == IOCB_ERROR) { 4824 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl; 4825 iocb->fabric_iocb_cmpl = NULL; 4826 iocb->iocb_flag &= ~LPFC_IO_FABRIC; 4827 atomic_dec(&phba->fabric_iocb_count); 4828 } 4829 } else { 4830 spin_lock_irqsave(&phba->hbalock, iflags); 4831 list_add_tail(&iocb->list, &phba->fabric_iocb_list); 4832 spin_unlock_irqrestore(&phba->hbalock, iflags); 4833 ret = IOCB_SUCCESS; 4834 } 4835 return ret; 4836 } 4837 4838 4839 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport) 4840 { 4841 LIST_HEAD(completions); 4842 struct lpfc_hba *phba = vport->phba; 4843 struct lpfc_iocbq *tmp_iocb, *piocb; 4844 IOCB_t *cmd; 4845 4846 spin_lock_irq(&phba->hbalock); 4847 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, 4848 list) { 4849 4850 if (piocb->vport != vport) 4851 continue; 4852 4853 list_move_tail(&piocb->list, &completions); 4854 } 4855 spin_unlock_irq(&phba->hbalock); 4856 4857 while (!list_empty(&completions)) { 4858 piocb = list_get_first(&completions, struct lpfc_iocbq, list); 4859 list_del_init(&piocb->list); 4860 4861 cmd = &piocb->iocb; 4862 cmd->ulpStatus = IOSTAT_LOCAL_REJECT; 4863 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED; 4864 (piocb->iocb_cmpl) (phba, piocb, piocb); 4865 } 4866 } 4867 4868 void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp) 4869 { 4870 LIST_HEAD(completions); 4871 struct lpfc_hba *phba = ndlp->vport->phba; 4872 struct lpfc_iocbq *tmp_iocb, *piocb; 4873 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 4874 IOCB_t *cmd; 4875 4876 spin_lock_irq(&phba->hbalock); 4877 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, 4878 list) { 4879 if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) { 4880 4881 list_move_tail(&piocb->list, &completions); 4882 } 4883 } 4884 spin_unlock_irq(&phba->hbalock); 4885 4886 while (!list_empty(&completions)) { 4887 piocb = list_get_first(&completions, struct lpfc_iocbq, list); 4888 list_del_init(&piocb->list); 4889 4890 cmd = &piocb->iocb; 4891 cmd->ulpStatus = IOSTAT_LOCAL_REJECT; 4892 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED; 4893 (piocb->iocb_cmpl) (phba, piocb, piocb); 4894 } 4895 } 4896 4897 void lpfc_fabric_abort_hba(struct lpfc_hba *phba) 4898 { 4899 LIST_HEAD(completions); 4900 struct lpfc_iocbq *piocb; 4901 IOCB_t *cmd; 4902 4903 spin_lock_irq(&phba->hbalock); 4904 list_splice_init(&phba->fabric_iocb_list, &completions); 4905 spin_unlock_irq(&phba->hbalock); 4906 4907 while (!list_empty(&completions)) { 4908 piocb = list_get_first(&completions, struct lpfc_iocbq, list); 4909 list_del_init(&piocb->list); 4910 4911 cmd = &piocb->iocb; 4912 cmd->ulpStatus = IOSTAT_LOCAL_REJECT; 4913 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED; 4914 (piocb->iocb_cmpl) (phba, piocb, piocb); 4915 } 4916 } 4917 4918 4919 #if 0 4920 void lpfc_fabric_abort_flogi(struct lpfc_hba *phba) 4921 { 4922 LIST_HEAD(completions); 4923 struct lpfc_iocbq *tmp_iocb, *piocb; 4924 IOCB_t *cmd; 4925 struct lpfc_nodelist *ndlp; 4926 4927 spin_lock_irq(&phba->hbalock); 4928 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, 4929 list) { 4930 4931 cmd = &piocb->iocb; 4932 ndlp = (struct lpfc_nodelist *) piocb->context1; 4933 if (cmd->ulpCommand == CMD_ELS_REQUEST64_CR && 4934 ndlp != NULL && 4935 ndlp->nlp_DID == Fabric_DID) 4936 list_move_tail(&piocb->list, &completions); 4937 } 4938 spin_unlock_irq(&phba->hbalock); 4939 4940 while (!list_empty(&completions)) { 4941 piocb = list_get_first(&completions, struct lpfc_iocbq, list); 4942 
list_del_init(&piocb->list); 4943 4944 cmd = &piocb->iocb; 4945 cmd->ulpStatus = IOSTAT_LOCAL_REJECT; 4946 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED; 4947 (piocb->iocb_cmpl) (phba, piocb, piocb); 4948 } 4949 } 4950 #endif /* 0 */ 4951 4952 4953