1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term * 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 * EMULEX and SLI are trademarks of Emulex. * 8 * www.broadcom.com * 9 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 10 * * 11 * This program is free software; you can redistribute it and/or * 12 * modify it under the terms of version 2 of the GNU General * 13 * Public License as published by the Free Software Foundation. * 14 * This program is distributed in the hope that it will be useful. * 15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 19 * TO BE LEGALLY INVALID. See the GNU General Public License for * 20 * more details, a copy of which can be found in the file COPYING * 21 * included with this package. * 22 *******************************************************************/ 23 24 #include <linux/blkdev.h> 25 #include <linux/delay.h> 26 #include <linux/slab.h> 27 #include <linux/pci.h> 28 #include <linux/kthread.h> 29 #include <linux/interrupt.h> 30 #include <linux/lockdep.h> 31 #include <linux/utsname.h> 32 33 #include <scsi/scsi.h> 34 #include <scsi/scsi_device.h> 35 #include <scsi/scsi_host.h> 36 #include <scsi/scsi_transport_fc.h> 37 #include <scsi/fc/fc_fs.h> 38 39 #include "lpfc_hw4.h" 40 #include "lpfc_hw.h" 41 #include "lpfc_nl.h" 42 #include "lpfc_disc.h" 43 #include "lpfc_sli.h" 44 #include "lpfc_sli4.h" 45 #include "lpfc.h" 46 #include "lpfc_scsi.h" 47 #include "lpfc_nvme.h" 48 #include "lpfc_logmsg.h" 49 #include "lpfc_crtn.h" 50 #include "lpfc_vport.h" 51 #include "lpfc_debugfs.h" 52 53 /* AlpaArray for assignment of scsid for scan-down and bind_method */ 54 static uint8_t lpfcAlpaArray[] = { 55 0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6, 56 0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA, 57 0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5, 58 0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9, 59 0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97, 60 0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79, 61 0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B, 62 0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56, 63 0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A, 64 0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35, 65 0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, 66 0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17, 67 0x10, 0x0F, 0x08, 0x04, 0x02, 0x01 68 }; 69 70 static void lpfc_disc_timeout_handler(struct lpfc_vport *); 71 static void lpfc_disc_flush_list(struct lpfc_vport *vport); 72 static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); 73 static int lpfc_fcf_inuse(struct lpfc_hba *); 74 static void lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *); 75 static void lpfc_check_inactive_vmid(struct lpfc_hba *phba); 76 static void lpfc_check_vmid_qfpa_issue(struct lpfc_hba *phba); 77 78 static int 79 lpfc_valid_xpt_node(struct lpfc_nodelist *ndlp) 80 { 81 if (ndlp->nlp_fc4_type || 82 ndlp->nlp_type & NLP_FABRIC) 83 return 1; 84 return 
0;
}

/* The source of a terminate rport I/O is either a dev_loss_tmo
 * event or a call to fc_remove_host. While the rport should be
 * valid during these downcalls, the transport can call twice
 * in a single event. This routine provides some protection
 * as the NDLP isn't really free, just released to the pool.
 */
static int
lpfc_rport_invalid(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;

	if (!rport) {
		pr_err("**** %s: NULL rport, exit.\n", __func__);
		return -EINVAL;
	}

	rdata = rport->dd_data;
	if (!rdata) {
		pr_err("**** %s: NULL dd_data on rport x%px SID x%x\n",
		       __func__, rport, rport->scsi_target_id);
		return -EINVAL;
	}

	ndlp = rdata->pnode;
	if (!rdata->pnode) {
		pr_info("**** %s: NULL ndlp on rport x%px SID x%x\n",
			__func__, rport, rport->scsi_target_id);
		return -EINVAL;
	}

	if (!ndlp->vport) {
		pr_err("**** %s: Null vport on ndlp x%px, DID x%x rport x%px "
		       "SID x%x\n", __func__, ndlp, ndlp->nlp_DID, rport,
		       rport->scsi_target_id);
		return -EINVAL;
	}
	return 0;
}

void
lpfc_terminate_rport_io(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_vport *vport;

	if (lpfc_rport_invalid(rport))
		return;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;
	vport = ndlp->vport;
	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
			      "rport terminate: sid:x%x did:x%x flg:x%x",
			      ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	if (ndlp->nlp_sid != NLP_NO_SID)
		lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
}

/*
 * This function will be called when dev_loss_tmo fires.
 */
void
lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
{
	struct lpfc_nodelist *ndlp;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_work_evt *evtp;
	unsigned long iflags;

	ndlp = ((struct lpfc_rport_data *)rport->dd_data)->pnode;
	if (!ndlp)
		return;

	vport = ndlp->vport;
	phba = vport->phba;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
			      "rport devlosscb: sid:x%x did:x%x flg:x%x",
			      ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3181 dev_loss_callbk x%06x, rport x%px flg x%x "
			 "load_flag x%x refcnt %d state %d xpt x%x\n",
			 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag,
			 vport->load_flag, kref_read(&ndlp->kref),
			 ndlp->nlp_state, ndlp->fc4_xpt_flags);

	/* Don't schedule a worker thread event if the vport is going down.
	 * The teardown process cleans up the node via lpfc_drop_node.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		((struct lpfc_rport_data *)rport->dd_data)->pnode = NULL;
		ndlp->rport = NULL;

		ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD;
		/* clear the NLP_XPT_REGD if the node is not registered
		 * with nvme-fc
		 */
		if (ndlp->fc4_xpt_flags == NLP_XPT_REGD)
			ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;

		/* Remove the node reference from remote_port_add now.
		 * The driver will not call remote_port_delete.
193 */ 194 lpfc_nlp_put(ndlp); 195 return; 196 } 197 198 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) 199 return; 200 201 if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn)) 202 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 203 "6789 rport name %llx != node port name %llx", 204 rport->port_name, 205 wwn_to_u64(ndlp->nlp_portname.u.wwn)); 206 207 evtp = &ndlp->dev_loss_evt; 208 209 if (!list_empty(&evtp->evt_listp)) { 210 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 211 "6790 rport name %llx dev_loss_evt pending\n", 212 rport->port_name); 213 return; 214 } 215 216 spin_lock_irqsave(&ndlp->lock, iflags); 217 ndlp->nlp_flag |= NLP_IN_DEV_LOSS; 218 219 /* If there is a PLOGI in progress, and we are in a 220 * NLP_NPR_2B_DISC state, don't turn off the flag. 221 */ 222 if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE) 223 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 224 225 /* 226 * The backend does not expect any more calls associated with this 227 * rport. Remove the association between rport and ndlp. 228 */ 229 ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD; 230 ((struct lpfc_rport_data *)rport->dd_data)->pnode = NULL; 231 ndlp->rport = NULL; 232 spin_unlock_irqrestore(&ndlp->lock, iflags); 233 234 if (phba->worker_thread) { 235 /* We need to hold the node by incrementing the reference 236 * count until this queued work is done 237 */ 238 evtp->evt_arg1 = lpfc_nlp_get(ndlp); 239 240 spin_lock_irqsave(&phba->hbalock, iflags); 241 if (evtp->evt_arg1) { 242 evtp->evt = LPFC_EVT_DEV_LOSS; 243 list_add_tail(&evtp->evt_listp, &phba->work_list); 244 lpfc_worker_wake_up(phba); 245 } 246 spin_unlock_irqrestore(&phba->hbalock, iflags); 247 } else { 248 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, 249 "3188 worker thread is stopped %s x%06x, " 250 " rport x%px flg x%x load_flag x%x refcnt " 251 "%d\n", __func__, ndlp->nlp_DID, 252 ndlp->rport, ndlp->nlp_flag, 253 vport->load_flag, kref_read(&ndlp->kref)); 254 if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) { 255 spin_lock_irqsave(&ndlp->lock, iflags); 256 /* Node is in dev loss. No further transaction. */ 257 ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; 258 spin_unlock_irqrestore(&ndlp->lock, iflags); 259 lpfc_disc_state_machine(vport, ndlp, NULL, 260 NLP_EVT_DEVICE_RM); 261 } 262 263 } 264 265 return; 266 } 267 268 /** 269 * lpfc_check_inactive_vmid_one - VMID inactivity checker for a vport 270 * @vport: Pointer to vport context object. 271 * 272 * This function checks for idle VMID entries related to a particular vport. If 273 * found unused/idle, free them accordingly. 
274 **/ 275 static void lpfc_check_inactive_vmid_one(struct lpfc_vport *vport) 276 { 277 u16 keep; 278 u32 difftime = 0, r, bucket; 279 u64 *lta; 280 int cpu; 281 struct lpfc_vmid *vmp; 282 283 write_lock(&vport->vmid_lock); 284 285 if (!vport->cur_vmid_cnt) 286 goto out; 287 288 /* iterate through the table */ 289 hash_for_each(vport->hash_table, bucket, vmp, hnode) { 290 keep = 0; 291 if (vmp->flag & LPFC_VMID_REGISTERED) { 292 /* check if the particular VMID is in use */ 293 /* for all available per cpu variable */ 294 for_each_possible_cpu(cpu) { 295 /* if last access time is less than timeout */ 296 lta = per_cpu_ptr(vmp->last_io_time, cpu); 297 if (!lta) 298 continue; 299 difftime = (jiffies) - (*lta); 300 if ((vport->vmid_inactivity_timeout * 301 JIFFIES_PER_HR) > difftime) { 302 keep = 1; 303 break; 304 } 305 } 306 307 /* if none of the cpus have been used by the vm, */ 308 /* remove the entry if already registered */ 309 if (!keep) { 310 /* mark the entry for deregistration */ 311 vmp->flag = LPFC_VMID_DE_REGISTER; 312 write_unlock(&vport->vmid_lock); 313 if (vport->vmid_priority_tagging) 314 r = lpfc_vmid_uvem(vport, vmp, false); 315 else 316 r = lpfc_vmid_cmd(vport, 317 SLI_CTAS_DAPP_IDENT, 318 vmp); 319 320 /* decrement number of active vms and mark */ 321 /* entry in slot as free */ 322 write_lock(&vport->vmid_lock); 323 if (!r) { 324 struct lpfc_vmid *ht = vmp; 325 326 vport->cur_vmid_cnt--; 327 ht->flag = LPFC_VMID_SLOT_FREE; 328 free_percpu(ht->last_io_time); 329 ht->last_io_time = NULL; 330 hash_del(&ht->hnode); 331 } 332 } 333 } 334 } 335 out: 336 write_unlock(&vport->vmid_lock); 337 } 338 339 /** 340 * lpfc_check_inactive_vmid - VMID inactivity checker 341 * @phba: Pointer to hba context object. 342 * 343 * This function is called from the worker thread to determine if an entry in 344 * the VMID table can be released since there was no I/O activity seen from that 345 * particular VM for the specified time. When this happens, the entry in the 346 * table is released and also the resources on the switch cleared. 347 **/ 348 349 static void lpfc_check_inactive_vmid(struct lpfc_hba *phba) 350 { 351 struct lpfc_vport *vport; 352 struct lpfc_vport **vports; 353 int i; 354 355 vports = lpfc_create_vport_work_array(phba); 356 if (!vports) 357 return; 358 359 for (i = 0; i <= phba->max_vports; i++) { 360 if ((!vports[i]) && (i == 0)) 361 vport = phba->pport; 362 else 363 vport = vports[i]; 364 if (!vport) 365 break; 366 367 lpfc_check_inactive_vmid_one(vport); 368 } 369 lpfc_destroy_vport_work_array(phba, vports); 370 } 371 372 /** 373 * lpfc_check_nlp_post_devloss - Check to restore ndlp refcnt after devloss 374 * @vport: Pointer to vport object. 375 * @ndlp: Pointer to remote node object. 376 * 377 * If NLP_IN_RECOV_POST_DEV_LOSS flag was set due to outstanding recovery of 378 * node during dev_loss_tmo processing, then this function restores the nlp_put 379 * kref decrement from lpfc_dev_loss_tmo_handler. 
 **/
void
lpfc_check_nlp_post_devloss(struct lpfc_vport *vport,
			    struct lpfc_nodelist *ndlp)
{
	unsigned long iflags;

	spin_lock_irqsave(&ndlp->lock, iflags);
	if (ndlp->save_flags & NLP_IN_RECOV_POST_DEV_LOSS) {
		ndlp->save_flags &= ~NLP_IN_RECOV_POST_DEV_LOSS;
		spin_unlock_irqrestore(&ndlp->lock, iflags);
		lpfc_nlp_get(ndlp);
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_NODE,
				 "8438 Devloss timeout reversed on DID x%x "
				 "refcnt %d ndlp %p flag x%x "
				 "port_state = x%x\n",
				 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp,
				 ndlp->nlp_flag, vport->port_state);
		spin_lock_irqsave(&ndlp->lock, iflags);
	}
	spin_unlock_irqrestore(&ndlp->lock, iflags);
}

/**
 * lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler
 * @ndlp: Pointer to remote node object.
 *
 * This function is called from the worker thread when the devloss timeout
 * timer expires. For an SLI4 host, this routine returns 1 when at least one
 * remote node, including this @ndlp, is still using the FCF; otherwise it
 * returns 0 when no remote node is using the FCF at the time the devloss
 * timeout fired for this @ndlp.
 **/
static int
lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
{
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	uint8_t *name;
	int warn_on = 0;
	int fcf_inuse = 0;
	bool recovering = false;
	struct fc_vport *fc_vport = NULL;
	unsigned long iflags;

	vport = ndlp->vport;
	name = (uint8_t *)&ndlp->nlp_portname;
	phba = vport->phba;

	if (phba->sli_rev == LPFC_SLI_REV4)
		fcf_inuse = lpfc_fcf_inuse(phba);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
			      "rport devlosstmo:did:x%x type:x%x id:x%x",
			      ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_sid);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3182 %s x%06x, nflag x%x xflags x%x refcnt %d\n",
			 __func__, ndlp->nlp_DID, ndlp->nlp_flag,
			 ndlp->fc4_xpt_flags, kref_read(&ndlp->kref));

	/* If the driver is recovering the rport, ignore devloss. */
	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0284 Devloss timeout Ignored on "
				 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
				 "NPort x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID);

		spin_lock_irqsave(&ndlp->lock, iflags);
		ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
		spin_unlock_irqrestore(&ndlp->lock, iflags);
		return fcf_inuse;
	}

	/* Fabric nodes are done. */
	if (ndlp->nlp_type & NLP_FABRIC) {
		spin_lock_irqsave(&ndlp->lock, iflags);

		/* In massive vport configuration settings or when the FLOGI
		 * completes with a sequence timeout, it's possible
		 * dev_loss_tmo fired during node recovery. The driver has to
		 * account for this race to allow for recovery and keep
		 * the reference counting correct.
		 */
		switch (ndlp->nlp_DID) {
		case Fabric_DID:
			fc_vport = vport->fc_vport;
			if (fc_vport) {
				/* NPIV path. */
				if (fc_vport->vport_state ==
				    FC_VPORT_INITIALIZING)
					recovering = true;
			} else {
				/* Physical port path.
*/ 477 if (phba->hba_flag & HBA_FLOGI_OUTSTANDING) 478 recovering = true; 479 } 480 break; 481 case Fabric_Cntl_DID: 482 if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) 483 recovering = true; 484 break; 485 case FDMI_DID: 486 fallthrough; 487 case NameServer_DID: 488 if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE && 489 ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE) 490 recovering = true; 491 break; 492 } 493 spin_unlock_irqrestore(&ndlp->lock, iflags); 494 495 /* Mark an NLP_IN_RECOV_POST_DEV_LOSS flag to know if reversing 496 * the following lpfc_nlp_put is necessary after fabric node is 497 * recovered. 498 */ 499 if (recovering) { 500 lpfc_printf_vlog(vport, KERN_INFO, 501 LOG_DISCOVERY | LOG_NODE, 502 "8436 Devloss timeout marked on " 503 "DID x%x refcnt %d ndlp %p " 504 "flag x%x port_state = x%x\n", 505 ndlp->nlp_DID, kref_read(&ndlp->kref), 506 ndlp, ndlp->nlp_flag, 507 vport->port_state); 508 spin_lock_irqsave(&ndlp->lock, iflags); 509 ndlp->save_flags |= NLP_IN_RECOV_POST_DEV_LOSS; 510 spin_unlock_irqrestore(&ndlp->lock, iflags); 511 } else if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { 512 /* Fabric node fully recovered before this dev_loss_tmo 513 * queue work is processed. Thus, ignore the 514 * dev_loss_tmo event. 515 */ 516 lpfc_printf_vlog(vport, KERN_INFO, 517 LOG_DISCOVERY | LOG_NODE, 518 "8437 Devloss timeout ignored on " 519 "DID x%x refcnt %d ndlp %p " 520 "flag x%x port_state = x%x\n", 521 ndlp->nlp_DID, kref_read(&ndlp->kref), 522 ndlp, ndlp->nlp_flag, 523 vport->port_state); 524 return fcf_inuse; 525 } 526 527 spin_lock_irqsave(&ndlp->lock, iflags); 528 ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; 529 spin_unlock_irqrestore(&ndlp->lock, iflags); 530 lpfc_nlp_put(ndlp); 531 return fcf_inuse; 532 } 533 534 if (ndlp->nlp_sid != NLP_NO_SID) { 535 warn_on = 1; 536 lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT); 537 } 538 539 if (warn_on) { 540 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 541 "0203 Devloss timeout on " 542 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x " 543 "NPort x%06x Data: x%x x%x x%x refcnt %d\n", 544 *name, *(name+1), *(name+2), *(name+3), 545 *(name+4), *(name+5), *(name+6), *(name+7), 546 ndlp->nlp_DID, ndlp->nlp_flag, 547 ndlp->nlp_state, ndlp->nlp_rpi, 548 kref_read(&ndlp->kref)); 549 } else { 550 lpfc_printf_vlog(vport, KERN_INFO, LOG_TRACE_EVENT, 551 "0204 Devloss timeout on " 552 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x " 553 "NPort x%06x Data: x%x x%x x%x\n", 554 *name, *(name+1), *(name+2), *(name+3), 555 *(name+4), *(name+5), *(name+6), *(name+7), 556 ndlp->nlp_DID, ndlp->nlp_flag, 557 ndlp->nlp_state, ndlp->nlp_rpi); 558 } 559 560 /* If we are devloss, but we are in the process of rediscovering the 561 * ndlp, don't issue a NLP_EVT_DEVICE_RM event. 
	 */
	if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
	    ndlp->nlp_state <= NLP_STE_PRLI_ISSUE) {
		return fcf_inuse;
	}

	spin_lock_irqsave(&ndlp->lock, iflags);
	ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
	spin_unlock_irqrestore(&ndlp->lock, iflags);
	if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD))
		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);

	return fcf_inuse;
}

static void lpfc_check_vmid_qfpa_issue(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (!vports)
		return;

	for (i = 0; i <= phba->max_vports; i++) {
		if ((!vports[i]) && (i == 0))
			vport = phba->pport;
		else
			vport = vports[i];
		if (!vport)
			break;

		if (vport->vmid_flag & LPFC_VMID_ISSUE_QFPA) {
			if (!lpfc_issue_els_qfpa(vport))
				vport->vmid_flag &= ~LPFC_VMID_ISSUE_QFPA;
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler
 * @phba: Pointer to hba context object.
 * @fcf_inuse: SLI4 FCF in-use state reported from devloss timeout handler.
 * @nlp_did: remote node identifier with devloss timeout.
 *
 * This function is called from the worker thread after invoking the devloss
 * timeout handler and releasing the reference count for the ndlp with
 * which the devloss timeout was handled for an SLI4 host. For the devloss
 * timeout of the last remote node that had been using the FCF, when this
 * routine is invoked it is guaranteed that no remote node is still using
 * the FCF. If the FIP engine is neither in the FCF table scan process nor
 * the roundrobin failover process, the in-use FCF is unregistered. If the
 * FIP engine is in the FCF discovery process, the devloss timeout state is
 * set so that either the FCF table scan process or the roundrobin failover
 * process unregisters the in-use FCF.
 **/
static void
lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
				    uint32_t nlp_did)
{
	/* If devloss timeout happened to a remote node when FCF had no
	 * longer been in-use, do nothing.
	 */
	if (!fcf_inuse)
		return;

	if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) {
		spin_lock_irq(&phba->hbalock);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			if (phba->hba_flag & HBA_DEVLOSS_TMO) {
				spin_unlock_irq(&phba->hbalock);
				return;
			}
			phba->hba_flag |= HBA_DEVLOSS_TMO;
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2847 Last remote node (x%x) using "
					"FCF devloss tmo\n", nlp_did);
		}
		if (phba->fcf.fcf_flag & FCF_REDISC_PROG) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2868 Devloss tmo to FCF rediscovery "
					"in progress\n");
			return;
		}
		if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2869 Devloss tmo to idle FIP engine, "
					"unreg in-use FCF and rescan.\n");
			/* Unregister in-use FCF and rescan */
			lpfc_unregister_fcf_rescan(phba);
			return;
		}
		spin_unlock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_TS_INPROG)
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2870 FCF table scan in progress\n");
		if (phba->hba_flag & FCF_RR_INPROG)
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2871 FLOGI roundrobin FCF failover "
					"in progress\n");
	}
	lpfc_unregister_unused_fcf(phba);
}

/**
 * lpfc_alloc_fast_evt - Allocates data structure for posting event
 * @phba: Pointer to hba context object.
 *
 * This function is called from the functions which need to post
 * events from interrupt context. This function allocates the data
 * structure required for posting an event. It also keeps track of
 * the number of events pending and prevents an event storm when there
 * are too many events.
 **/
struct lpfc_fast_path_event *
lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
	struct lpfc_fast_path_event *ret;

	/* If there are a lot of fast events, do not exhaust memory on them */
	if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
		return NULL;

	ret = kzalloc(sizeof(struct lpfc_fast_path_event),
			GFP_ATOMIC);
	if (ret) {
		atomic_inc(&phba->fast_event_count);
		INIT_LIST_HEAD(&ret->work_evt.evt_listp);
		ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
	}
	return ret;
}

/**
 * lpfc_free_fast_evt - Frees event data structure
 * @phba: Pointer to hba context object.
 * @evt: Event object which needs to be freed.
 *
 * This function frees the data structure required for posting
 * events.
 **/
void
lpfc_free_fast_evt(struct lpfc_hba *phba,
		   struct lpfc_fast_path_event *evt) {

	atomic_dec(&phba->fast_event_count);
	kfree(evt);
}

/**
 * lpfc_send_fastpath_evt - Posts events generated from fast path
 * @phba: Pointer to hba context object.
 * @evtp: Event data structure.
 *
 * This function is called from the worker thread when the interrupt
 * context needs to post an event. It posts the event
 * to the fc transport netlink interface.
723 **/ 724 static void 725 lpfc_send_fastpath_evt(struct lpfc_hba *phba, 726 struct lpfc_work_evt *evtp) 727 { 728 unsigned long evt_category, evt_sub_category; 729 struct lpfc_fast_path_event *fast_evt_data; 730 char *evt_data; 731 uint32_t evt_data_size; 732 struct Scsi_Host *shost; 733 734 fast_evt_data = container_of(evtp, struct lpfc_fast_path_event, 735 work_evt); 736 737 evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type; 738 evt_sub_category = (unsigned long) fast_evt_data->un. 739 fabric_evt.subcategory; 740 shost = lpfc_shost_from_vport(fast_evt_data->vport); 741 if (evt_category == FC_REG_FABRIC_EVENT) { 742 if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) { 743 evt_data = (char *) &fast_evt_data->un.read_check_error; 744 evt_data_size = sizeof(fast_evt_data->un. 745 read_check_error); 746 } else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) || 747 (evt_sub_category == LPFC_EVENT_PORT_BUSY)) { 748 evt_data = (char *) &fast_evt_data->un.fabric_evt; 749 evt_data_size = sizeof(fast_evt_data->un.fabric_evt); 750 } else { 751 lpfc_free_fast_evt(phba, fast_evt_data); 752 return; 753 } 754 } else if (evt_category == FC_REG_SCSI_EVENT) { 755 switch (evt_sub_category) { 756 case LPFC_EVENT_QFULL: 757 case LPFC_EVENT_DEVBSY: 758 evt_data = (char *) &fast_evt_data->un.scsi_evt; 759 evt_data_size = sizeof(fast_evt_data->un.scsi_evt); 760 break; 761 case LPFC_EVENT_CHECK_COND: 762 evt_data = (char *) &fast_evt_data->un.check_cond_evt; 763 evt_data_size = sizeof(fast_evt_data->un. 764 check_cond_evt); 765 break; 766 case LPFC_EVENT_VARQUEDEPTH: 767 evt_data = (char *) &fast_evt_data->un.queue_depth_evt; 768 evt_data_size = sizeof(fast_evt_data->un. 769 queue_depth_evt); 770 break; 771 default: 772 lpfc_free_fast_evt(phba, fast_evt_data); 773 return; 774 } 775 } else { 776 lpfc_free_fast_evt(phba, fast_evt_data); 777 return; 778 } 779 780 if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME) 781 fc_host_post_vendor_event(shost, 782 fc_get_event_number(), 783 evt_data_size, 784 evt_data, 785 LPFC_NL_VENDOR_ID); 786 787 lpfc_free_fast_evt(phba, fast_evt_data); 788 return; 789 } 790 791 static void 792 lpfc_work_list_done(struct lpfc_hba *phba) 793 { 794 struct lpfc_work_evt *evtp = NULL; 795 struct lpfc_nodelist *ndlp; 796 int free_evt; 797 int fcf_inuse; 798 uint32_t nlp_did; 799 bool hba_pci_err; 800 801 spin_lock_irq(&phba->hbalock); 802 while (!list_empty(&phba->work_list)) { 803 list_remove_head((&phba->work_list), evtp, typeof(*evtp), 804 evt_listp); 805 spin_unlock_irq(&phba->hbalock); 806 hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags); 807 free_evt = 1; 808 switch (evtp->evt) { 809 case LPFC_EVT_ELS_RETRY: 810 ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1); 811 if (!hba_pci_err) { 812 lpfc_els_retry_delay_handler(ndlp); 813 free_evt = 0; /* evt is part of ndlp */ 814 } 815 /* decrement the node reference count held 816 * for this queued work 817 */ 818 lpfc_nlp_put(ndlp); 819 break; 820 case LPFC_EVT_DEV_LOSS: 821 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1); 822 fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp); 823 free_evt = 0; 824 /* decrement the node reference count held for 825 * this queued work 826 */ 827 nlp_did = ndlp->nlp_DID; 828 lpfc_nlp_put(ndlp); 829 if (phba->sli_rev == LPFC_SLI_REV4) 830 lpfc_sli4_post_dev_loss_tmo_handler(phba, 831 fcf_inuse, 832 nlp_did); 833 break; 834 case LPFC_EVT_RECOVER_PORT: 835 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1); 836 if (!hba_pci_err) { 837 lpfc_sli_abts_recover_port(ndlp->vport, ndlp); 838 free_evt = 0; 
839 } 840 /* decrement the node reference count held for 841 * this queued work 842 */ 843 lpfc_nlp_put(ndlp); 844 break; 845 case LPFC_EVT_ONLINE: 846 if (phba->link_state < LPFC_LINK_DOWN) 847 *(int *) (evtp->evt_arg1) = lpfc_online(phba); 848 else 849 *(int *) (evtp->evt_arg1) = 0; 850 complete((struct completion *)(evtp->evt_arg2)); 851 break; 852 case LPFC_EVT_OFFLINE_PREP: 853 if (phba->link_state >= LPFC_LINK_DOWN) 854 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 855 *(int *)(evtp->evt_arg1) = 0; 856 complete((struct completion *)(evtp->evt_arg2)); 857 break; 858 case LPFC_EVT_OFFLINE: 859 lpfc_offline(phba); 860 lpfc_sli_brdrestart(phba); 861 *(int *)(evtp->evt_arg1) = 862 lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY); 863 lpfc_unblock_mgmt_io(phba); 864 complete((struct completion *)(evtp->evt_arg2)); 865 break; 866 case LPFC_EVT_WARM_START: 867 lpfc_offline(phba); 868 lpfc_reset_barrier(phba); 869 lpfc_sli_brdreset(phba); 870 lpfc_hba_down_post(phba); 871 *(int *)(evtp->evt_arg1) = 872 lpfc_sli_brdready(phba, HS_MBRDY); 873 lpfc_unblock_mgmt_io(phba); 874 complete((struct completion *)(evtp->evt_arg2)); 875 break; 876 case LPFC_EVT_KILL: 877 lpfc_offline(phba); 878 *(int *)(evtp->evt_arg1) 879 = (phba->pport->stopped) 880 ? 0 : lpfc_sli_brdkill(phba); 881 lpfc_unblock_mgmt_io(phba); 882 complete((struct completion *)(evtp->evt_arg2)); 883 break; 884 case LPFC_EVT_FASTPATH_MGMT_EVT: 885 lpfc_send_fastpath_evt(phba, evtp); 886 free_evt = 0; 887 break; 888 case LPFC_EVT_RESET_HBA: 889 if (!(phba->pport->load_flag & FC_UNLOADING)) 890 lpfc_reset_hba(phba); 891 break; 892 } 893 if (free_evt) 894 kfree(evtp); 895 spin_lock_irq(&phba->hbalock); 896 } 897 spin_unlock_irq(&phba->hbalock); 898 899 } 900 901 static void 902 lpfc_work_done(struct lpfc_hba *phba) 903 { 904 struct lpfc_sli_ring *pring; 905 uint32_t ha_copy, status, control, work_port_events; 906 struct lpfc_vport **vports; 907 struct lpfc_vport *vport; 908 int i; 909 bool hba_pci_err; 910 911 hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags); 912 spin_lock_irq(&phba->hbalock); 913 ha_copy = phba->work_ha; 914 phba->work_ha = 0; 915 spin_unlock_irq(&phba->hbalock); 916 if (hba_pci_err) 917 ha_copy = 0; 918 919 /* First, try to post the next mailbox command to SLI4 device */ 920 if (phba->pci_dev_grp == LPFC_PCI_DEV_OC && !hba_pci_err) 921 lpfc_sli4_post_async_mbox(phba); 922 923 if (ha_copy & HA_ERATT) { 924 /* Handle the error attention event */ 925 lpfc_handle_eratt(phba); 926 927 if (phba->fw_dump_cmpl) { 928 complete(phba->fw_dump_cmpl); 929 phba->fw_dump_cmpl = NULL; 930 } 931 } 932 933 if (ha_copy & HA_MBATT) 934 lpfc_sli_handle_mb_event(phba); 935 936 if (ha_copy & HA_LATT) 937 lpfc_handle_latt(phba); 938 939 /* Handle VMID Events */ 940 if (lpfc_is_vmid_enabled(phba) && !hba_pci_err) { 941 if (phba->pport->work_port_events & 942 WORKER_CHECK_VMID_ISSUE_QFPA) { 943 lpfc_check_vmid_qfpa_issue(phba); 944 phba->pport->work_port_events &= 945 ~WORKER_CHECK_VMID_ISSUE_QFPA; 946 } 947 if (phba->pport->work_port_events & 948 WORKER_CHECK_INACTIVE_VMID) { 949 lpfc_check_inactive_vmid(phba); 950 phba->pport->work_port_events &= 951 ~WORKER_CHECK_INACTIVE_VMID; 952 } 953 } 954 955 /* Process SLI4 events */ 956 if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) { 957 if (phba->hba_flag & HBA_RRQ_ACTIVE) 958 lpfc_handle_rrq_active(phba); 959 if (phba->hba_flag & ELS_XRI_ABORT_EVENT) 960 lpfc_sli4_els_xri_abort_event_proc(phba); 961 if (phba->hba_flag & ASYNC_EVENT) 962 lpfc_sli4_async_event_proc(phba); 963 if (phba->hba_flag & 
HBA_POST_RECEIVE_BUFFER) { 964 spin_lock_irq(&phba->hbalock); 965 phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER; 966 spin_unlock_irq(&phba->hbalock); 967 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); 968 } 969 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) 970 lpfc_sli4_fcf_redisc_event_proc(phba); 971 } 972 973 vports = lpfc_create_vport_work_array(phba); 974 if (vports != NULL) 975 for (i = 0; i <= phba->max_vports; i++) { 976 /* 977 * We could have no vports in array if unloading, so if 978 * this happens then just use the pport 979 */ 980 if (vports[i] == NULL && i == 0) 981 vport = phba->pport; 982 else 983 vport = vports[i]; 984 if (vport == NULL) 985 break; 986 spin_lock_irq(&vport->work_port_lock); 987 work_port_events = vport->work_port_events; 988 vport->work_port_events &= ~work_port_events; 989 spin_unlock_irq(&vport->work_port_lock); 990 if (hba_pci_err) 991 continue; 992 if (work_port_events & WORKER_DISC_TMO) 993 lpfc_disc_timeout_handler(vport); 994 if (work_port_events & WORKER_ELS_TMO) 995 lpfc_els_timeout_handler(vport); 996 if (work_port_events & WORKER_HB_TMO) 997 lpfc_hb_timeout_handler(phba); 998 if (work_port_events & WORKER_MBOX_TMO) 999 lpfc_mbox_timeout_handler(phba); 1000 if (work_port_events & WORKER_FABRIC_BLOCK_TMO) 1001 lpfc_unblock_fabric_iocbs(phba); 1002 if (work_port_events & WORKER_RAMP_DOWN_QUEUE) 1003 lpfc_ramp_down_queue_handler(phba); 1004 if (work_port_events & WORKER_DELAYED_DISC_TMO) 1005 lpfc_delayed_disc_timeout_handler(vport); 1006 } 1007 lpfc_destroy_vport_work_array(phba, vports); 1008 1009 pring = lpfc_phba_elsring(phba); 1010 status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); 1011 status >>= (4*LPFC_ELS_RING); 1012 if (pring && (status & HA_RXMASK || 1013 pring->flag & LPFC_DEFERRED_RING_EVENT || 1014 phba->hba_flag & HBA_SP_QUEUE_EVT)) { 1015 if (pring->flag & LPFC_STOP_IOCB_EVENT) { 1016 pring->flag |= LPFC_DEFERRED_RING_EVENT; 1017 /* Preserve legacy behavior. */ 1018 if (!(phba->hba_flag & HBA_SP_QUEUE_EVT)) 1019 set_bit(LPFC_DATA_READY, &phba->data_flags); 1020 } else { 1021 /* Driver could have abort request completed in queue 1022 * when link goes down. Allow for this transition. 
1023 */ 1024 if (phba->link_state >= LPFC_LINK_DOWN || 1025 phba->link_flag & LS_MDS_LOOPBACK) { 1026 pring->flag &= ~LPFC_DEFERRED_RING_EVENT; 1027 lpfc_sli_handle_slow_ring_event(phba, pring, 1028 (status & 1029 HA_RXMASK)); 1030 } 1031 } 1032 if (phba->sli_rev == LPFC_SLI_REV4) 1033 lpfc_drain_txq(phba); 1034 /* 1035 * Turn on Ring interrupts 1036 */ 1037 if (phba->sli_rev <= LPFC_SLI_REV3) { 1038 spin_lock_irq(&phba->hbalock); 1039 control = readl(phba->HCregaddr); 1040 if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) { 1041 lpfc_debugfs_slow_ring_trc(phba, 1042 "WRK Enable ring: cntl:x%x hacopy:x%x", 1043 control, ha_copy, 0); 1044 1045 control |= (HC_R0INT_ENA << LPFC_ELS_RING); 1046 writel(control, phba->HCregaddr); 1047 readl(phba->HCregaddr); /* flush */ 1048 } else { 1049 lpfc_debugfs_slow_ring_trc(phba, 1050 "WRK Ring ok: cntl:x%x hacopy:x%x", 1051 control, ha_copy, 0); 1052 } 1053 spin_unlock_irq(&phba->hbalock); 1054 } 1055 } 1056 lpfc_work_list_done(phba); 1057 } 1058 1059 int 1060 lpfc_do_work(void *p) 1061 { 1062 struct lpfc_hba *phba = p; 1063 int rc; 1064 1065 set_user_nice(current, MIN_NICE); 1066 current->flags |= PF_NOFREEZE; 1067 phba->data_flags = 0; 1068 1069 while (!kthread_should_stop()) { 1070 /* wait and check worker queue activities */ 1071 rc = wait_event_interruptible(phba->work_waitq, 1072 (test_and_clear_bit(LPFC_DATA_READY, 1073 &phba->data_flags) 1074 || kthread_should_stop())); 1075 /* Signal wakeup shall terminate the worker thread */ 1076 if (rc) { 1077 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1078 "0433 Wakeup on signal: rc=x%x\n", rc); 1079 break; 1080 } 1081 1082 /* Attend pending lpfc data processing */ 1083 lpfc_work_done(phba); 1084 } 1085 phba->worker_thread = NULL; 1086 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 1087 "0432 Worker thread stopped.\n"); 1088 return 0; 1089 } 1090 1091 /* 1092 * This is only called to handle FC worker events. Since this a rare 1093 * occurrence, we allocate a struct lpfc_work_evt structure here instead of 1094 * embedding it in the IOCB. 1095 */ 1096 int 1097 lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2, 1098 uint32_t evt) 1099 { 1100 struct lpfc_work_evt *evtp; 1101 unsigned long flags; 1102 1103 /* 1104 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will 1105 * be queued to worker thread for processing 1106 */ 1107 evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC); 1108 if (!evtp) 1109 return 0; 1110 1111 evtp->evt_arg1 = arg1; 1112 evtp->evt_arg2 = arg2; 1113 evtp->evt = evt; 1114 1115 spin_lock_irqsave(&phba->hbalock, flags); 1116 list_add_tail(&evtp->evt_listp, &phba->work_list); 1117 spin_unlock_irqrestore(&phba->hbalock, flags); 1118 1119 lpfc_worker_wake_up(phba); 1120 1121 return 1; 1122 } 1123 1124 void 1125 lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove) 1126 { 1127 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1128 struct lpfc_hba *phba = vport->phba; 1129 struct lpfc_nodelist *ndlp, *next_ndlp; 1130 1131 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 1132 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 1133 /* It's possible the FLOGI to the fabric node never 1134 * successfully completed and never registered with the 1135 * transport. In this case there is no way to clean up 1136 * the node. 
1137 */ 1138 if (ndlp->nlp_DID == Fabric_DID) { 1139 if (ndlp->nlp_prev_state == 1140 NLP_STE_UNUSED_NODE && 1141 !ndlp->fc4_xpt_flags) 1142 lpfc_nlp_put(ndlp); 1143 } 1144 continue; 1145 } 1146 1147 if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) || 1148 ((vport->port_type == LPFC_NPIV_PORT) && 1149 ((ndlp->nlp_DID == NameServer_DID) || 1150 (ndlp->nlp_DID == FDMI_DID) || 1151 (ndlp->nlp_DID == Fabric_Cntl_DID)))) 1152 lpfc_unreg_rpi(vport, ndlp); 1153 1154 /* Leave Fabric nodes alone on link down */ 1155 if ((phba->sli_rev < LPFC_SLI_REV4) && 1156 (!remove && ndlp->nlp_type & NLP_FABRIC)) 1157 continue; 1158 1159 /* Notify transport of connectivity loss to trigger cleanup. */ 1160 if (phba->nvmet_support && 1161 ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) 1162 lpfc_nvmet_invalidate_host(phba, ndlp); 1163 1164 lpfc_disc_state_machine(vport, ndlp, NULL, 1165 remove 1166 ? NLP_EVT_DEVICE_RM 1167 : NLP_EVT_DEVICE_RECOVERY); 1168 } 1169 if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) { 1170 if (phba->sli_rev == LPFC_SLI_REV4) 1171 lpfc_sli4_unreg_all_rpis(vport); 1172 lpfc_mbx_unreg_vpi(vport); 1173 spin_lock_irq(shost->host_lock); 1174 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 1175 spin_unlock_irq(shost->host_lock); 1176 } 1177 } 1178 1179 void 1180 lpfc_port_link_failure(struct lpfc_vport *vport) 1181 { 1182 lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN); 1183 1184 /* Cleanup any outstanding received buffers */ 1185 lpfc_cleanup_rcv_buffers(vport); 1186 1187 /* Cleanup any outstanding RSCN activity */ 1188 lpfc_els_flush_rscn(vport); 1189 1190 /* Cleanup any outstanding ELS commands */ 1191 lpfc_els_flush_cmd(vport); 1192 1193 lpfc_cleanup_rpis(vport, 0); 1194 1195 /* Turn off discovery timer if its running */ 1196 lpfc_can_disctmo(vport); 1197 } 1198 1199 void 1200 lpfc_linkdown_port(struct lpfc_vport *vport) 1201 { 1202 struct lpfc_hba *phba = vport->phba; 1203 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1204 1205 if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME) 1206 fc_host_post_event(shost, fc_get_event_number(), 1207 FCH_EVT_LINKDOWN, 0); 1208 1209 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1210 "Link Down: state:x%x rtry:x%x flg:x%x", 1211 vport->port_state, vport->fc_ns_retry, vport->fc_flag); 1212 1213 lpfc_port_link_failure(vport); 1214 1215 /* Stop delayed Nport discovery */ 1216 spin_lock_irq(shost->host_lock); 1217 vport->fc_flag &= ~FC_DISC_DELAYED; 1218 spin_unlock_irq(shost->host_lock); 1219 del_timer_sync(&vport->delayed_disc_tmo); 1220 1221 if (phba->sli_rev == LPFC_SLI_REV4 && 1222 vport->port_type == LPFC_PHYSICAL_PORT && 1223 phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG) { 1224 /* Assume success on link up */ 1225 phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC; 1226 } 1227 } 1228 1229 int 1230 lpfc_linkdown(struct lpfc_hba *phba) 1231 { 1232 struct lpfc_vport *vport = phba->pport; 1233 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1234 struct lpfc_vport **vports; 1235 LPFC_MBOXQ_t *mb; 1236 int i; 1237 int offline; 1238 1239 if (phba->link_state == LPFC_LINK_DOWN) 1240 return 0; 1241 1242 /* Block all SCSI stack I/Os */ 1243 lpfc_scsi_dev_block(phba); 1244 offline = pci_channel_offline(phba->pcidev); 1245 1246 phba->defer_flogi_acc_flag = false; 1247 1248 /* Clear external loopback plug detected flag */ 1249 phba->link_flag &= ~LS_EXTERNAL_LOOPBACK; 1250 1251 spin_lock_irq(&phba->hbalock); 1252 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); 1253 spin_unlock_irq(&phba->hbalock); 1254 if (phba->link_state > 
LPFC_LINK_DOWN) { 1255 phba->link_state = LPFC_LINK_DOWN; 1256 if (phba->sli4_hba.conf_trunk) { 1257 phba->trunk_link.link0.state = 0; 1258 phba->trunk_link.link1.state = 0; 1259 phba->trunk_link.link2.state = 0; 1260 phba->trunk_link.link3.state = 0; 1261 phba->trunk_link.phy_lnk_speed = 1262 LPFC_LINK_SPEED_UNKNOWN; 1263 phba->sli4_hba.link_state.logical_speed = 1264 LPFC_LINK_SPEED_UNKNOWN; 1265 } 1266 spin_lock_irq(shost->host_lock); 1267 phba->pport->fc_flag &= ~FC_LBIT; 1268 spin_unlock_irq(shost->host_lock); 1269 } 1270 vports = lpfc_create_vport_work_array(phba); 1271 if (vports != NULL) { 1272 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 1273 /* Issue a LINK DOWN event to all nodes */ 1274 lpfc_linkdown_port(vports[i]); 1275 1276 vports[i]->fc_myDID = 0; 1277 1278 if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || 1279 (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { 1280 if (phba->nvmet_support) 1281 lpfc_nvmet_update_targetport(phba); 1282 else 1283 lpfc_nvme_update_localport(vports[i]); 1284 } 1285 } 1286 } 1287 lpfc_destroy_vport_work_array(phba, vports); 1288 1289 /* Clean up any SLI3 firmware default rpi's */ 1290 if (phba->sli_rev > LPFC_SLI_REV3 || offline) 1291 goto skip_unreg_did; 1292 1293 mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1294 if (mb) { 1295 lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb); 1296 mb->vport = vport; 1297 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 1298 if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT) 1299 == MBX_NOT_FINISHED) { 1300 mempool_free(mb, phba->mbox_mem_pool); 1301 } 1302 } 1303 1304 skip_unreg_did: 1305 /* Setup myDID for link up if we are in pt2pt mode */ 1306 if (phba->pport->fc_flag & FC_PT2PT) { 1307 mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1308 if (mb) { 1309 lpfc_config_link(phba, mb); 1310 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 1311 mb->vport = vport; 1312 if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT) 1313 == MBX_NOT_FINISHED) { 1314 mempool_free(mb, phba->mbox_mem_pool); 1315 } 1316 } 1317 spin_lock_irq(shost->host_lock); 1318 phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI); 1319 phba->pport->rcv_flogi_cnt = 0; 1320 spin_unlock_irq(shost->host_lock); 1321 } 1322 return 0; 1323 } 1324 1325 static void 1326 lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport) 1327 { 1328 struct lpfc_nodelist *ndlp; 1329 1330 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 1331 ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME); 1332 1333 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 1334 continue; 1335 if (ndlp->nlp_type & NLP_FABRIC) { 1336 /* On Linkup its safe to clean up the ndlp 1337 * from Fabric connections. 1338 */ 1339 if (ndlp->nlp_DID != Fabric_DID) 1340 lpfc_unreg_rpi(vport, ndlp); 1341 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 1342 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { 1343 /* Fail outstanding IO now since device is 1344 * marked for PLOGI. 
1345 */ 1346 lpfc_unreg_rpi(vport, ndlp); 1347 } 1348 } 1349 } 1350 1351 static void 1352 lpfc_linkup_port(struct lpfc_vport *vport) 1353 { 1354 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1355 struct lpfc_hba *phba = vport->phba; 1356 1357 if ((vport->load_flag & FC_UNLOADING) != 0) 1358 return; 1359 1360 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1361 "Link Up: top:x%x speed:x%x flg:x%x", 1362 phba->fc_topology, phba->fc_linkspeed, phba->link_flag); 1363 1364 /* If NPIV is not enabled, only bring the physical port up */ 1365 if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 1366 (vport != phba->pport)) 1367 return; 1368 1369 if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME) 1370 fc_host_post_event(shost, fc_get_event_number(), 1371 FCH_EVT_LINKUP, 0); 1372 1373 spin_lock_irq(shost->host_lock); 1374 if (phba->defer_flogi_acc_flag) 1375 vport->fc_flag &= ~(FC_ABORT_DISCOVERY | FC_RSCN_MODE | 1376 FC_NLP_MORE | FC_RSCN_DISCOVERY); 1377 else 1378 vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | 1379 FC_ABORT_DISCOVERY | FC_RSCN_MODE | 1380 FC_NLP_MORE | FC_RSCN_DISCOVERY); 1381 vport->fc_flag |= FC_NDISC_ACTIVE; 1382 vport->fc_ns_retry = 0; 1383 spin_unlock_irq(shost->host_lock); 1384 lpfc_setup_fdmi_mask(vport); 1385 1386 lpfc_linkup_cleanup_nodes(vport); 1387 } 1388 1389 static int 1390 lpfc_linkup(struct lpfc_hba *phba) 1391 { 1392 struct lpfc_vport **vports; 1393 int i; 1394 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport); 1395 1396 phba->link_state = LPFC_LINK_UP; 1397 1398 /* Unblock fabric iocbs if they are blocked */ 1399 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 1400 del_timer_sync(&phba->fabric_block_timer); 1401 1402 vports = lpfc_create_vport_work_array(phba); 1403 if (vports != NULL) 1404 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 1405 lpfc_linkup_port(vports[i]); 1406 lpfc_destroy_vport_work_array(phba, vports); 1407 1408 /* Clear the pport flogi counter in case the link down was 1409 * absorbed without an ACQE. No lock here - in worker thread 1410 * and discovery is synchronized. 1411 */ 1412 spin_lock_irq(shost->host_lock); 1413 phba->pport->rcv_flogi_cnt = 0; 1414 spin_unlock_irq(shost->host_lock); 1415 1416 /* reinitialize initial HBA flag */ 1417 phba->hba_flag &= ~(HBA_FLOGI_ISSUED | HBA_RHBA_CMPL); 1418 1419 return 0; 1420 } 1421 1422 /* 1423 * This routine handles processing a CLEAR_LA mailbox 1424 * command upon completion. It is setup in the LPFC_MBOXQ 1425 * as the completion routine when the command is 1426 * handed off to the SLI layer. SLI3 only. 
1427 */ 1428 static void 1429 lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 1430 { 1431 struct lpfc_vport *vport = pmb->vport; 1432 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1433 struct lpfc_sli *psli = &phba->sli; 1434 MAILBOX_t *mb = &pmb->u.mb; 1435 uint32_t control; 1436 1437 /* Since we don't do discovery right now, turn these off here */ 1438 psli->sli3_ring[LPFC_EXTRA_RING].flag &= ~LPFC_STOP_IOCB_EVENT; 1439 psli->sli3_ring[LPFC_FCP_RING].flag &= ~LPFC_STOP_IOCB_EVENT; 1440 1441 /* Check for error */ 1442 if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) { 1443 /* CLEAR_LA mbox error <mbxStatus> state <hba_state> */ 1444 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1445 "0320 CLEAR_LA mbxStatus error x%x hba " 1446 "state x%x\n", 1447 mb->mbxStatus, vport->port_state); 1448 phba->link_state = LPFC_HBA_ERROR; 1449 goto out; 1450 } 1451 1452 if (vport->port_type == LPFC_PHYSICAL_PORT) 1453 phba->link_state = LPFC_HBA_READY; 1454 1455 spin_lock_irq(&phba->hbalock); 1456 psli->sli_flag |= LPFC_PROCESS_LA; 1457 control = readl(phba->HCregaddr); 1458 control |= HC_LAINT_ENA; 1459 writel(control, phba->HCregaddr); 1460 readl(phba->HCregaddr); /* flush */ 1461 spin_unlock_irq(&phba->hbalock); 1462 mempool_free(pmb, phba->mbox_mem_pool); 1463 return; 1464 1465 out: 1466 /* Device Discovery completes */ 1467 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 1468 "0225 Device Discovery completes\n"); 1469 mempool_free(pmb, phba->mbox_mem_pool); 1470 1471 spin_lock_irq(shost->host_lock); 1472 vport->fc_flag &= ~FC_ABORT_DISCOVERY; 1473 spin_unlock_irq(shost->host_lock); 1474 1475 lpfc_can_disctmo(vport); 1476 1477 /* turn on Link Attention interrupts */ 1478 1479 spin_lock_irq(&phba->hbalock); 1480 psli->sli_flag |= LPFC_PROCESS_LA; 1481 control = readl(phba->HCregaddr); 1482 control |= HC_LAINT_ENA; 1483 writel(control, phba->HCregaddr); 1484 readl(phba->HCregaddr); /* flush */ 1485 spin_unlock_irq(&phba->hbalock); 1486 1487 return; 1488 } 1489 1490 void 1491 lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 1492 { 1493 struct lpfc_vport *vport = pmb->vport; 1494 LPFC_MBOXQ_t *sparam_mb; 1495 u16 status = pmb->u.mb.mbxStatus; 1496 int rc; 1497 1498 mempool_free(pmb, phba->mbox_mem_pool); 1499 1500 if (status) 1501 goto out; 1502 1503 /* don't perform discovery for SLI4 loopback diagnostic test */ 1504 if ((phba->sli_rev == LPFC_SLI_REV4) && 1505 !(phba->hba_flag & HBA_FCOE_MODE) && 1506 (phba->link_flag & LS_LOOPBACK_MODE)) 1507 return; 1508 1509 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP && 1510 vport->fc_flag & FC_PUBLIC_LOOP && 1511 !(vport->fc_flag & FC_LBIT)) { 1512 /* Need to wait for FAN - use discovery timer 1513 * for timeout. port_state is identically 1514 * LPFC_LOCAL_CFG_LINK while waiting for FAN 1515 */ 1516 lpfc_set_disctmo(vport); 1517 return; 1518 } 1519 1520 /* Start discovery by sending a FLOGI. port_state is identically 1521 * LPFC_FLOGI while waiting for FLOGI cmpl. 1522 */ 1523 if (vport->port_state != LPFC_FLOGI) { 1524 /* Issue MBX_READ_SPARAM to update CSPs before FLOGI if 1525 * bb-credit recovery is in place. 
		 */
		if (phba->bbcredit_support && phba->cfg_enable_bbcr &&
		    !(phba->link_flag & LS_LOOPBACK_MODE)) {
			sparam_mb = mempool_alloc(phba->mbox_mem_pool,
						  GFP_KERNEL);
			if (!sparam_mb)
				goto sparam_out;

			rc = lpfc_read_sparam(phba, sparam_mb, 0);
			if (rc) {
				mempool_free(sparam_mb, phba->mbox_mem_pool);
				goto sparam_out;
			}
			sparam_mb->vport = vport;
			sparam_mb->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
			rc = lpfc_sli_issue_mbox(phba, sparam_mb, MBX_NOWAIT);
			if (rc == MBX_NOT_FINISHED) {
				lpfc_mbox_rsrc_cleanup(phba, sparam_mb,
						       MBOX_THD_UNLOCKED);
				goto sparam_out;
			}

			phba->hba_flag |= HBA_DEFER_FLOGI;
		} else {
			lpfc_initial_flogi(vport);
		}
	} else {
		if (vport->fc_flag & FC_PT2PT)
			lpfc_disc_start(vport);
	}
	return;

out:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0306 CONFIG_LINK mbxStatus error x%x HBA state x%x\n",
			 status, vport->port_state);

sparam_out:
	lpfc_linkdown(phba);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0200 CONFIG_LINK bad hba state x%x\n",
			 vport->port_state);

	lpfc_issue_clear_la(phba, vport);
	return;
}

/**
 * lpfc_sli4_clear_fcf_rr_bmask
 * @phba: pointer to the struct lpfc_hba for this port.
 *
 * This function resets the round robin bit mask and clears the
 * fcf priority list. The list deletions are done while holding the
 * hbalock. The ON_LIST flag and the FLOGI_FAILED flags are cleared
 * from the lpfc_fcf_pri record.
 **/
void
lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *phba)
{
	struct lpfc_fcf_pri *fcf_pri;
	struct lpfc_fcf_pri *next_fcf_pri;

	memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask));
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(fcf_pri, next_fcf_pri,
				 &phba->fcf.fcf_pri_list, list) {
		list_del_init(&fcf_pri->list);
		fcf_pri->fcf_rec.flag = 0;
	}
	spin_unlock_irq(&phba->hbalock);
}

static void
lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "2017 REG_FCFI mbxStatus error x%x "
				 "HBA state x%x\n", mboxq->u.mb.mbxStatus,
				 vport->port_state);
		goto fail_out;
	}

	/* Start FCoE discovery by sending a FLOGI. */
	phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
	/* Set the FCFI registered flag */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= FCF_REGISTERED;
	spin_unlock_irq(&phba->hbalock);

	/* If there is a pending FCoE event, restart FCF table scan.
	 */
	if ((!(phba->hba_flag & FCF_RR_INPROG)) &&
	    lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
		goto fail_out;

	/* Mark successful completion of FCF table scan */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
	phba->hba_flag &= ~FCF_TS_INPROG;
	if (vport->port_state != LPFC_FLOGI) {
		phba->hba_flag |= FCF_RR_INPROG;
		spin_unlock_irq(&phba->hbalock);
		lpfc_issue_init_vfi(vport);
		goto out;
	}
	spin_unlock_irq(&phba->hbalock);
	goto out;

fail_out:
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~FCF_RR_INPROG;
	spin_unlock_irq(&phba->hbalock);
out:
	mempool_free(mboxq, phba->mbox_mem_pool);
}

/**
 * lpfc_fab_name_match - Check if the fcf fabric name matches.
 * @fab_name: pointer to fabric name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's fabric name with the provided
 * fabric name. If the fabric names are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
{
	if (fab_name[0] != bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record))
		return 0;
	if (fab_name[1] != bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record))
		return 0;
	if (fab_name[2] != bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record))
		return 0;
	if (fab_name[3] != bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record))
		return 0;
	if (fab_name[4] != bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record))
		return 0;
	if (fab_name[5] != bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record))
		return 0;
	if (fab_name[6] != bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record))
		return 0;
	if (fab_name[7] != bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record))
		return 0;
	return 1;
}

/**
 * lpfc_sw_name_match - Check if the fcf switch name matches.
 * @sw_name: pointer to switch name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's switch name with the provided
 * switch name. If the switch names are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
{
	if (sw_name[0] != bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record))
		return 0;
	if (sw_name[1] != bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record))
		return 0;
	if (sw_name[2] != bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record))
		return 0;
	if (sw_name[3] != bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record))
		return 0;
	if (sw_name[4] != bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record))
		return 0;
	if (sw_name[5] != bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record))
		return 0;
	if (sw_name[6] != bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record))
		return 0;
	if (sw_name[7] != bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record))
		return 0;
	return 1;
}

/**
 * lpfc_mac_addr_match - Check if the fcf mac address matches.
 * @mac_addr: pointer to mac address.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's mac address with the HBA's
 * FCF mac address. If the mac addresses are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_mac_addr_match(uint8_t *mac_addr, struct fcf_record *new_fcf_record)
{
	if (mac_addr[0] != bf_get(lpfc_fcf_record_mac_0, new_fcf_record))
		return 0;
	if (mac_addr[1] != bf_get(lpfc_fcf_record_mac_1, new_fcf_record))
		return 0;
	if (mac_addr[2] != bf_get(lpfc_fcf_record_mac_2, new_fcf_record))
		return 0;
	if (mac_addr[3] != bf_get(lpfc_fcf_record_mac_3, new_fcf_record))
		return 0;
	if (mac_addr[4] != bf_get(lpfc_fcf_record_mac_4, new_fcf_record))
		return 0;
	if (mac_addr[5] != bf_get(lpfc_fcf_record_mac_5, new_fcf_record))
		return 0;
	return 1;
}

static bool
lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
{
	return (curr_vlan_id == new_vlan_id);
}

/**
 * __lpfc_update_fcf_record_pri - update the lpfc_fcf_pri record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: Index for the lpfc_fcf_record.
 * @new_fcf_record: pointer to hba fcf record.
 *
 * This routine updates the driver FCF priority record from the new HBA FCF
 * record. The hbalock is asserted held in the code path calling this
 * routine.
 **/
static void
__lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
			     struct fcf_record *new_fcf_record)
{
	struct lpfc_fcf_pri *fcf_pri;

	fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	fcf_pri->fcf_rec.fcf_index = fcf_index;
	/* FCF record priority */
	fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
}

/**
 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
 * @fcf_rec: pointer to driver fcf record.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine copies the FCF information from the FCF
 * record to lpfc_hba data structure.
 **/
static void
lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
		     struct fcf_record *new_fcf_record)
{
	/* Fabric name */
	fcf_rec->fabric_name[0] =
		bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
	fcf_rec->fabric_name[1] =
		bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
	fcf_rec->fabric_name[2] =
		bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
	fcf_rec->fabric_name[3] =
		bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
	fcf_rec->fabric_name[4] =
		bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
	fcf_rec->fabric_name[5] =
		bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
	fcf_rec->fabric_name[6] =
		bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
	fcf_rec->fabric_name[7] =
		bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
	/* Mac address */
	fcf_rec->mac_addr[0] = bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
	fcf_rec->mac_addr[1] = bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
	fcf_rec->mac_addr[2] = bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
	fcf_rec->mac_addr[3] = bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
	fcf_rec->mac_addr[4] = bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
	fcf_rec->mac_addr[5] = bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
	/* FCF record index */
	fcf_rec->fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
	/* FCF record priority */
	fcf_rec->priority = new_fcf_record->fip_priority;
	/* Switch name */
	fcf_rec->switch_name[0] =
		bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record);
	fcf_rec->switch_name[1] =
		bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record);
	fcf_rec->switch_name[2] =
		bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record);
	fcf_rec->switch_name[3] =
		bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record);
	fcf_rec->switch_name[4] =
		bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record);
	fcf_rec->switch_name[5] =
		bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record);
	fcf_rec->switch_name[6] =
		bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record);
	fcf_rec->switch_name[7] =
		bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record);
}

/**
 * __lpfc_update_fcf_record - Update driver fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_rec: pointer to driver fcf record.
 * @new_fcf_record: pointer to hba fcf record.
 * @addr_mode: address mode to be set to the driver fcf record.
 * @vlan_id: vlan tag to be set to the driver fcf record.
 * @flag: flag bits to be set to the driver fcf record.
 *
 * This routine updates the driver FCF record from the new HBA FCF record
 * together with the address mode, vlan_id, and other information. This
 * routine is called with the hbalock held.
1832 **/ 1833 static void 1834 __lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec, 1835 struct fcf_record *new_fcf_record, uint32_t addr_mode, 1836 uint16_t vlan_id, uint32_t flag) 1837 { 1838 lockdep_assert_held(&phba->hbalock); 1839 1840 /* Copy the fields from the HBA's FCF record */ 1841 lpfc_copy_fcf_record(fcf_rec, new_fcf_record); 1842 /* Update other fields of driver FCF record */ 1843 fcf_rec->addr_mode = addr_mode; 1844 fcf_rec->vlan_id = vlan_id; 1845 fcf_rec->flag |= (flag | RECORD_VALID); 1846 __lpfc_update_fcf_record_pri(phba, 1847 bf_get(lpfc_fcf_record_fcf_index, new_fcf_record), 1848 new_fcf_record); 1849 } 1850 1851 /** 1852 * lpfc_register_fcf - Register the FCF with hba. 1853 * @phba: pointer to lpfc hba data structure. 1854 * 1855 * This routine issues a register fcfi mailbox command to register 1856 * the fcf with HBA. 1857 **/ 1858 static void 1859 lpfc_register_fcf(struct lpfc_hba *phba) 1860 { 1861 LPFC_MBOXQ_t *fcf_mbxq; 1862 int rc; 1863 1864 spin_lock_irq(&phba->hbalock); 1865 /* If the FCF is not available do nothing. */ 1866 if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) { 1867 phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); 1868 spin_unlock_irq(&phba->hbalock); 1869 return; 1870 } 1871 1872 /* The FCF is already registered, start discovery */ 1873 if (phba->fcf.fcf_flag & FCF_REGISTERED) { 1874 phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); 1875 phba->hba_flag &= ~FCF_TS_INPROG; 1876 if (phba->pport->port_state != LPFC_FLOGI && 1877 phba->pport->fc_flag & FC_FABRIC) { 1878 phba->hba_flag |= FCF_RR_INPROG; 1879 spin_unlock_irq(&phba->hbalock); 1880 lpfc_initial_flogi(phba->pport); 1881 return; 1882 } 1883 spin_unlock_irq(&phba->hbalock); 1884 return; 1885 } 1886 spin_unlock_irq(&phba->hbalock); 1887 1888 fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1889 if (!fcf_mbxq) { 1890 spin_lock_irq(&phba->hbalock); 1891 phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); 1892 spin_unlock_irq(&phba->hbalock); 1893 return; 1894 } 1895 1896 lpfc_reg_fcfi(phba, fcf_mbxq); 1897 fcf_mbxq->vport = phba->pport; 1898 fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi; 1899 rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT); 1900 if (rc == MBX_NOT_FINISHED) { 1901 spin_lock_irq(&phba->hbalock); 1902 phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); 1903 spin_unlock_irq(&phba->hbalock); 1904 mempool_free(fcf_mbxq, phba->mbox_mem_pool); 1905 } 1906 1907 return; 1908 } 1909 1910 /** 1911 * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery. 1912 * @phba: pointer to lpfc hba data structure. 1913 * @new_fcf_record: pointer to fcf record. 1914 * @boot_flag: Indicates if this record used by boot bios. 1915 * @addr_mode: The address mode to be used by this FCF 1916 * @vlan_id: The vlan id to be used as vlan tagging by this FCF. 1917 * 1918 * This routine compare the fcf record with connect list obtained from the 1919 * config region to decide if this FCF can be used for SAN discovery. It returns 1920 * 1 if this record can be used for SAN discovery else return zero. If this FCF 1921 * record can be used for SAN discovery, the boot_flag will indicate if this FCF 1922 * is used by boot bios and addr_mode will indicate the addressing mode to be 1923 * used for this FCF when the function returns. 1924 * If the FCF record need to be used with a particular vlan id, the vlan is 1925 * set in the vlan_id on return of the function. 
If no VLAN tagging is
 * needed for the FCF, vlan_id will be set to LPFC_FCOE_NULL_VID on return.
 **/
static int
lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
			 struct fcf_record *new_fcf_record,
			 uint32_t *boot_flag, uint32_t *addr_mode,
			 uint16_t *vlan_id)
{
	struct lpfc_fcf_conn_entry *conn_entry;
	int i, j, fcf_vlan_id = 0;

	/* Find the lowest VLAN id in the FCF record */
	for (i = 0; i < 512; i++) {
		if (new_fcf_record->vlan_bitmap[i]) {
			fcf_vlan_id = i * 8;
			j = 0;
			while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) {
				j++;
				fcf_vlan_id++;
			}
			break;
		}
	}

	/* FCF not valid/available or solicitation in progress */
	if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
	    !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record) ||
	    bf_get(lpfc_fcf_record_fcf_sol, new_fcf_record))
		return 0;

	if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
		*boot_flag = 0;
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				new_fcf_record);
		if (phba->valid_vlan)
			*vlan_id = phba->vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;
		return 1;
	}

	/*
	 * If there are no FCF connection table entries, the driver connects
	 * to all FCFs.
	 */
	if (list_empty(&phba->fcf_conn_rec_list)) {
		*boot_flag = 0;
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
			new_fcf_record);

		/*
		 * When there are no FCF connect entries, use the driver's
		 * default addressing mode - FPMA.
		 */
		if (*addr_mode & LPFC_FCF_FPMA)
			*addr_mode = LPFC_FCF_FPMA;

		/* If the FCF record reports a vlan id, use that vlan id */
		if (fcf_vlan_id)
			*vlan_id = fcf_vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;
		return 1;
	}

	list_for_each_entry(conn_entry,
			    &phba->fcf_conn_rec_list, list) {
		if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
			continue;

		if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
		    !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
					 new_fcf_record))
			continue;
		if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) &&
		    !lpfc_sw_name_match(conn_entry->conn_rec.switch_name,
					new_fcf_record))
			continue;
		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
			/*
			 * If the vlan bit map does not have the bit set for
			 * the vlan id to be used, then it is not a match.
			 */
			if (!(new_fcf_record->vlan_bitmap
				[conn_entry->conn_rec.vlan_tag / 8] &
				(1 << (conn_entry->conn_rec.vlan_tag % 8))))
				continue;
		}

		/*
		 * If the connection record does not support any addressing
		 * mode, skip the FCF record.
		 */
		if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record)
			& (LPFC_FCF_FPMA | LPFC_FCF_SPMA)))
			continue;

		/*
		 * Check if the connection record specifies a required
		 * addressing mode.
		 */
		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
		    !(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) {

			/*
			 * If SPMA is required but the FCF does not support
			 * it, continue.
			 */
			if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			    !(bf_get(lpfc_fcf_record_mac_addr_prov,
				     new_fcf_record) & LPFC_FCF_SPMA))
				continue;

			/*
			 * If FPMA is required but the FCF does not support
			 * it, continue.
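			 * (SPMA = Server-Provided MAC Address and FPMA =
			 * Fabric-Provided MAC Address are the two FIP
			 * addressing modes.)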
			 */
			if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			    !(bf_get(lpfc_fcf_record_mac_addr_prov,
				     new_fcf_record) & LPFC_FCF_FPMA))
				continue;
		}

		/*
		 * This FCF record matches the filtering criteria.
		 */
		if (conn_entry->conn_rec.flags & FCFCNCT_BOOT)
			*boot_flag = 1;
		else
			*boot_flag = 0;

		/*
		 * If the user did not specify any addressing mode, or if the
		 * preferred addressing mode specified by the user is not
		 * supported by the FCF, allow the fabric to pick the
		 * addressing mode.
		 */
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				new_fcf_record);
		/*
		 * If the user specified a required address mode, assign that
		 * address mode.
		 */
		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
		    (!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)))
			*addr_mode = (conn_entry->conn_rec.flags &
				      FCFCNCT_AM_SPMA) ?
				      LPFC_FCF_SPMA : LPFC_FCF_FPMA;
		/*
		 * If the user specified a preferred address mode, use that
		 * address mode only if the FCF supports it.
		 */
		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			 (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
			 (conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			 (*addr_mode & LPFC_FCF_SPMA))
				*addr_mode = LPFC_FCF_SPMA;
		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			 (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
			 !(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			 (*addr_mode & LPFC_FCF_FPMA))
				*addr_mode = LPFC_FCF_FPMA;

		/* If the matching connect list has a vlan id, use it */
		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
			*vlan_id = conn_entry->conn_rec.vlan_tag;
		/*
		 * If no vlan id is specified in the connect list, use the
		 * vlan id in the FCF record.
		 */
		else if (fcf_vlan_id)
			*vlan_id = fcf_vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;

		return 1;
	}

	return 0;
}

/**
 * lpfc_check_pending_fcoe_event - Check if there is pending fcoe event.
 * @phba: pointer to lpfc hba data structure.
 * @unreg_fcf: Unregister FCF if the FCF table needs to be re-scanned.
 *
 * This function checks if any FCoE event is pending while the driver is
 * scanning FCF entries. If there is a pending event, it restarts the FCF
 * scan and returns 1; otherwise it returns 0.
 */
int
lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
{
	/*
	 * If the link is up and there were no FCoE events while in
	 * FCF discovery, there is no need to restart FCF discovery.
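	 * (phba->fcoe_eventtag_at_fcf_scan is the event tag sampled when the
	 * scan was started, so a mismatch with phba->fcoe_eventtag means new
	 * FCoE events arrived while the scan was in progress.)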
	 */
	if ((phba->link_state >= LPFC_LINK_UP) &&
	    (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
		return 0;

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2768 Pending link or FCF event during current "
			"handling of the previous event: link_state:x%x, "
			"evt_tag_at_scan:x%x, evt_tag_current:x%x\n",
			phba->link_state, phba->fcoe_eventtag_at_fcf_scan,
			phba->fcoe_eventtag);

	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
	spin_unlock_irq(&phba->hbalock);

	if (phba->link_state >= LPFC_LINK_UP) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2780 Restart FCF table scan due to "
				"pending FCF event:evt_tag_at_scan:x%x, "
				"evt_tag_current:x%x\n",
				phba->fcoe_eventtag_at_fcf_scan,
				phba->fcoe_eventtag);
		lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
	} else {
		/*
		 * Do not continue FCF discovery and clear FCF_TS_INPROG
		 * flag
		 */
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2833 Stop FCF discovery process due to link "
				"state change (x%x)\n", phba->link_state);
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
		spin_unlock_irq(&phba->hbalock);
	}

	/* Unregister the currently registered FCF if required */
	if (unreg_fcf) {
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_REGISTERED;
		spin_unlock_irq(&phba->hbalock);
		lpfc_sli4_unregister_fcf(phba);
	}
	return 1;
}

/**
 * lpfc_sli4_new_fcf_random_select - Randomly select an eligible new fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_cnt: number of eligible fcf records seen so far.
 *
 * This function makes a running random selection decision on which FCF
 * record to use through a sequence of @fcf_cnt eligible FCF records, giving
 * each record equal probability. To keep the integer arithmetic within
 * uint32_t, the 16-bit random number returned from get_random_u16() is used:
 * the newly read record is selected when fcf_cnt * rand_num < 0xFFFF, which
 * happens with probability of roughly 1/@fcf_cnt.
 *
 * Returns true when the outcome is that the newly read FCF record should be
 * chosen; otherwise, returns false to keep the previously chosen FCF record.
 **/
static bool
lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt)
{
	uint32_t rand_num;

	/* Get 16-bit uniform random number */
	rand_num = get_random_u16();

	/* Decision with probability 1/fcf_cnt */
	if ((fcf_cnt * rand_num) < 0xFFFF)
		return true;
	else
		return false;
}

/**
 * lpfc_sli4_fcf_rec_mbox_parse - Parse read_fcf mbox command.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 * @next_fcf_index: pointer to holder of next fcf index.
 *
 * This routine parses the non-embedded fcf mailbox command by performing the
 * necessary error checking, non-embedded read FCF record mailbox command
 * SGE parsing, and endianness swapping.
 *
 * Returns the pointer to the new FCF record in the non-embedded mailbox
 * command DMA memory on success; otherwise NULL.
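 * The record is byte-swapped in place within the mailbox's non-embedded
 * DMA buffer, so no separate copy is allocated.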
2209 */ 2210 static struct fcf_record * 2211 lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, 2212 uint16_t *next_fcf_index) 2213 { 2214 void *virt_addr; 2215 struct lpfc_mbx_sge sge; 2216 struct lpfc_mbx_read_fcf_tbl *read_fcf; 2217 uint32_t shdr_status, shdr_add_status, if_type; 2218 union lpfc_sli4_cfg_shdr *shdr; 2219 struct fcf_record *new_fcf_record; 2220 2221 /* Get the first SGE entry from the non-embedded DMA memory. This 2222 * routine only uses a single SGE. 2223 */ 2224 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); 2225 if (unlikely(!mboxq->sge_array)) { 2226 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2227 "2524 Failed to get the non-embedded SGE " 2228 "virtual address\n"); 2229 return NULL; 2230 } 2231 virt_addr = mboxq->sge_array->addr[0]; 2232 2233 shdr = (union lpfc_sli4_cfg_shdr *)virt_addr; 2234 lpfc_sli_pcimem_bcopy(shdr, shdr, 2235 sizeof(union lpfc_sli4_cfg_shdr)); 2236 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 2237 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 2238 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 2239 if (shdr_status || shdr_add_status) { 2240 if (shdr_status == STATUS_FCF_TABLE_EMPTY || 2241 if_type == LPFC_SLI_INTF_IF_TYPE_2) 2242 lpfc_printf_log(phba, KERN_ERR, 2243 LOG_TRACE_EVENT, 2244 "2726 READ_FCF_RECORD Indicates empty " 2245 "FCF table.\n"); 2246 else 2247 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2248 "2521 READ_FCF_RECORD mailbox failed " 2249 "with status x%x add_status x%x, " 2250 "mbx\n", shdr_status, shdr_add_status); 2251 return NULL; 2252 } 2253 2254 /* Interpreting the returned information of the FCF record */ 2255 read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr; 2256 lpfc_sli_pcimem_bcopy(read_fcf, read_fcf, 2257 sizeof(struct lpfc_mbx_read_fcf_tbl)); 2258 *next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf); 2259 new_fcf_record = (struct fcf_record *)(virt_addr + 2260 sizeof(struct lpfc_mbx_read_fcf_tbl)); 2261 lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record, 2262 offsetof(struct fcf_record, vlan_bitmap)); 2263 new_fcf_record->word137 = le32_to_cpu(new_fcf_record->word137); 2264 new_fcf_record->word138 = le32_to_cpu(new_fcf_record->word138); 2265 2266 return new_fcf_record; 2267 } 2268 2269 /** 2270 * lpfc_sli4_log_fcf_record_info - Log the information of a fcf record 2271 * @phba: pointer to lpfc hba data structure. 2272 * @fcf_record: pointer to the fcf record. 2273 * @vlan_id: the lowest vlan identifier associated to this fcf record. 2274 * @next_fcf_index: the index to the next fcf record in hba's fcf table. 2275 * 2276 * This routine logs the detailed FCF record if the LOG_FIP loggin is 2277 * enabled. 
2278 **/ 2279 static void 2280 lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba, 2281 struct fcf_record *fcf_record, 2282 uint16_t vlan_id, 2283 uint16_t next_fcf_index) 2284 { 2285 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2286 "2764 READ_FCF_RECORD:\n" 2287 "\tFCF_Index : x%x\n" 2288 "\tFCF_Avail : x%x\n" 2289 "\tFCF_Valid : x%x\n" 2290 "\tFCF_SOL : x%x\n" 2291 "\tFIP_Priority : x%x\n" 2292 "\tMAC_Provider : x%x\n" 2293 "\tLowest VLANID : x%x\n" 2294 "\tFCF_MAC Addr : x%x:%x:%x:%x:%x:%x\n" 2295 "\tFabric_Name : x%x:%x:%x:%x:%x:%x:%x:%x\n" 2296 "\tSwitch_Name : x%x:%x:%x:%x:%x:%x:%x:%x\n" 2297 "\tNext_FCF_Index: x%x\n", 2298 bf_get(lpfc_fcf_record_fcf_index, fcf_record), 2299 bf_get(lpfc_fcf_record_fcf_avail, fcf_record), 2300 bf_get(lpfc_fcf_record_fcf_valid, fcf_record), 2301 bf_get(lpfc_fcf_record_fcf_sol, fcf_record), 2302 fcf_record->fip_priority, 2303 bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record), 2304 vlan_id, 2305 bf_get(lpfc_fcf_record_mac_0, fcf_record), 2306 bf_get(lpfc_fcf_record_mac_1, fcf_record), 2307 bf_get(lpfc_fcf_record_mac_2, fcf_record), 2308 bf_get(lpfc_fcf_record_mac_3, fcf_record), 2309 bf_get(lpfc_fcf_record_mac_4, fcf_record), 2310 bf_get(lpfc_fcf_record_mac_5, fcf_record), 2311 bf_get(lpfc_fcf_record_fab_name_0, fcf_record), 2312 bf_get(lpfc_fcf_record_fab_name_1, fcf_record), 2313 bf_get(lpfc_fcf_record_fab_name_2, fcf_record), 2314 bf_get(lpfc_fcf_record_fab_name_3, fcf_record), 2315 bf_get(lpfc_fcf_record_fab_name_4, fcf_record), 2316 bf_get(lpfc_fcf_record_fab_name_5, fcf_record), 2317 bf_get(lpfc_fcf_record_fab_name_6, fcf_record), 2318 bf_get(lpfc_fcf_record_fab_name_7, fcf_record), 2319 bf_get(lpfc_fcf_record_switch_name_0, fcf_record), 2320 bf_get(lpfc_fcf_record_switch_name_1, fcf_record), 2321 bf_get(lpfc_fcf_record_switch_name_2, fcf_record), 2322 bf_get(lpfc_fcf_record_switch_name_3, fcf_record), 2323 bf_get(lpfc_fcf_record_switch_name_4, fcf_record), 2324 bf_get(lpfc_fcf_record_switch_name_5, fcf_record), 2325 bf_get(lpfc_fcf_record_switch_name_6, fcf_record), 2326 bf_get(lpfc_fcf_record_switch_name_7, fcf_record), 2327 next_fcf_index); 2328 } 2329 2330 /** 2331 * lpfc_sli4_fcf_record_match - testing new FCF record for matching existing FCF 2332 * @phba: pointer to lpfc hba data structure. 2333 * @fcf_rec: pointer to an existing FCF record. 2334 * @new_fcf_record: pointer to a new FCF record. 2335 * @new_vlan_id: vlan id from the new FCF record. 2336 * 2337 * This function performs matching test of a new FCF record against an existing 2338 * FCF record. If the new_vlan_id passed in is LPFC_FCOE_IGNORE_VID, vlan id 2339 * will not be used as part of the FCF record matching criteria. 2340 * 2341 * Returns true if all the fields matching, otherwise returns false. 
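 * The fields compared are the vlan id (unless @new_vlan_id is
 * LPFC_FCOE_IGNORE_VID), the MAC address, the switch name, the fabric name,
 * and the FIP priority.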
2342 */ 2343 static bool 2344 lpfc_sli4_fcf_record_match(struct lpfc_hba *phba, 2345 struct lpfc_fcf_rec *fcf_rec, 2346 struct fcf_record *new_fcf_record, 2347 uint16_t new_vlan_id) 2348 { 2349 if (new_vlan_id != LPFC_FCOE_IGNORE_VID) 2350 if (!lpfc_vlan_id_match(fcf_rec->vlan_id, new_vlan_id)) 2351 return false; 2352 if (!lpfc_mac_addr_match(fcf_rec->mac_addr, new_fcf_record)) 2353 return false; 2354 if (!lpfc_sw_name_match(fcf_rec->switch_name, new_fcf_record)) 2355 return false; 2356 if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record)) 2357 return false; 2358 if (fcf_rec->priority != new_fcf_record->fip_priority) 2359 return false; 2360 return true; 2361 } 2362 2363 /** 2364 * lpfc_sli4_fcf_rr_next_proc - processing next roundrobin fcf 2365 * @vport: Pointer to vport object. 2366 * @fcf_index: index to next fcf. 2367 * 2368 * This function processing the roundrobin fcf failover to next fcf index. 2369 * When this function is invoked, there will be a current fcf registered 2370 * for flogi. 2371 * Return: 0 for continue retrying flogi on currently registered fcf; 2372 * 1 for stop flogi on currently registered fcf; 2373 */ 2374 int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index) 2375 { 2376 struct lpfc_hba *phba = vport->phba; 2377 int rc; 2378 2379 if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) { 2380 spin_lock_irq(&phba->hbalock); 2381 if (phba->hba_flag & HBA_DEVLOSS_TMO) { 2382 spin_unlock_irq(&phba->hbalock); 2383 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2384 "2872 Devloss tmo with no eligible " 2385 "FCF, unregister in-use FCF (x%x) " 2386 "and rescan FCF table\n", 2387 phba->fcf.current_rec.fcf_indx); 2388 lpfc_unregister_fcf_rescan(phba); 2389 goto stop_flogi_current_fcf; 2390 } 2391 /* Mark the end to FLOGI roundrobin failover */ 2392 phba->hba_flag &= ~FCF_RR_INPROG; 2393 /* Allow action to new fcf asynchronous event */ 2394 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); 2395 spin_unlock_irq(&phba->hbalock); 2396 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2397 "2865 No FCF available, stop roundrobin FCF " 2398 "failover and change port state:x%x/x%x\n", 2399 phba->pport->port_state, LPFC_VPORT_UNKNOWN); 2400 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 2401 2402 if (!phba->fcf.fcf_redisc_attempted) { 2403 lpfc_unregister_fcf(phba); 2404 2405 rc = lpfc_sli4_redisc_fcf_table(phba); 2406 if (!rc) { 2407 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2408 "3195 Rediscover FCF table\n"); 2409 phba->fcf.fcf_redisc_attempted = 1; 2410 lpfc_sli4_clear_fcf_rr_bmask(phba); 2411 } else { 2412 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 2413 "3196 Rediscover FCF table " 2414 "failed. Status:x%x\n", rc); 2415 } 2416 } else { 2417 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 2418 "3197 Already rediscover FCF table " 2419 "attempted. No more retry\n"); 2420 } 2421 goto stop_flogi_current_fcf; 2422 } else { 2423 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS, 2424 "2794 Try FLOGI roundrobin FCF failover to " 2425 "(x%x)\n", fcf_index); 2426 rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index); 2427 if (rc) 2428 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, 2429 "2761 FLOGI roundrobin FCF failover " 2430 "failed (rc:x%x) to read FCF (x%x)\n", 2431 rc, phba->fcf.current_rec.fcf_indx); 2432 else 2433 goto stop_flogi_current_fcf; 2434 } 2435 return 0; 2436 2437 stop_flogi_current_fcf: 2438 lpfc_can_disctmo(vport); 2439 return 1; 2440 } 2441 2442 /** 2443 * lpfc_sli4_fcf_pri_list_del 2444 * @phba: pointer to lpfc hba data structure. 
 * @fcf_index: the index of the fcf record to delete
 *
 * This routine checks the on-list flag of the fcf_index to be deleted.
 * If it is on the list, it is removed from the list and the flag is
 * cleared. This routine grabs the hbalock before removing the fcf
 * record from the list.
 **/
static void lpfc_sli4_fcf_pri_list_del(struct lpfc_hba *phba,
				       uint16_t fcf_index)
{
	struct lpfc_fcf_pri *new_fcf_pri;

	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"3058 deleting idx x%x pri x%x flg x%x\n",
			fcf_index, new_fcf_pri->fcf_rec.priority,
			new_fcf_pri->fcf_rec.flag);
	spin_lock_irq(&phba->hbalock);
	if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) {
		if (phba->fcf.current_rec.priority ==
		    new_fcf_pri->fcf_rec.priority)
			phba->fcf.eligible_fcf_cnt--;
		list_del_init(&new_fcf_pri->list);
		new_fcf_pri->fcf_rec.flag &= ~LPFC_FCF_ON_PRI_LIST;
	}
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli4_set_fcf_flogi_fail
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to update
 *
 * This routine acquires the hbalock and then sets the LPFC_FCF_FLOGI_FAILED
 * flag so that the roundrobin selection for the particular priority level
 * will try a different fcf record that does not have this bit set.
 * If the fcf record is re-read for any reason, this flag is cleared before
 * adding it to the priority list.
 **/
void
lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index)
{
	struct lpfc_fcf_pri *new_fcf_pri;

	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	spin_lock_irq(&phba->hbalock);
	new_fcf_pri->fcf_rec.flag |= LPFC_FCF_FLOGI_FAILED;
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli4_fcf_pri_list_add
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to add
 * @new_fcf_record: pointer to a new FCF record.
 *
 * This routine checks the priority of the fcf_index to be added.
 * If it is a lower priority than the current head of the fcf_pri list,
 * it is added to the list in the right order.
 * If it is the same priority as the current head of the list, it is
 * added to the head of the list and its bit in the rr_bmask is set.
 * If the fcf_index to be added is of a higher priority than the current
 * head of the list, the rr_bmask is cleared, its bit is set in the
 * rr_bmask, and it is added to the head of the list.
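 * Note that a numerically lower FIP priority value denotes a more preferred
 * FCF.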
2505 * returns: 2506 * 0=success 1=failure 2507 **/ 2508 static int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba, 2509 uint16_t fcf_index, 2510 struct fcf_record *new_fcf_record) 2511 { 2512 uint16_t current_fcf_pri; 2513 uint16_t last_index; 2514 struct lpfc_fcf_pri *fcf_pri; 2515 struct lpfc_fcf_pri *next_fcf_pri; 2516 struct lpfc_fcf_pri *new_fcf_pri; 2517 int ret; 2518 2519 new_fcf_pri = &phba->fcf.fcf_pri[fcf_index]; 2520 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2521 "3059 adding idx x%x pri x%x flg x%x\n", 2522 fcf_index, new_fcf_record->fip_priority, 2523 new_fcf_pri->fcf_rec.flag); 2524 spin_lock_irq(&phba->hbalock); 2525 if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) 2526 list_del_init(&new_fcf_pri->list); 2527 new_fcf_pri->fcf_rec.fcf_index = fcf_index; 2528 new_fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority; 2529 if (list_empty(&phba->fcf.fcf_pri_list)) { 2530 list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list); 2531 ret = lpfc_sli4_fcf_rr_index_set(phba, 2532 new_fcf_pri->fcf_rec.fcf_index); 2533 goto out; 2534 } 2535 2536 last_index = find_first_bit(phba->fcf.fcf_rr_bmask, 2537 LPFC_SLI4_FCF_TBL_INDX_MAX); 2538 if (last_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 2539 ret = 0; /* Empty rr list */ 2540 goto out; 2541 } 2542 current_fcf_pri = phba->fcf.fcf_pri[last_index].fcf_rec.priority; 2543 if (new_fcf_pri->fcf_rec.priority <= current_fcf_pri) { 2544 list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list); 2545 if (new_fcf_pri->fcf_rec.priority < current_fcf_pri) { 2546 memset(phba->fcf.fcf_rr_bmask, 0, 2547 sizeof(*phba->fcf.fcf_rr_bmask)); 2548 /* fcfs_at_this_priority_level = 1; */ 2549 phba->fcf.eligible_fcf_cnt = 1; 2550 } else 2551 /* fcfs_at_this_priority_level++; */ 2552 phba->fcf.eligible_fcf_cnt++; 2553 ret = lpfc_sli4_fcf_rr_index_set(phba, 2554 new_fcf_pri->fcf_rec.fcf_index); 2555 goto out; 2556 } 2557 2558 list_for_each_entry_safe(fcf_pri, next_fcf_pri, 2559 &phba->fcf.fcf_pri_list, list) { 2560 if (new_fcf_pri->fcf_rec.priority <= 2561 fcf_pri->fcf_rec.priority) { 2562 if (fcf_pri->list.prev == &phba->fcf.fcf_pri_list) 2563 list_add(&new_fcf_pri->list, 2564 &phba->fcf.fcf_pri_list); 2565 else 2566 list_add(&new_fcf_pri->list, 2567 &((struct lpfc_fcf_pri *) 2568 fcf_pri->list.prev)->list); 2569 ret = 0; 2570 goto out; 2571 } else if (fcf_pri->list.next == &phba->fcf.fcf_pri_list 2572 || new_fcf_pri->fcf_rec.priority < 2573 next_fcf_pri->fcf_rec.priority) { 2574 list_add(&new_fcf_pri->list, &fcf_pri->list); 2575 ret = 0; 2576 goto out; 2577 } 2578 if (new_fcf_pri->fcf_rec.priority > fcf_pri->fcf_rec.priority) 2579 continue; 2580 2581 } 2582 ret = 1; 2583 out: 2584 /* we use = instead of |= to clear the FLOGI_FAILED flag. */ 2585 new_fcf_pri->fcf_rec.flag = LPFC_FCF_ON_PRI_LIST; 2586 spin_unlock_irq(&phba->hbalock); 2587 return ret; 2588 } 2589 2590 /** 2591 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler. 2592 * @phba: pointer to lpfc hba data structure. 2593 * @mboxq: pointer to mailbox object. 2594 * 2595 * This function iterates through all the fcf records available in 2596 * HBA and chooses the optimal FCF record for discovery. After finding 2597 * the FCF for discovery it registers the FCF record and kicks start 2598 * discovery. 2599 * If FCF_IN_USE flag is set in currently used FCF, the routine tries to 2600 * use an FCF record which matches fabric name and mac address of the 2601 * currently used FCF record. 2602 * If the driver supports only one FCF, it will try to use the FCF record 2603 * used by BOOT_BIOS. 
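 * The @mboxq mailbox is released on every completion path of this handler.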
2604 */ 2605 void 2606 lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 2607 { 2608 struct fcf_record *new_fcf_record; 2609 uint32_t boot_flag, addr_mode; 2610 uint16_t fcf_index, next_fcf_index; 2611 struct lpfc_fcf_rec *fcf_rec = NULL; 2612 uint16_t vlan_id = LPFC_FCOE_NULL_VID; 2613 bool select_new_fcf; 2614 int rc; 2615 2616 /* If there is pending FCoE event restart FCF table scan */ 2617 if (lpfc_check_pending_fcoe_event(phba, LPFC_SKIP_UNREG_FCF)) { 2618 lpfc_sli4_mbox_cmd_free(phba, mboxq); 2619 return; 2620 } 2621 2622 /* Parse the FCF record from the non-embedded mailbox command */ 2623 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq, 2624 &next_fcf_index); 2625 if (!new_fcf_record) { 2626 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2627 "2765 Mailbox command READ_FCF_RECORD " 2628 "failed to retrieve a FCF record.\n"); 2629 /* Let next new FCF event trigger fast failover */ 2630 spin_lock_irq(&phba->hbalock); 2631 phba->hba_flag &= ~FCF_TS_INPROG; 2632 spin_unlock_irq(&phba->hbalock); 2633 lpfc_sli4_mbox_cmd_free(phba, mboxq); 2634 return; 2635 } 2636 2637 /* Check the FCF record against the connection list */ 2638 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, 2639 &addr_mode, &vlan_id); 2640 2641 /* Log the FCF record information if turned on */ 2642 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id, 2643 next_fcf_index); 2644 2645 /* 2646 * If the fcf record does not match with connect list entries 2647 * read the next entry; otherwise, this is an eligible FCF 2648 * record for roundrobin FCF failover. 2649 */ 2650 if (!rc) { 2651 lpfc_sli4_fcf_pri_list_del(phba, 2652 bf_get(lpfc_fcf_record_fcf_index, 2653 new_fcf_record)); 2654 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 2655 "2781 FCF (x%x) failed connection " 2656 "list check: (x%x/x%x/%x)\n", 2657 bf_get(lpfc_fcf_record_fcf_index, 2658 new_fcf_record), 2659 bf_get(lpfc_fcf_record_fcf_avail, 2660 new_fcf_record), 2661 bf_get(lpfc_fcf_record_fcf_valid, 2662 new_fcf_record), 2663 bf_get(lpfc_fcf_record_fcf_sol, 2664 new_fcf_record)); 2665 if ((phba->fcf.fcf_flag & FCF_IN_USE) && 2666 lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec, 2667 new_fcf_record, LPFC_FCOE_IGNORE_VID)) { 2668 if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) != 2669 phba->fcf.current_rec.fcf_indx) { 2670 lpfc_printf_log(phba, KERN_ERR, 2671 LOG_TRACE_EVENT, 2672 "2862 FCF (x%x) matches property " 2673 "of in-use FCF (x%x)\n", 2674 bf_get(lpfc_fcf_record_fcf_index, 2675 new_fcf_record), 2676 phba->fcf.current_rec.fcf_indx); 2677 goto read_next_fcf; 2678 } 2679 /* 2680 * In case the current in-use FCF record becomes 2681 * invalid/unavailable during FCF discovery that 2682 * was not triggered by fast FCF failover process, 2683 * treat it as fast FCF failover. 
2684 */ 2685 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND) && 2686 !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) { 2687 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 2688 "2835 Invalid in-use FCF " 2689 "(x%x), enter FCF failover " 2690 "table scan.\n", 2691 phba->fcf.current_rec.fcf_indx); 2692 spin_lock_irq(&phba->hbalock); 2693 phba->fcf.fcf_flag |= FCF_REDISC_FOV; 2694 spin_unlock_irq(&phba->hbalock); 2695 lpfc_sli4_mbox_cmd_free(phba, mboxq); 2696 lpfc_sli4_fcf_scan_read_fcf_rec(phba, 2697 LPFC_FCOE_FCF_GET_FIRST); 2698 return; 2699 } 2700 } 2701 goto read_next_fcf; 2702 } else { 2703 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); 2704 rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, 2705 new_fcf_record); 2706 if (rc) 2707 goto read_next_fcf; 2708 } 2709 2710 /* 2711 * If this is not the first FCF discovery of the HBA, use last 2712 * FCF record for the discovery. The condition that a rescan 2713 * matches the in-use FCF record: fabric name, switch name, mac 2714 * address, and vlan_id. 2715 */ 2716 spin_lock_irq(&phba->hbalock); 2717 if (phba->fcf.fcf_flag & FCF_IN_USE) { 2718 if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV && 2719 lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec, 2720 new_fcf_record, vlan_id)) { 2721 if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) == 2722 phba->fcf.current_rec.fcf_indx) { 2723 phba->fcf.fcf_flag |= FCF_AVAILABLE; 2724 if (phba->fcf.fcf_flag & FCF_REDISC_PEND) 2725 /* Stop FCF redisc wait timer */ 2726 __lpfc_sli4_stop_fcf_redisc_wait_timer( 2727 phba); 2728 else if (phba->fcf.fcf_flag & FCF_REDISC_FOV) 2729 /* Fast failover, mark completed */ 2730 phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; 2731 spin_unlock_irq(&phba->hbalock); 2732 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2733 "2836 New FCF matches in-use " 2734 "FCF (x%x), port_state:x%x, " 2735 "fc_flag:x%x\n", 2736 phba->fcf.current_rec.fcf_indx, 2737 phba->pport->port_state, 2738 phba->pport->fc_flag); 2739 goto out; 2740 } else 2741 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2742 "2863 New FCF (x%x) matches " 2743 "property of in-use FCF (x%x)\n", 2744 bf_get(lpfc_fcf_record_fcf_index, 2745 new_fcf_record), 2746 phba->fcf.current_rec.fcf_indx); 2747 } 2748 /* 2749 * Read next FCF record from HBA searching for the matching 2750 * with in-use record only if not during the fast failover 2751 * period. In case of fast failover period, it shall try to 2752 * determine whether the FCF record just read should be the 2753 * next candidate. 2754 */ 2755 if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) { 2756 spin_unlock_irq(&phba->hbalock); 2757 goto read_next_fcf; 2758 } 2759 } 2760 /* 2761 * Update on failover FCF record only if it's in FCF fast-failover 2762 * period; otherwise, update on current FCF record. 2763 */ 2764 if (phba->fcf.fcf_flag & FCF_REDISC_FOV) 2765 fcf_rec = &phba->fcf.failover_rec; 2766 else 2767 fcf_rec = &phba->fcf.current_rec; 2768 2769 if (phba->fcf.fcf_flag & FCF_AVAILABLE) { 2770 /* 2771 * If the driver FCF record does not have boot flag 2772 * set and new hba fcf record has boot flag set, use 2773 * the new hba fcf record. 
2774 */ 2775 if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) { 2776 /* Choose this FCF record */ 2777 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2778 "2837 Update current FCF record " 2779 "(x%x) with new FCF record (x%x)\n", 2780 fcf_rec->fcf_indx, 2781 bf_get(lpfc_fcf_record_fcf_index, 2782 new_fcf_record)); 2783 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, 2784 addr_mode, vlan_id, BOOT_ENABLE); 2785 spin_unlock_irq(&phba->hbalock); 2786 goto read_next_fcf; 2787 } 2788 /* 2789 * If the driver FCF record has boot flag set and the 2790 * new hba FCF record does not have boot flag, read 2791 * the next FCF record. 2792 */ 2793 if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) { 2794 spin_unlock_irq(&phba->hbalock); 2795 goto read_next_fcf; 2796 } 2797 /* 2798 * If the new hba FCF record has lower priority value 2799 * than the driver FCF record, use the new record. 2800 */ 2801 if (new_fcf_record->fip_priority < fcf_rec->priority) { 2802 /* Choose the new FCF record with lower priority */ 2803 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2804 "2838 Update current FCF record " 2805 "(x%x) with new FCF record (x%x)\n", 2806 fcf_rec->fcf_indx, 2807 bf_get(lpfc_fcf_record_fcf_index, 2808 new_fcf_record)); 2809 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, 2810 addr_mode, vlan_id, 0); 2811 /* Reset running random FCF selection count */ 2812 phba->fcf.eligible_fcf_cnt = 1; 2813 } else if (new_fcf_record->fip_priority == fcf_rec->priority) { 2814 /* Update running random FCF selection count */ 2815 phba->fcf.eligible_fcf_cnt++; 2816 select_new_fcf = lpfc_sli4_new_fcf_random_select(phba, 2817 phba->fcf.eligible_fcf_cnt); 2818 if (select_new_fcf) { 2819 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2820 "2839 Update current FCF record " 2821 "(x%x) with new FCF record (x%x)\n", 2822 fcf_rec->fcf_indx, 2823 bf_get(lpfc_fcf_record_fcf_index, 2824 new_fcf_record)); 2825 /* Choose the new FCF by random selection */ 2826 __lpfc_update_fcf_record(phba, fcf_rec, 2827 new_fcf_record, 2828 addr_mode, vlan_id, 0); 2829 } 2830 } 2831 spin_unlock_irq(&phba->hbalock); 2832 goto read_next_fcf; 2833 } 2834 /* 2835 * This is the first suitable FCF record, choose this record for 2836 * initial best-fit FCF. 2837 */ 2838 if (fcf_rec) { 2839 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2840 "2840 Update initial FCF candidate " 2841 "with FCF (x%x)\n", 2842 bf_get(lpfc_fcf_record_fcf_index, 2843 new_fcf_record)); 2844 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, 2845 addr_mode, vlan_id, (boot_flag ? 
2846 BOOT_ENABLE : 0)); 2847 phba->fcf.fcf_flag |= FCF_AVAILABLE; 2848 /* Setup initial running random FCF selection count */ 2849 phba->fcf.eligible_fcf_cnt = 1; 2850 } 2851 spin_unlock_irq(&phba->hbalock); 2852 goto read_next_fcf; 2853 2854 read_next_fcf: 2855 lpfc_sli4_mbox_cmd_free(phba, mboxq); 2856 if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) { 2857 if (phba->fcf.fcf_flag & FCF_REDISC_FOV) { 2858 /* 2859 * Case of FCF fast failover scan 2860 */ 2861 2862 /* 2863 * It has not found any suitable FCF record, cancel 2864 * FCF scan inprogress, and do nothing 2865 */ 2866 if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) { 2867 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 2868 "2782 No suitable FCF found: " 2869 "(x%x/x%x)\n", 2870 phba->fcoe_eventtag_at_fcf_scan, 2871 bf_get(lpfc_fcf_record_fcf_index, 2872 new_fcf_record)); 2873 spin_lock_irq(&phba->hbalock); 2874 if (phba->hba_flag & HBA_DEVLOSS_TMO) { 2875 phba->hba_flag &= ~FCF_TS_INPROG; 2876 spin_unlock_irq(&phba->hbalock); 2877 /* Unregister in-use FCF and rescan */ 2878 lpfc_printf_log(phba, KERN_INFO, 2879 LOG_FIP, 2880 "2864 On devloss tmo " 2881 "unreg in-use FCF and " 2882 "rescan FCF table\n"); 2883 lpfc_unregister_fcf_rescan(phba); 2884 return; 2885 } 2886 /* 2887 * Let next new FCF event trigger fast failover 2888 */ 2889 phba->hba_flag &= ~FCF_TS_INPROG; 2890 spin_unlock_irq(&phba->hbalock); 2891 return; 2892 } 2893 /* 2894 * It has found a suitable FCF record that is not 2895 * the same as in-use FCF record, unregister the 2896 * in-use FCF record, replace the in-use FCF record 2897 * with the new FCF record, mark FCF fast failover 2898 * completed, and then start register the new FCF 2899 * record. 2900 */ 2901 2902 /* Unregister the current in-use FCF record */ 2903 lpfc_unregister_fcf(phba); 2904 2905 /* Replace in-use record with the new record */ 2906 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2907 "2842 Replace in-use FCF (x%x) " 2908 "with failover FCF (x%x)\n", 2909 phba->fcf.current_rec.fcf_indx, 2910 phba->fcf.failover_rec.fcf_indx); 2911 memcpy(&phba->fcf.current_rec, 2912 &phba->fcf.failover_rec, 2913 sizeof(struct lpfc_fcf_rec)); 2914 /* 2915 * Mark the fast FCF failover rediscovery completed 2916 * and the start of the first round of the roundrobin 2917 * FCF failover. 2918 */ 2919 spin_lock_irq(&phba->hbalock); 2920 phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; 2921 spin_unlock_irq(&phba->hbalock); 2922 /* Register to the new FCF record */ 2923 lpfc_register_fcf(phba); 2924 } else { 2925 /* 2926 * In case of transaction period to fast FCF failover, 2927 * do nothing when search to the end of the FCF table. 2928 */ 2929 if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) || 2930 (phba->fcf.fcf_flag & FCF_REDISC_PEND)) 2931 return; 2932 2933 if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV && 2934 phba->fcf.fcf_flag & FCF_IN_USE) { 2935 /* 2936 * In case the current in-use FCF record no 2937 * longer existed during FCF discovery that 2938 * was not triggered by fast FCF failover 2939 * process, treat it as fast FCF failover. 
2940 */ 2941 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2942 "2841 In-use FCF record (x%x) " 2943 "not reported, entering fast " 2944 "FCF failover mode scanning.\n", 2945 phba->fcf.current_rec.fcf_indx); 2946 spin_lock_irq(&phba->hbalock); 2947 phba->fcf.fcf_flag |= FCF_REDISC_FOV; 2948 spin_unlock_irq(&phba->hbalock); 2949 lpfc_sli4_fcf_scan_read_fcf_rec(phba, 2950 LPFC_FCOE_FCF_GET_FIRST); 2951 return; 2952 } 2953 /* Register to the new FCF record */ 2954 lpfc_register_fcf(phba); 2955 } 2956 } else 2957 lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index); 2958 return; 2959 2960 out: 2961 lpfc_sli4_mbox_cmd_free(phba, mboxq); 2962 lpfc_register_fcf(phba); 2963 2964 return; 2965 } 2966 2967 /** 2968 * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf roundrobin read_fcf mbox cmpl hdler 2969 * @phba: pointer to lpfc hba data structure. 2970 * @mboxq: pointer to mailbox object. 2971 * 2972 * This is the callback function for FLOGI failure roundrobin FCF failover 2973 * read FCF record mailbox command from the eligible FCF record bmask for 2974 * performing the failover. If the FCF read back is not valid/available, it 2975 * fails through to retrying FLOGI to the currently registered FCF again. 2976 * Otherwise, if the FCF read back is valid and available, it will set the 2977 * newly read FCF record to the failover FCF record, unregister currently 2978 * registered FCF record, copy the failover FCF record to the current 2979 * FCF record, and then register the current FCF record before proceeding 2980 * to trying FLOGI on the new failover FCF. 2981 */ 2982 void 2983 lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 2984 { 2985 struct fcf_record *new_fcf_record; 2986 uint32_t boot_flag, addr_mode; 2987 uint16_t next_fcf_index, fcf_index; 2988 uint16_t current_fcf_index; 2989 uint16_t vlan_id = LPFC_FCOE_NULL_VID; 2990 int rc; 2991 2992 /* If link state is not up, stop the roundrobin failover process */ 2993 if (phba->link_state < LPFC_LINK_UP) { 2994 spin_lock_irq(&phba->hbalock); 2995 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 2996 phba->hba_flag &= ~FCF_RR_INPROG; 2997 spin_unlock_irq(&phba->hbalock); 2998 goto out; 2999 } 3000 3001 /* Parse the FCF record from the non-embedded mailbox command */ 3002 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq, 3003 &next_fcf_index); 3004 if (!new_fcf_record) { 3005 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 3006 "2766 Mailbox command READ_FCF_RECORD " 3007 "failed to retrieve a FCF record. 
" 3008 "hba_flg x%x fcf_flg x%x\n", phba->hba_flag, 3009 phba->fcf.fcf_flag); 3010 lpfc_unregister_fcf_rescan(phba); 3011 goto out; 3012 } 3013 3014 /* Get the needed parameters from FCF record */ 3015 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, 3016 &addr_mode, &vlan_id); 3017 3018 /* Log the FCF record information if turned on */ 3019 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id, 3020 next_fcf_index); 3021 3022 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); 3023 if (!rc) { 3024 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 3025 "2848 Remove ineligible FCF (x%x) from " 3026 "from roundrobin bmask\n", fcf_index); 3027 /* Clear roundrobin bmask bit for ineligible FCF */ 3028 lpfc_sli4_fcf_rr_index_clear(phba, fcf_index); 3029 /* Perform next round of roundrobin FCF failover */ 3030 fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba); 3031 rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index); 3032 if (rc) 3033 goto out; 3034 goto error_out; 3035 } 3036 3037 if (fcf_index == phba->fcf.current_rec.fcf_indx) { 3038 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 3039 "2760 Perform FLOGI roundrobin FCF failover: " 3040 "FCF (x%x) back to FCF (x%x)\n", 3041 phba->fcf.current_rec.fcf_indx, fcf_index); 3042 /* Wait 500 ms before retrying FLOGI to current FCF */ 3043 msleep(500); 3044 lpfc_issue_init_vfi(phba->pport); 3045 goto out; 3046 } 3047 3048 /* Upload new FCF record to the failover FCF record */ 3049 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 3050 "2834 Update current FCF (x%x) with new FCF (x%x)\n", 3051 phba->fcf.failover_rec.fcf_indx, fcf_index); 3052 spin_lock_irq(&phba->hbalock); 3053 __lpfc_update_fcf_record(phba, &phba->fcf.failover_rec, 3054 new_fcf_record, addr_mode, vlan_id, 3055 (boot_flag ? BOOT_ENABLE : 0)); 3056 spin_unlock_irq(&phba->hbalock); 3057 3058 current_fcf_index = phba->fcf.current_rec.fcf_indx; 3059 3060 /* Unregister the current in-use FCF record */ 3061 lpfc_unregister_fcf(phba); 3062 3063 /* Replace in-use record with the new record */ 3064 memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec, 3065 sizeof(struct lpfc_fcf_rec)); 3066 3067 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 3068 "2783 Perform FLOGI roundrobin FCF failover: FCF " 3069 "(x%x) to FCF (x%x)\n", current_fcf_index, fcf_index); 3070 3071 error_out: 3072 lpfc_register_fcf(phba); 3073 out: 3074 lpfc_sli4_mbox_cmd_free(phba, mboxq); 3075 } 3076 3077 /** 3078 * lpfc_mbx_cmpl_read_fcf_rec - read fcf completion handler. 3079 * @phba: pointer to lpfc hba data structure. 3080 * @mboxq: pointer to mailbox object. 3081 * 3082 * This is the callback function of read FCF record mailbox command for 3083 * updating the eligible FCF bmask for FLOGI failure roundrobin FCF 3084 * failover when a new FCF event happened. If the FCF read back is 3085 * valid/available and it passes the connection list check, it updates 3086 * the bmask for the eligible FCF record for roundrobin failover. 
3087 */ 3088 void 3089 lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 3090 { 3091 struct fcf_record *new_fcf_record; 3092 uint32_t boot_flag, addr_mode; 3093 uint16_t fcf_index, next_fcf_index; 3094 uint16_t vlan_id = LPFC_FCOE_NULL_VID; 3095 int rc; 3096 3097 /* If link state is not up, no need to proceed */ 3098 if (phba->link_state < LPFC_LINK_UP) 3099 goto out; 3100 3101 /* If FCF discovery period is over, no need to proceed */ 3102 if (!(phba->fcf.fcf_flag & FCF_DISCOVERY)) 3103 goto out; 3104 3105 /* Parse the FCF record from the non-embedded mailbox command */ 3106 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq, 3107 &next_fcf_index); 3108 if (!new_fcf_record) { 3109 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 3110 "2767 Mailbox command READ_FCF_RECORD " 3111 "failed to retrieve a FCF record.\n"); 3112 goto out; 3113 } 3114 3115 /* Check the connection list for eligibility */ 3116 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, 3117 &addr_mode, &vlan_id); 3118 3119 /* Log the FCF record information if turned on */ 3120 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id, 3121 next_fcf_index); 3122 3123 if (!rc) 3124 goto out; 3125 3126 /* Update the eligible FCF record index bmask */ 3127 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); 3128 3129 rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, new_fcf_record); 3130 3131 out: 3132 lpfc_sli4_mbox_cmd_free(phba, mboxq); 3133 } 3134 3135 /** 3136 * lpfc_init_vfi_cmpl - Completion handler for init_vfi mbox command. 3137 * @phba: pointer to lpfc hba data structure. 3138 * @mboxq: pointer to mailbox data structure. 3139 * 3140 * This function handles completion of init vfi mailbox command. 3141 */ 3142 static void 3143 lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 3144 { 3145 struct lpfc_vport *vport = mboxq->vport; 3146 3147 /* 3148 * VFI not supported on interface type 0, just do the flogi 3149 * Also continue if the VFI is in use - just use the same one. 3150 */ 3151 if (mboxq->u.mb.mbxStatus && 3152 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 3153 LPFC_SLI_INTF_IF_TYPE_0) && 3154 mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) { 3155 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 3156 "2891 Init VFI mailbox failed 0x%x\n", 3157 mboxq->u.mb.mbxStatus); 3158 mempool_free(mboxq, phba->mbox_mem_pool); 3159 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 3160 return; 3161 } 3162 3163 lpfc_initial_flogi(vport); 3164 mempool_free(mboxq, phba->mbox_mem_pool); 3165 return; 3166 } 3167 3168 /** 3169 * lpfc_issue_init_vfi - Issue init_vfi mailbox command. 3170 * @vport: pointer to lpfc_vport data structure. 3171 * 3172 * This function issue a init_vfi mailbox command to initialize the VFI and 3173 * VPI for the physical port. 
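 * Completion is handled by lpfc_init_vfi_cmpl(), which issues the initial
 * FLOGI for the port.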
3174 */ 3175 void 3176 lpfc_issue_init_vfi(struct lpfc_vport *vport) 3177 { 3178 LPFC_MBOXQ_t *mboxq; 3179 int rc; 3180 struct lpfc_hba *phba = vport->phba; 3181 3182 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3183 if (!mboxq) { 3184 lpfc_printf_vlog(vport, KERN_ERR, 3185 LOG_TRACE_EVENT, "2892 Failed to allocate " 3186 "init_vfi mailbox\n"); 3187 return; 3188 } 3189 lpfc_init_vfi(mboxq, vport); 3190 mboxq->mbox_cmpl = lpfc_init_vfi_cmpl; 3191 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 3192 if (rc == MBX_NOT_FINISHED) { 3193 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 3194 "2893 Failed to issue init_vfi mailbox\n"); 3195 mempool_free(mboxq, vport->phba->mbox_mem_pool); 3196 } 3197 } 3198 3199 /** 3200 * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command. 3201 * @phba: pointer to lpfc hba data structure. 3202 * @mboxq: pointer to mailbox data structure. 3203 * 3204 * This function handles completion of init vpi mailbox command. 3205 */ 3206 void 3207 lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 3208 { 3209 struct lpfc_vport *vport = mboxq->vport; 3210 struct lpfc_nodelist *ndlp; 3211 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3212 3213 if (mboxq->u.mb.mbxStatus) { 3214 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 3215 "2609 Init VPI mailbox failed 0x%x\n", 3216 mboxq->u.mb.mbxStatus); 3217 mempool_free(mboxq, phba->mbox_mem_pool); 3218 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 3219 return; 3220 } 3221 spin_lock_irq(shost->host_lock); 3222 vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI; 3223 spin_unlock_irq(shost->host_lock); 3224 3225 /* If this port is physical port or FDISC is done, do reg_vpi */ 3226 if ((phba->pport == vport) || (vport->port_state == LPFC_FDISC)) { 3227 ndlp = lpfc_findnode_did(vport, Fabric_DID); 3228 if (!ndlp) 3229 lpfc_printf_vlog(vport, KERN_ERR, 3230 LOG_TRACE_EVENT, 3231 "2731 Cannot find fabric " 3232 "controller node\n"); 3233 else 3234 lpfc_register_new_vport(phba, vport, ndlp); 3235 mempool_free(mboxq, phba->mbox_mem_pool); 3236 return; 3237 } 3238 3239 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) 3240 lpfc_initial_fdisc(vport); 3241 else { 3242 lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP); 3243 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 3244 "2606 No NPIV Fabric support\n"); 3245 } 3246 mempool_free(mboxq, phba->mbox_mem_pool); 3247 return; 3248 } 3249 3250 /** 3251 * lpfc_issue_init_vpi - Issue init_vpi mailbox command. 3252 * @vport: pointer to lpfc_vport data structure. 3253 * 3254 * This function issue a init_vpi mailbox command to initialize 3255 * VPI for the vport. 
3256 */ 3257 void 3258 lpfc_issue_init_vpi(struct lpfc_vport *vport) 3259 { 3260 LPFC_MBOXQ_t *mboxq; 3261 int rc, vpi; 3262 3263 if ((vport->port_type != LPFC_PHYSICAL_PORT) && (!vport->vpi)) { 3264 vpi = lpfc_alloc_vpi(vport->phba); 3265 if (!vpi) { 3266 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 3267 "3303 Failed to obtain vport vpi\n"); 3268 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 3269 return; 3270 } 3271 vport->vpi = vpi; 3272 } 3273 3274 mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL); 3275 if (!mboxq) { 3276 lpfc_printf_vlog(vport, KERN_ERR, 3277 LOG_TRACE_EVENT, "2607 Failed to allocate " 3278 "init_vpi mailbox\n"); 3279 return; 3280 } 3281 lpfc_init_vpi(vport->phba, mboxq, vport->vpi); 3282 mboxq->vport = vport; 3283 mboxq->mbox_cmpl = lpfc_init_vpi_cmpl; 3284 rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT); 3285 if (rc == MBX_NOT_FINISHED) { 3286 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 3287 "2608 Failed to issue init_vpi mailbox\n"); 3288 mempool_free(mboxq, vport->phba->mbox_mem_pool); 3289 } 3290 } 3291 3292 /** 3293 * lpfc_start_fdiscs - send fdiscs for each vports on this port. 3294 * @phba: pointer to lpfc hba data structure. 3295 * 3296 * This function loops through the list of vports on the @phba and issues an 3297 * FDISC if possible. 3298 */ 3299 void 3300 lpfc_start_fdiscs(struct lpfc_hba *phba) 3301 { 3302 struct lpfc_vport **vports; 3303 int i; 3304 3305 vports = lpfc_create_vport_work_array(phba); 3306 if (vports != NULL) { 3307 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3308 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) 3309 continue; 3310 /* There are no vpi for this vport */ 3311 if (vports[i]->vpi > phba->max_vpi) { 3312 lpfc_vport_set_state(vports[i], 3313 FC_VPORT_FAILED); 3314 continue; 3315 } 3316 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 3317 lpfc_vport_set_state(vports[i], 3318 FC_VPORT_LINKDOWN); 3319 continue; 3320 } 3321 if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) { 3322 lpfc_issue_init_vpi(vports[i]); 3323 continue; 3324 } 3325 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) 3326 lpfc_initial_fdisc(vports[i]); 3327 else { 3328 lpfc_vport_set_state(vports[i], 3329 FC_VPORT_NO_FABRIC_SUPP); 3330 lpfc_printf_vlog(vports[i], KERN_ERR, 3331 LOG_TRACE_EVENT, 3332 "0259 No NPIV " 3333 "Fabric support\n"); 3334 } 3335 } 3336 } 3337 lpfc_destroy_vport_work_array(phba, vports); 3338 } 3339 3340 void 3341 lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 3342 { 3343 struct lpfc_vport *vport = mboxq->vport; 3344 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3345 3346 /* 3347 * VFI not supported for interface type 0, so ignore any mailbox 3348 * error (except VFI in use) and continue with the discovery. 
3349 */ 3350 if (mboxq->u.mb.mbxStatus && 3351 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 3352 LPFC_SLI_INTF_IF_TYPE_0) && 3353 mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) { 3354 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 3355 "2018 REG_VFI mbxStatus error x%x " 3356 "HBA state x%x\n", 3357 mboxq->u.mb.mbxStatus, vport->port_state); 3358 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 3359 /* FLOGI failed, use loop map to make discovery list */ 3360 lpfc_disc_list_loopmap(vport); 3361 /* Start discovery */ 3362 lpfc_disc_start(vport); 3363 goto out_free_mem; 3364 } 3365 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 3366 goto out_free_mem; 3367 } 3368 3369 /* If the VFI is already registered, there is nothing else to do 3370 * Unless this was a VFI update and we are in PT2PT mode, then 3371 * we should drop through to set the port state to ready. 3372 */ 3373 if (vport->fc_flag & FC_VFI_REGISTERED) 3374 if (!(phba->sli_rev == LPFC_SLI_REV4 && 3375 vport->fc_flag & FC_PT2PT)) 3376 goto out_free_mem; 3377 3378 /* The VPI is implicitly registered when the VFI is registered */ 3379 spin_lock_irq(shost->host_lock); 3380 vport->vpi_state |= LPFC_VPI_REGISTERED; 3381 vport->fc_flag |= FC_VFI_REGISTERED; 3382 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 3383 vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI; 3384 spin_unlock_irq(shost->host_lock); 3385 3386 /* In case SLI4 FC loopback test, we are ready */ 3387 if ((phba->sli_rev == LPFC_SLI_REV4) && 3388 (phba->link_flag & LS_LOOPBACK_MODE)) { 3389 phba->link_state = LPFC_HBA_READY; 3390 goto out_free_mem; 3391 } 3392 3393 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, 3394 "3313 cmpl reg vfi port_state:%x fc_flag:%x myDid:%x " 3395 "alpacnt:%d LinkState:%x topology:%x\n", 3396 vport->port_state, vport->fc_flag, vport->fc_myDID, 3397 vport->phba->alpa_map[0], 3398 phba->link_state, phba->fc_topology); 3399 3400 if (vport->port_state == LPFC_FABRIC_CFG_LINK) { 3401 /* 3402 * For private loop or for NPort pt2pt, 3403 * just start discovery and we are done. 
3404 */ 3405 if ((vport->fc_flag & FC_PT2PT) || 3406 ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) && 3407 !(vport->fc_flag & FC_PUBLIC_LOOP))) { 3408 3409 /* Use loop map to make discovery list */ 3410 lpfc_disc_list_loopmap(vport); 3411 /* Start discovery */ 3412 if (vport->fc_flag & FC_PT2PT) 3413 vport->port_state = LPFC_VPORT_READY; 3414 else 3415 lpfc_disc_start(vport); 3416 } else { 3417 lpfc_start_fdiscs(phba); 3418 lpfc_do_scr_ns_plogi(phba, vport); 3419 } 3420 } 3421 3422 out_free_mem: 3423 lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED); 3424 } 3425 3426 static void 3427 lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 3428 { 3429 MAILBOX_t *mb = &pmb->u.mb; 3430 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)pmb->ctx_buf; 3431 struct lpfc_vport *vport = pmb->vport; 3432 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3433 struct serv_parm *sp = &vport->fc_sparam; 3434 uint32_t ed_tov; 3435 3436 /* Check for error */ 3437 if (mb->mbxStatus) { 3438 /* READ_SPARAM mbox error <mbxStatus> state <hba_state> */ 3439 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 3440 "0319 READ_SPARAM mbxStatus error x%x " 3441 "hba state x%x>\n", 3442 mb->mbxStatus, vport->port_state); 3443 lpfc_linkdown(phba); 3444 goto out; 3445 } 3446 3447 memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt, 3448 sizeof (struct serv_parm)); 3449 3450 ed_tov = be32_to_cpu(sp->cmn.e_d_tov); 3451 if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */ 3452 ed_tov = (ed_tov + 999999) / 1000000; 3453 3454 phba->fc_edtov = ed_tov; 3455 phba->fc_ratov = (2 * ed_tov) / 1000; 3456 if (phba->fc_ratov < FF_DEF_RATOV) { 3457 /* RA_TOV should be atleast 10sec for initial flogi */ 3458 phba->fc_ratov = FF_DEF_RATOV; 3459 } 3460 3461 lpfc_update_vport_wwn(vport); 3462 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 3463 if (vport->port_type == LPFC_PHYSICAL_PORT) { 3464 memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn)); 3465 memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn)); 3466 } 3467 3468 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); 3469 3470 /* Check if sending the FLOGI is being deferred to after we get 3471 * up to date CSPs from MBX_READ_SPARAM. 
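	 * If it was deferred, issue the FLOGI now and clear HBA_DEFER_FLOGI.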
3472 */ 3473 if (phba->hba_flag & HBA_DEFER_FLOGI) { 3474 lpfc_initial_flogi(vport); 3475 phba->hba_flag &= ~HBA_DEFER_FLOGI; 3476 } 3477 return; 3478 3479 out: 3480 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); 3481 lpfc_issue_clear_la(phba, vport); 3482 } 3483 3484 static void 3485 lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) 3486 { 3487 struct lpfc_vport *vport = phba->pport; 3488 LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL; 3489 struct Scsi_Host *shost; 3490 int i; 3491 int rc; 3492 struct fcf_record *fcf_record; 3493 uint32_t fc_flags = 0; 3494 unsigned long iflags; 3495 3496 spin_lock_irqsave(&phba->hbalock, iflags); 3497 phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la); 3498 3499 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 3500 switch (bf_get(lpfc_mbx_read_top_link_spd, la)) { 3501 case LPFC_LINK_SPEED_1GHZ: 3502 case LPFC_LINK_SPEED_2GHZ: 3503 case LPFC_LINK_SPEED_4GHZ: 3504 case LPFC_LINK_SPEED_8GHZ: 3505 case LPFC_LINK_SPEED_10GHZ: 3506 case LPFC_LINK_SPEED_16GHZ: 3507 case LPFC_LINK_SPEED_32GHZ: 3508 case LPFC_LINK_SPEED_64GHZ: 3509 case LPFC_LINK_SPEED_128GHZ: 3510 case LPFC_LINK_SPEED_256GHZ: 3511 break; 3512 default: 3513 phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN; 3514 break; 3515 } 3516 } 3517 3518 if (phba->fc_topology && 3519 phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) { 3520 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 3521 "3314 Toplogy changed was 0x%x is 0x%x\n", 3522 phba->fc_topology, 3523 bf_get(lpfc_mbx_read_top_topology, la)); 3524 phba->fc_topology_changed = 1; 3525 } 3526 3527 phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la); 3528 phba->link_flag &= ~(LS_NPIV_FAB_SUPPORTED | LS_CT_VEN_RPA); 3529 3530 shost = lpfc_shost_from_vport(vport); 3531 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 3532 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED; 3533 3534 /* if npiv is enabled and this adapter supports npiv log 3535 * a message that npiv is not supported in this topology 3536 */ 3537 if (phba->cfg_enable_npiv && phba->max_vpi) 3538 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 3539 "1309 Link Up Event npiv not supported in loop " 3540 "topology\n"); 3541 /* Get Loop Map information */ 3542 if (bf_get(lpfc_mbx_read_top_il, la)) 3543 fc_flags |= FC_LBIT; 3544 3545 vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la); 3546 i = la->lilpBde64.tus.f.bdeSize; 3547 3548 if (i == 0) { 3549 phba->alpa_map[0] = 0; 3550 } else { 3551 if (vport->cfg_log_verbose & LOG_LINK_EVENT) { 3552 int numalpa, j, k; 3553 union { 3554 uint8_t pamap[16]; 3555 struct { 3556 uint32_t wd1; 3557 uint32_t wd2; 3558 uint32_t wd3; 3559 uint32_t wd4; 3560 } pa; 3561 } un; 3562 numalpa = phba->alpa_map[0]; 3563 j = 0; 3564 while (j < numalpa) { 3565 memset(un.pamap, 0, 16); 3566 for (k = 1; j < numalpa; k++) { 3567 un.pamap[k - 1] = 3568 phba->alpa_map[j + 1]; 3569 j++; 3570 if (k == 16) 3571 break; 3572 } 3573 /* Link Up Event ALPA map */ 3574 lpfc_printf_log(phba, 3575 KERN_WARNING, 3576 LOG_LINK_EVENT, 3577 "1304 Link Up Event " 3578 "ALPA map Data: x%x " 3579 "x%x x%x x%x\n", 3580 un.pa.wd1, un.pa.wd2, 3581 un.pa.wd3, un.pa.wd4); 3582 } 3583 } 3584 } 3585 } else { 3586 if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) { 3587 if (phba->max_vpi && phba->cfg_enable_npiv && 3588 (phba->sli_rev >= LPFC_SLI_REV3)) 3589 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; 3590 } 3591 vport->fc_myDID = phba->fc_pref_DID; 3592 fc_flags |= FC_LBIT; 3593 } 3594 spin_unlock_irqrestore(&phba->hbalock, iflags); 3595 3596 if 
(fc_flags) { 3597 spin_lock_irqsave(shost->host_lock, iflags); 3598 vport->fc_flag |= fc_flags; 3599 spin_unlock_irqrestore(shost->host_lock, iflags); 3600 } 3601 3602 lpfc_linkup(phba); 3603 sparam_mbox = NULL; 3604 3605 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3606 if (!sparam_mbox) 3607 goto out; 3608 3609 rc = lpfc_read_sparam(phba, sparam_mbox, 0); 3610 if (rc) { 3611 mempool_free(sparam_mbox, phba->mbox_mem_pool); 3612 goto out; 3613 } 3614 sparam_mbox->vport = vport; 3615 sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam; 3616 rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT); 3617 if (rc == MBX_NOT_FINISHED) { 3618 lpfc_mbox_rsrc_cleanup(phba, sparam_mbox, MBOX_THD_UNLOCKED); 3619 goto out; 3620 } 3621 3622 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 3623 cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3624 if (!cfglink_mbox) 3625 goto out; 3626 vport->port_state = LPFC_LOCAL_CFG_LINK; 3627 lpfc_config_link(phba, cfglink_mbox); 3628 cfglink_mbox->vport = vport; 3629 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link; 3630 rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT); 3631 if (rc == MBX_NOT_FINISHED) { 3632 mempool_free(cfglink_mbox, phba->mbox_mem_pool); 3633 goto out; 3634 } 3635 } else { 3636 vport->port_state = LPFC_VPORT_UNKNOWN; 3637 /* 3638 * Add the driver's default FCF record at FCF index 0 now. This 3639 * is phase 1 implementation that support FCF index 0 and driver 3640 * defaults. 3641 */ 3642 if (!(phba->hba_flag & HBA_FIP_SUPPORT)) { 3643 fcf_record = kzalloc(sizeof(struct fcf_record), 3644 GFP_KERNEL); 3645 if (unlikely(!fcf_record)) { 3646 lpfc_printf_log(phba, KERN_ERR, 3647 LOG_TRACE_EVENT, 3648 "2554 Could not allocate memory for " 3649 "fcf record\n"); 3650 rc = -ENODEV; 3651 goto out; 3652 } 3653 3654 lpfc_sli4_build_dflt_fcf_record(phba, fcf_record, 3655 LPFC_FCOE_FCF_DEF_INDEX); 3656 rc = lpfc_sli4_add_fcf_record(phba, fcf_record); 3657 if (unlikely(rc)) { 3658 lpfc_printf_log(phba, KERN_ERR, 3659 LOG_TRACE_EVENT, 3660 "2013 Could not manually add FCF " 3661 "record 0, status %d\n", rc); 3662 rc = -ENODEV; 3663 kfree(fcf_record); 3664 goto out; 3665 } 3666 kfree(fcf_record); 3667 } 3668 /* 3669 * The driver is expected to do FIP/FCF. Call the port 3670 * and get the FCF Table. 
3671 */ 3672 spin_lock_irqsave(&phba->hbalock, iflags); 3673 if (phba->hba_flag & FCF_TS_INPROG) { 3674 spin_unlock_irqrestore(&phba->hbalock, iflags); 3675 return; 3676 } 3677 /* This is the initial FCF discovery scan */ 3678 phba->fcf.fcf_flag |= FCF_INIT_DISC; 3679 spin_unlock_irqrestore(&phba->hbalock, iflags); 3680 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 3681 "2778 Start FCF table scan at linkup\n"); 3682 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, 3683 LPFC_FCOE_FCF_GET_FIRST); 3684 if (rc) { 3685 spin_lock_irqsave(&phba->hbalock, iflags); 3686 phba->fcf.fcf_flag &= ~FCF_INIT_DISC; 3687 spin_unlock_irqrestore(&phba->hbalock, iflags); 3688 goto out; 3689 } 3690 /* Reset FCF roundrobin bmask for new discovery */ 3691 lpfc_sli4_clear_fcf_rr_bmask(phba); 3692 } 3693 3694 /* Prepare for LINK up registrations */ 3695 memset(phba->os_host_name, 0, sizeof(phba->os_host_name)); 3696 scnprintf(phba->os_host_name, sizeof(phba->os_host_name), "%s", 3697 init_utsname()->nodename); 3698 return; 3699 out: 3700 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 3701 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 3702 "0263 Discovery Mailbox error: state: 0x%x : x%px x%px\n", 3703 vport->port_state, sparam_mbox, cfglink_mbox); 3704 lpfc_issue_clear_la(phba, vport); 3705 return; 3706 } 3707 3708 static void 3709 lpfc_enable_la(struct lpfc_hba *phba) 3710 { 3711 uint32_t control; 3712 struct lpfc_sli *psli = &phba->sli; 3713 spin_lock_irq(&phba->hbalock); 3714 psli->sli_flag |= LPFC_PROCESS_LA; 3715 if (phba->sli_rev <= LPFC_SLI_REV3) { 3716 control = readl(phba->HCregaddr); 3717 control |= HC_LAINT_ENA; 3718 writel(control, phba->HCregaddr); 3719 readl(phba->HCregaddr); /* flush */ 3720 } 3721 spin_unlock_irq(&phba->hbalock); 3722 } 3723 3724 static void 3725 lpfc_mbx_issue_link_down(struct lpfc_hba *phba) 3726 { 3727 lpfc_linkdown(phba); 3728 lpfc_enable_la(phba); 3729 lpfc_unregister_unused_fcf(phba); 3730 /* turn on Link Attention interrupts - no CLEAR_LA needed */ 3731 } 3732 3733 3734 /* 3735 * This routine handles processing a READ_TOPOLOGY mailbox 3736 * command upon completion. It is setup in the LPFC_MBOXQ 3737 * as the completion routine when the command is 3738 * handed off to the SLI layer. SLI4 only. 
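 *
 * On a link-up attention this falls through to lpfc_mbx_process_link_up();
 * on a link-down or unexpected-WWPN attention it calls
 * lpfc_mbx_issue_link_down() to take the link down and re-enable link
 * attention interrupts.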
3739 */ 3740 void 3741 lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 3742 { 3743 struct lpfc_vport *vport = pmb->vport; 3744 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3745 struct lpfc_mbx_read_top *la; 3746 struct lpfc_sli_ring *pring; 3747 MAILBOX_t *mb = &pmb->u.mb; 3748 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf); 3749 uint8_t attn_type; 3750 unsigned long iflags; 3751 3752 /* Unblock ELS traffic */ 3753 pring = lpfc_phba_elsring(phba); 3754 if (pring) 3755 pring->flag &= ~LPFC_STOP_IOCB_EVENT; 3756 3757 /* Check for error */ 3758 if (mb->mbxStatus) { 3759 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 3760 "1307 READ_LA mbox error x%x state x%x\n", 3761 mb->mbxStatus, vport->port_state); 3762 lpfc_mbx_issue_link_down(phba); 3763 phba->link_state = LPFC_HBA_ERROR; 3764 goto lpfc_mbx_cmpl_read_topology_free_mbuf; 3765 } 3766 3767 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop; 3768 attn_type = bf_get(lpfc_mbx_read_top_att_type, la); 3769 3770 memcpy(&phba->alpa_map[0], mp->virt, 128); 3771 3772 spin_lock_irqsave(shost->host_lock, iflags); 3773 if (bf_get(lpfc_mbx_read_top_pb, la)) 3774 vport->fc_flag |= FC_BYPASSED_MODE; 3775 else 3776 vport->fc_flag &= ~FC_BYPASSED_MODE; 3777 spin_unlock_irqrestore(shost->host_lock, iflags); 3778 3779 if (phba->fc_eventTag <= la->eventTag) { 3780 phba->fc_stat.LinkMultiEvent++; 3781 if (attn_type == LPFC_ATT_LINK_UP) 3782 if (phba->fc_eventTag != 0) 3783 lpfc_linkdown(phba); 3784 } 3785 3786 phba->fc_eventTag = la->eventTag; 3787 phba->link_events++; 3788 if (attn_type == LPFC_ATT_LINK_UP) { 3789 phba->fc_stat.LinkUp++; 3790 if (phba->link_flag & LS_LOOPBACK_MODE) { 3791 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 3792 "1306 Link Up Event in loop back mode " 3793 "x%x received Data: x%x x%x x%x x%x\n", 3794 la->eventTag, phba->fc_eventTag, 3795 bf_get(lpfc_mbx_read_top_alpa_granted, 3796 la), 3797 bf_get(lpfc_mbx_read_top_link_spd, la), 3798 phba->alpa_map[0]); 3799 } else { 3800 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 3801 "1303 Link Up Event x%x received " 3802 "Data: x%x x%x x%x x%x x%x\n", 3803 la->eventTag, phba->fc_eventTag, 3804 bf_get(lpfc_mbx_read_top_alpa_granted, 3805 la), 3806 bf_get(lpfc_mbx_read_top_link_spd, la), 3807 phba->alpa_map[0], 3808 bf_get(lpfc_mbx_read_top_fa, la)); 3809 } 3810 lpfc_mbx_process_link_up(phba, la); 3811 3812 if (phba->cmf_active_mode != LPFC_CFG_OFF) 3813 lpfc_cmf_signal_init(phba); 3814 3815 if (phba->lmt & LMT_64Gb) 3816 lpfc_read_lds_params(phba); 3817 3818 } else if (attn_type == LPFC_ATT_LINK_DOWN || 3819 attn_type == LPFC_ATT_UNEXP_WWPN) { 3820 phba->fc_stat.LinkDown++; 3821 if (phba->link_flag & LS_LOOPBACK_MODE) 3822 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 3823 "1308 Link Down Event in loop back mode " 3824 "x%x received " 3825 "Data: x%x x%x x%x\n", 3826 la->eventTag, phba->fc_eventTag, 3827 phba->pport->port_state, vport->fc_flag); 3828 else if (attn_type == LPFC_ATT_UNEXP_WWPN) 3829 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 3830 "1313 Link Down Unexpected FA WWPN Event x%x " 3831 "received Data: x%x x%x x%x x%x\n", 3832 la->eventTag, phba->fc_eventTag, 3833 phba->pport->port_state, vport->fc_flag, 3834 bf_get(lpfc_mbx_read_top_fa, la)); 3835 else 3836 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 3837 "1305 Link Down Event x%x received " 3838 "Data: x%x x%x x%x x%x\n", 3839 la->eventTag, phba->fc_eventTag, 3840 phba->pport->port_state, vport->fc_flag, 3841 bf_get(lpfc_mbx_read_top_fa, la)); 3842 
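		/* Link went down (or an unexpected WWPN was reported).
		 * Take the port link-down, re-enable link attention
		 * interrupts and release any FCF that is no longer in use.
		 */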
		lpfc_mbx_issue_link_down(phba);
	}

	if ((phba->sli_rev < LPFC_SLI_REV4) &&
	    bf_get(lpfc_mbx_read_top_fa, la))
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"1311 fa %d\n",
				bf_get(lpfc_mbx_read_top_fa, la));

lpfc_mbx_cmpl_read_topology_free_mbuf:
	lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
}

/*
 * This routine handles processing a REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;

	/* The driver calls the state machine with the pmb pointer
	 * but wants to make sure a stale ctx_buf isn't acted on.
	 * The ctx_buf is restored later and cleaned up.
	 */
	pmb->ctx_buf = NULL;
	pmb->ctx_ndlp = NULL;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NODE | LOG_DISCOVERY,
			 "0002 rpi:%x DID:%x flg:%x %d x%px\n",
			 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
			 kref_read(&ndlp->kref),
			 ndlp);
	if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
		ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;

	if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL ||
	    ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
		/* We received an RSCN after issuing this mbox REG_LOGIN.
		 * We may have cycled back through the discovery state and
		 * be back at the REG_LOGIN state, so this mbox needs to be
		 * ignored because another REG_LOGIN is in progress.
		 */
		spin_lock_irq(&ndlp->lock);
		ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
		spin_unlock_irq(&ndlp->lock);

		/*
		 * We cannot leave the RPI registered because
		 * if we go thru discovery again for this ndlp
		 * a subsequent REG_RPI will fail.
		 */
		ndlp->nlp_flag |= NLP_RPI_REGISTERED;
		lpfc_unreg_rpi(vport, ndlp);
	}

	/* Call state machine */
	lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
	pmb->ctx_buf = mp;
	lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);

	/* decrement the node reference count held for this callback
	 * function.
3913 */ 3914 lpfc_nlp_put(ndlp); 3915 3916 return; 3917 } 3918 3919 static void 3920 lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 3921 { 3922 MAILBOX_t *mb = &pmb->u.mb; 3923 struct lpfc_vport *vport = pmb->vport; 3924 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3925 3926 switch (mb->mbxStatus) { 3927 case 0x0011: 3928 case 0x0020: 3929 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 3930 "0911 cmpl_unreg_vpi, mb status = 0x%x\n", 3931 mb->mbxStatus); 3932 break; 3933 /* If VPI is busy, reset the HBA */ 3934 case 0x9700: 3935 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 3936 "2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n", 3937 vport->vpi, mb->mbxStatus); 3938 if (!(phba->pport->load_flag & FC_UNLOADING)) 3939 lpfc_workq_post_event(phba, NULL, NULL, 3940 LPFC_EVT_RESET_HBA); 3941 } 3942 spin_lock_irq(shost->host_lock); 3943 vport->vpi_state &= ~LPFC_VPI_REGISTERED; 3944 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 3945 spin_unlock_irq(shost->host_lock); 3946 mempool_free(pmb, phba->mbox_mem_pool); 3947 lpfc_cleanup_vports_rrqs(vport, NULL); 3948 /* 3949 * This shost reference might have been taken at the beginning of 3950 * lpfc_vport_delete() 3951 */ 3952 if ((vport->load_flag & FC_UNLOADING) && (vport != phba->pport)) 3953 scsi_host_put(shost); 3954 } 3955 3956 int 3957 lpfc_mbx_unreg_vpi(struct lpfc_vport *vport) 3958 { 3959 struct lpfc_hba *phba = vport->phba; 3960 LPFC_MBOXQ_t *mbox; 3961 int rc; 3962 3963 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3964 if (!mbox) 3965 return 1; 3966 3967 lpfc_unreg_vpi(phba, vport->vpi, mbox); 3968 mbox->vport = vport; 3969 mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi; 3970 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 3971 if (rc == MBX_NOT_FINISHED) { 3972 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 3973 "1800 Could not issue unreg_vpi\n"); 3974 mempool_free(mbox, phba->mbox_mem_pool); 3975 return rc; 3976 } 3977 return 0; 3978 } 3979 3980 static void 3981 lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 3982 { 3983 struct lpfc_vport *vport = pmb->vport; 3984 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3985 MAILBOX_t *mb = &pmb->u.mb; 3986 3987 switch (mb->mbxStatus) { 3988 case 0x0011: 3989 case 0x9601: 3990 case 0x9602: 3991 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 3992 "0912 cmpl_reg_vpi, mb status = 0x%x\n", 3993 mb->mbxStatus); 3994 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 3995 spin_lock_irq(shost->host_lock); 3996 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 3997 spin_unlock_irq(shost->host_lock); 3998 vport->fc_myDID = 0; 3999 4000 if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || 4001 (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { 4002 if (phba->nvmet_support) 4003 lpfc_nvmet_update_targetport(phba); 4004 else 4005 lpfc_nvme_update_localport(vport); 4006 } 4007 goto out; 4008 } 4009 4010 spin_lock_irq(shost->host_lock); 4011 vport->vpi_state |= LPFC_VPI_REGISTERED; 4012 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 4013 spin_unlock_irq(shost->host_lock); 4014 vport->num_disc_nodes = 0; 4015 /* go thru NPR list and issue ELS PLOGIs */ 4016 if (vport->fc_npr_cnt) 4017 lpfc_els_disc_plogi(vport); 4018 4019 if (!vport->num_disc_nodes) { 4020 spin_lock_irq(shost->host_lock); 4021 vport->fc_flag &= ~FC_NDISC_ACTIVE; 4022 spin_unlock_irq(shost->host_lock); 4023 lpfc_can_disctmo(vport); 4024 } 4025 vport->port_state = LPFC_VPORT_READY; 4026 4027 out: 4028 mempool_free(pmb, phba->mbox_mem_pool); 4029 return; 4030 } 4031 4032 /** 4033 * 
lpfc_create_static_vport - Read HBA config region to create static vports. 4034 * @phba: pointer to lpfc hba data structure. 4035 * 4036 * This routine issue a DUMP mailbox command for config region 22 to get 4037 * the list of static vports to be created. The function create vports 4038 * based on the information returned from the HBA. 4039 **/ 4040 void 4041 lpfc_create_static_vport(struct lpfc_hba *phba) 4042 { 4043 LPFC_MBOXQ_t *pmb = NULL; 4044 MAILBOX_t *mb; 4045 struct static_vport_info *vport_info; 4046 int mbx_wait_rc = 0, i; 4047 struct fc_vport_identifiers vport_id; 4048 struct fc_vport *new_fc_vport; 4049 struct Scsi_Host *shost; 4050 struct lpfc_vport *vport; 4051 uint16_t offset = 0; 4052 uint8_t *vport_buff; 4053 struct lpfc_dmabuf *mp; 4054 uint32_t byte_count = 0; 4055 4056 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4057 if (!pmb) { 4058 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4059 "0542 lpfc_create_static_vport failed to" 4060 " allocate mailbox memory\n"); 4061 return; 4062 } 4063 memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); 4064 mb = &pmb->u.mb; 4065 4066 vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL); 4067 if (!vport_info) { 4068 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4069 "0543 lpfc_create_static_vport failed to" 4070 " allocate vport_info\n"); 4071 mempool_free(pmb, phba->mbox_mem_pool); 4072 return; 4073 } 4074 4075 vport_buff = (uint8_t *) vport_info; 4076 do { 4077 /* While loop iteration forces a free dma buffer from 4078 * the previous loop because the mbox is reused and 4079 * the dump routine is a single-use construct. 4080 */ 4081 if (pmb->ctx_buf) { 4082 mp = (struct lpfc_dmabuf *)pmb->ctx_buf; 4083 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4084 kfree(mp); 4085 pmb->ctx_buf = NULL; 4086 } 4087 if (lpfc_dump_static_vport(phba, pmb, offset)) 4088 goto out; 4089 4090 pmb->vport = phba->pport; 4091 mbx_wait_rc = lpfc_sli_issue_mbox_wait(phba, pmb, 4092 LPFC_MBOX_TMO); 4093 4094 if ((mbx_wait_rc != MBX_SUCCESS) || mb->mbxStatus) { 4095 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 4096 "0544 lpfc_create_static_vport failed to" 4097 " issue dump mailbox command ret 0x%x " 4098 "status 0x%x\n", 4099 mbx_wait_rc, mb->mbxStatus); 4100 goto out; 4101 } 4102 4103 if (phba->sli_rev == LPFC_SLI_REV4) { 4104 byte_count = pmb->u.mqe.un.mb_words[5]; 4105 mp = (struct lpfc_dmabuf *)pmb->ctx_buf; 4106 if (byte_count > sizeof(struct static_vport_info) - 4107 offset) 4108 byte_count = sizeof(struct static_vport_info) 4109 - offset; 4110 memcpy(vport_buff + offset, mp->virt, byte_count); 4111 offset += byte_count; 4112 } else { 4113 if (mb->un.varDmp.word_cnt > 4114 sizeof(struct static_vport_info) - offset) 4115 mb->un.varDmp.word_cnt = 4116 sizeof(struct static_vport_info) 4117 - offset; 4118 byte_count = mb->un.varDmp.word_cnt; 4119 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 4120 vport_buff + offset, 4121 byte_count); 4122 4123 offset += byte_count; 4124 } 4125 4126 } while (byte_count && 4127 offset < sizeof(struct static_vport_info)); 4128 4129 4130 if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) || 4131 ((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK) 4132 != VPORT_INFO_REV)) { 4133 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4134 "0545 lpfc_create_static_vport bad" 4135 " information header 0x%x 0x%x\n", 4136 le32_to_cpu(vport_info->signature), 4137 le32_to_cpu(vport_info->rev) & 4138 VPORT_INFO_REV_MASK); 4139 4140 goto out; 4141 } 4142 4143 shost = lpfc_shost_from_vport(phba->pport); 4144 4145 
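	/* Walk the static vport table read from config region 22 and, for
	 * every entry with a valid WWPN and WWNN, create an NPIV vport
	 * through the FC transport.
	 */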
for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) { 4146 memset(&vport_id, 0, sizeof(vport_id)); 4147 vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn); 4148 vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn); 4149 if (!vport_id.port_name || !vport_id.node_name) 4150 continue; 4151 4152 vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR; 4153 vport_id.vport_type = FC_PORTTYPE_NPIV; 4154 vport_id.disable = false; 4155 new_fc_vport = fc_vport_create(shost, 0, &vport_id); 4156 4157 if (!new_fc_vport) { 4158 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 4159 "0546 lpfc_create_static_vport failed to" 4160 " create vport\n"); 4161 continue; 4162 } 4163 4164 vport = *(struct lpfc_vport **)new_fc_vport->dd_data; 4165 vport->vport_flag |= STATIC_VPORT; 4166 } 4167 4168 out: 4169 kfree(vport_info); 4170 if (mbx_wait_rc != MBX_TIMEOUT) 4171 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); 4172 } 4173 4174 /* 4175 * This routine handles processing a Fabric REG_LOGIN mailbox 4176 * command upon completion. It is setup in the LPFC_MBOXQ 4177 * as the completion routine when the command is 4178 * handed off to the SLI layer. 4179 */ 4180 void 4181 lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 4182 { 4183 struct lpfc_vport *vport = pmb->vport; 4184 MAILBOX_t *mb = &pmb->u.mb; 4185 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; 4186 struct Scsi_Host *shost; 4187 4188 pmb->ctx_ndlp = NULL; 4189 4190 if (mb->mbxStatus) { 4191 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4192 "0258 Register Fabric login error: 0x%x\n", 4193 mb->mbxStatus); 4194 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); 4195 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 4196 /* FLOGI failed, use loop map to make discovery list */ 4197 lpfc_disc_list_loopmap(vport); 4198 4199 /* Start discovery */ 4200 lpfc_disc_start(vport); 4201 /* Decrement the reference count to ndlp after the 4202 * reference to the ndlp are done. 4203 */ 4204 lpfc_nlp_put(ndlp); 4205 return; 4206 } 4207 4208 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 4209 /* Decrement the reference count to ndlp after the reference 4210 * to the ndlp are done. 4211 */ 4212 lpfc_nlp_put(ndlp); 4213 return; 4214 } 4215 4216 if (phba->sli_rev < LPFC_SLI_REV4) 4217 ndlp->nlp_rpi = mb->un.varWords[0]; 4218 ndlp->nlp_flag |= NLP_RPI_REGISTERED; 4219 ndlp->nlp_type |= NLP_FABRIC; 4220 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 4221 4222 if (vport->port_state == LPFC_FABRIC_CFG_LINK) { 4223 /* when physical port receive logo donot start 4224 * vport discovery */ 4225 if (!(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) 4226 lpfc_start_fdiscs(phba); 4227 else { 4228 shost = lpfc_shost_from_vport(vport); 4229 spin_lock_irq(shost->host_lock); 4230 vport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG ; 4231 spin_unlock_irq(shost->host_lock); 4232 } 4233 lpfc_do_scr_ns_plogi(phba, vport); 4234 } 4235 4236 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); 4237 4238 /* Drop the reference count from the mbox at the end after 4239 * all the current reference to the ndlp have been done. 4240 */ 4241 lpfc_nlp_put(ndlp); 4242 return; 4243 } 4244 4245 /* 4246 * This routine will issue a GID_FT for each FC4 Type supported 4247 * by the driver. ALL GID_FTs must complete before discovery is started. 
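 * The routine returns the number of GID_FT requests still outstanding
 * (vport->gidft_inp); a return of 0 means no NameServer query could be
 * issued and the caller should finish discovery.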
4248 */ 4249 int 4250 lpfc_issue_gidft(struct lpfc_vport *vport) 4251 { 4252 /* Good status, issue CT Request to NameServer */ 4253 if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || 4254 (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) { 4255 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_FCP)) { 4256 /* Cannot issue NameServer FCP Query, so finish up 4257 * discovery 4258 */ 4259 lpfc_printf_vlog(vport, KERN_ERR, 4260 LOG_TRACE_EVENT, 4261 "0604 %s FC TYPE %x %s\n", 4262 "Failed to issue GID_FT to ", 4263 FC_TYPE_FCP, 4264 "Finishing discovery."); 4265 return 0; 4266 } 4267 vport->gidft_inp++; 4268 } 4269 4270 if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || 4271 (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { 4272 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_NVME)) { 4273 /* Cannot issue NameServer NVME Query, so finish up 4274 * discovery 4275 */ 4276 lpfc_printf_vlog(vport, KERN_ERR, 4277 LOG_TRACE_EVENT, 4278 "0605 %s FC_TYPE %x %s %d\n", 4279 "Failed to issue GID_FT to ", 4280 FC_TYPE_NVME, 4281 "Finishing discovery: gidftinp ", 4282 vport->gidft_inp); 4283 if (vport->gidft_inp == 0) 4284 return 0; 4285 } else 4286 vport->gidft_inp++; 4287 } 4288 return vport->gidft_inp; 4289 } 4290 4291 /** 4292 * lpfc_issue_gidpt - issue a GID_PT for all N_Ports 4293 * @vport: The virtual port for which this call is being executed. 4294 * 4295 * This routine will issue a GID_PT to get a list of all N_Ports 4296 * 4297 * Return value : 4298 * 0 - Failure to issue a GID_PT 4299 * 1 - GID_PT issued 4300 **/ 4301 int 4302 lpfc_issue_gidpt(struct lpfc_vport *vport) 4303 { 4304 /* Good status, issue CT Request to NameServer */ 4305 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_PT, 0, GID_PT_N_PORT)) { 4306 /* Cannot issue NameServer FCP Query, so finish up 4307 * discovery 4308 */ 4309 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4310 "0606 %s Port TYPE %x %s\n", 4311 "Failed to issue GID_PT to ", 4312 GID_PT_N_PORT, 4313 "Finishing discovery."); 4314 return 0; 4315 } 4316 vport->gidft_inp++; 4317 return 1; 4318 } 4319 4320 /* 4321 * This routine handles processing a NameServer REG_LOGIN mailbox 4322 * command upon completion. It is setup in the LPFC_MBOXQ 4323 * as the completion routine when the command is 4324 * handed off to the SLI layer. 4325 */ 4326 void 4327 lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 4328 { 4329 MAILBOX_t *mb = &pmb->u.mb; 4330 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; 4331 struct lpfc_vport *vport = pmb->vport; 4332 int rc; 4333 4334 pmb->ctx_ndlp = NULL; 4335 vport->gidft_inp = 0; 4336 4337 if (mb->mbxStatus) { 4338 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4339 "0260 Register NameServer error: 0x%x\n", 4340 mb->mbxStatus); 4341 4342 out: 4343 /* decrement the node reference count held for this 4344 * callback function. 4345 */ 4346 lpfc_nlp_put(ndlp); 4347 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); 4348 4349 /* If the node is not registered with the scsi or nvme 4350 * transport, remove the fabric node. The failed reg_login 4351 * is terminal. 
4352 */ 4353 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { 4354 spin_lock_irq(&ndlp->lock); 4355 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 4356 spin_unlock_irq(&ndlp->lock); 4357 lpfc_nlp_not_used(ndlp); 4358 } 4359 4360 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 4361 /* 4362 * RegLogin failed, use loop map to make discovery 4363 * list 4364 */ 4365 lpfc_disc_list_loopmap(vport); 4366 4367 /* Start discovery */ 4368 lpfc_disc_start(vport); 4369 return; 4370 } 4371 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 4372 return; 4373 } 4374 4375 if (phba->sli_rev < LPFC_SLI_REV4) 4376 ndlp->nlp_rpi = mb->un.varWords[0]; 4377 ndlp->nlp_flag |= NLP_RPI_REGISTERED; 4378 ndlp->nlp_type |= NLP_FABRIC; 4379 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 4380 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, 4381 "0003 rpi:%x DID:%x flg:%x %d x%px\n", 4382 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, 4383 kref_read(&ndlp->kref), 4384 ndlp); 4385 4386 if (vport->port_state < LPFC_VPORT_READY) { 4387 /* Link up discovery requires Fabric registration. */ 4388 lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0); 4389 lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0); 4390 lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0); 4391 lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0); 4392 4393 if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || 4394 (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) 4395 lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, FC_TYPE_FCP); 4396 4397 if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || 4398 (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) 4399 lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 4400 FC_TYPE_NVME); 4401 4402 /* Issue SCR just before NameServer GID_FT Query */ 4403 lpfc_issue_els_scr(vport, 0); 4404 4405 /* Link was bounced or a Fabric LOGO occurred. Start EDC 4406 * with initial FW values provided the congestion mode is 4407 * not off. Note that signals may or may not be supported 4408 * by the adapter but FPIN is provided by default for 1 4409 * or both missing signals support. 4410 */ 4411 if (phba->cmf_active_mode != LPFC_CFG_OFF) { 4412 phba->cgn_reg_fpin = phba->cgn_init_reg_fpin; 4413 phba->cgn_reg_signal = phba->cgn_init_reg_signal; 4414 rc = lpfc_issue_els_edc(vport, 0); 4415 lpfc_printf_log(phba, KERN_INFO, 4416 LOG_INIT | LOG_ELS | LOG_DISCOVERY, 4417 "4220 Issue EDC status x%x Data x%x\n", 4418 rc, phba->cgn_init_reg_signal); 4419 } else if (phba->lmt & LMT_64Gb) { 4420 /* may send link fault capability descriptor */ 4421 lpfc_issue_els_edc(vport, 0); 4422 } else { 4423 lpfc_issue_els_rdf(vport, 0); 4424 } 4425 } 4426 4427 vport->fc_ns_retry = 0; 4428 if (lpfc_issue_gidft(vport) == 0) 4429 goto out; 4430 4431 /* 4432 * At this point in time we may need to wait for multiple 4433 * SLI_CTNS_GID_FT CT commands to complete before we start discovery. 4434 * 4435 * decrement the node reference count held for this 4436 * callback function. 4437 */ 4438 lpfc_nlp_put(ndlp); 4439 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); 4440 return; 4441 } 4442 4443 /* 4444 * This routine handles processing a Fabric Controller REG_LOGIN mailbox 4445 * command upon completion. It is setup in the LPFC_MBOXQ 4446 * as the completion routine when the command is handed off to the SLI layer. 
4447 */ 4448 void 4449 lpfc_mbx_cmpl_fc_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 4450 { 4451 struct lpfc_vport *vport = pmb->vport; 4452 MAILBOX_t *mb = &pmb->u.mb; 4453 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; 4454 4455 pmb->ctx_ndlp = NULL; 4456 if (mb->mbxStatus) { 4457 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4458 "0933 %s: Register FC login error: 0x%x\n", 4459 __func__, mb->mbxStatus); 4460 goto out; 4461 } 4462 4463 lpfc_check_nlp_post_devloss(vport, ndlp); 4464 4465 if (phba->sli_rev < LPFC_SLI_REV4) 4466 ndlp->nlp_rpi = mb->un.varWords[0]; 4467 4468 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 4469 "0934 %s: Complete FC x%x RegLogin rpi x%x ste x%x\n", 4470 __func__, ndlp->nlp_DID, ndlp->nlp_rpi, 4471 ndlp->nlp_state); 4472 4473 ndlp->nlp_flag |= NLP_RPI_REGISTERED; 4474 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 4475 ndlp->nlp_type |= NLP_FABRIC; 4476 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 4477 4478 out: 4479 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); 4480 4481 /* Drop the reference count from the mbox at the end after 4482 * all the current reference to the ndlp have been done. 4483 */ 4484 lpfc_nlp_put(ndlp); 4485 } 4486 4487 static void 4488 lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 4489 { 4490 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4491 struct fc_rport *rport; 4492 struct lpfc_rport_data *rdata; 4493 struct fc_rport_identifiers rport_ids; 4494 struct lpfc_hba *phba = vport->phba; 4495 unsigned long flags; 4496 4497 if (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME) 4498 return; 4499 4500 /* Remote port has reappeared. Re-register w/ FC transport */ 4501 rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn); 4502 rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn); 4503 rport_ids.port_id = ndlp->nlp_DID; 4504 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; 4505 4506 4507 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, 4508 "rport add: did:x%x flg:x%x type x%x", 4509 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); 4510 4511 /* Don't add the remote port if unloading. */ 4512 if (vport->load_flag & FC_UNLOADING) 4513 return; 4514 4515 /* 4516 * Disassociate any older association between this ndlp and rport 4517 */ 4518 if (ndlp->rport) { 4519 rdata = ndlp->rport->dd_data; 4520 rdata->pnode = NULL; 4521 } 4522 4523 ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids); 4524 if (!rport) { 4525 dev_printk(KERN_WARNING, &phba->pcidev->dev, 4526 "Warning: fc_remote_port_add failed\n"); 4527 return; 4528 } 4529 4530 /* Successful port add. Complete initializing node data */ 4531 rport->maxframe_size = ndlp->nlp_maxframe; 4532 rport->supported_classes = ndlp->nlp_class_sup; 4533 rdata = rport->dd_data; 4534 rdata->pnode = lpfc_nlp_get(ndlp); 4535 if (!rdata->pnode) { 4536 dev_warn(&phba->pcidev->dev, 4537 "Warning - node ref failed. 
Unreg rport\n"); 4538 fc_remote_port_delete(rport); 4539 ndlp->rport = NULL; 4540 return; 4541 } 4542 4543 spin_lock_irqsave(&ndlp->lock, flags); 4544 ndlp->fc4_xpt_flags |= SCSI_XPT_REGD; 4545 spin_unlock_irqrestore(&ndlp->lock, flags); 4546 4547 if (ndlp->nlp_type & NLP_FCP_TARGET) 4548 rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET; 4549 if (ndlp->nlp_type & NLP_FCP_INITIATOR) 4550 rport_ids.roles |= FC_PORT_ROLE_FCP_INITIATOR; 4551 if (ndlp->nlp_type & NLP_NVME_INITIATOR) 4552 rport_ids.roles |= FC_PORT_ROLE_NVME_INITIATOR; 4553 if (ndlp->nlp_type & NLP_NVME_TARGET) 4554 rport_ids.roles |= FC_PORT_ROLE_NVME_TARGET; 4555 if (ndlp->nlp_type & NLP_NVME_DISCOVERY) 4556 rport_ids.roles |= FC_PORT_ROLE_NVME_DISCOVERY; 4557 4558 if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN) 4559 fc_remote_port_rolechg(rport, rport_ids.roles); 4560 4561 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, 4562 "3183 %s rport x%px DID x%x, role x%x refcnt %d\n", 4563 __func__, rport, rport->port_id, rport->roles, 4564 kref_read(&ndlp->kref)); 4565 4566 if ((rport->scsi_target_id != -1) && 4567 (rport->scsi_target_id < LPFC_MAX_TARGET)) { 4568 ndlp->nlp_sid = rport->scsi_target_id; 4569 } 4570 4571 return; 4572 } 4573 4574 static void 4575 lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp) 4576 { 4577 struct fc_rport *rport = ndlp->rport; 4578 struct lpfc_vport *vport = ndlp->vport; 4579 4580 if (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME) 4581 return; 4582 4583 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, 4584 "rport delete: did:x%x flg:x%x type x%x", 4585 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); 4586 4587 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 4588 "3184 rport unregister x%06x, rport x%px " 4589 "xptflg x%x refcnt %d\n", 4590 ndlp->nlp_DID, rport, ndlp->fc4_xpt_flags, 4591 kref_read(&ndlp->kref)); 4592 4593 fc_remote_port_delete(rport); 4594 lpfc_nlp_put(ndlp); 4595 } 4596 4597 static void 4598 lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count) 4599 { 4600 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4601 unsigned long iflags; 4602 4603 spin_lock_irqsave(shost->host_lock, iflags); 4604 switch (state) { 4605 case NLP_STE_UNUSED_NODE: 4606 vport->fc_unused_cnt += count; 4607 break; 4608 case NLP_STE_PLOGI_ISSUE: 4609 vport->fc_plogi_cnt += count; 4610 break; 4611 case NLP_STE_ADISC_ISSUE: 4612 vport->fc_adisc_cnt += count; 4613 break; 4614 case NLP_STE_REG_LOGIN_ISSUE: 4615 vport->fc_reglogin_cnt += count; 4616 break; 4617 case NLP_STE_PRLI_ISSUE: 4618 vport->fc_prli_cnt += count; 4619 break; 4620 case NLP_STE_UNMAPPED_NODE: 4621 vport->fc_unmap_cnt += count; 4622 break; 4623 case NLP_STE_MAPPED_NODE: 4624 vport->fc_map_cnt += count; 4625 break; 4626 case NLP_STE_NPR_NODE: 4627 if (vport->fc_npr_cnt == 0 && count == -1) 4628 vport->fc_npr_cnt = 0; 4629 else 4630 vport->fc_npr_cnt += count; 4631 break; 4632 } 4633 spin_unlock_irqrestore(shost->host_lock, iflags); 4634 } 4635 4636 /* Register a node with backend if not already done */ 4637 void 4638 lpfc_nlp_reg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 4639 { 4640 unsigned long iflags; 4641 4642 lpfc_check_nlp_post_devloss(vport, ndlp); 4643 4644 spin_lock_irqsave(&ndlp->lock, iflags); 4645 if (ndlp->fc4_xpt_flags & NLP_XPT_REGD) { 4646 /* Already registered with backend, trigger rescan */ 4647 spin_unlock_irqrestore(&ndlp->lock, iflags); 4648 4649 if (ndlp->fc4_xpt_flags & NVME_XPT_REGD && 4650 ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY)) { 4651 lpfc_nvme_rescan_port(vport, ndlp); 4652 } 
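		/* Already registered with the backend; the NVME rescan above
		 * (when applicable) is all that is needed.
		 */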
4653 return; 4654 } 4655 4656 ndlp->fc4_xpt_flags |= NLP_XPT_REGD; 4657 spin_unlock_irqrestore(&ndlp->lock, iflags); 4658 4659 if (lpfc_valid_xpt_node(ndlp)) { 4660 vport->phba->nport_event_cnt++; 4661 /* 4662 * Tell the fc transport about the port, if we haven't 4663 * already. If we have, and it's a scsi entity, be 4664 */ 4665 lpfc_register_remote_port(vport, ndlp); 4666 } 4667 4668 /* We are done if we do not have any NVME remote node */ 4669 if (!(ndlp->nlp_fc4_type & NLP_FC4_NVME)) 4670 return; 4671 4672 /* Notify the NVME transport of this new rport. */ 4673 if (vport->phba->sli_rev >= LPFC_SLI_REV4 && 4674 ndlp->nlp_fc4_type & NLP_FC4_NVME) { 4675 if (vport->phba->nvmet_support == 0) { 4676 /* Register this rport with the transport. 4677 * Only NVME Target Rports are registered with 4678 * the transport. 4679 */ 4680 if (ndlp->nlp_type & NLP_NVME_TARGET) { 4681 vport->phba->nport_event_cnt++; 4682 lpfc_nvme_register_port(vport, ndlp); 4683 } 4684 } else { 4685 /* Just take an NDLP ref count since the 4686 * target does not register rports. 4687 */ 4688 lpfc_nlp_get(ndlp); 4689 } 4690 } 4691 } 4692 4693 /* Unregister a node with backend if not already done */ 4694 void 4695 lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 4696 { 4697 unsigned long iflags; 4698 4699 spin_lock_irqsave(&ndlp->lock, iflags); 4700 if (!(ndlp->fc4_xpt_flags & NLP_XPT_REGD)) { 4701 spin_unlock_irqrestore(&ndlp->lock, iflags); 4702 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, 4703 "0999 %s Not regd: ndlp x%px rport x%px DID " 4704 "x%x FLG x%x XPT x%x\n", 4705 __func__, ndlp, ndlp->rport, ndlp->nlp_DID, 4706 ndlp->nlp_flag, ndlp->fc4_xpt_flags); 4707 return; 4708 } 4709 4710 ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD; 4711 spin_unlock_irqrestore(&ndlp->lock, iflags); 4712 4713 if (ndlp->rport && 4714 ndlp->fc4_xpt_flags & SCSI_XPT_REGD) { 4715 vport->phba->nport_event_cnt++; 4716 lpfc_unregister_remote_port(ndlp); 4717 } else if (!ndlp->rport) { 4718 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, 4719 "1999 %s NDLP in devloss x%px DID x%x FLG x%x" 4720 " XPT x%x refcnt %d\n", 4721 __func__, ndlp, ndlp->nlp_DID, ndlp->nlp_flag, 4722 ndlp->fc4_xpt_flags, 4723 kref_read(&ndlp->kref)); 4724 } 4725 4726 if (ndlp->fc4_xpt_flags & NVME_XPT_REGD) { 4727 vport->phba->nport_event_cnt++; 4728 if (vport->phba->nvmet_support == 0) { 4729 /* Start devloss if target. */ 4730 if (ndlp->nlp_type & NLP_NVME_TARGET) 4731 lpfc_nvme_unregister_port(vport, ndlp); 4732 } else { 4733 /* NVMET has no upcall. 
*/ 4734 lpfc_nlp_put(ndlp); 4735 } 4736 } 4737 4738 } 4739 4740 /* 4741 * Adisc state change handling 4742 */ 4743 static void 4744 lpfc_handle_adisc_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 4745 int new_state) 4746 { 4747 switch (new_state) { 4748 /* 4749 * Any state to ADISC_ISSUE 4750 * Do nothing, adisc cmpl handling will trigger state changes 4751 */ 4752 case NLP_STE_ADISC_ISSUE: 4753 break; 4754 4755 /* 4756 * ADISC_ISSUE to mapped states 4757 * Trigger a registration with backend, it will be nop if 4758 * already registered 4759 */ 4760 case NLP_STE_UNMAPPED_NODE: 4761 ndlp->nlp_type |= NLP_FC_NODE; 4762 fallthrough; 4763 case NLP_STE_MAPPED_NODE: 4764 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; 4765 lpfc_nlp_reg_node(vport, ndlp); 4766 break; 4767 4768 /* 4769 * ADISC_ISSUE to non-mapped states 4770 * We are moving from ADISC_ISSUE to a non-mapped state because 4771 * ADISC failed, we would have skipped unregistering with 4772 * backend, attempt it now 4773 */ 4774 case NLP_STE_NPR_NODE: 4775 ndlp->nlp_flag &= ~NLP_RCV_PLOGI; 4776 fallthrough; 4777 default: 4778 lpfc_nlp_unreg_node(vport, ndlp); 4779 break; 4780 } 4781 4782 } 4783 4784 static void 4785 lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 4786 int old_state, int new_state) 4787 { 4788 /* Trap ADISC changes here */ 4789 if (new_state == NLP_STE_ADISC_ISSUE || 4790 old_state == NLP_STE_ADISC_ISSUE) { 4791 lpfc_handle_adisc_state(vport, ndlp, new_state); 4792 return; 4793 } 4794 4795 if (new_state == NLP_STE_UNMAPPED_NODE) { 4796 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; 4797 ndlp->nlp_type |= NLP_FC_NODE; 4798 } 4799 if (new_state == NLP_STE_MAPPED_NODE) 4800 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; 4801 if (new_state == NLP_STE_NPR_NODE) 4802 ndlp->nlp_flag &= ~NLP_RCV_PLOGI; 4803 4804 /* Reg/Unreg for FCP and NVME Transport interface */ 4805 if ((old_state == NLP_STE_MAPPED_NODE || 4806 old_state == NLP_STE_UNMAPPED_NODE)) { 4807 /* For nodes marked for ADISC, Handle unreg in ADISC cmpl 4808 * if linkup. In linkdown do unreg_node 4809 */ 4810 if (!(ndlp->nlp_flag & NLP_NPR_ADISC) || 4811 !lpfc_is_link_up(vport->phba)) 4812 lpfc_nlp_unreg_node(vport, ndlp); 4813 } 4814 4815 if (new_state == NLP_STE_MAPPED_NODE || 4816 new_state == NLP_STE_UNMAPPED_NODE) 4817 lpfc_nlp_reg_node(vport, ndlp); 4818 4819 /* 4820 * If the node just added to Mapped list was an FCP target, 4821 * but the remote port registration failed or assigned a target 4822 * id outside the presentable range - move the node to the 4823 * Unmapped List. 
4824 */ 4825 if ((new_state == NLP_STE_MAPPED_NODE) && 4826 (ndlp->nlp_type & NLP_FCP_TARGET) && 4827 (!ndlp->rport || 4828 ndlp->rport->scsi_target_id == -1 || 4829 ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) { 4830 spin_lock_irq(&ndlp->lock); 4831 ndlp->nlp_flag |= NLP_TGT_NO_SCSIID; 4832 spin_unlock_irq(&ndlp->lock); 4833 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 4834 } 4835 } 4836 4837 static char * 4838 lpfc_nlp_state_name(char *buffer, size_t size, int state) 4839 { 4840 static char *states[] = { 4841 [NLP_STE_UNUSED_NODE] = "UNUSED", 4842 [NLP_STE_PLOGI_ISSUE] = "PLOGI", 4843 [NLP_STE_ADISC_ISSUE] = "ADISC", 4844 [NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN", 4845 [NLP_STE_PRLI_ISSUE] = "PRLI", 4846 [NLP_STE_LOGO_ISSUE] = "LOGO", 4847 [NLP_STE_UNMAPPED_NODE] = "UNMAPPED", 4848 [NLP_STE_MAPPED_NODE] = "MAPPED", 4849 [NLP_STE_NPR_NODE] = "NPR", 4850 }; 4851 4852 if (state < NLP_STE_MAX_STATE && states[state]) 4853 strlcpy(buffer, states[state], size); 4854 else 4855 snprintf(buffer, size, "unknown (%d)", state); 4856 return buffer; 4857 } 4858 4859 void 4860 lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 4861 int state) 4862 { 4863 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4864 int old_state = ndlp->nlp_state; 4865 int node_dropped = ndlp->nlp_flag & NLP_DROPPED; 4866 char name1[16], name2[16]; 4867 4868 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 4869 "0904 NPort state transition x%06x, %s -> %s\n", 4870 ndlp->nlp_DID, 4871 lpfc_nlp_state_name(name1, sizeof(name1), old_state), 4872 lpfc_nlp_state_name(name2, sizeof(name2), state)); 4873 4874 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, 4875 "node statechg did:x%x old:%d ste:%d", 4876 ndlp->nlp_DID, old_state, state); 4877 4878 if (node_dropped && old_state == NLP_STE_UNUSED_NODE && 4879 state != NLP_STE_UNUSED_NODE) { 4880 ndlp->nlp_flag &= ~NLP_DROPPED; 4881 lpfc_nlp_get(ndlp); 4882 } 4883 4884 if (old_state == NLP_STE_NPR_NODE && 4885 state != NLP_STE_NPR_NODE) 4886 lpfc_cancel_retry_delay_tmo(vport, ndlp); 4887 if (old_state == NLP_STE_UNMAPPED_NODE) { 4888 ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID; 4889 ndlp->nlp_type &= ~NLP_FC_NODE; 4890 } 4891 4892 if (list_empty(&ndlp->nlp_listp)) { 4893 spin_lock_irq(shost->host_lock); 4894 list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes); 4895 spin_unlock_irq(shost->host_lock); 4896 } else if (old_state) 4897 lpfc_nlp_counters(vport, old_state, -1); 4898 4899 ndlp->nlp_state = state; 4900 lpfc_nlp_counters(vport, state, 1); 4901 lpfc_nlp_state_cleanup(vport, ndlp, old_state, state); 4902 } 4903 4904 void 4905 lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 4906 { 4907 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4908 4909 if (list_empty(&ndlp->nlp_listp)) { 4910 spin_lock_irq(shost->host_lock); 4911 list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes); 4912 spin_unlock_irq(shost->host_lock); 4913 } 4914 } 4915 4916 void 4917 lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 4918 { 4919 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4920 4921 lpfc_cancel_retry_delay_tmo(vport, ndlp); 4922 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp)) 4923 lpfc_nlp_counters(vport, ndlp->nlp_state, -1); 4924 spin_lock_irq(shost->host_lock); 4925 list_del_init(&ndlp->nlp_listp); 4926 spin_unlock_irq(shost->host_lock); 4927 lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state, 4928 NLP_STE_UNUSED_NODE); 4929 } 4930 4931 /** 4932 * lpfc_initialize_node - Initialize all fields of node object 
 * @vport: Pointer to Virtual Port object.
 * @ndlp: Pointer to FC node object.
 * @did: FC_ID of the node.
 *
 * This function is always called when a node object needs to be initialized.
 * It initializes all the fields of the node object. Although the reference
 * to phba from @ndlp can be obtained indirectly through its reference to
 * @vport, a direct reference to phba is taken here by @ndlp. This is done
 * because the life-span of @ndlp may extend beyond the existence of @vport,
 * as the final release of @ndlp is determined by its reference count, and
 * operations on @ndlp need the reference to phba.
 **/
static inline void
lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		     uint32_t did)
{
	INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
	INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
	timer_setup(&ndlp->nlp_delayfunc, lpfc_els_retry_delay, 0);
	INIT_LIST_HEAD(&ndlp->recovery_evt.evt_listp);

	ndlp->nlp_DID = did;
	ndlp->vport = vport;
	ndlp->phba = vport->phba;
	ndlp->nlp_sid = NLP_NO_SID;
	ndlp->nlp_fc4_type = NLP_FC4_NONE;
	kref_init(&ndlp->kref);
	atomic_set(&ndlp->cmd_pending, 0);
	ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
	ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
}

void
lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	/*
	 * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should
	 * be used if we wish to issue the "last" lpfc_nlp_put() to remove
	 * the ndlp from the vport. The ndlp is marked as UNUSED on the list
	 * until ALL other outstanding threads have completed. We check
	 * that the ndlp is not already in the UNUSED state before we proceed.
4974 */ 4975 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 4976 return; 4977 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE); 4978 ndlp->nlp_flag |= NLP_DROPPED; 4979 if (vport->phba->sli_rev == LPFC_SLI_REV4) { 4980 lpfc_cleanup_vports_rrqs(vport, ndlp); 4981 lpfc_unreg_rpi(vport, ndlp); 4982 } 4983 4984 lpfc_nlp_put(ndlp); 4985 return; 4986 } 4987 4988 /* 4989 * Start / ReStart rescue timer for Discovery / RSCN handling 4990 */ 4991 void 4992 lpfc_set_disctmo(struct lpfc_vport *vport) 4993 { 4994 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4995 struct lpfc_hba *phba = vport->phba; 4996 uint32_t tmo; 4997 4998 if (vport->port_state == LPFC_LOCAL_CFG_LINK) { 4999 /* For FAN, timeout should be greater than edtov */ 5000 tmo = (((phba->fc_edtov + 999) / 1000) + 1); 5001 } else { 5002 /* Normal discovery timeout should be > than ELS/CT timeout 5003 * FC spec states we need 3 * ratov for CT requests 5004 */ 5005 tmo = ((phba->fc_ratov * 3) + 3); 5006 } 5007 5008 5009 if (!timer_pending(&vport->fc_disctmo)) { 5010 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 5011 "set disc timer: tmo:x%x state:x%x flg:x%x", 5012 tmo, vport->port_state, vport->fc_flag); 5013 } 5014 5015 mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo)); 5016 spin_lock_irq(shost->host_lock); 5017 vport->fc_flag |= FC_DISC_TMO; 5018 spin_unlock_irq(shost->host_lock); 5019 5020 /* Start Discovery Timer state <hba_state> */ 5021 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 5022 "0247 Start Discovery Timer state x%x " 5023 "Data: x%x x%lx x%x x%x\n", 5024 vport->port_state, tmo, 5025 (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt, 5026 vport->fc_adisc_cnt); 5027 5028 return; 5029 } 5030 5031 /* 5032 * Cancel rescue timer for Discovery / RSCN handling 5033 */ 5034 int 5035 lpfc_can_disctmo(struct lpfc_vport *vport) 5036 { 5037 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 5038 unsigned long iflags; 5039 5040 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 5041 "can disc timer: state:x%x rtry:x%x flg:x%x", 5042 vport->port_state, vport->fc_ns_retry, vport->fc_flag); 5043 5044 /* Turn off discovery timer if its running */ 5045 if (vport->fc_flag & FC_DISC_TMO || 5046 timer_pending(&vport->fc_disctmo)) { 5047 spin_lock_irqsave(shost->host_lock, iflags); 5048 vport->fc_flag &= ~FC_DISC_TMO; 5049 spin_unlock_irqrestore(shost->host_lock, iflags); 5050 del_timer_sync(&vport->fc_disctmo); 5051 spin_lock_irqsave(&vport->work_port_lock, iflags); 5052 vport->work_port_events &= ~WORKER_DISC_TMO; 5053 spin_unlock_irqrestore(&vport->work_port_lock, iflags); 5054 } 5055 5056 /* Cancel Discovery Timer state <hba_state> */ 5057 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 5058 "0248 Cancel Discovery Timer state x%x " 5059 "Data: x%x x%x x%x\n", 5060 vport->port_state, vport->fc_flag, 5061 vport->fc_plogi_cnt, vport->fc_adisc_cnt); 5062 return 0; 5063 } 5064 5065 /* 5066 * Check specified ring for outstanding IOCB on the SLI queue 5067 * Return true if iocb matches the specified nport 5068 */ 5069 int 5070 lpfc_check_sli_ndlp(struct lpfc_hba *phba, 5071 struct lpfc_sli_ring *pring, 5072 struct lpfc_iocbq *iocb, 5073 struct lpfc_nodelist *ndlp) 5074 { 5075 struct lpfc_vport *vport = ndlp->vport; 5076 u8 ulp_command; 5077 u16 ulp_context; 5078 u32 remote_id; 5079 5080 if (iocb->vport != vport) 5081 return 0; 5082 5083 ulp_command = get_job_cmnd(phba, iocb); 5084 ulp_context = get_job_ulpcontext(phba, iocb); 5085 remote_id = get_job_els_rsp64_did(phba, iocb); 5086 5087 if 
(pring->ringno == LPFC_ELS_RING) { 5088 switch (ulp_command) { 5089 case CMD_GEN_REQUEST64_CR: 5090 if (iocb->ndlp == ndlp) 5091 return 1; 5092 fallthrough; 5093 case CMD_ELS_REQUEST64_CR: 5094 if (remote_id == ndlp->nlp_DID) 5095 return 1; 5096 fallthrough; 5097 case CMD_XMIT_ELS_RSP64_CX: 5098 if (iocb->ndlp == ndlp) 5099 return 1; 5100 } 5101 } else if (pring->ringno == LPFC_FCP_RING) { 5102 /* Skip match check if waiting to relogin to FCP target */ 5103 if ((ndlp->nlp_type & NLP_FCP_TARGET) && 5104 (ndlp->nlp_flag & NLP_DELAY_TMO)) { 5105 return 0; 5106 } 5107 if (ulp_context == ndlp->nlp_rpi) 5108 return 1; 5109 } 5110 return 0; 5111 } 5112 5113 static void 5114 __lpfc_dequeue_nport_iocbs(struct lpfc_hba *phba, 5115 struct lpfc_nodelist *ndlp, struct lpfc_sli_ring *pring, 5116 struct list_head *dequeue_list) 5117 { 5118 struct lpfc_iocbq *iocb, *next_iocb; 5119 5120 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { 5121 /* Check to see if iocb matches the nport */ 5122 if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) 5123 /* match, dequeue */ 5124 list_move_tail(&iocb->list, dequeue_list); 5125 } 5126 } 5127 5128 static void 5129 lpfc_sli3_dequeue_nport_iocbs(struct lpfc_hba *phba, 5130 struct lpfc_nodelist *ndlp, struct list_head *dequeue_list) 5131 { 5132 struct lpfc_sli *psli = &phba->sli; 5133 uint32_t i; 5134 5135 spin_lock_irq(&phba->hbalock); 5136 for (i = 0; i < psli->num_rings; i++) 5137 __lpfc_dequeue_nport_iocbs(phba, ndlp, &psli->sli3_ring[i], 5138 dequeue_list); 5139 spin_unlock_irq(&phba->hbalock); 5140 } 5141 5142 static void 5143 lpfc_sli4_dequeue_nport_iocbs(struct lpfc_hba *phba, 5144 struct lpfc_nodelist *ndlp, struct list_head *dequeue_list) 5145 { 5146 struct lpfc_sli_ring *pring; 5147 struct lpfc_queue *qp = NULL; 5148 5149 spin_lock_irq(&phba->hbalock); 5150 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 5151 pring = qp->pring; 5152 if (!pring) 5153 continue; 5154 spin_lock(&pring->ring_lock); 5155 __lpfc_dequeue_nport_iocbs(phba, ndlp, pring, dequeue_list); 5156 spin_unlock(&pring->ring_lock); 5157 } 5158 spin_unlock_irq(&phba->hbalock); 5159 } 5160 5161 /* 5162 * Free resources / clean up outstanding I/Os 5163 * associated with nlp_rpi in the LPFC_NODELIST entry. 5164 */ 5165 static int 5166 lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) 5167 { 5168 LIST_HEAD(completions); 5169 5170 lpfc_fabric_abort_nport(ndlp); 5171 5172 /* 5173 * Everything that matches on txcmplq will be returned 5174 * by firmware with a no rpi error. 5175 */ 5176 if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { 5177 if (phba->sli_rev != LPFC_SLI_REV4) 5178 lpfc_sli3_dequeue_nport_iocbs(phba, ndlp, &completions); 5179 else 5180 lpfc_sli4_dequeue_nport_iocbs(phba, ndlp, &completions); 5181 } 5182 5183 /* Cancel all the IOCBs from the completions list */ 5184 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 5185 IOERR_SLI_ABORTED); 5186 5187 return 0; 5188 } 5189 5190 /** 5191 * lpfc_nlp_logo_unreg - Unreg mailbox completion handler before LOGO 5192 * @phba: Pointer to HBA context object. 5193 * @pmb: Pointer to mailbox object. 5194 * 5195 * This function will issue an ELS LOGO command after completing 5196 * the UNREG_RPI. 
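 * It also handles a PLOGI that was deferred while the UNREG_RPI was
 * outstanding (NLP_UNREG_INP with a pending nlp_defer_did) by issuing
 * the deferred PLOGI here.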
5197 **/ 5198 static void 5199 lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 5200 { 5201 struct lpfc_vport *vport = pmb->vport; 5202 struct lpfc_nodelist *ndlp; 5203 5204 ndlp = (struct lpfc_nodelist *)(pmb->ctx_ndlp); 5205 if (!ndlp) 5206 return; 5207 lpfc_issue_els_logo(vport, ndlp, 0); 5208 5209 /* Check to see if there are any deferred events to process */ 5210 if ((ndlp->nlp_flag & NLP_UNREG_INP) && 5211 (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) { 5212 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 5213 "1434 UNREG cmpl deferred logo x%x " 5214 "on NPort x%x Data: x%x x%px\n", 5215 ndlp->nlp_rpi, ndlp->nlp_DID, 5216 ndlp->nlp_defer_did, ndlp); 5217 5218 ndlp->nlp_flag &= ~NLP_UNREG_INP; 5219 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING; 5220 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 5221 } else { 5222 /* NLP_RELEASE_RPI is only set for SLI4 ports. */ 5223 if (ndlp->nlp_flag & NLP_RELEASE_RPI) { 5224 lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi); 5225 spin_lock_irq(&ndlp->lock); 5226 ndlp->nlp_flag &= ~NLP_RELEASE_RPI; 5227 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; 5228 spin_unlock_irq(&ndlp->lock); 5229 } 5230 spin_lock_irq(&ndlp->lock); 5231 ndlp->nlp_flag &= ~NLP_UNREG_INP; 5232 spin_unlock_irq(&ndlp->lock); 5233 } 5234 5235 /* The node has an outstanding reference for the unreg. Now 5236 * that the LOGO action and cleanup are finished, release 5237 * resources. 5238 */ 5239 lpfc_nlp_put(ndlp); 5240 mempool_free(pmb, phba->mbox_mem_pool); 5241 } 5242 5243 /* 5244 * Sets the mailbox completion handler to be used for the 5245 * unreg_rpi command. The handler varies based on the state of 5246 * the port and what will be happening to the rpi next. 5247 */ 5248 static void 5249 lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport, 5250 struct lpfc_nodelist *ndlp, LPFC_MBOXQ_t *mbox) 5251 { 5252 unsigned long iflags; 5253 5254 /* Driver always gets a reference on the mailbox job 5255 * in support of async jobs. 5256 */ 5257 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 5258 if (!mbox->ctx_ndlp) 5259 return; 5260 5261 if (ndlp->nlp_flag & NLP_ISSUE_LOGO) { 5262 mbox->mbox_cmpl = lpfc_nlp_logo_unreg; 5263 5264 } else if (phba->sli_rev == LPFC_SLI_REV4 && 5265 (!(vport->load_flag & FC_UNLOADING)) && 5266 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= 5267 LPFC_SLI_INTF_IF_TYPE_2) && 5268 (kref_read(&ndlp->kref) > 0)) { 5269 mbox->mbox_cmpl = lpfc_sli4_unreg_rpi_cmpl_clr; 5270 } else { 5271 if (vport->load_flag & FC_UNLOADING) { 5272 if (phba->sli_rev == LPFC_SLI_REV4) { 5273 spin_lock_irqsave(&ndlp->lock, iflags); 5274 ndlp->nlp_flag |= NLP_RELEASE_RPI; 5275 spin_unlock_irqrestore(&ndlp->lock, iflags); 5276 } 5277 } 5278 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 5279 } 5280 } 5281 5282 /* 5283 * Free rpi associated with LPFC_NODELIST entry. 5284 * This routine is called from lpfc_freenode(), when we are removing 5285 * a LPFC_NODELIST entry. It is also called if the driver initiates a 5286 * LOGO that completes successfully, and we are waiting to PLOGI back 5287 * to the remote NPort. In addition, it is called after we receive 5288 * and unsolicated ELS cmd, send back a rsp, the rsp completes and 5289 * we are waiting to PLOGI back to the remote NPort. 
5290 */ 5291 int 5292 lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 5293 { 5294 struct lpfc_hba *phba = vport->phba; 5295 LPFC_MBOXQ_t *mbox; 5296 int rc, acc_plogi = 1; 5297 uint16_t rpi; 5298 5299 if (ndlp->nlp_flag & NLP_RPI_REGISTERED || 5300 ndlp->nlp_flag & NLP_REG_LOGIN_SEND) { 5301 if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) 5302 lpfc_printf_vlog(vport, KERN_INFO, 5303 LOG_NODE | LOG_DISCOVERY, 5304 "3366 RPI x%x needs to be " 5305 "unregistered nlp_flag x%x " 5306 "did x%x\n", 5307 ndlp->nlp_rpi, ndlp->nlp_flag, 5308 ndlp->nlp_DID); 5309 5310 /* If there is already an UNREG in progress for this ndlp, 5311 * no need to queue up another one. 5312 */ 5313 if (ndlp->nlp_flag & NLP_UNREG_INP) { 5314 lpfc_printf_vlog(vport, KERN_INFO, 5315 LOG_NODE | LOG_DISCOVERY, 5316 "1436 unreg_rpi SKIP UNREG x%x on " 5317 "NPort x%x deferred x%x flg x%x " 5318 "Data: x%px\n", 5319 ndlp->nlp_rpi, ndlp->nlp_DID, 5320 ndlp->nlp_defer_did, 5321 ndlp->nlp_flag, ndlp); 5322 goto out; 5323 } 5324 5325 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5326 if (mbox) { 5327 /* SLI4 ports require the physical rpi value. */ 5328 rpi = ndlp->nlp_rpi; 5329 if (phba->sli_rev == LPFC_SLI_REV4) 5330 rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; 5331 5332 lpfc_unreg_login(phba, vport->vpi, rpi, mbox); 5333 mbox->vport = vport; 5334 lpfc_set_unreg_login_mbx_cmpl(phba, vport, ndlp, mbox); 5335 if (!mbox->ctx_ndlp) { 5336 mempool_free(mbox, phba->mbox_mem_pool); 5337 return 1; 5338 } 5339 5340 if (mbox->mbox_cmpl == lpfc_sli4_unreg_rpi_cmpl_clr) 5341 /* 5342 * accept PLOGIs after unreg_rpi_cmpl 5343 */ 5344 acc_plogi = 0; 5345 if (((ndlp->nlp_DID & Fabric_DID_MASK) != 5346 Fabric_DID_MASK) && 5347 (!(vport->fc_flag & FC_OFFLINE_MODE))) 5348 ndlp->nlp_flag |= NLP_UNREG_INP; 5349 5350 lpfc_printf_vlog(vport, KERN_INFO, 5351 LOG_NODE | LOG_DISCOVERY, 5352 "1433 unreg_rpi UNREG x%x on " 5353 "NPort x%x deferred flg x%x " 5354 "Data:x%px\n", 5355 ndlp->nlp_rpi, ndlp->nlp_DID, 5356 ndlp->nlp_flag, ndlp); 5357 5358 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 5359 if (rc == MBX_NOT_FINISHED) { 5360 ndlp->nlp_flag &= ~NLP_UNREG_INP; 5361 mempool_free(mbox, phba->mbox_mem_pool); 5362 acc_plogi = 1; 5363 lpfc_nlp_put(ndlp); 5364 } 5365 } else { 5366 lpfc_printf_vlog(vport, KERN_INFO, 5367 LOG_NODE | LOG_DISCOVERY, 5368 "1444 Failed to allocate mempool " 5369 "unreg_rpi UNREG x%x, " 5370 "DID x%x, flag x%x, " 5371 "ndlp x%px\n", 5372 ndlp->nlp_rpi, ndlp->nlp_DID, 5373 ndlp->nlp_flag, ndlp); 5374 5375 /* Because mempool_alloc failed, we 5376 * will issue a LOGO here and keep the rpi alive if 5377 * not unloading. 5378 */ 5379 if (!(vport->load_flag & FC_UNLOADING)) { 5380 ndlp->nlp_flag &= ~NLP_UNREG_INP; 5381 lpfc_issue_els_logo(vport, ndlp, 0); 5382 ndlp->nlp_prev_state = ndlp->nlp_state; 5383 lpfc_nlp_set_state(vport, ndlp, 5384 NLP_STE_NPR_NODE); 5385 } 5386 5387 return 1; 5388 } 5389 lpfc_no_rpi(phba, ndlp); 5390 out: 5391 if (phba->sli_rev != LPFC_SLI_REV4) 5392 ndlp->nlp_rpi = 0; 5393 ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; 5394 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 5395 if (acc_plogi) 5396 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 5397 return 1; 5398 } 5399 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 5400 return 0; 5401 } 5402 5403 /** 5404 * lpfc_unreg_hba_rpis - Unregister rpis registered to the hba. 5405 * @phba: pointer to lpfc hba data structure. 5406 * 5407 * This routine is invoked to unregister all the currently registered RPIs 5408 * to the HBA. 
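 * The host lock is dropped around each lpfc_unreg_rpi() call because the
 * mailbox allocation performed there may sleep.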
5409 **/ 5410 void 5411 lpfc_unreg_hba_rpis(struct lpfc_hba *phba) 5412 { 5413 struct lpfc_vport **vports; 5414 struct lpfc_nodelist *ndlp; 5415 struct Scsi_Host *shost; 5416 int i; 5417 5418 vports = lpfc_create_vport_work_array(phba); 5419 if (!vports) { 5420 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5421 "2884 Vport array allocation failed \n"); 5422 return; 5423 } 5424 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 5425 shost = lpfc_shost_from_vport(vports[i]); 5426 spin_lock_irq(shost->host_lock); 5427 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) { 5428 if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { 5429 /* The mempool_alloc might sleep */ 5430 spin_unlock_irq(shost->host_lock); 5431 lpfc_unreg_rpi(vports[i], ndlp); 5432 spin_lock_irq(shost->host_lock); 5433 } 5434 } 5435 spin_unlock_irq(shost->host_lock); 5436 } 5437 lpfc_destroy_vport_work_array(phba, vports); 5438 } 5439 5440 void 5441 lpfc_unreg_all_rpis(struct lpfc_vport *vport) 5442 { 5443 struct lpfc_hba *phba = vport->phba; 5444 LPFC_MBOXQ_t *mbox; 5445 int rc; 5446 5447 if (phba->sli_rev == LPFC_SLI_REV4) { 5448 lpfc_sli4_unreg_all_rpis(vport); 5449 return; 5450 } 5451 5452 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5453 if (mbox) { 5454 lpfc_unreg_login(phba, vport->vpi, LPFC_UNREG_ALL_RPIS_VPORT, 5455 mbox); 5456 mbox->vport = vport; 5457 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 5458 mbox->ctx_ndlp = NULL; 5459 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); 5460 if (rc != MBX_TIMEOUT) 5461 mempool_free(mbox, phba->mbox_mem_pool); 5462 5463 if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED)) 5464 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5465 "1836 Could not issue " 5466 "unreg_login(all_rpis) status %d\n", 5467 rc); 5468 } 5469 } 5470 5471 void 5472 lpfc_unreg_default_rpis(struct lpfc_vport *vport) 5473 { 5474 struct lpfc_hba *phba = vport->phba; 5475 LPFC_MBOXQ_t *mbox; 5476 int rc; 5477 5478 /* Unreg DID is an SLI3 operation. */ 5479 if (phba->sli_rev > LPFC_SLI_REV3) 5480 return; 5481 5482 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5483 if (mbox) { 5484 lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS, 5485 mbox); 5486 mbox->vport = vport; 5487 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 5488 mbox->ctx_ndlp = NULL; 5489 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); 5490 if (rc != MBX_TIMEOUT) 5491 mempool_free(mbox, phba->mbox_mem_pool); 5492 5493 if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED)) 5494 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5495 "1815 Could not issue " 5496 "unreg_did (default rpis) status %d\n", 5497 rc); 5498 } 5499 } 5500 5501 /* 5502 * Free resources associated with LPFC_NODELIST entry 5503 * so it can be freed. 
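 * Any REG_LOGIN mailbox commands that still reference the node (active,
 * completed but not yet processed, or queued) are detached from it first,
 * and any outstanding ELS transactions are aborted.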
5504 */ 5505 static int 5506 lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 5507 { 5508 struct lpfc_hba *phba = vport->phba; 5509 LPFC_MBOXQ_t *mb, *nextmb; 5510 5511 /* Cleanup node for NPort <nlp_DID> */ 5512 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 5513 "0900 Cleanup node for NPort x%x " 5514 "Data: x%x x%x x%x\n", 5515 ndlp->nlp_DID, ndlp->nlp_flag, 5516 ndlp->nlp_state, ndlp->nlp_rpi); 5517 lpfc_dequeue_node(vport, ndlp); 5518 5519 /* Don't need to clean up REG_LOGIN64 cmds for Default RPI cleanup */ 5520 5521 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ 5522 if ((mb = phba->sli.mbox_active)) { 5523 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && 5524 !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) && 5525 (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) { 5526 mb->ctx_ndlp = NULL; 5527 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 5528 } 5529 } 5530 5531 spin_lock_irq(&phba->hbalock); 5532 /* Cleanup REG_LOGIN completions which are not yet processed */ 5533 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) { 5534 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) || 5535 (mb->mbox_flag & LPFC_MBX_IMED_UNREG) || 5536 (ndlp != (struct lpfc_nodelist *)mb->ctx_ndlp)) 5537 continue; 5538 5539 mb->ctx_ndlp = NULL; 5540 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 5541 } 5542 5543 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 5544 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && 5545 !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) && 5546 (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) { 5547 list_del(&mb->list); 5548 lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_LOCKED); 5549 5550 /* Don't invoke lpfc_nlp_put. The driver is in 5551 * lpfc_nlp_release context. 5552 */ 5553 } 5554 } 5555 spin_unlock_irq(&phba->hbalock); 5556 5557 lpfc_els_abort(phba, ndlp); 5558 5559 spin_lock_irq(&ndlp->lock); 5560 ndlp->nlp_flag &= ~NLP_DELAY_TMO; 5561 spin_unlock_irq(&ndlp->lock); 5562 5563 ndlp->nlp_last_elscmd = 0; 5564 del_timer_sync(&ndlp->nlp_delayfunc); 5565 5566 list_del_init(&ndlp->els_retry_evt.evt_listp); 5567 list_del_init(&ndlp->dev_loss_evt.evt_listp); 5568 list_del_init(&ndlp->recovery_evt.evt_listp); 5569 lpfc_cleanup_vports_rrqs(vport, ndlp); 5570 5571 if (phba->sli_rev == LPFC_SLI_REV4) 5572 ndlp->nlp_flag |= NLP_RELEASE_RPI; 5573 5574 return 0; 5575 } 5576 5577 static int 5578 lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 5579 uint32_t did) 5580 { 5581 D_ID mydid, ndlpdid, matchdid; 5582 5583 if (did == Bcast_DID) 5584 return 0; 5585 5586 /* First check for Direct match */ 5587 if (ndlp->nlp_DID == did) 5588 return 1; 5589 5590 /* Next check for area/domain identically equals 0 match */ 5591 mydid.un.word = vport->fc_myDID; 5592 if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) { 5593 return 0; 5594 } 5595 5596 matchdid.un.word = did; 5597 ndlpdid.un.word = ndlp->nlp_DID; 5598 if (matchdid.un.b.id == ndlpdid.un.b.id) { 5599 if ((mydid.un.b.domain == matchdid.un.b.domain) && 5600 (mydid.un.b.area == matchdid.un.b.area)) { 5601 /* This code is supposed to match the ID 5602 * for a private loop device that is 5603 * connect to fl_port. 
But we need to 5604 * check that the port did not just go 5605 * from pt2pt to fabric or we could end 5606 * up matching ndlp->nlp_DID 000001 to 5607 * fabric DID 0x20101 5608 */ 5609 if ((ndlpdid.un.b.domain == 0) && 5610 (ndlpdid.un.b.area == 0)) { 5611 if (ndlpdid.un.b.id && 5612 vport->phba->fc_topology == 5613 LPFC_TOPOLOGY_LOOP) 5614 return 1; 5615 } 5616 return 0; 5617 } 5618 5619 matchdid.un.word = ndlp->nlp_DID; 5620 if ((mydid.un.b.domain == ndlpdid.un.b.domain) && 5621 (mydid.un.b.area == ndlpdid.un.b.area)) { 5622 if ((matchdid.un.b.domain == 0) && 5623 (matchdid.un.b.area == 0)) { 5624 if (matchdid.un.b.id) 5625 return 1; 5626 } 5627 } 5628 } 5629 return 0; 5630 } 5631 5632 /* Search for a nodelist entry */ 5633 static struct lpfc_nodelist * 5634 __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did) 5635 { 5636 struct lpfc_nodelist *ndlp; 5637 uint32_t data1; 5638 5639 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 5640 if (lpfc_matchdid(vport, ndlp, did)) { 5641 data1 = (((uint32_t)ndlp->nlp_state << 24) | 5642 ((uint32_t)ndlp->nlp_xri << 16) | 5643 ((uint32_t)ndlp->nlp_type << 8) 5644 ); 5645 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 5646 "0929 FIND node DID " 5647 "Data: x%px x%x x%x x%x x%x x%px\n", 5648 ndlp, ndlp->nlp_DID, 5649 ndlp->nlp_flag, data1, ndlp->nlp_rpi, 5650 ndlp->active_rrqs_xri_bitmap); 5651 return ndlp; 5652 } 5653 } 5654 5655 /* FIND node did <did> NOT FOUND */ 5656 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 5657 "0932 FIND node did x%x NOT FOUND.\n", did); 5658 return NULL; 5659 } 5660 5661 struct lpfc_nodelist * 5662 lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did) 5663 { 5664 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 5665 struct lpfc_nodelist *ndlp; 5666 unsigned long iflags; 5667 5668 spin_lock_irqsave(shost->host_lock, iflags); 5669 ndlp = __lpfc_findnode_did(vport, did); 5670 spin_unlock_irqrestore(shost->host_lock, iflags); 5671 return ndlp; 5672 } 5673 5674 struct lpfc_nodelist * 5675 lpfc_findnode_mapped(struct lpfc_vport *vport) 5676 { 5677 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 5678 struct lpfc_nodelist *ndlp; 5679 uint32_t data1; 5680 unsigned long iflags; 5681 5682 spin_lock_irqsave(shost->host_lock, iflags); 5683 5684 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 5685 if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE || 5686 ndlp->nlp_state == NLP_STE_MAPPED_NODE) { 5687 data1 = (((uint32_t)ndlp->nlp_state << 24) | 5688 ((uint32_t)ndlp->nlp_xri << 16) | 5689 ((uint32_t)ndlp->nlp_type << 8) | 5690 ((uint32_t)ndlp->nlp_rpi & 0xff)); 5691 spin_unlock_irqrestore(shost->host_lock, iflags); 5692 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 5693 "2025 FIND node DID " 5694 "Data: x%px x%x x%x x%x x%px\n", 5695 ndlp, ndlp->nlp_DID, 5696 ndlp->nlp_flag, data1, 5697 ndlp->active_rrqs_xri_bitmap); 5698 return ndlp; 5699 } 5700 } 5701 spin_unlock_irqrestore(shost->host_lock, iflags); 5702 5703 /* FIND node did <did> NOT FOUND */ 5704 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 5705 "2026 FIND mapped did NOT FOUND.\n"); 5706 return NULL; 5707 } 5708 5709 struct lpfc_nodelist * 5710 lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) 5711 { 5712 struct lpfc_nodelist *ndlp; 5713 5714 ndlp = lpfc_findnode_did(vport, did); 5715 if (!ndlp) { 5716 if (vport->phba->nvmet_support) 5717 return NULL; 5718 if ((vport->fc_flag & FC_RSCN_MODE) != 0 && 5719 lpfc_rscn_payload_check(vport, did) == 0) 5720 return NULL; 5721 ndlp = lpfc_nlp_init(vport, did); 5722 if (!ndlp) 5723 return NULL; 5724 
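		/* Newly allocated node: park it in NPR and mark it for discovery. */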
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 5725 5726 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 5727 "6453 Setup New Node 2B_DISC x%x " 5728 "Data:x%x x%x x%x\n", 5729 ndlp->nlp_DID, ndlp->nlp_flag, 5730 ndlp->nlp_state, vport->fc_flag); 5731 5732 spin_lock_irq(&ndlp->lock); 5733 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 5734 spin_unlock_irq(&ndlp->lock); 5735 return ndlp; 5736 } 5737 5738 /* The NVME Target does not want to actively manage an rport. 5739 * The goal is to allow the target to reset its state and clear 5740 * pending IO in preparation for the initiator to recover. 5741 */ 5742 if ((vport->fc_flag & FC_RSCN_MODE) && 5743 !(vport->fc_flag & FC_NDISC_ACTIVE)) { 5744 if (lpfc_rscn_payload_check(vport, did)) { 5745 5746 /* Since this node is marked for discovery, 5747 * delay timeout is not needed. 5748 */ 5749 lpfc_cancel_retry_delay_tmo(vport, ndlp); 5750 5751 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 5752 "6455 Setup RSCN Node 2B_DISC x%x " 5753 "Data:x%x x%x x%x\n", 5754 ndlp->nlp_DID, ndlp->nlp_flag, 5755 ndlp->nlp_state, vport->fc_flag); 5756 5757 /* NVME Target mode waits until rport is known to be 5758 * impacted by the RSCN before it transitions. No 5759 * active management - just go to NPR provided the 5760 * node had a valid login. 5761 */ 5762 if (vport->phba->nvmet_support) 5763 return ndlp; 5764 5765 /* If we've already received a PLOGI from this NPort 5766 * we don't need to try to discover it again. 5767 */ 5768 if (ndlp->nlp_flag & NLP_RCV_PLOGI && 5769 !(ndlp->nlp_type & 5770 (NLP_FCP_TARGET | NLP_NVME_TARGET))) 5771 return NULL; 5772 5773 ndlp->nlp_prev_state = ndlp->nlp_state; 5774 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 5775 5776 spin_lock_irq(&ndlp->lock); 5777 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 5778 spin_unlock_irq(&ndlp->lock); 5779 } else { 5780 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 5781 "6456 Skip Setup RSCN Node x%x " 5782 "Data:x%x x%x x%x\n", 5783 ndlp->nlp_DID, ndlp->nlp_flag, 5784 ndlp->nlp_state, vport->fc_flag); 5785 ndlp = NULL; 5786 } 5787 } else { 5788 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 5789 "6457 Setup Active Node 2B_DISC x%x " 5790 "Data:x%x x%x x%x\n", 5791 ndlp->nlp_DID, ndlp->nlp_flag, 5792 ndlp->nlp_state, vport->fc_flag); 5793 5794 /* If the initiator received a PLOGI from this NPort or if the 5795 * initiator is already in the process of discovery on it, 5796 * there's no need to try to discover it again. 
5797 */ 5798 if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE || 5799 ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || 5800 (!vport->phba->nvmet_support && 5801 ndlp->nlp_flag & NLP_RCV_PLOGI)) 5802 return NULL; 5803 5804 if (vport->phba->nvmet_support) 5805 return ndlp; 5806 5807 /* Moving to NPR state clears unsolicited flags and 5808 * allows for rediscovery 5809 */ 5810 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 5811 5812 spin_lock_irq(&ndlp->lock); 5813 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 5814 spin_unlock_irq(&ndlp->lock); 5815 } 5816 return ndlp; 5817 } 5818 5819 /* Build a list of nodes to discover based on the loopmap */ 5820 void 5821 lpfc_disc_list_loopmap(struct lpfc_vport *vport) 5822 { 5823 struct lpfc_hba *phba = vport->phba; 5824 int j; 5825 uint32_t alpa, index; 5826 5827 if (!lpfc_is_link_up(phba)) 5828 return; 5829 5830 if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) 5831 return; 5832 5833 /* Check for loop map present or not */ 5834 if (phba->alpa_map[0]) { 5835 for (j = 1; j <= phba->alpa_map[0]; j++) { 5836 alpa = phba->alpa_map[j]; 5837 if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0)) 5838 continue; 5839 lpfc_setup_disc_node(vport, alpa); 5840 } 5841 } else { 5842 /* No alpamap, so try all alpa's */ 5843 for (j = 0; j < FC_MAXLOOP; j++) { 5844 /* If cfg_scan_down is set, start from highest 5845 * ALPA (0xef) to lowest (0x1). 5846 */ 5847 if (vport->cfg_scan_down) 5848 index = j; 5849 else 5850 index = FC_MAXLOOP - j - 1; 5851 alpa = lpfcAlpaArray[index]; 5852 if ((vport->fc_myDID & 0xff) == alpa) 5853 continue; 5854 lpfc_setup_disc_node(vport, alpa); 5855 } 5856 } 5857 return; 5858 } 5859 5860 /* SLI3 only */ 5861 void 5862 lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport) 5863 { 5864 LPFC_MBOXQ_t *mbox; 5865 struct lpfc_sli *psli = &phba->sli; 5866 struct lpfc_sli_ring *extra_ring = &psli->sli3_ring[LPFC_EXTRA_RING]; 5867 struct lpfc_sli_ring *fcp_ring = &psli->sli3_ring[LPFC_FCP_RING]; 5868 int rc; 5869 5870 /* 5871 * if it's not a physical port or if we already send 5872 * clear_la then don't send it. 
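 * CLEAR_LA is likewise never issued on SLI4 ports, which do not use it.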
5873 */ 5874 if ((phba->link_state >= LPFC_CLEAR_LA) || 5875 (vport->port_type != LPFC_PHYSICAL_PORT) || 5876 (phba->sli_rev == LPFC_SLI_REV4)) 5877 return; 5878 5879 /* Link up discovery */ 5880 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) { 5881 phba->link_state = LPFC_CLEAR_LA; 5882 lpfc_clear_la(phba, mbox); 5883 mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la; 5884 mbox->vport = vport; 5885 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 5886 if (rc == MBX_NOT_FINISHED) { 5887 mempool_free(mbox, phba->mbox_mem_pool); 5888 lpfc_disc_flush_list(vport); 5889 extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT; 5890 fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT; 5891 phba->link_state = LPFC_HBA_ERROR; 5892 } 5893 } 5894 } 5895 5896 /* Reg_vpi to tell firmware to resume normal operations */ 5897 void 5898 lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport) 5899 { 5900 LPFC_MBOXQ_t *regvpimbox; 5901 5902 regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5903 if (regvpimbox) { 5904 lpfc_reg_vpi(vport, regvpimbox); 5905 regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi; 5906 regvpimbox->vport = vport; 5907 if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT) 5908 == MBX_NOT_FINISHED) { 5909 mempool_free(regvpimbox, phba->mbox_mem_pool); 5910 } 5911 } 5912 } 5913 5914 /* Start Link up / RSCN discovery on NPR nodes */ 5915 void 5916 lpfc_disc_start(struct lpfc_vport *vport) 5917 { 5918 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 5919 struct lpfc_hba *phba = vport->phba; 5920 uint32_t num_sent; 5921 uint32_t clear_la_pending; 5922 5923 if (!lpfc_is_link_up(phba)) { 5924 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, 5925 "3315 Link is not up %x\n", 5926 phba->link_state); 5927 return; 5928 } 5929 5930 if (phba->link_state == LPFC_CLEAR_LA) 5931 clear_la_pending = 1; 5932 else 5933 clear_la_pending = 0; 5934 5935 if (vport->port_state < LPFC_VPORT_READY) 5936 vport->port_state = LPFC_DISC_AUTH; 5937 5938 lpfc_set_disctmo(vport); 5939 5940 vport->fc_prevDID = vport->fc_myDID; 5941 vport->num_disc_nodes = 0; 5942 5943 /* Start Discovery state <hba_state> */ 5944 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 5945 "0202 Start Discovery port state x%x " 5946 "flg x%x Data: x%x x%x x%x\n", 5947 vport->port_state, vport->fc_flag, vport->fc_plogi_cnt, 5948 vport->fc_adisc_cnt, vport->fc_npr_cnt); 5949 5950 /* First do ADISCs - if any */ 5951 num_sent = lpfc_els_disc_adisc(vport); 5952 5953 if (num_sent) 5954 return; 5955 5956 /* Register the VPI for SLI3, NPIV only. */ 5957 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 5958 !(vport->fc_flag & FC_PT2PT) && 5959 !(vport->fc_flag & FC_RSCN_MODE) && 5960 (phba->sli_rev < LPFC_SLI_REV4)) { 5961 lpfc_issue_clear_la(phba, vport); 5962 lpfc_issue_reg_vpi(phba, vport); 5963 return; 5964 } 5965 5966 /* 5967 * For SLI2, we need to set port_state to READY and continue 5968 * discovery. 
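 * Otherwise PLOGIs are issued to the remaining NPR nodes, and any RSCNs
 * that arrived while this discovery pass was running are handled before
 * it is allowed to complete.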
5969 */ 5970 if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) { 5971 /* If we get here, there is nothing to ADISC */ 5972 lpfc_issue_clear_la(phba, vport); 5973 5974 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) { 5975 vport->num_disc_nodes = 0; 5976 /* go thru NPR nodes and issue ELS PLOGIs */ 5977 if (vport->fc_npr_cnt) 5978 lpfc_els_disc_plogi(vport); 5979 5980 if (!vport->num_disc_nodes) { 5981 spin_lock_irq(shost->host_lock); 5982 vport->fc_flag &= ~FC_NDISC_ACTIVE; 5983 spin_unlock_irq(shost->host_lock); 5984 lpfc_can_disctmo(vport); 5985 } 5986 } 5987 vport->port_state = LPFC_VPORT_READY; 5988 } else { 5989 /* Next do PLOGIs - if any */ 5990 num_sent = lpfc_els_disc_plogi(vport); 5991 5992 if (num_sent) 5993 return; 5994 5995 if (vport->fc_flag & FC_RSCN_MODE) { 5996 /* Check to see if more RSCNs came in while we 5997 * were processing this one. 5998 */ 5999 if ((vport->fc_rscn_id_cnt == 0) && 6000 (!(vport->fc_flag & FC_RSCN_DISCOVERY))) { 6001 spin_lock_irq(shost->host_lock); 6002 vport->fc_flag &= ~FC_RSCN_MODE; 6003 spin_unlock_irq(shost->host_lock); 6004 lpfc_can_disctmo(vport); 6005 } else 6006 lpfc_els_handle_rscn(vport); 6007 } 6008 } 6009 return; 6010 } 6011 6012 /* 6013 * Ignore completion for all IOCBs on tx and txcmpl queue for ELS 6014 * ring the match the sppecified nodelist. 6015 */ 6016 static void 6017 lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) 6018 { 6019 LIST_HEAD(completions); 6020 struct lpfc_iocbq *iocb, *next_iocb; 6021 struct lpfc_sli_ring *pring; 6022 u32 ulp_command; 6023 6024 pring = lpfc_phba_elsring(phba); 6025 if (unlikely(!pring)) 6026 return; 6027 6028 /* Error matching iocb on txq or txcmplq 6029 * First check the txq. 6030 */ 6031 spin_lock_irq(&phba->hbalock); 6032 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { 6033 if (iocb->ndlp != ndlp) 6034 continue; 6035 6036 ulp_command = get_job_cmnd(phba, iocb); 6037 6038 if (ulp_command == CMD_ELS_REQUEST64_CR || 6039 ulp_command == CMD_XMIT_ELS_RSP64_CX) { 6040 6041 list_move_tail(&iocb->list, &completions); 6042 } 6043 } 6044 6045 /* Next check the txcmplq */ 6046 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { 6047 if (iocb->ndlp != ndlp) 6048 continue; 6049 6050 ulp_command = get_job_cmnd(phba, iocb); 6051 6052 if (ulp_command == CMD_ELS_REQUEST64_CR || 6053 ulp_command == CMD_XMIT_ELS_RSP64_CX) { 6054 lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL); 6055 } 6056 } 6057 spin_unlock_irq(&phba->hbalock); 6058 6059 /* Make sure HBA is alive */ 6060 lpfc_issue_hb_tmo(phba); 6061 6062 /* Cancel all the IOCBs from the completions list */ 6063 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 6064 IOERR_SLI_ABORTED); 6065 } 6066 6067 static void 6068 lpfc_disc_flush_list(struct lpfc_vport *vport) 6069 { 6070 struct lpfc_nodelist *ndlp, *next_ndlp; 6071 struct lpfc_hba *phba = vport->phba; 6072 6073 if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) { 6074 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, 6075 nlp_listp) { 6076 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || 6077 ndlp->nlp_state == NLP_STE_ADISC_ISSUE) { 6078 lpfc_free_tx(phba, ndlp); 6079 } 6080 } 6081 } 6082 } 6083 6084 /* 6085 * lpfc_notify_xport_npr - notifies xport of node disappearance 6086 * @vport: Pointer to Virtual Port object. 6087 * 6088 * Transitions all ndlps to NPR state. When lpfc_nlp_set_state 6089 * calls lpfc_nlp_state_cleanup, the ndlp->rport is unregistered 6090 * and transport notified that the node is gone. 
6091 * Return Code: 6092 * none 6093 */ 6094 static void 6095 lpfc_notify_xport_npr(struct lpfc_vport *vport) 6096 { 6097 struct lpfc_nodelist *ndlp, *next_ndlp; 6098 6099 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, 6100 nlp_listp) { 6101 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 6102 } 6103 } 6104 void 6105 lpfc_cleanup_discovery_resources(struct lpfc_vport *vport) 6106 { 6107 lpfc_els_flush_rscn(vport); 6108 lpfc_els_flush_cmd(vport); 6109 lpfc_disc_flush_list(vport); 6110 if (pci_channel_offline(vport->phba->pcidev)) 6111 lpfc_notify_xport_npr(vport); 6112 } 6113 6114 /*****************************************************************************/ 6115 /* 6116 * NAME: lpfc_disc_timeout 6117 * 6118 * FUNCTION: Fibre Channel driver discovery timeout routine. 6119 * 6120 * EXECUTION ENVIRONMENT: interrupt only 6121 * 6122 * CALLED FROM: 6123 * Timer function 6124 * 6125 * RETURNS: 6126 * none 6127 */ 6128 /*****************************************************************************/ 6129 void 6130 lpfc_disc_timeout(struct timer_list *t) 6131 { 6132 struct lpfc_vport *vport = from_timer(vport, t, fc_disctmo); 6133 struct lpfc_hba *phba = vport->phba; 6134 uint32_t tmo_posted; 6135 unsigned long flags = 0; 6136 6137 if (unlikely(!phba)) 6138 return; 6139 6140 spin_lock_irqsave(&vport->work_port_lock, flags); 6141 tmo_posted = vport->work_port_events & WORKER_DISC_TMO; 6142 if (!tmo_posted) 6143 vport->work_port_events |= WORKER_DISC_TMO; 6144 spin_unlock_irqrestore(&vport->work_port_lock, flags); 6145 6146 if (!tmo_posted) 6147 lpfc_worker_wake_up(phba); 6148 return; 6149 } 6150 6151 static void 6152 lpfc_disc_timeout_handler(struct lpfc_vport *vport) 6153 { 6154 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6155 struct lpfc_hba *phba = vport->phba; 6156 struct lpfc_sli *psli = &phba->sli; 6157 struct lpfc_nodelist *ndlp, *next_ndlp; 6158 LPFC_MBOXQ_t *initlinkmbox; 6159 int rc, clrlaerr = 0; 6160 6161 if (!(vport->fc_flag & FC_DISC_TMO)) 6162 return; 6163 6164 spin_lock_irq(shost->host_lock); 6165 vport->fc_flag &= ~FC_DISC_TMO; 6166 spin_unlock_irq(shost->host_lock); 6167 6168 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 6169 "disc timeout: state:x%x rtry:x%x flg:x%x", 6170 vport->port_state, vport->fc_ns_retry, vport->fc_flag); 6171 6172 switch (vport->port_state) { 6173 6174 case LPFC_LOCAL_CFG_LINK: 6175 /* 6176 * port_state is identically LPFC_LOCAL_CFG_LINK while 6177 * waiting for FAN timeout 6178 */ 6179 lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY, 6180 "0221 FAN timeout\n"); 6181 6182 /* Start discovery by sending FLOGI, clean up old rpis */ 6183 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, 6184 nlp_listp) { 6185 if (ndlp->nlp_state != NLP_STE_NPR_NODE) 6186 continue; 6187 if (ndlp->nlp_type & NLP_FABRIC) { 6188 /* Clean up the ndlp on Fabric connections */ 6189 lpfc_drop_node(vport, ndlp); 6190 6191 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { 6192 /* Fail outstanding IO now since device 6193 * is marked for PLOGI. 
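			 * A fresh PLOGI and registration will be
			 * required before I/O to the device resumes.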
6194 */ 6195 lpfc_unreg_rpi(vport, ndlp); 6196 } 6197 } 6198 if (vport->port_state != LPFC_FLOGI) { 6199 if (phba->sli_rev <= LPFC_SLI_REV3) 6200 lpfc_initial_flogi(vport); 6201 else 6202 lpfc_issue_init_vfi(vport); 6203 return; 6204 } 6205 break; 6206 6207 case LPFC_FDISC: 6208 case LPFC_FLOGI: 6209 /* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */ 6210 /* Initial FLOGI timeout */ 6211 lpfc_printf_vlog(vport, KERN_ERR, 6212 LOG_TRACE_EVENT, 6213 "0222 Initial %s timeout\n", 6214 vport->vpi ? "FDISC" : "FLOGI"); 6215 6216 /* Assume no Fabric and go on with discovery. 6217 * Check for outstanding ELS FLOGI to abort. 6218 */ 6219 6220 /* FLOGI failed, so just use loop map to make discovery list */ 6221 lpfc_disc_list_loopmap(vport); 6222 6223 /* Start discovery */ 6224 lpfc_disc_start(vport); 6225 break; 6226 6227 case LPFC_FABRIC_CFG_LINK: 6228 /* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for 6229 NameServer login */ 6230 lpfc_printf_vlog(vport, KERN_ERR, 6231 LOG_TRACE_EVENT, 6232 "0223 Timeout while waiting for " 6233 "NameServer login\n"); 6234 /* Next look for NameServer ndlp */ 6235 ndlp = lpfc_findnode_did(vport, NameServer_DID); 6236 if (ndlp) 6237 lpfc_els_abort(phba, ndlp); 6238 6239 /* ReStart discovery */ 6240 goto restart_disc; 6241 6242 case LPFC_NS_QRY: 6243 /* Check for wait for NameServer Rsp timeout */ 6244 lpfc_printf_vlog(vport, KERN_ERR, 6245 LOG_TRACE_EVENT, 6246 "0224 NameServer Query timeout " 6247 "Data: x%x x%x\n", 6248 vport->fc_ns_retry, LPFC_MAX_NS_RETRY); 6249 6250 if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) { 6251 /* Try it one more time */ 6252 vport->fc_ns_retry++; 6253 vport->gidft_inp = 0; 6254 rc = lpfc_issue_gidft(vport); 6255 if (rc == 0) 6256 break; 6257 } 6258 vport->fc_ns_retry = 0; 6259 6260 restart_disc: 6261 /* 6262 * Discovery is over. 6263 * set port_state to PORT_READY if SLI2. 6264 * cmpl_reg_vpi will set port_state to READY for SLI3. 6265 */ 6266 if (phba->sli_rev < LPFC_SLI_REV4) { 6267 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 6268 lpfc_issue_reg_vpi(phba, vport); 6269 else { 6270 lpfc_issue_clear_la(phba, vport); 6271 vport->port_state = LPFC_VPORT_READY; 6272 } 6273 } 6274 6275 /* Setup and issue mailbox INITIALIZE LINK command */ 6276 initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6277 if (!initlinkmbox) { 6278 lpfc_printf_vlog(vport, KERN_ERR, 6279 LOG_TRACE_EVENT, 6280 "0206 Device Discovery " 6281 "completion error\n"); 6282 phba->link_state = LPFC_HBA_ERROR; 6283 break; 6284 } 6285 6286 lpfc_linkdown(phba); 6287 lpfc_init_link(phba, initlinkmbox, phba->cfg_topology, 6288 phba->cfg_link_speed); 6289 initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; 6290 initlinkmbox->vport = vport; 6291 initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 6292 rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT); 6293 lpfc_set_loopback_flag(phba); 6294 if (rc == MBX_NOT_FINISHED) 6295 mempool_free(initlinkmbox, phba->mbox_mem_pool); 6296 6297 break; 6298 6299 case LPFC_DISC_AUTH: 6300 /* Node Authentication timeout */ 6301 lpfc_printf_vlog(vport, KERN_ERR, 6302 LOG_TRACE_EVENT, 6303 "0227 Node Authentication timeout\n"); 6304 lpfc_disc_flush_list(vport); 6305 6306 /* 6307 * set port_state to PORT_READY if SLI2. 6308 * cmpl_reg_vpi will set port_state to READY for SLI3. 
6309 */ 6310 if (phba->sli_rev < LPFC_SLI_REV4) { 6311 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 6312 lpfc_issue_reg_vpi(phba, vport); 6313 else { /* NPIV Not enabled */ 6314 lpfc_issue_clear_la(phba, vport); 6315 vport->port_state = LPFC_VPORT_READY; 6316 } 6317 } 6318 break; 6319 6320 case LPFC_VPORT_READY: 6321 if (vport->fc_flag & FC_RSCN_MODE) { 6322 lpfc_printf_vlog(vport, KERN_ERR, 6323 LOG_TRACE_EVENT, 6324 "0231 RSCN timeout Data: x%x " 6325 "x%x x%x x%x\n", 6326 vport->fc_ns_retry, LPFC_MAX_NS_RETRY, 6327 vport->port_state, vport->gidft_inp); 6328 6329 /* Cleanup any outstanding ELS commands */ 6330 lpfc_els_flush_cmd(vport); 6331 6332 lpfc_els_flush_rscn(vport); 6333 lpfc_disc_flush_list(vport); 6334 } 6335 break; 6336 6337 default: 6338 lpfc_printf_vlog(vport, KERN_ERR, 6339 LOG_TRACE_EVENT, 6340 "0273 Unexpected discovery timeout, " 6341 "vport State x%x\n", vport->port_state); 6342 break; 6343 } 6344 6345 switch (phba->link_state) { 6346 case LPFC_CLEAR_LA: 6347 /* CLEAR LA timeout */ 6348 lpfc_printf_vlog(vport, KERN_ERR, 6349 LOG_TRACE_EVENT, 6350 "0228 CLEAR LA timeout\n"); 6351 clrlaerr = 1; 6352 break; 6353 6354 case LPFC_LINK_UP: 6355 lpfc_issue_clear_la(phba, vport); 6356 fallthrough; 6357 case LPFC_LINK_UNKNOWN: 6358 case LPFC_WARM_START: 6359 case LPFC_INIT_START: 6360 case LPFC_INIT_MBX_CMDS: 6361 case LPFC_LINK_DOWN: 6362 case LPFC_HBA_ERROR: 6363 lpfc_printf_vlog(vport, KERN_ERR, 6364 LOG_TRACE_EVENT, 6365 "0230 Unexpected timeout, hba link " 6366 "state x%x\n", phba->link_state); 6367 clrlaerr = 1; 6368 break; 6369 6370 case LPFC_HBA_READY: 6371 break; 6372 } 6373 6374 if (clrlaerr) { 6375 lpfc_disc_flush_list(vport); 6376 if (phba->sli_rev != LPFC_SLI_REV4) { 6377 psli->sli3_ring[(LPFC_EXTRA_RING)].flag &= 6378 ~LPFC_STOP_IOCB_EVENT; 6379 psli->sli3_ring[LPFC_FCP_RING].flag &= 6380 ~LPFC_STOP_IOCB_EVENT; 6381 } 6382 vport->port_state = LPFC_VPORT_READY; 6383 } 6384 return; 6385 } 6386 6387 /* 6388 * This routine handles processing a NameServer REG_LOGIN mailbox 6389 * command upon completion. It is setup in the LPFC_MBOXQ 6390 * as the completion routine when the command is 6391 * handed off to the SLI layer. 6392 */ 6393 void 6394 lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 6395 { 6396 MAILBOX_t *mb = &pmb->u.mb; 6397 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; 6398 struct lpfc_vport *vport = pmb->vport; 6399 6400 pmb->ctx_ndlp = NULL; 6401 6402 if (phba->sli_rev < LPFC_SLI_REV4) 6403 ndlp->nlp_rpi = mb->un.varWords[0]; 6404 ndlp->nlp_flag |= NLP_RPI_REGISTERED; 6405 ndlp->nlp_type |= NLP_FABRIC; 6406 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 6407 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, 6408 "0004 rpi:%x DID:%x flg:%x %d x%px\n", 6409 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, 6410 kref_read(&ndlp->kref), 6411 ndlp); 6412 /* 6413 * Start issuing Fabric-Device Management Interface (FDMI) command to 6414 * 0xfffffa (FDMI well known port). 6415 * DHBA -> DPRT -> RHBA -> RPA (physical port) 6416 * DPRT -> RPRT (vports) 6417 */ 6418 if (vport->port_type == LPFC_PHYSICAL_PORT) { 6419 phba->link_flag &= ~LS_CT_VEN_RPA; /* For extra Vendor RPA */ 6420 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0); 6421 } else { 6422 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0); 6423 } 6424 6425 6426 /* decrement the node reference count held for this callback 6427 * function. 
6428 */ 6429 lpfc_nlp_put(ndlp); 6430 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); 6431 return; 6432 } 6433 6434 static int 6435 lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param) 6436 { 6437 uint16_t *rpi = param; 6438 6439 return ndlp->nlp_rpi == *rpi; 6440 } 6441 6442 static int 6443 lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param) 6444 { 6445 return memcmp(&ndlp->nlp_portname, param, 6446 sizeof(ndlp->nlp_portname)) == 0; 6447 } 6448 6449 static struct lpfc_nodelist * 6450 __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param) 6451 { 6452 struct lpfc_nodelist *ndlp; 6453 6454 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 6455 if (filter(ndlp, param)) { 6456 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 6457 "3185 FIND node filter %ps DID " 6458 "ndlp x%px did x%x flg x%x st x%x " 6459 "xri x%x type x%x rpi x%x\n", 6460 filter, ndlp, ndlp->nlp_DID, 6461 ndlp->nlp_flag, ndlp->nlp_state, 6462 ndlp->nlp_xri, ndlp->nlp_type, 6463 ndlp->nlp_rpi); 6464 return ndlp; 6465 } 6466 } 6467 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 6468 "3186 FIND node filter %ps NOT FOUND.\n", filter); 6469 return NULL; 6470 } 6471 6472 /* 6473 * This routine looks up the ndlp lists for the given RPI. If rpi found it 6474 * returns the node list element pointer else return NULL. 6475 */ 6476 struct lpfc_nodelist * 6477 __lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi) 6478 { 6479 return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi); 6480 } 6481 6482 /* 6483 * This routine looks up the ndlp lists for the given WWPN. If WWPN found it 6484 * returns the node element list pointer else return NULL. 6485 */ 6486 struct lpfc_nodelist * 6487 lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn) 6488 { 6489 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6490 struct lpfc_nodelist *ndlp; 6491 6492 spin_lock_irq(shost->host_lock); 6493 ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn); 6494 spin_unlock_irq(shost->host_lock); 6495 return ndlp; 6496 } 6497 6498 /* 6499 * This routine looks up the ndlp lists for the given RPI. If the rpi 6500 * is found, the routine returns the node element list pointer else 6501 * return NULL. 6502 */ 6503 struct lpfc_nodelist * 6504 lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi) 6505 { 6506 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6507 struct lpfc_nodelist *ndlp; 6508 unsigned long flags; 6509 6510 spin_lock_irqsave(shost->host_lock, flags); 6511 ndlp = __lpfc_findnode_rpi(vport, rpi); 6512 spin_unlock_irqrestore(shost->host_lock, flags); 6513 return ndlp; 6514 } 6515 6516 /** 6517 * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier 6518 * @phba: pointer to lpfc hba data structure. 6519 * @vpi: the physical host virtual N_Port identifier. 6520 * 6521 * This routine finds a vport on a HBA (referred by @phba) through a 6522 * @vpi. The function walks the HBA's vport list and returns the address 6523 * of the vport with the matching @vpi. 6524 * 6525 * Return code 6526 * NULL - No vport with the matching @vpi found 6527 * Otherwise - Address to the vport with the matching @vpi. 6528 **/ 6529 struct lpfc_vport * 6530 lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi) 6531 { 6532 struct lpfc_vport *vport; 6533 unsigned long flags; 6534 int i = 0; 6535 6536 /* The physical ports are always vpi 0 - translate is unnecessary. */ 6537 if (vpi > 0) { 6538 /* 6539 * Translate the physical vpi to the logical vpi. 
The 6540 * vport stores the logical vpi. 6541 */ 6542 for (i = 0; i <= phba->max_vpi; i++) { 6543 if (vpi == phba->vpi_ids[i]) 6544 break; 6545 } 6546 6547 if (i > phba->max_vpi) { 6548 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6549 "2936 Could not find Vport mapped " 6550 "to vpi %d\n", vpi); 6551 return NULL; 6552 } 6553 } 6554 6555 spin_lock_irqsave(&phba->port_list_lock, flags); 6556 list_for_each_entry(vport, &phba->port_list, listentry) { 6557 if (vport->vpi == i) { 6558 spin_unlock_irqrestore(&phba->port_list_lock, flags); 6559 return vport; 6560 } 6561 } 6562 spin_unlock_irqrestore(&phba->port_list_lock, flags); 6563 return NULL; 6564 } 6565 6566 struct lpfc_nodelist * 6567 lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did) 6568 { 6569 struct lpfc_nodelist *ndlp; 6570 int rpi = LPFC_RPI_ALLOC_ERROR; 6571 6572 if (vport->phba->sli_rev == LPFC_SLI_REV4) { 6573 rpi = lpfc_sli4_alloc_rpi(vport->phba); 6574 if (rpi == LPFC_RPI_ALLOC_ERROR) 6575 return NULL; 6576 } 6577 6578 ndlp = mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL); 6579 if (!ndlp) { 6580 if (vport->phba->sli_rev == LPFC_SLI_REV4) 6581 lpfc_sli4_free_rpi(vport->phba, rpi); 6582 return NULL; 6583 } 6584 6585 memset(ndlp, 0, sizeof (struct lpfc_nodelist)); 6586 6587 spin_lock_init(&ndlp->lock); 6588 6589 lpfc_initialize_node(vport, ndlp, did); 6590 INIT_LIST_HEAD(&ndlp->nlp_listp); 6591 if (vport->phba->sli_rev == LPFC_SLI_REV4) { 6592 ndlp->nlp_rpi = rpi; 6593 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, 6594 "0007 Init New ndlp x%px, rpi:x%x DID:%x " 6595 "flg:x%x refcnt:%d\n", 6596 ndlp, ndlp->nlp_rpi, ndlp->nlp_DID, 6597 ndlp->nlp_flag, kref_read(&ndlp->kref)); 6598 6599 ndlp->active_rrqs_xri_bitmap = 6600 mempool_alloc(vport->phba->active_rrq_pool, 6601 GFP_KERNEL); 6602 if (ndlp->active_rrqs_xri_bitmap) 6603 memset(ndlp->active_rrqs_xri_bitmap, 0, 6604 ndlp->phba->cfg_rrq_xri_bitmap_sz); 6605 } 6606 6607 6608 6609 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, 6610 "node init: did:x%x", 6611 ndlp->nlp_DID, 0, 0); 6612 6613 return ndlp; 6614 } 6615 6616 /* This routine releases all resources associated with a specifc NPort's ndlp 6617 * and mempool_free's the nodelist. 6618 */ 6619 static void 6620 lpfc_nlp_release(struct kref *kref) 6621 { 6622 struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist, 6623 kref); 6624 struct lpfc_vport *vport = ndlp->vport; 6625 6626 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, 6627 "node release: did:x%x flg:x%x type:x%x", 6628 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); 6629 6630 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 6631 "0279 %s: ndlp: x%px did %x refcnt:%d rpi:%x\n", 6632 __func__, ndlp, ndlp->nlp_DID, 6633 kref_read(&ndlp->kref), ndlp->nlp_rpi); 6634 6635 /* remove ndlp from action. */ 6636 lpfc_cancel_retry_delay_tmo(vport, ndlp); 6637 lpfc_cleanup_node(vport, ndlp); 6638 6639 /* Not all ELS transactions have registered the RPI with the port. 6640 * In these cases the rpi usage is temporary and the node is 6641 * released when the WQE is completed. Catch this case to free the 6642 * RPI to the pool. Because this node is in the release path, a lock 6643 * is unnecessary. All references are gone and the node has been 6644 * dequeued. 
6645 */ 6646 if (ndlp->nlp_flag & NLP_RELEASE_RPI) { 6647 if (ndlp->nlp_rpi != LPFC_RPI_ALLOC_ERROR && 6648 !(ndlp->nlp_flag & (NLP_RPI_REGISTERED | NLP_UNREG_INP))) { 6649 lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi); 6650 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; 6651 } 6652 } 6653 6654 /* The node is not freed back to memory, it is released to a pool so 6655 * the node fields need to be cleaned up. 6656 */ 6657 ndlp->vport = NULL; 6658 ndlp->nlp_state = NLP_STE_FREED_NODE; 6659 ndlp->nlp_flag = 0; 6660 ndlp->fc4_xpt_flags = 0; 6661 6662 /* free ndlp memory for final ndlp release */ 6663 if (ndlp->phba->sli_rev == LPFC_SLI_REV4) 6664 mempool_free(ndlp->active_rrqs_xri_bitmap, 6665 ndlp->phba->active_rrq_pool); 6666 mempool_free(ndlp, ndlp->phba->nlp_mem_pool); 6667 } 6668 6669 /* This routine bumps the reference count for a ndlp structure to ensure 6670 * that one discovery thread won't free a ndlp while another discovery thread 6671 * is using it. 6672 */ 6673 struct lpfc_nodelist * 6674 lpfc_nlp_get(struct lpfc_nodelist *ndlp) 6675 { 6676 unsigned long flags; 6677 6678 if (ndlp) { 6679 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, 6680 "node get: did:x%x flg:x%x refcnt:x%x", 6681 ndlp->nlp_DID, ndlp->nlp_flag, 6682 kref_read(&ndlp->kref)); 6683 6684 /* The check of ndlp usage to prevent incrementing the 6685 * ndlp reference count that is in the process of being 6686 * released. 6687 */ 6688 spin_lock_irqsave(&ndlp->lock, flags); 6689 if (!kref_get_unless_zero(&ndlp->kref)) { 6690 spin_unlock_irqrestore(&ndlp->lock, flags); 6691 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE, 6692 "0276 %s: ndlp:x%px refcnt:%d\n", 6693 __func__, (void *)ndlp, kref_read(&ndlp->kref)); 6694 return NULL; 6695 } 6696 spin_unlock_irqrestore(&ndlp->lock, flags); 6697 } else { 6698 WARN_ONCE(!ndlp, "**** %s, get ref on NULL ndlp!", __func__); 6699 } 6700 6701 return ndlp; 6702 } 6703 6704 /* This routine decrements the reference count for a ndlp structure. If the 6705 * count goes to 0, this indicates the associated nodelist should be freed. 6706 */ 6707 int 6708 lpfc_nlp_put(struct lpfc_nodelist *ndlp) 6709 { 6710 if (ndlp) { 6711 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, 6712 "node put: did:x%x flg:x%x refcnt:x%x", 6713 ndlp->nlp_DID, ndlp->nlp_flag, 6714 kref_read(&ndlp->kref)); 6715 } else { 6716 WARN_ONCE(!ndlp, "**** %s, put ref on NULL ndlp!", __func__); 6717 } 6718 6719 return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0; 6720 } 6721 6722 /* This routine free's the specified nodelist if it is not in use 6723 * by any other discovery thread. This routine returns 1 if the 6724 * ndlp has been freed. A return value of 0 indicates the ndlp is 6725 * not yet been released. 6726 */ 6727 int 6728 lpfc_nlp_not_used(struct lpfc_nodelist *ndlp) 6729 { 6730 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, 6731 "node not used: did:x%x flg:x%x refcnt:x%x", 6732 ndlp->nlp_DID, ndlp->nlp_flag, 6733 kref_read(&ndlp->kref)); 6734 6735 if (kref_read(&ndlp->kref) == 1) 6736 if (lpfc_nlp_put(ndlp)) 6737 return 1; 6738 return 0; 6739 } 6740 6741 /** 6742 * lpfc_fcf_inuse - Check if FCF can be unregistered. 6743 * @phba: Pointer to hba context object. 6744 * 6745 * This function iterate through all FC nodes associated 6746 * will all vports to check if there is any node with 6747 * fc_rports associated with it. If there is an fc_rport 6748 * associated with the node, then the node is either in 6749 * discovered state or its devloss_timer is pending. 
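 * Returns 1 if the FCF is still in use (or if the check could not be
 * performed) and 0 if it is safe to unregister.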
6750 */ 6751 static int 6752 lpfc_fcf_inuse(struct lpfc_hba *phba) 6753 { 6754 struct lpfc_vport **vports; 6755 int i, ret = 0; 6756 struct lpfc_nodelist *ndlp; 6757 struct Scsi_Host *shost; 6758 6759 vports = lpfc_create_vport_work_array(phba); 6760 6761 /* If driver cannot allocate memory, indicate fcf is in use */ 6762 if (!vports) 6763 return 1; 6764 6765 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 6766 shost = lpfc_shost_from_vport(vports[i]); 6767 spin_lock_irq(shost->host_lock); 6768 /* 6769 * IF the CVL_RCVD bit is not set then we have sent the 6770 * flogi. 6771 * If dev_loss fires while we are waiting we do not want to 6772 * unreg the fcf. 6773 */ 6774 if (!(vports[i]->fc_flag & FC_VPORT_CVL_RCVD)) { 6775 spin_unlock_irq(shost->host_lock); 6776 ret = 1; 6777 goto out; 6778 } 6779 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) { 6780 if (ndlp->rport && 6781 (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) { 6782 ret = 1; 6783 spin_unlock_irq(shost->host_lock); 6784 goto out; 6785 } else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { 6786 ret = 1; 6787 lpfc_printf_log(phba, KERN_INFO, 6788 LOG_NODE | LOG_DISCOVERY, 6789 "2624 RPI %x DID %x flag %x " 6790 "still logged in\n", 6791 ndlp->nlp_rpi, ndlp->nlp_DID, 6792 ndlp->nlp_flag); 6793 } 6794 } 6795 spin_unlock_irq(shost->host_lock); 6796 } 6797 out: 6798 lpfc_destroy_vport_work_array(phba, vports); 6799 return ret; 6800 } 6801 6802 /** 6803 * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi. 6804 * @phba: Pointer to hba context object. 6805 * @mboxq: Pointer to mailbox object. 6806 * 6807 * This function frees memory associated with the mailbox command. 6808 */ 6809 void 6810 lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 6811 { 6812 struct lpfc_vport *vport = mboxq->vport; 6813 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6814 6815 if (mboxq->u.mb.mbxStatus) { 6816 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6817 "2555 UNREG_VFI mbxStatus error x%x " 6818 "HBA state x%x\n", 6819 mboxq->u.mb.mbxStatus, vport->port_state); 6820 } 6821 spin_lock_irq(shost->host_lock); 6822 phba->pport->fc_flag &= ~FC_VFI_REGISTERED; 6823 spin_unlock_irq(shost->host_lock); 6824 mempool_free(mboxq, phba->mbox_mem_pool); 6825 return; 6826 } 6827 6828 /** 6829 * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi. 6830 * @phba: Pointer to hba context object. 6831 * @mboxq: Pointer to mailbox object. 6832 * 6833 * This function frees memory associated with the mailbox command. 6834 */ 6835 static void 6836 lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 6837 { 6838 struct lpfc_vport *vport = mboxq->vport; 6839 6840 if (mboxq->u.mb.mbxStatus) { 6841 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6842 "2550 UNREG_FCFI mbxStatus error x%x " 6843 "HBA state x%x\n", 6844 mboxq->u.mb.mbxStatus, vport->port_state); 6845 } 6846 mempool_free(mboxq, phba->mbox_mem_pool); 6847 return; 6848 } 6849 6850 /** 6851 * lpfc_unregister_fcf_prep - Unregister fcf record preparation 6852 * @phba: Pointer to hba context object. 6853 * 6854 * This function prepare the HBA for unregistering the currently registered 6855 * FCF from the HBA. It performs unregistering, in order, RPIs, VPIs, and 6856 * VFIs. 
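 * Returns the result of the final UNREG_VFI request issued on the
 * physical port.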
6857 */ 6858 int 6859 lpfc_unregister_fcf_prep(struct lpfc_hba *phba) 6860 { 6861 struct lpfc_vport **vports; 6862 struct lpfc_nodelist *ndlp; 6863 struct Scsi_Host *shost; 6864 int i = 0, rc; 6865 6866 /* Unregister RPIs */ 6867 if (lpfc_fcf_inuse(phba)) 6868 lpfc_unreg_hba_rpis(phba); 6869 6870 /* At this point, all discovery is aborted */ 6871 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 6872 6873 /* Unregister VPIs */ 6874 vports = lpfc_create_vport_work_array(phba); 6875 if (vports && (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) 6876 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 6877 /* Stop FLOGI/FDISC retries */ 6878 ndlp = lpfc_findnode_did(vports[i], Fabric_DID); 6879 if (ndlp) 6880 lpfc_cancel_retry_delay_tmo(vports[i], ndlp); 6881 lpfc_cleanup_pending_mbox(vports[i]); 6882 if (phba->sli_rev == LPFC_SLI_REV4) 6883 lpfc_sli4_unreg_all_rpis(vports[i]); 6884 lpfc_mbx_unreg_vpi(vports[i]); 6885 shost = lpfc_shost_from_vport(vports[i]); 6886 spin_lock_irq(shost->host_lock); 6887 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 6888 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; 6889 spin_unlock_irq(shost->host_lock); 6890 } 6891 lpfc_destroy_vport_work_array(phba, vports); 6892 if (i == 0 && (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))) { 6893 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); 6894 if (ndlp) 6895 lpfc_cancel_retry_delay_tmo(phba->pport, ndlp); 6896 lpfc_cleanup_pending_mbox(phba->pport); 6897 if (phba->sli_rev == LPFC_SLI_REV4) 6898 lpfc_sli4_unreg_all_rpis(phba->pport); 6899 lpfc_mbx_unreg_vpi(phba->pport); 6900 shost = lpfc_shost_from_vport(phba->pport); 6901 spin_lock_irq(shost->host_lock); 6902 phba->pport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 6903 phba->pport->vpi_state &= ~LPFC_VPI_REGISTERED; 6904 spin_unlock_irq(shost->host_lock); 6905 } 6906 6907 /* Cleanup any outstanding ELS commands */ 6908 lpfc_els_flush_all_cmd(phba); 6909 6910 /* Unregister the physical port VFI */ 6911 rc = lpfc_issue_unreg_vfi(phba->pport); 6912 return rc; 6913 } 6914 6915 /** 6916 * lpfc_sli4_unregister_fcf - Unregister currently registered FCF record 6917 * @phba: Pointer to hba context object. 6918 * 6919 * This function issues synchronous unregister FCF mailbox command to HBA to 6920 * unregister the currently registered FCF record. The driver does not reset 6921 * the driver FCF usage state flags. 6922 * 6923 * Return 0 if successfully issued, none-zero otherwise. 6924 */ 6925 int 6926 lpfc_sli4_unregister_fcf(struct lpfc_hba *phba) 6927 { 6928 LPFC_MBOXQ_t *mbox; 6929 int rc; 6930 6931 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6932 if (!mbox) { 6933 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6934 "2551 UNREG_FCFI mbox allocation failed" 6935 "HBA state x%x\n", phba->pport->port_state); 6936 return -ENOMEM; 6937 } 6938 lpfc_unreg_fcfi(mbox, phba->fcf.fcfi); 6939 mbox->vport = phba->pport; 6940 mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl; 6941 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 6942 6943 if (rc == MBX_NOT_FINISHED) { 6944 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6945 "2552 Unregister FCFI command failed rc x%x " 6946 "HBA state x%x\n", 6947 rc, phba->pport->port_state); 6948 return -EINVAL; 6949 } 6950 return 0; 6951 } 6952 6953 /** 6954 * lpfc_unregister_fcf_rescan - Unregister currently registered fcf and rescan 6955 * @phba: Pointer to hba context object. 6956 * 6957 * This function unregisters the currently reigstered FCF. This function 6958 * also tries to find another FCF for discovery by rescan the HBA FCF table. 
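 * The rescan is skipped when the driver is unloading or the link is not
 * up.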
6959 */ 6960 void 6961 lpfc_unregister_fcf_rescan(struct lpfc_hba *phba) 6962 { 6963 int rc; 6964 6965 /* Preparation for unregistering fcf */ 6966 rc = lpfc_unregister_fcf_prep(phba); 6967 if (rc) { 6968 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6969 "2748 Failed to prepare for unregistering " 6970 "HBA's FCF record: rc=%d\n", rc); 6971 return; 6972 } 6973 6974 /* Now, unregister FCF record and reset HBA FCF state */ 6975 rc = lpfc_sli4_unregister_fcf(phba); 6976 if (rc) 6977 return; 6978 /* Reset HBA FCF states after successful unregister FCF */ 6979 phba->fcf.fcf_flag = 0; 6980 phba->fcf.current_rec.flag = 0; 6981 6982 /* 6983 * If driver is not unloading, check if there is any other 6984 * FCF record that can be used for discovery. 6985 */ 6986 if ((phba->pport->load_flag & FC_UNLOADING) || 6987 (phba->link_state < LPFC_LINK_UP)) 6988 return; 6989 6990 /* This is considered as the initial FCF discovery scan */ 6991 spin_lock_irq(&phba->hbalock); 6992 phba->fcf.fcf_flag |= FCF_INIT_DISC; 6993 spin_unlock_irq(&phba->hbalock); 6994 6995 /* Reset FCF roundrobin bmask for new discovery */ 6996 lpfc_sli4_clear_fcf_rr_bmask(phba); 6997 6998 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 6999 7000 if (rc) { 7001 spin_lock_irq(&phba->hbalock); 7002 phba->fcf.fcf_flag &= ~FCF_INIT_DISC; 7003 spin_unlock_irq(&phba->hbalock); 7004 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7005 "2553 lpfc_unregister_unused_fcf failed " 7006 "to read FCF record HBA state x%x\n", 7007 phba->pport->port_state); 7008 } 7009 } 7010 7011 /** 7012 * lpfc_unregister_fcf - Unregister the currently registered fcf record 7013 * @phba: Pointer to hba context object. 7014 * 7015 * This function just unregisters the currently reigstered FCF. It does not 7016 * try to find another FCF for discovery. 7017 */ 7018 void 7019 lpfc_unregister_fcf(struct lpfc_hba *phba) 7020 { 7021 int rc; 7022 7023 /* Preparation for unregistering fcf */ 7024 rc = lpfc_unregister_fcf_prep(phba); 7025 if (rc) { 7026 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7027 "2749 Failed to prepare for unregistering " 7028 "HBA's FCF record: rc=%d\n", rc); 7029 return; 7030 } 7031 7032 /* Now, unregister FCF record and reset HBA FCF state */ 7033 rc = lpfc_sli4_unregister_fcf(phba); 7034 if (rc) 7035 return; 7036 /* Set proper HBA FCF states after successful unregister FCF */ 7037 spin_lock_irq(&phba->hbalock); 7038 phba->fcf.fcf_flag &= ~FCF_REGISTERED; 7039 spin_unlock_irq(&phba->hbalock); 7040 } 7041 7042 /** 7043 * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected. 7044 * @phba: Pointer to hba context object. 7045 * 7046 * This function check if there are any connected remote port for the FCF and 7047 * if all the devices are disconnected, this function unregister FCFI. 7048 * This function also tries to use another FCF for discovery. 7049 */ 7050 void 7051 lpfc_unregister_unused_fcf(struct lpfc_hba *phba) 7052 { 7053 /* 7054 * If HBA is not running in FIP mode, if HBA does not support 7055 * FCoE, if FCF discovery is ongoing, or if FCF has not been 7056 * registered, do nothing. 
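 * The FCF is also left registered while any remote node still holds a
 * login (see lpfc_fcf_inuse()).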
7057 */ 7058 spin_lock_irq(&phba->hbalock); 7059 if (!(phba->hba_flag & HBA_FCOE_MODE) || 7060 !(phba->fcf.fcf_flag & FCF_REGISTERED) || 7061 !(phba->hba_flag & HBA_FIP_SUPPORT) || 7062 (phba->fcf.fcf_flag & FCF_DISCOVERY) || 7063 (phba->pport->port_state == LPFC_FLOGI)) { 7064 spin_unlock_irq(&phba->hbalock); 7065 return; 7066 } 7067 spin_unlock_irq(&phba->hbalock); 7068 7069 if (lpfc_fcf_inuse(phba)) 7070 return; 7071 7072 lpfc_unregister_fcf_rescan(phba); 7073 } 7074 7075 /** 7076 * lpfc_read_fcf_conn_tbl - Create driver FCF connection table. 7077 * @phba: Pointer to hba context object. 7078 * @buff: Buffer containing the FCF connection table as in the config 7079 * region. 7080 * This function create driver data structure for the FCF connection 7081 * record table read from config region 23. 7082 */ 7083 static void 7084 lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba, 7085 uint8_t *buff) 7086 { 7087 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; 7088 struct lpfc_fcf_conn_hdr *conn_hdr; 7089 struct lpfc_fcf_conn_rec *conn_rec; 7090 uint32_t record_count; 7091 int i; 7092 7093 /* Free the current connect table */ 7094 list_for_each_entry_safe(conn_entry, next_conn_entry, 7095 &phba->fcf_conn_rec_list, list) { 7096 list_del_init(&conn_entry->list); 7097 kfree(conn_entry); 7098 } 7099 7100 conn_hdr = (struct lpfc_fcf_conn_hdr *) buff; 7101 record_count = conn_hdr->length * sizeof(uint32_t)/ 7102 sizeof(struct lpfc_fcf_conn_rec); 7103 7104 conn_rec = (struct lpfc_fcf_conn_rec *) 7105 (buff + sizeof(struct lpfc_fcf_conn_hdr)); 7106 7107 for (i = 0; i < record_count; i++) { 7108 if (!(conn_rec[i].flags & FCFCNCT_VALID)) 7109 continue; 7110 conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry), 7111 GFP_KERNEL); 7112 if (!conn_entry) { 7113 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7114 "2566 Failed to allocate connection" 7115 " table entry\n"); 7116 return; 7117 } 7118 7119 memcpy(&conn_entry->conn_rec, &conn_rec[i], 7120 sizeof(struct lpfc_fcf_conn_rec)); 7121 list_add_tail(&conn_entry->list, 7122 &phba->fcf_conn_rec_list); 7123 } 7124 7125 if (!list_empty(&phba->fcf_conn_rec_list)) { 7126 i = 0; 7127 list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list, 7128 list) { 7129 conn_rec = &conn_entry->conn_rec; 7130 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7131 "3345 FCF connection list rec[%02d]: " 7132 "flags:x%04x, vtag:x%04x, " 7133 "fabric_name:x%02x:%02x:%02x:%02x:" 7134 "%02x:%02x:%02x:%02x, " 7135 "switch_name:x%02x:%02x:%02x:%02x:" 7136 "%02x:%02x:%02x:%02x\n", i++, 7137 conn_rec->flags, conn_rec->vlan_tag, 7138 conn_rec->fabric_name[0], 7139 conn_rec->fabric_name[1], 7140 conn_rec->fabric_name[2], 7141 conn_rec->fabric_name[3], 7142 conn_rec->fabric_name[4], 7143 conn_rec->fabric_name[5], 7144 conn_rec->fabric_name[6], 7145 conn_rec->fabric_name[7], 7146 conn_rec->switch_name[0], 7147 conn_rec->switch_name[1], 7148 conn_rec->switch_name[2], 7149 conn_rec->switch_name[3], 7150 conn_rec->switch_name[4], 7151 conn_rec->switch_name[5], 7152 conn_rec->switch_name[6], 7153 conn_rec->switch_name[7]); 7154 } 7155 } 7156 } 7157 7158 /** 7159 * lpfc_read_fcoe_param - Read FCoe parameters from conf region.. 7160 * @phba: Pointer to hba context object. 7161 * @buff: Buffer containing the FCoE parameter data structure. 7162 * 7163 * This function update driver data structure with config 7164 * parameters read from config region 23. 
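 * Records with an unexpected FIP version or length are ignored.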
 */
static void
lpfc_read_fcoe_param(struct lpfc_hba *phba,
		     uint8_t *buff)
{
	struct lpfc_fip_param_hdr *fcoe_param_hdr;
	struct lpfc_fcoe_params *fcoe_param;

	fcoe_param_hdr = (struct lpfc_fip_param_hdr *)buff;
	fcoe_param = (struct lpfc_fcoe_params *)
		(buff + sizeof(struct lpfc_fip_param_hdr));

	if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
	    (fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
		return;

	if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
		phba->valid_vlan = 1;
		phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) & 0xFFF;
	}

	phba->fc_map[0] = fcoe_param->fc_map[0];
	phba->fc_map[1] = fcoe_param->fc_map[1];
	phba->fc_map[2] = fcoe_param->fc_map[2];
}

/**
 * lpfc_get_rec_conf23 - Get a record type in config region data.
 * @buff: Buffer containing config region 23 data.
 * @size: Size of the data buffer.
 * @rec_type: Record type to be searched.
 *
 * This function searches the config region data to find the beginning
 * of the record specified by @rec_type. If the record is found, this
 * function returns a pointer to the record; otherwise it returns NULL.
 */
static uint8_t *
lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type)
{
	uint32_t offset = 0, rec_length;

	if ((buff[0] == LPFC_REGION23_LAST_REC) ||
	    (size < sizeof(uint32_t)))
		return NULL;

	rec_length = buff[offset + 1];

	/*
	 * One TLV record has a one word header followed by the number of
	 * data words specified in the rec_length field of the record header.
	 */
	while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t))
	       <= size) {
		if (buff[offset] == rec_type)
			return &buff[offset];

		if (buff[offset] == LPFC_REGION23_LAST_REC)
			return NULL;

		offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);
		rec_length = buff[offset + 1];
	}
	return NULL;
}

/**
 * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23.
 * @phba: Pointer to lpfc_hba data structure.
 * @buff: Buffer containing config region 23 data.
 * @size: Size of the data buffer.
 *
 * This function parses the FCoE config parameters in config region 23 and
 * populates the driver data structures with the parameters.
 */
void
lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
		     uint8_t *buff,
		     uint32_t size)
{
	uint32_t offset = 0;
	uint8_t *rec_ptr;

	/*
	 * If the data size is less than 2 words, the signature and version
	 * cannot be verified.
	 */
	if (size < 2 * sizeof(uint32_t))
		return;

	/* Check the region signature first */
	if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2567 Config region 23 has bad signature\n");
		return;
	}

	offset += 4;

	/* Check the data structure version */
	if (buff[offset] != LPFC_REGION23_VERSION) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2568 Config region 23 has bad version\n");
		return;
	}
	offset += 4;

	/* Read FCoE param record */
	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
				      size - offset, FCOE_PARAM_TYPE);
	if (rec_ptr)
		lpfc_read_fcoe_param(phba, rec_ptr);

	/* Read FCF connection table */
	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
				      size - offset, FCOE_CONN_TBL_TYPE);
	if (rec_ptr)
		lpfc_read_fcf_conn_tbl(phba, rec_ptr);
}
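
/*
 * Illustrative sketch only, kept out of the build with #if 0: a small,
 * self-contained userspace harness that mirrors the TLV arithmetic used by
 * lpfc_get_rec_conf23() above, so the config region 23 record layout (one
 * word of type/length header followed by rec_length data words) is easier
 * to follow. The helper name example_find_rec(), the record type 0xA0, the
 * 0xff last-record marker, the "RG23" signature bytes, and the version
 * value 1 are assumptions made for this example; they are not taken from
 * this driver's headers or from hardware documentation.
 */
#if 0
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EXAMPLE_LAST_REC	0xff	/* assumed last-record marker */

/* Walk the TLV records the same way lpfc_get_rec_conf23() does:
 * buff[offset] is the record type, buff[offset + 1] is the record length
 * in 32-bit words, and the header itself occupies one word.
 */
static uint8_t *
example_find_rec(uint8_t *buff, uint32_t size, uint8_t rec_type)
{
	uint32_t offset = 0, rec_length;

	if (buff[0] == EXAMPLE_LAST_REC || size < sizeof(uint32_t))
		return NULL;

	rec_length = buff[offset + 1];
	while (offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t)
	       <= size) {
		if (buff[offset] == rec_type)
			return &buff[offset];
		if (buff[offset] == EXAMPLE_LAST_REC)
			return NULL;
		offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);
		rec_length = buff[offset + 1];
	}
	return NULL;
}

int main(void)
{
	uint8_t region[24];

	memset(region, 0, sizeof(region));
	/* Two header words: a 4-byte signature, then a version byte */
	memcpy(&region[0], "RG23", 4);		/* assumed signature */
	region[4] = 1;				/* assumed version */
	/* One TLV record: type 0xA0, two data words, then the end marker */
	region[8] = 0xA0;
	region[9] = 2;
	region[20] = EXAMPLE_LAST_REC;

	/* Skip the two header words, as lpfc_parse_fcoe_conf() does */
	printf("record 0xA0 %s\n",
	       example_find_rec(&region[8], sizeof(region) - 8, 0xA0) ?
	       "found" : "not found");
	return 0;
}
#endif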