/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2014 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

/* AlpaArray for assignment of scsid for scan-down and bind_method */
static uint8_t lpfcAlpaArray[] = {
	0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
	0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
	0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
	0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
	0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
	0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
	0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
	0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
};

static void lpfc_disc_timeout_handler(struct lpfc_vport *);
static void lpfc_disc_flush_list(struct lpfc_vport *vport);
static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
static int lpfc_fcf_inuse(struct lpfc_hba *);

void
lpfc_terminate_rport_io(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_hba *phba;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;

	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
			printk(KERN_ERR "Cannot find remote node"
			       " to terminate I/O Data x%x\n",
			       rport->port_id);
		return;
	}

	phba = ndlp->phba;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
		"rport terminate: sid:x%x did:x%x flg:x%x",
		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	if (ndlp->nlp_sid != NLP_NO_SID) {
		lpfc_sli_abort_iocb(ndlp->vport,
			&phba->sli.ring[phba->sli.fcp_ring],
			ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}
}
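/*
 * Note (illustrative, not part of the original file): the two rport
 * callbacks in this file are hooked into the FC transport through the
 * driver's fc_function_template (defined in lpfc_attr.c), roughly as:
 *
 *	static struct fc_function_template lpfc_transport_functions = {
 *		...
 *		.dev_loss_tmo_callbk	= lpfc_dev_loss_tmo_callbk,
 *		.terminate_rport_io	= lpfc_terminate_rport_io,
 *		...
 *	};
 */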
/*
 * This function will be called when dev_loss_tmo fires.
 */
void
lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_work_evt *evtp;
	int put_node;
	int put_rport;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
		return;

	vport = ndlp->vport;
	phba = vport->phba;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport devlosscb: sid:x%x did:x%x flg:x%x",
		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3181 dev_loss_callbk x%06x, rport %p flg x%x\n",
			 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);

	/* Don't defer this if we are in the process of deleting the vport
	 * or unloading the driver. The unload will clean up the node
	 * appropriately; we just need to clean up the ndlp rport info here.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return;
	}

	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
		return;

	if (ndlp->nlp_type & NLP_FABRIC) {

		/* If the WWPN of the rport and ndlp don't match, ignore it */
		if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn)) {
			put_device(&rport->dev);
			return;
		}
	}

	evtp = &ndlp->dev_loss_evt;

	if (!list_empty(&evtp->evt_listp))
		return;

	evtp->evt_arg1 = lpfc_nlp_get(ndlp);

	spin_lock_irq(&phba->hbalock);
	/* We need to hold the node by incrementing the reference
	 * count until this queued work is done
	 */
	if (evtp->evt_arg1) {
		evtp->evt = LPFC_EVT_DEV_LOSS;
		list_add_tail(&evtp->evt_listp, &phba->work_list);
		lpfc_worker_wake_up(phba);
	}
	spin_unlock_irq(&phba->hbalock);

	return;
}
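/*
 * Note (illustrative): the callback above does not tear the node down
 * itself. It queues an LPFC_EVT_DEV_LOSS event on phba->work_list; the
 * worker thread later dispatches it from lpfc_work_list_done() to
 * lpfc_dev_loss_tmo_handler() below, and then drops the node reference
 * that was taken with lpfc_nlp_get() when the event was queued.
 */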
/**
 * lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler
 * @ndlp: Pointer to remote node object.
 *
 * This function is called from the worker thread when the devloss timeout
 * timer expires. For an SLI4 host, this routine returns 1 when at least one
 * remote node, including this @ndlp, is still using the FCF; otherwise it
 * returns 0, meaning no remote node was still using the FCF when the devloss
 * timeout fired for this @ndlp.
 **/
static int
lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
{
	struct lpfc_rport_data *rdata;
	struct fc_rport *rport;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	uint8_t *name;
	int put_node;
	int put_rport;
	int warn_on = 0;
	int fcf_inuse = 0;

	rport = ndlp->rport;

	if (!rport)
		return fcf_inuse;

	rdata = rport->dd_data;
	name = (uint8_t *) &ndlp->nlp_portname;
	vport = ndlp->vport;
	phba = vport->phba;

	if (phba->sli_rev == LPFC_SLI_REV4)
		fcf_inuse = lpfc_fcf_inuse(phba);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport devlosstmo:did:x%x type:x%x id:x%x",
		ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3182 dev_loss_tmo_handler x%06x, rport %p flg x%x\n",
			 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);

	/* Don't defer this if we are in the process of deleting the vport
	 * or unloading the driver. The unload will clean up the node
	 * appropriately; we just need to clean up the ndlp rport info here.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		if (ndlp->nlp_sid != NLP_NO_SID) {
			/* flush the target */
			lpfc_sli_abort_iocb(vport,
					&phba->sli.ring[phba->sli.fcp_ring],
					ndlp->nlp_sid, 0, LPFC_CTX_TGT);
		}
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return fcf_inuse;
	}

	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0284 Devloss timeout Ignored on "
				 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
				 "NPort x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID);
		return fcf_inuse;
	}

	if (ndlp->nlp_type & NLP_FABRIC) {
		/* We will clean up these Nodes in linkup */
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return fcf_inuse;
	}

	if (ndlp->nlp_sid != NLP_NO_SID) {
		warn_on = 1;
		/* flush the target */
		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
				    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}

	if (warn_on) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0203 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	} else {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0204 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	}

	put_node = rdata->pnode != NULL;
	put_rport = ndlp->rport != NULL;
	rdata->pnode = NULL;
	ndlp->rport = NULL;
	if (put_node)
		lpfc_nlp_put(ndlp);
	if (put_rport)
		put_device(&rport->dev);

	if (!(vport->load_flag & FC_UNLOADING) &&
	    !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
	    (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	    (ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) &&
	    (ndlp->nlp_state != NLP_STE_PRLI_ISSUE))
		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);

	return fcf_inuse;
}
/**
 * lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler
 * @phba: Pointer to hba context object.
 * @fcf_inuse: SLI4 FCF in-use state reported from devloss timeout handler.
 * @nlp_did: remote node identifier with devloss timeout.
 *
 * This function is called from the worker thread after invoking the devloss
 * timeout handler and releasing the reference count for the ndlp with
 * which the devloss timeout was handled for an SLI4 host. For the devloss
 * timeout of the last remote node which had been using the FCF, when this
 * routine is invoked it is guaranteed that no remote node is still using
 * the FCF. On devloss timeout of the last remote node using the FCF, if
 * the FIP engine is neither in the FCF table scan process nor the
 * roundrobin failover process, the in-use FCF shall be unregistered. If
 * the FIP engine is in the FCF discovery process, the devloss timeout
 * state shall be set for either the FCF table scan process or the
 * roundrobin failover process to unregister the in-use FCF.
 **/
static void
lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
				    uint32_t nlp_did)
{
	/* If devloss timeout happened to a remote node when FCF had no
	 * longer been in-use, do nothing.
	 */
	if (!fcf_inuse)
		return;

	if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) {
		spin_lock_irq(&phba->hbalock);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			if (phba->hba_flag & HBA_DEVLOSS_TMO) {
				spin_unlock_irq(&phba->hbalock);
				return;
			}
			phba->hba_flag |= HBA_DEVLOSS_TMO;
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2847 Last remote node (x%x) using "
					"FCF devloss tmo\n", nlp_did);
		}
		if (phba->fcf.fcf_flag & FCF_REDISC_PROG) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2868 Devloss tmo to FCF rediscovery "
					"in progress\n");
			return;
		}
		if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2869 Devloss tmo to idle FIP engine, "
					"unreg in-use FCF and rescan.\n");
			/* Unregister in-use FCF and rescan */
			lpfc_unregister_fcf_rescan(phba);
			return;
		}
		spin_unlock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_TS_INPROG)
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2870 FCF table scan in progress\n");
		if (phba->hba_flag & FCF_RR_INPROG)
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2871 FLOGI roundrobin FCF failover "
					"in progress\n");
	}
	lpfc_unregister_unused_fcf(phba);
}
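/*
 * Note (illustrative summary, not in the original source): once the last
 * node using the FCF times out under FIP, the logic above reduces to:
 * FCF rediscovery already in progress - log and wait; FCF table scan or
 * roundrobin failover in progress - record HBA_DEVLOSS_TMO so that path
 * unregisters the in-use FCF when it finishes; FIP engine idle -
 * unregister the in-use FCF and rescan immediately.
 */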
/**
 * lpfc_alloc_fast_evt - Allocates data structure for posting event
 * @phba: Pointer to hba context object.
 *
 * This function is called from the functions which need to post
 * events from interrupt context. This function allocates the data
 * structure required for posting an event. It also keeps track of
 * the number of events pending and prevents an event storm when
 * there are too many events.
 **/
struct lpfc_fast_path_event *
lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
	struct lpfc_fast_path_event *ret;

	/* If there are a lot of fast events, do not exhaust memory on them */
	if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
		return NULL;

	ret = kzalloc(sizeof(struct lpfc_fast_path_event),
			GFP_ATOMIC);
	if (ret) {
		atomic_inc(&phba->fast_event_count);
		INIT_LIST_HEAD(&ret->work_evt.evt_listp);
		ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
	}
	return ret;
}

/**
 * lpfc_free_fast_evt - Frees event data structure
 * @phba: Pointer to hba context object.
 * @evt: Event object which needs to be freed.
 *
 * This function frees the data structure required for posting
 * events.
 **/
void
lpfc_free_fast_evt(struct lpfc_hba *phba,
		struct lpfc_fast_path_event *evt) {

	atomic_dec(&phba->fast_event_count);
	kfree(evt);
}
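/*
 * Note (illustrative sketch): interrupt-context producers pair these two
 * helpers with the worker queue; lpfc_scsi.c, for example, does roughly
 *
 *	fast_path_evt = lpfc_alloc_fast_evt(phba);
 *	if (!fast_path_evt)
 *		return;
 *	fast_path_evt->un.scsi_evt.event_type = FC_REG_SCSI_EVENT;
 *	fast_path_evt->vport = vport;
 *	spin_lock_irqsave(&phba->hbalock, flags);
 *	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
 *	spin_unlock_irqrestore(&phba->hbalock, flags);
 *	lpfc_worker_wake_up(phba);
 *
 * lpfc_alloc_fast_evt() already set work_evt.evt to
 * LPFC_EVT_FASTPATH_MGMT_EVT, so the worker thread routes the event to
 * lpfc_send_fastpath_evt() below.
 */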
/**
 * lpfc_send_fastpath_evt - Posts events generated from fast path
 * @phba: Pointer to hba context object.
 * @evtp: Event data structure.
 *
 * This function is called from the worker thread, when the interrupt
 * context needs to post an event. This function posts the event
 * to the fc transport netlink interface.
 **/
static void
lpfc_send_fastpath_evt(struct lpfc_hba *phba,
		struct lpfc_work_evt *evtp)
{
	unsigned long evt_category, evt_sub_category;
	struct lpfc_fast_path_event *fast_evt_data;
	char *evt_data;
	uint32_t evt_data_size;
	struct Scsi_Host *shost;

	fast_evt_data = container_of(evtp, struct lpfc_fast_path_event,
		work_evt);

	evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type;
	evt_sub_category = (unsigned long) fast_evt_data->un.
			fabric_evt.subcategory;
	shost = lpfc_shost_from_vport(fast_evt_data->vport);
	if (evt_category == FC_REG_FABRIC_EVENT) {
		if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) {
			evt_data = (char *) &fast_evt_data->un.read_check_error;
			evt_data_size = sizeof(fast_evt_data->un.
				read_check_error);
		} else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
			(evt_sub_category == LPFC_EVENT_PORT_BUSY)) {
			evt_data = (char *) &fast_evt_data->un.fabric_evt;
			evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
		} else {
			lpfc_free_fast_evt(phba, fast_evt_data);
			return;
		}
	} else if (evt_category == FC_REG_SCSI_EVENT) {
		switch (evt_sub_category) {
		case LPFC_EVENT_QFULL:
		case LPFC_EVENT_DEVBSY:
			evt_data = (char *) &fast_evt_data->un.scsi_evt;
			evt_data_size = sizeof(fast_evt_data->un.scsi_evt);
			break;
		case LPFC_EVENT_CHECK_COND:
			evt_data = (char *) &fast_evt_data->un.check_cond_evt;
			evt_data_size = sizeof(fast_evt_data->un.
				check_cond_evt);
			break;
		case LPFC_EVENT_VARQUEDEPTH:
			evt_data = (char *) &fast_evt_data->un.queue_depth_evt;
			evt_data_size = sizeof(fast_evt_data->un.
				queue_depth_evt);
			break;
		default:
			lpfc_free_fast_evt(phba, fast_evt_data);
			return;
		}
	} else {
		lpfc_free_fast_evt(phba, fast_evt_data);
		return;
	}

	fc_host_post_vendor_event(shost,
		fc_get_event_number(),
		evt_data_size,
		evt_data,
		LPFC_NL_VENDOR_ID);

	lpfc_free_fast_evt(phba, fast_evt_data);
	return;
}
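/*
 * Note (illustrative): fc_host_post_vendor_event() hands the payload to
 * the FC transport netlink interface, so userspace tools listening for
 * LPFC_NL_VENDOR_ID vendor-unique events receive the structure exactly
 * as it was captured in interrupt context.
 */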
static void
lpfc_work_list_done(struct lpfc_hba *phba)
{
	struct lpfc_work_evt *evtp = NULL;
	struct lpfc_nodelist *ndlp;
	int free_evt;
	int fcf_inuse;
	uint32_t nlp_did;

	spin_lock_irq(&phba->hbalock);
	while (!list_empty(&phba->work_list)) {
		list_remove_head((&phba->work_list), evtp, typeof(*evtp),
				 evt_listp);
		spin_unlock_irq(&phba->hbalock);
		free_evt = 1;
		switch (evtp->evt) {
		case LPFC_EVT_ELS_RETRY:
			ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
			lpfc_els_retry_delay_handler(ndlp);
			free_evt = 0; /* evt is part of ndlp */
			/* decrement the node reference count held
			 * for this queued work
			 */
			lpfc_nlp_put(ndlp);
			break;
		case LPFC_EVT_DEV_LOSS:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp);
			free_evt = 0;
			/* decrement the node reference count held for
			 * this queued work
			 */
			nlp_did = ndlp->nlp_DID;
			lpfc_nlp_put(ndlp);
			if (phba->sli_rev == LPFC_SLI_REV4)
				lpfc_sli4_post_dev_loss_tmo_handler(phba,
								    fcf_inuse,
								    nlp_did);
			break;
		case LPFC_EVT_ONLINE:
			if (phba->link_state < LPFC_LINK_DOWN)
				*(int *) (evtp->evt_arg1) = lpfc_online(phba);
			else
				*(int *) (evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE_PREP:
			if (phba->link_state >= LPFC_LINK_DOWN)
				lpfc_offline_prep(phba, LPFC_MBX_WAIT);
			*(int *)(evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE:
			lpfc_offline(phba);
			lpfc_sli_brdrestart(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_WARM_START:
			lpfc_offline(phba);
			lpfc_reset_barrier(phba);
			lpfc_sli_brdreset(phba);
			lpfc_hba_down_post(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_KILL:
			lpfc_offline(phba);
			*(int *)(evtp->evt_arg1)
				= (phba->pport->stopped)
				? 0 : lpfc_sli_brdkill(phba);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_FASTPATH_MGMT_EVT:
			lpfc_send_fastpath_evt(phba, evtp);
			free_evt = 0;
			break;
		case LPFC_EVT_RESET_HBA:
			if (!(phba->pport->load_flag & FC_UNLOADING))
				lpfc_reset_hba(phba);
			break;
		}
		if (free_evt)
			kfree(evtp);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

}
static void
lpfc_work_done(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	uint32_t ha_copy, status, control, work_port_events;
	struct lpfc_vport **vports;
	struct lpfc_vport *vport;
	int i;

	spin_lock_irq(&phba->hbalock);
	ha_copy = phba->work_ha;
	phba->work_ha = 0;
	spin_unlock_irq(&phba->hbalock);

	/* First, try to post the next mailbox command to SLI4 device */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
		lpfc_sli4_post_async_mbox(phba);

	if (ha_copy & HA_ERATT)
		/* Handle the error attention event */
		lpfc_handle_eratt(phba);

	if (ha_copy & HA_MBATT)
		lpfc_sli_handle_mb_event(phba);

	if (ha_copy & HA_LATT)
		lpfc_handle_latt(phba);

	/* Process SLI4 events */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
		if (phba->hba_flag & HBA_RRQ_ACTIVE)
			lpfc_handle_rrq_active(phba);
		if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
			lpfc_sli4_fcp_xri_abort_event_proc(phba);
		if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
			lpfc_sli4_els_xri_abort_event_proc(phba);
		if (phba->hba_flag & ASYNC_EVENT)
			lpfc_sli4_async_event_proc(phba);
		if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
			spin_lock_irq(&phba->hbalock);
			phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
		}
		if (phba->fcf.fcf_flag & FCF_REDISC_EVT)
			lpfc_sli4_fcf_redisc_event_proc(phba);
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports; i++) {
			/*
			 * We could have no vports in array if unloading, so if
			 * this happens then just use the pport
			 */
			if (vports[i] == NULL && i == 0)
				vport = phba->pport;
			else
				vport = vports[i];
			if (vport == NULL)
				break;
			spin_lock_irq(&vport->work_port_lock);
			work_port_events = vport->work_port_events;
			vport->work_port_events &= ~work_port_events;
			spin_unlock_irq(&vport->work_port_lock);
			if (work_port_events & WORKER_DISC_TMO)
				lpfc_disc_timeout_handler(vport);
			if (work_port_events & WORKER_ELS_TMO)
				lpfc_els_timeout_handler(vport);
			if (work_port_events & WORKER_HB_TMO)
				lpfc_hb_timeout_handler(phba);
			if (work_port_events & WORKER_MBOX_TMO)
				lpfc_mbox_timeout_handler(phba);
			if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
				lpfc_unblock_fabric_iocbs(phba);
			if (work_port_events & WORKER_FDMI_TMO)
				lpfc_fdmi_timeout_handler(vport);
			if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
				lpfc_ramp_down_queue_handler(phba);
			if (work_port_events & WORKER_DELAYED_DISC_TMO)
				lpfc_delayed_disc_timeout_handler(vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	pring = &phba->sli.ring[LPFC_ELS_RING];
	status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
	status >>= (4*LPFC_ELS_RING);
	if ((status & HA_RXMASK) ||
	    (pring->flag & LPFC_DEFERRED_RING_EVENT) ||
	    (phba->hba_flag & HBA_SP_QUEUE_EVT)) {
		if (pring->flag & LPFC_STOP_IOCB_EVENT) {
			pring->flag |= LPFC_DEFERRED_RING_EVENT;
			/* Set the lpfc data pending flag */
			set_bit(LPFC_DATA_READY, &phba->data_flags);
		} else {
			if (phba->link_state >= LPFC_LINK_UP) {
				pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
				lpfc_sli_handle_slow_ring_event(phba, pring,
								(status &
								HA_RXMASK));
			}
		}
		if ((phba->sli_rev == LPFC_SLI_REV4) &&
		    (!list_empty(&pring->txq)))
			lpfc_drain_txq(phba);
		/*
		 * Turn on Ring interrupts
		 */
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			spin_lock_irq(&phba->hbalock);
			control = readl(phba->HCregaddr);
			if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
				lpfc_debugfs_slow_ring_trc(phba,
					"WRK Enable ring: cntl:x%x hacopy:x%x",
					control, ha_copy, 0);

				control |= (HC_R0INT_ENA << LPFC_ELS_RING);
				writel(control, phba->HCregaddr);
				readl(phba->HCregaddr); /* flush */
			} else {
				lpfc_debugfs_slow_ring_trc(phba,
					"WRK Ring ok: cntl:x%x hacopy:x%x",
					control, ha_copy, 0);
			}
			spin_unlock_irq(&phba->hbalock);
		}
	}
	lpfc_work_list_done(phba);
}

int
lpfc_do_work(void *p)
{
	struct lpfc_hba *phba = p;
	int rc;

	set_user_nice(current, MIN_NICE);
	current->flags |= PF_NOFREEZE;
	phba->data_flags = 0;

	while (!kthread_should_stop()) {
		/* wait and check worker queue activities */
		rc = wait_event_interruptible(phba->work_waitq,
					(test_and_clear_bit(LPFC_DATA_READY,
							    &phba->data_flags)
					 || kthread_should_stop()));
		/* Signal wakeup shall terminate the worker thread */
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
					"0433 Wakeup on signal: rc=x%x\n", rc);
			break;
		}

		/* Attend pending lpfc data processing */
		lpfc_work_done(phba);
	}
	phba->worker_thread = NULL;
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"0432 Worker thread stopped.\n");
	return 0;
}
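/*
 * Note (illustrative): producers wake this thread through
 * lpfc_worker_wake_up(), which is essentially
 *
 *	set_bit(LPFC_DATA_READY, &phba->data_flags);
 *	wake_up(&phba->work_waitq);
 *
 * matching the test_and_clear_bit() in the wait condition of
 * lpfc_do_work() above.
 */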
/*
 * This is only called to handle FC worker events. Since this is a rare
 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
 * embedding it in the IOCB.
 */
int
lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
		      uint32_t evt)
{
	struct lpfc_work_evt *evtp;
	unsigned long flags;

	/*
	 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
	 * be queued to worker thread for processing
	 */
	evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
	if (!evtp)
		return 0;

	evtp->evt_arg1 = arg1;
	evtp->evt_arg2 = arg2;
	evtp->evt = evt;

	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&evtp->evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);

	lpfc_worker_wake_up(phba);

	return 1;
}
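/*
 * Note (illustrative sketch): callers that must wait for the worker thread
 * pass a completion as arg2; the online/offline paths in lpfc_attr.c do
 * roughly
 *
 *	struct completion online_compl;
 *	int status = 0;
 *
 *	init_completion(&online_compl);
 *	if (!lpfc_workq_post_event(phba, &status, &online_compl,
 *				   LPFC_EVT_ONLINE))
 *		return -ENOMEM;
 *	wait_for_completion(&online_compl);
 *
 * with lpfc_work_list_done() writing the result through evt_arg1 before
 * completing evt_arg2.
 */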
void
lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	int rc;

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
		    ((vport->port_type == LPFC_NPIV_PORT) &&
		     (ndlp->nlp_DID == NameServer_DID)))
			lpfc_unreg_rpi(vport, ndlp);

		/* Leave Fabric nodes alone on link down */
		if ((phba->sli_rev < LPFC_SLI_REV4) &&
		    (!remove && ndlp->nlp_type & NLP_FABRIC))
			continue;
		rc = lpfc_disc_state_machine(vport, ndlp, NULL,
					     remove
					     ? NLP_EVT_DEVICE_RM
					     : NLP_EVT_DEVICE_RECOVERY);
	}
	if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_unreg_all_rpis(vport);
		lpfc_mbx_unreg_vpi(vport);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	}
}

void
lpfc_port_link_failure(struct lpfc_vport *vport)
{
	lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);

	/* Cleanup any outstanding received buffers */
	lpfc_cleanup_rcv_buffers(vport);

	/* Cleanup any outstanding RSCN activity */
	lpfc_els_flush_rscn(vport);

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_cmd(vport);

	lpfc_cleanup_rpis(vport, 0);

	/* Turn off discovery timer if it's running */
	lpfc_can_disctmo(vport);
}

void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Link Down: state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	lpfc_port_link_failure(vport);

	/* Stop delayed Nport discovery */
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_DISC_DELAYED;
	spin_unlock_irq(shost->host_lock);
	del_timer_sync(&vport->delayed_disc_tmo);
}

int
lpfc_linkdown(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *mb;
	int i;

	if (phba->link_state == LPFC_LINK_DOWN)
		return 0;

	/* Block all SCSI stack I/Os */
	lpfc_scsi_dev_block(phba);

	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
	spin_unlock_irq(&phba->hbalock);
	if (phba->link_state > LPFC_LINK_DOWN) {
		phba->link_state = LPFC_LINK_DOWN;
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag &= ~FC_LBIT;
		spin_unlock_irq(shost->host_lock);
	}
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			/* Issue a LINK DOWN event to all nodes */
			lpfc_linkdown_port(vports[i]);
		}
	lpfc_destroy_vport_work_array(phba, vports);
	/* Clean up any firmware default rpi's */
	mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mb) {
		lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
		mb->vport = vport;
		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
		    == MBX_NOT_FINISHED) {
			mempool_free(mb, phba->mbox_mem_pool);
		}
	}

	/* Setup myDID for link up if we are in pt2pt mode */
	if (phba->pport->fc_flag & FC_PT2PT) {
		phba->pport->fc_myDID = 0;
		mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mb) {
			lpfc_config_link(phba, mb);
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			mb->vport = vport;
			if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
			    == MBX_NOT_FINISHED) {
				mempool_free(mb, phba->mbox_mem_pool);
			}
		}
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
		spin_unlock_irq(shost->host_lock);
	}

	return 0;
}
static void
lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		if (ndlp->nlp_type & NLP_FABRIC) {
			/* On Linkup it's safe to clean up the ndlp
			 * from Fabric connections.
			 */
			if (ndlp->nlp_DID != Fabric_DID)
				lpfc_unreg_rpi(vport, ndlp);
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
			/* Fail outstanding IO now since device is
			 * marked for PLOGI.
			 */
			lpfc_unreg_rpi(vport, ndlp);
		}
	}
}

static void
lpfc_linkup_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;

	if ((vport->load_flag & FC_UNLOADING) != 0)
		return;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Link Up: top:x%x speed:x%x flg:x%x",
		phba->fc_topology, phba->fc_linkspeed, phba->link_flag);

	/* If NPIV is not enabled, only bring the physical port up */
	if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    (vport != phba->pport))
		return;

	fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
			    FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
	vport->fc_flag |= FC_NDISC_ACTIVE;
	vport->fc_ns_retry = 0;
	spin_unlock_irq(shost->host_lock);

	if (vport->fc_flag & FC_LBIT)
		lpfc_linkup_cleanup_nodes(vport);

}

static int
lpfc_linkup(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	lpfc_cleanup_wt_rrqs(phba);
	phba->link_state = LPFC_LINK_UP;

	/* Unblock fabric iocbs if they are blocked */
	clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
	del_timer_sync(&phba->fabric_block_timer);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_linkup_port(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);

	return 0;
}
/*
 * This routine handles processing a CLEAR_LA mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
static void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_sli *psli = &phba->sli;
	MAILBOX_t *mb = &pmb->u.mb;
	uint32_t control;

	/* Since we don't do discovery right now, turn these off here */
	psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;

	/* Check for error */
	if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
		/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0320 CLEAR_LA mbxStatus error x%x hba "
				 "state x%x\n",
				 mb->mbxStatus, vport->port_state);
		phba->link_state = LPFC_HBA_ERROR;
		goto out;
	}

	if (vport->port_type == LPFC_PHYSICAL_PORT)
		phba->link_state = LPFC_HBA_READY;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	/* Device Discovery completes */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0225 Device Discovery completes\n");
	mempool_free(pmb, phba->mbox_mem_pool);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_ABORT_DISCOVERY;
	spin_unlock_irq(shost->host_lock);

	lpfc_can_disctmo(vport);

	/* turn on Link Attention interrupts */

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;
}
static void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;

	if (pmb->u.mb.mbxStatus)
		goto out;

	mempool_free(pmb, phba->mbox_mem_pool);

	/* don't perform discovery for SLI4 loopback diagnostic test */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    !(phba->hba_flag & HBA_FCOE_MODE) &&
	    (phba->link_flag & LS_LOOPBACK_MODE))
		return;

	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
	    vport->fc_flag & FC_PUBLIC_LOOP &&
	    !(vport->fc_flag & FC_LBIT)) {
		/* Need to wait for FAN - use discovery timer
		 * for timeout. port_state is identically
		 * LPFC_LOCAL_CFG_LINK while waiting for FAN
		 */
		lpfc_set_disctmo(vport);
		return;
	}

	/* Start discovery by sending a FLOGI. port_state is identically
	 * LPFC_FLOGI while waiting for FLOGI cmpl
	 */
	if (vport->port_state != LPFC_FLOGI || vport->fc_flag & FC_PT2PT_PLOGI)
		lpfc_initial_flogi(vport);
	return;

out:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			 "0306 CONFIG_LINK mbxStatus error x%x "
			 "HBA state x%x\n",
			 pmb->u.mb.mbxStatus, vport->port_state);
	mempool_free(pmb, phba->mbox_mem_pool);

	lpfc_linkdown(phba);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
			 "0200 CONFIG_LINK bad hba state x%x\n",
			 vport->port_state);

	lpfc_issue_clear_la(phba, vport);
	return;
}

/**
 * lpfc_sli4_clear_fcf_rr_bmask
 * @phba: pointer to the struct lpfc_hba for this port.
 *
 * This function resets the round robin bit mask and clears the
 * fcf priority list. The list deletions are done while holding the
 * hbalock. The ON_LIST flag and the FLOGI_FAILED flags are cleared
 * from the lpfc_fcf_pri record.
 **/
void
lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *phba)
{
	struct lpfc_fcf_pri *fcf_pri;
	struct lpfc_fcf_pri *next_fcf_pri;
	memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask));
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(fcf_pri, next_fcf_pri,
				 &phba->fcf.fcf_pri_list, list) {
		list_del_init(&fcf_pri->list);
		fcf_pri->fcf_rec.flag = 0;
	}
	spin_unlock_irq(&phba->hbalock);
}
static void
lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "2017 REG_FCFI mbxStatus error x%x "
				 "HBA state x%x\n",
				 mboxq->u.mb.mbxStatus, vport->port_state);
		goto fail_out;
	}

	/* Start FCoE discovery by sending a FLOGI. */
	phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
	/* Set the FCFI registered flag */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= FCF_REGISTERED;
	spin_unlock_irq(&phba->hbalock);

	/* If there is a pending FCoE event, restart FCF table scan. */
	if ((!(phba->hba_flag & FCF_RR_INPROG)) &&
	    lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
		goto fail_out;

	/* Mark successful completion of FCF table scan */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
	phba->hba_flag &= ~FCF_TS_INPROG;
	if (vport->port_state != LPFC_FLOGI) {
		phba->hba_flag |= FCF_RR_INPROG;
		spin_unlock_irq(&phba->hbalock);
		lpfc_issue_init_vfi(vport);
		goto out;
	}
	spin_unlock_irq(&phba->hbalock);
	goto out;

fail_out:
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~FCF_RR_INPROG;
	spin_unlock_irq(&phba->hbalock);
out:
	mempool_free(mboxq, phba->mbox_mem_pool);
}
/**
 * lpfc_fab_name_match - Check if the fcf fabric name match.
 * @fab_name: pointer to fabric name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's fabric name with the provided
 * fabric name. If the fabric names are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
{
	if (fab_name[0] != bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record))
		return 0;
	if (fab_name[1] != bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record))
		return 0;
	if (fab_name[2] != bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record))
		return 0;
	if (fab_name[3] != bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record))
		return 0;
	if (fab_name[4] != bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record))
		return 0;
	if (fab_name[5] != bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record))
		return 0;
	if (fab_name[6] != bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record))
		return 0;
	if (fab_name[7] != bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record))
		return 0;
	return 1;
}

/**
 * lpfc_sw_name_match - Check if the fcf switch name match.
 * @sw_name: pointer to switch name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's switch name with the provided
 * switch name. If the switch names are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
{
	if (sw_name[0] != bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record))
		return 0;
	if (sw_name[1] != bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record))
		return 0;
	if (sw_name[2] != bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record))
		return 0;
	if (sw_name[3] != bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record))
		return 0;
	if (sw_name[4] != bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record))
		return 0;
	if (sw_name[5] != bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record))
		return 0;
	if (sw_name[6] != bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record))
		return 0;
	if (sw_name[7] != bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record))
		return 0;
	return 1;
}

/**
 * lpfc_mac_addr_match - Check if the fcf mac address match.
 * @mac_addr: pointer to mac address.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's mac address with the HBA's
 * FCF mac address. If the mac addresses are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_mac_addr_match(uint8_t *mac_addr, struct fcf_record *new_fcf_record)
{
	if (mac_addr[0] != bf_get(lpfc_fcf_record_mac_0, new_fcf_record))
		return 0;
	if (mac_addr[1] != bf_get(lpfc_fcf_record_mac_1, new_fcf_record))
		return 0;
	if (mac_addr[2] != bf_get(lpfc_fcf_record_mac_2, new_fcf_record))
		return 0;
	if (mac_addr[3] != bf_get(lpfc_fcf_record_mac_3, new_fcf_record))
		return 0;
	if (mac_addr[4] != bf_get(lpfc_fcf_record_mac_4, new_fcf_record))
		return 0;
	if (mac_addr[5] != bf_get(lpfc_fcf_record_mac_5, new_fcf_record))
		return 0;
	return 1;
}

static bool
lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
{
	return (curr_vlan_id == new_vlan_id);
}
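/*
 * Note (illustrative): the matchers above compare one byte at a time
 * because the name and MAC fields of struct fcf_record are bit-field
 * encoded and must be extracted with bf_get(); a plain memcmp() against
 * the raw record is therefore not possible. A typical caller matches a
 * newly read record against the driver's current pick, e.g.
 *
 *	if (lpfc_fab_name_match(phba->fcf.current_rec.fabric_name,
 *				new_fcf_record))
 *		...
 */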
/**
 * __lpfc_update_fcf_record_pri - update the lpfc_fcf_pri record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: Index for the lpfc_fcf_record.
 * @new_fcf_record: pointer to hba fcf record.
 *
 * This routine updates the driver FCF priority record from the new HBA FCF
 * record. This routine is called with the host lock held.
 **/
static void
__lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
			     struct fcf_record *new_fcf_record)
{
	struct lpfc_fcf_pri *fcf_pri;

	fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	fcf_pri->fcf_rec.fcf_index = fcf_index;
	/* FCF record priority */
	fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;

}

/**
 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
 * @fcf_rec: pointer to driver fcf record.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine copies the FCF information from the FCF
 * record to lpfc_hba data structure.
 **/
static void
lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
		     struct fcf_record *new_fcf_record)
{
	/* Fabric name */
	fcf_rec->fabric_name[0] =
		bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
	fcf_rec->fabric_name[1] =
		bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
	fcf_rec->fabric_name[2] =
		bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
	fcf_rec->fabric_name[3] =
		bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
	fcf_rec->fabric_name[4] =
		bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
	fcf_rec->fabric_name[5] =
		bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
	fcf_rec->fabric_name[6] =
		bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
	fcf_rec->fabric_name[7] =
		bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
	/* Mac address */
	fcf_rec->mac_addr[0] = bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
	fcf_rec->mac_addr[1] = bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
	fcf_rec->mac_addr[2] = bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
	fcf_rec->mac_addr[3] = bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
	fcf_rec->mac_addr[4] = bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
	fcf_rec->mac_addr[5] = bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
	/* FCF record index */
	fcf_rec->fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
	/* FCF record priority */
	fcf_rec->priority = new_fcf_record->fip_priority;
	/* Switch name */
	fcf_rec->switch_name[0] =
		bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record);
	fcf_rec->switch_name[1] =
		bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record);
	fcf_rec->switch_name[2] =
		bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record);
	fcf_rec->switch_name[3] =
		bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record);
	fcf_rec->switch_name[4] =
		bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record);
	fcf_rec->switch_name[5] =
		bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record);
	fcf_rec->switch_name[6] =
		bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record);
	fcf_rec->switch_name[7] =
		bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record);
}
/**
 * __lpfc_update_fcf_record - Update driver fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_rec: pointer to driver fcf record.
 * @new_fcf_record: pointer to hba fcf record.
 * @addr_mode: address mode to be set to the driver fcf record.
 * @vlan_id: vlan tag to be set to the driver fcf record.
 * @flag: flag bits to be set to the driver fcf record.
 *
 * This routine updates the driver FCF record from the new HBA FCF record
 * together with the address mode, vlan_id, and other information. This
 * routine is called with the host lock held.
 **/
static void
__lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
		       struct fcf_record *new_fcf_record, uint32_t addr_mode,
		       uint16_t vlan_id, uint32_t flag)
{
	/* Copy the fields from the HBA's FCF record */
	lpfc_copy_fcf_record(fcf_rec, new_fcf_record);
	/* Update other fields of driver FCF record */
	fcf_rec->addr_mode = addr_mode;
	fcf_rec->vlan_id = vlan_id;
	fcf_rec->flag |= (flag | RECORD_VALID);
	__lpfc_update_fcf_record_pri(phba,
		bf_get(lpfc_fcf_record_fcf_index, new_fcf_record),
		new_fcf_record);
}

/**
 * lpfc_register_fcf - Register the FCF with hba.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a register fcfi mailbox command to register
 * the fcf with HBA.
 **/
static void
lpfc_register_fcf(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *fcf_mbxq;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/* If the FCF is not available do nothing. */
	if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* The FCF is already registered, start discovery */
	if (phba->fcf.fcf_flag & FCF_REGISTERED) {
		phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
		phba->hba_flag &= ~FCF_TS_INPROG;
		if (phba->pport->port_state != LPFC_FLOGI &&
		    phba->pport->fc_flag & FC_FABRIC) {
			phba->hba_flag |= FCF_RR_INPROG;
			spin_unlock_irq(&phba->hbalock);
			lpfc_initial_flogi(phba->pport);
			return;
		}
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	spin_unlock_irq(&phba->hbalock);

	fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!fcf_mbxq) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_reg_fcfi(phba, fcf_mbxq);
	fcf_mbxq->vport = phba->pport;
	fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
	rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		spin_unlock_irq(&phba->hbalock);
		mempool_free(fcf_mbxq, phba->mbox_mem_pool);
	}

	return;
}
/**
 * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery.
 * @phba: pointer to lpfc hba data structure.
 * @new_fcf_record: pointer to fcf record.
 * @boot_flag: Indicates if this record used by boot bios.
 * @addr_mode: The address mode to be used by this FCF
 * @vlan_id: The vlan id to be used as vlan tagging by this FCF.
 *
 * This routine compares the fcf record with the connect list obtained from
 * the config region to decide if this FCF can be used for SAN discovery. It
 * returns 1 if this record can be used for SAN discovery, else it returns
 * zero. If this FCF record can be used for SAN discovery, the boot_flag
 * will indicate if this FCF is used by the boot bios and addr_mode will
 * indicate the addressing mode to be used for this FCF when the function
 * returns. If the FCF record needs to be used with a particular vlan id,
 * the vlan is set in vlan_id on return of the function. If no VLAN tagging
 * needs to be used with the FCF, vlan_id will be set to LPFC_FCOE_NULL_VID.
 **/
static int
lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
			struct fcf_record *new_fcf_record,
			uint32_t *boot_flag, uint32_t *addr_mode,
			uint16_t *vlan_id)
{
	struct lpfc_fcf_conn_entry *conn_entry;
	int i, j, fcf_vlan_id = 0;

	/* Find the lowest VLAN id in the FCF record */
	for (i = 0; i < 512; i++) {
		if (new_fcf_record->vlan_bitmap[i]) {
			fcf_vlan_id = i * 8;
			j = 0;
			while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) {
				j++;
				fcf_vlan_id++;
			}
			break;
		}
	}

	/* FCF not valid/available or solicitation in progress */
	if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
	    !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record) ||
	    bf_get(lpfc_fcf_record_fcf_sol, new_fcf_record))
		return 0;

	if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
		*boot_flag = 0;
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				new_fcf_record);
		if (phba->valid_vlan)
			*vlan_id = phba->vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;
		return 1;
	}

	/*
	 * If there are no FCF connection table entries, the driver connects
	 * to all FCFs.
	 */
	if (list_empty(&phba->fcf_conn_rec_list)) {
		*boot_flag = 0;
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
			new_fcf_record);

		/*
		 * When there are no FCF connect entries, use driver's default
		 * addressing mode - FPMA.
		 */
		if (*addr_mode & LPFC_FCF_FPMA)
			*addr_mode = LPFC_FCF_FPMA;

		/* If the FCF record reports a vlan id, use that vlan id */
		if (fcf_vlan_id)
			*vlan_id = fcf_vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;
		return 1;
	}

	list_for_each_entry(conn_entry,
			    &phba->fcf_conn_rec_list, list) {
		if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
			continue;

		if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
		    !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
					 new_fcf_record))
			continue;
		if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) &&
		    !lpfc_sw_name_match(conn_entry->conn_rec.switch_name,
					new_fcf_record))
			continue;
		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
			/*
			 * If the vlan bit map does not have the bit set for
			 * the vlan id to be used, then it is not a match.
			 */
			if (!(new_fcf_record->vlan_bitmap
				[conn_entry->conn_rec.vlan_tag / 8] &
				(1 << (conn_entry->conn_rec.vlan_tag % 8))))
				continue;
		}

		/*
		 * If connection record does not support any addressing mode,
		 * skip the FCF record.
		 */
		if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record)
			& (LPFC_FCF_FPMA | LPFC_FCF_SPMA)))
			continue;

		/*
		 * Check if the connection record specifies a required
		 * addressing mode.
		 */
		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) {

			/*
			 * If SPMA is required but the FCF does not support
			 * it, continue.
			 */
			if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
				!(bf_get(lpfc_fcf_record_mac_addr_prov,
					new_fcf_record) & LPFC_FCF_SPMA))
				continue;

			/*
			 * If FPMA is required but the FCF does not support
			 * it, continue.
			 */
			if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
				!(bf_get(lpfc_fcf_record_mac_addr_prov,
					new_fcf_record) & LPFC_FCF_FPMA))
				continue;
		}

		/*
		 * This fcf record matches filtering criteria.
		 */
		if (conn_entry->conn_rec.flags & FCFCNCT_BOOT)
			*boot_flag = 1;
		else
			*boot_flag = 0;

		/*
		 * If user did not specify any addressing mode, or if the
		 * preferred addressing mode specified by user is not supported
		 * by FCF, allow fabric to pick the addressing mode.
		 */
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				new_fcf_record);
		/*
		 * If the user specified a required address mode, assign that
		 * address mode
		 */
		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			(!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)))
			*addr_mode = (conn_entry->conn_rec.flags &
				FCFCNCT_AM_SPMA) ?
				LPFC_FCF_SPMA : LPFC_FCF_FPMA;
		/*
		 * If the user specified a preferred address mode, use the
		 * addr mode only if the FCF supports the addr_mode.
		 */
		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
			(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			(*addr_mode & LPFC_FCF_SPMA))
				*addr_mode = LPFC_FCF_SPMA;
		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
			!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			(*addr_mode & LPFC_FCF_FPMA))
				*addr_mode = LPFC_FCF_FPMA;

		/* If matching connect list has a vlan id, use it */
		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
			*vlan_id = conn_entry->conn_rec.vlan_tag;
		/*
		 * If no vlan id is specified in connect list, use the vlan id
		 * in the FCF record
		 */
		else if (fcf_vlan_id)
			*vlan_id = fcf_vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;

		return 1;
	}

	return 0;
}
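/*
 * Note (illustrative): the vlan_bitmap scan at the top of
 * lpfc_match_fcf_conn_list() treats bit j of byte i as VLAN id i*8 + j;
 * for example, a record with vlan_bitmap[0] == 0x08 (bit 3 set) yields a
 * lowest fcf_vlan_id of 3.
 */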
/**
 * lpfc_check_pending_fcoe_event - Check if there is pending fcoe event.
 * @phba: pointer to lpfc hba data structure.
 * @unreg_fcf: Unregister FCF if FCF table needs to be re-scanned.
 *
 * This function checks whether any fcoe event is pending while the driver
 * scans FCF entries. If there is any pending event, it will restart the
 * FCF scanning and return 1, else it returns 0.
 */
int
lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
{
	/*
	 * If the Link is up and no FCoE events while in the
	 * FCF discovery, no need to restart FCF discovery.
	 */
	if ((phba->link_state >= LPFC_LINK_UP) &&
	    (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
		return 0;

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2768 Pending link or FCF event during current "
			"handling of the previous event: link_state:x%x, "
			"evt_tag_at_scan:x%x, evt_tag_current:x%x\n",
			phba->link_state, phba->fcoe_eventtag_at_fcf_scan,
			phba->fcoe_eventtag);

	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
	spin_unlock_irq(&phba->hbalock);

	if (phba->link_state >= LPFC_LINK_UP) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2780 Restart FCF table scan due to "
				"pending FCF event:evt_tag_at_scan:x%x, "
				"evt_tag_current:x%x\n",
				phba->fcoe_eventtag_at_fcf_scan,
				phba->fcoe_eventtag);
		lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
	} else {
		/*
		 * Do not continue FCF discovery and clear FCF_TS_INPROG
		 * flag
		 */
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2833 Stop FCF discovery process due to link "
				"state change (x%x)\n", phba->link_state);
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
		spin_unlock_irq(&phba->hbalock);
	}

	/* Unregister the currently registered FCF if required */
	if (unreg_fcf) {
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_REGISTERED;
		spin_unlock_irq(&phba->hbalock);
		lpfc_sli4_unregister_fcf(phba);
	}
	return 1;
}

/**
 * lpfc_sli4_new_fcf_random_select - Randomly select an eligible new fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_cnt: number of eligible fcf record seen so far.
 *
 * This function makes a running random selection decision on the FCF record
 * to use through a sequence of @fcf_cnt eligible FCF records with equal
 * probability. To perform integer manipulation of random numbers with
 * size uint32_t, the lower 16 bits of the 32-bit random number returned
 * from prandom_u32() are taken as the random number generated.
 *
 * Returns true when the outcome is that the newly read FCF record should
 * be chosen; otherwise, returns false when the outcome is to keep the
 * previously chosen FCF record.
 **/
static bool
lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt)
{
	uint32_t rand_num;

	/* Get 16-bit uniform random number */
	rand_num = 0xFFFF & prandom_u32();

	/* Decision with probability 1/fcf_cnt */
	if ((fcf_cnt * rand_num) < 0xFFFF)
		return true;
	else
		return false;
}
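/*
 * Note (illustrative): this is the classic streaming ("reservoir")
 * selection. The n-th eligible record replaces the current pick with
 * probability roughly 1/n: rand_num is uniform on [0, 0xFFFF], so
 * n * rand_num < 0xFFFF holds for about one n-th of that range. By
 * induction, every record in the sequence ends up equally likely to be
 * the final choice.
 */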
1768 */ 1769 static struct fcf_record * 1770 lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, 1771 uint16_t *next_fcf_index) 1772 { 1773 void *virt_addr; 1774 dma_addr_t phys_addr; 1775 struct lpfc_mbx_sge sge; 1776 struct lpfc_mbx_read_fcf_tbl *read_fcf; 1777 uint32_t shdr_status, shdr_add_status; 1778 union lpfc_sli4_cfg_shdr *shdr; 1779 struct fcf_record *new_fcf_record; 1780 1781 /* Get the first SGE entry from the non-embedded DMA memory. This 1782 * routine only uses a single SGE. 1783 */ 1784 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); 1785 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo); 1786 if (unlikely(!mboxq->sge_array)) { 1787 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 1788 "2524 Failed to get the non-embedded SGE " 1789 "virtual address\n"); 1790 return NULL; 1791 } 1792 virt_addr = mboxq->sge_array->addr[0]; 1793 1794 shdr = (union lpfc_sli4_cfg_shdr *)virt_addr; 1795 lpfc_sli_pcimem_bcopy(shdr, shdr, 1796 sizeof(union lpfc_sli4_cfg_shdr)); 1797 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 1798 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 1799 if (shdr_status || shdr_add_status) { 1800 if (shdr_status == STATUS_FCF_TABLE_EMPTY) 1801 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 1802 "2726 READ_FCF_RECORD Indicates empty " 1803 "FCF table.\n"); 1804 else 1805 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 1806 "2521 READ_FCF_RECORD mailbox failed " 1807 "with status x%x add_status x%x, " 1808 "mbx\n", shdr_status, shdr_add_status); 1809 return NULL; 1810 } 1811 1812 /* Interpreting the returned information of the FCF record */ 1813 read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr; 1814 lpfc_sli_pcimem_bcopy(read_fcf, read_fcf, 1815 sizeof(struct lpfc_mbx_read_fcf_tbl)); 1816 *next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf); 1817 new_fcf_record = (struct fcf_record *)(virt_addr + 1818 sizeof(struct lpfc_mbx_read_fcf_tbl)); 1819 lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record, 1820 offsetof(struct fcf_record, vlan_bitmap)); 1821 new_fcf_record->word137 = le32_to_cpu(new_fcf_record->word137); 1822 new_fcf_record->word138 = le32_to_cpu(new_fcf_record->word138); 1823 1824 return new_fcf_record; 1825 } 1826 1827 /** 1828 * lpfc_sli4_log_fcf_record_info - Log the information of a fcf record 1829 * @phba: pointer to lpfc hba data structure. 1830 * @fcf_record: pointer to the fcf record. 1831 * @vlan_id: the lowest vlan identifier associated to this fcf record. 1832 * @next_fcf_index: the index to the next fcf record in hba's fcf table. 1833 * 1834 * This routine logs the detailed FCF record if the LOG_FIP loggin is 1835 * enabled. 
1836  **/
1837 static void
1838 lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
1839 			      struct fcf_record *fcf_record,
1840 			      uint16_t vlan_id,
1841 			      uint16_t next_fcf_index)
1842 {
1843 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1844 			"2764 READ_FCF_RECORD:\n"
1845 			"\tFCF_Index     : x%x\n"
1846 			"\tFCF_Avail     : x%x\n"
1847 			"\tFCF_Valid     : x%x\n"
1848 			"\tFCF_SOL       : x%x\n"
1849 			"\tFIP_Priority  : x%x\n"
1850 			"\tMAC_Provider  : x%x\n"
1851 			"\tLowest VLANID : x%x\n"
1852 			"\tFCF_MAC Addr  : x%x:%x:%x:%x:%x:%x\n"
1853 			"\tFabric_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
1854 			"\tSwitch_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
1855 			"\tNext_FCF_Index: x%x\n",
1856 			bf_get(lpfc_fcf_record_fcf_index, fcf_record),
1857 			bf_get(lpfc_fcf_record_fcf_avail, fcf_record),
1858 			bf_get(lpfc_fcf_record_fcf_valid, fcf_record),
1859 			bf_get(lpfc_fcf_record_fcf_sol, fcf_record),
1860 			fcf_record->fip_priority,
1861 			bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record),
1862 			vlan_id,
1863 			bf_get(lpfc_fcf_record_mac_0, fcf_record),
1864 			bf_get(lpfc_fcf_record_mac_1, fcf_record),
1865 			bf_get(lpfc_fcf_record_mac_2, fcf_record),
1866 			bf_get(lpfc_fcf_record_mac_3, fcf_record),
1867 			bf_get(lpfc_fcf_record_mac_4, fcf_record),
1868 			bf_get(lpfc_fcf_record_mac_5, fcf_record),
1869 			bf_get(lpfc_fcf_record_fab_name_0, fcf_record),
1870 			bf_get(lpfc_fcf_record_fab_name_1, fcf_record),
1871 			bf_get(lpfc_fcf_record_fab_name_2, fcf_record),
1872 			bf_get(lpfc_fcf_record_fab_name_3, fcf_record),
1873 			bf_get(lpfc_fcf_record_fab_name_4, fcf_record),
1874 			bf_get(lpfc_fcf_record_fab_name_5, fcf_record),
1875 			bf_get(lpfc_fcf_record_fab_name_6, fcf_record),
1876 			bf_get(lpfc_fcf_record_fab_name_7, fcf_record),
1877 			bf_get(lpfc_fcf_record_switch_name_0, fcf_record),
1878 			bf_get(lpfc_fcf_record_switch_name_1, fcf_record),
1879 			bf_get(lpfc_fcf_record_switch_name_2, fcf_record),
1880 			bf_get(lpfc_fcf_record_switch_name_3, fcf_record),
1881 			bf_get(lpfc_fcf_record_switch_name_4, fcf_record),
1882 			bf_get(lpfc_fcf_record_switch_name_5, fcf_record),
1883 			bf_get(lpfc_fcf_record_switch_name_6, fcf_record),
1884 			bf_get(lpfc_fcf_record_switch_name_7, fcf_record),
1885 			next_fcf_index);
1886 }
1887 
1888 /**
1889  * lpfc_sli4_fcf_record_match - test a new FCF record against an existing FCF
1890  * @phba: pointer to lpfc hba data structure.
1891  * @fcf_rec: pointer to an existing FCF record.
1892  * @new_fcf_record: pointer to a new FCF record.
1893  * @new_vlan_id: vlan id from the new FCF record.
1894  *
1895  * This function performs a matching test of a new FCF record against an
1896  * existing FCF record. If the new_vlan_id passed in is LPFC_FCOE_IGNORE_VID,
1897  * the vlan id will not be used as part of the FCF record matching criteria.
1898  *
1899  * Returns true if all the fields match, otherwise returns false.
1900  */
1901 static bool
1902 lpfc_sli4_fcf_record_match(struct lpfc_hba *phba,
1903 			   struct lpfc_fcf_rec *fcf_rec,
1904 			   struct fcf_record *new_fcf_record,
1905 			   uint16_t new_vlan_id)
1906 {
1907 	if (new_vlan_id != LPFC_FCOE_IGNORE_VID)
1908 		if (!lpfc_vlan_id_match(fcf_rec->vlan_id, new_vlan_id))
1909 			return false;
1910 	if (!lpfc_mac_addr_match(fcf_rec->mac_addr, new_fcf_record))
1911 		return false;
1912 	if (!lpfc_sw_name_match(fcf_rec->switch_name, new_fcf_record))
1913 		return false;
1914 	if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record))
1915 		return false;
1916 	if (fcf_rec->priority != new_fcf_record->fip_priority)
1917 		return false;
1918 	return true;
1919 }
1920 
1921 /**
1922  * lpfc_sli4_fcf_rr_next_proc - processing next roundrobin fcf
1923  * @vport: Pointer to vport object.
1924  * @fcf_index: index to next fcf.
1925  *
1926  * This function processes the roundrobin FCF failover to the next fcf index.
1927  * When this function is invoked, there will be a current fcf registered
1928  * for flogi.
1929  * Return: 0 to continue retrying FLOGI on the currently registered fcf;
1930  *         1 to stop FLOGI on the currently registered fcf.
1931  */
1932 int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index)
1933 {
1934 	struct lpfc_hba *phba = vport->phba;
1935 	int rc;
1936 
1937 	if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
1938 		spin_lock_irq(&phba->hbalock);
1939 		if (phba->hba_flag & HBA_DEVLOSS_TMO) {
1940 			spin_unlock_irq(&phba->hbalock);
1941 			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1942 					"2872 Devloss tmo with no eligible "
1943 					"FCF, unregister in-use FCF (x%x) "
1944 					"and rescan FCF table\n",
1945 					phba->fcf.current_rec.fcf_indx);
1946 			lpfc_unregister_fcf_rescan(phba);
1947 			goto stop_flogi_current_fcf;
1948 		}
1949 		/* Mark the end to FLOGI roundrobin failover */
1950 		phba->hba_flag &= ~FCF_RR_INPROG;
1951 		/* Allow action to new fcf asynchronous event */
1952 		phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
1953 		spin_unlock_irq(&phba->hbalock);
1954 		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1955 				"2865 No FCF available, stop roundrobin FCF "
1956 				"failover and change port state:x%x/x%x\n",
1957 				phba->pport->port_state, LPFC_VPORT_UNKNOWN);
1958 		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
1959 		goto stop_flogi_current_fcf;
1960 	} else {
1961 		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS,
1962 				"2794 Try FLOGI roundrobin FCF failover to "
1963 				"(x%x)\n", fcf_index);
1964 		rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
1965 		if (rc)
1966 			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
1967 					"2761 FLOGI roundrobin FCF failover "
1968 					"failed (rc:x%x) to read FCF (x%x)\n",
1969 					rc, phba->fcf.current_rec.fcf_indx);
1970 		else
1971 			goto stop_flogi_current_fcf;
1972 	}
1973 	return 0;
1974 
1975 stop_flogi_current_fcf:
1976 	lpfc_can_disctmo(vport);
1977 	return 1;
1978 }
1979 
1980 /**
1981  * lpfc_sli4_fcf_pri_list_del - remove an fcf record from the fcf priority list
1982  * @phba: pointer to lpfc hba data structure.
1983  * @fcf_index: the index of the fcf record to delete
1984  * This routine checks the on-list flag of the fcf_index to be deleted.
1985  * If it is on the list, it is removed from the list and the flag
1986  * is cleared. This routine grabs the hbalock before removing the fcf
1987  * record from the list.
1988  **/
1989 static void lpfc_sli4_fcf_pri_list_del(struct lpfc_hba *phba,
1990 			uint16_t fcf_index)
1991 {
1992 	struct lpfc_fcf_pri *new_fcf_pri;
1993 
1994 	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
1995 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1996 		"3058 deleting idx x%x pri x%x flg x%x\n",
1997 		fcf_index, new_fcf_pri->fcf_rec.priority,
1998 		new_fcf_pri->fcf_rec.flag);
1999 	spin_lock_irq(&phba->hbalock);
2000 	if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) {
2001 		if (phba->fcf.current_rec.priority ==
2002 				new_fcf_pri->fcf_rec.priority)
2003 			phba->fcf.eligible_fcf_cnt--;
2004 		list_del_init(&new_fcf_pri->list);
2005 		new_fcf_pri->fcf_rec.flag &= ~LPFC_FCF_ON_PRI_LIST;
2006 	}
2007 	spin_unlock_irq(&phba->hbalock);
2008 }
2009 
2010 /**
2011  * lpfc_sli4_set_fcf_flogi_fail - mark FLOGI failed against an fcf record
2012  * @phba: pointer to lpfc hba data structure.
2013  * @fcf_index: the index of the fcf record to update
2014  * This routine acquires the hbalock and then sets the LPFC_FCF_FLOGI_FAILED
2015  * flag so that the round robin selection for the particular priority level
2016  * will try a different fcf record that does not have this bit set.
2017  * If the fcf record is re-read for any reason, this flag is cleared before
2018  * adding it to the priority list.
2019  **/
2020 void
2021 lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index)
2022 {
2023 	struct lpfc_fcf_pri *new_fcf_pri;
2024 	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
2025 	spin_lock_irq(&phba->hbalock);
2026 	new_fcf_pri->fcf_rec.flag |= LPFC_FCF_FLOGI_FAILED;
2027 	spin_unlock_irq(&phba->hbalock);
2028 }
2029 
2030 /**
2031  * lpfc_sli4_fcf_pri_list_add - add an fcf record to the fcf priority list
2032  * @phba: pointer to lpfc hba data structure.
2033  * @fcf_index: the index of the fcf record to add
2034  * This routine checks the priority of the fcf_index to be added.
2035  * If it is a lower priority than the current head of the fcf_pri list
2036  * then it is added to the list in the right order.
2037  * If it is the same priority as the current head of the list then it
2038  * is added to the head of the list and its bit in the rr_bmask is set.
2039  * If the fcf_index to be added is of a higher priority than the current
2040  * head of the list then the rr_bmask is cleared, its bit is set in the
2041  * rr_bmask and it is added to the head of the list.
2042  * returns:
2043  * 0=success 1=failure
2044  **/
2045 int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba, uint16_t fcf_index,
2046 	struct fcf_record *new_fcf_record)
2047 {
2048 	uint16_t current_fcf_pri;
2049 	uint16_t last_index;
2050 	struct lpfc_fcf_pri *fcf_pri;
2051 	struct lpfc_fcf_pri *next_fcf_pri;
2052 	struct lpfc_fcf_pri *new_fcf_pri;
2053 	int ret;
2054 
2055 	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
2056 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2057 		"3059 adding idx x%x pri x%x flg x%x\n",
2058 		fcf_index, new_fcf_record->fip_priority,
2059 		new_fcf_pri->fcf_rec.flag);
2060 	spin_lock_irq(&phba->hbalock);
2061 	if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST)
2062 		list_del_init(&new_fcf_pri->list);
2063 	new_fcf_pri->fcf_rec.fcf_index = fcf_index;
2064 	new_fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
2065 	if (list_empty(&phba->fcf.fcf_pri_list)) {
2066 		list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
2067 		ret = lpfc_sli4_fcf_rr_index_set(phba,
2068 				new_fcf_pri->fcf_rec.fcf_index);
2069 		goto out;
2070 	}
2071 
2072 	last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
2073 				LPFC_SLI4_FCF_TBL_INDX_MAX);
2074 	if (last_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
2075 		ret = 0; /* Empty rr list */
2076 		goto out;
2077 	}
2078 	current_fcf_pri = phba->fcf.fcf_pri[last_index].fcf_rec.priority;
2079 	if (new_fcf_pri->fcf_rec.priority <= current_fcf_pri) {
2080 		list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
2081 		if (new_fcf_pri->fcf_rec.priority < current_fcf_pri) {
2082 			memset(phba->fcf.fcf_rr_bmask, 0,
2083 				sizeof(*phba->fcf.fcf_rr_bmask));
2084 			/* fcfs_at_this_priority_level = 1; */
2085 			phba->fcf.eligible_fcf_cnt = 1;
2086 		} else
2087 			/* fcfs_at_this_priority_level++; */
2088 			phba->fcf.eligible_fcf_cnt++;
2089 		ret = lpfc_sli4_fcf_rr_index_set(phba,
2090 				new_fcf_pri->fcf_rec.fcf_index);
2091 		goto out;
2092 	}
2093 
2094 	list_for_each_entry_safe(fcf_pri, next_fcf_pri,
2095 				&phba->fcf.fcf_pri_list, list) {
2096 		if (new_fcf_pri->fcf_rec.priority <=
2097 				fcf_pri->fcf_rec.priority) {
2098 			if (fcf_pri->list.prev == &phba->fcf.fcf_pri_list)
2099 				list_add(&new_fcf_pri->list,
2100 						&phba->fcf.fcf_pri_list);
2101 			else
2102 				list_add(&new_fcf_pri->list,
2103 					 &((struct lpfc_fcf_pri *)
2104 					fcf_pri->list.prev)->list);
2105 			ret = 0;
2106 			goto out;
2107 		} else if (fcf_pri->list.next == &phba->fcf.fcf_pri_list
2108 			|| new_fcf_pri->fcf_rec.priority <
2109 				next_fcf_pri->fcf_rec.priority) {
2110 			list_add(&new_fcf_pri->list, &fcf_pri->list);
2111 			ret = 0;
2112 			goto out;
2113 		}
2114 		if (new_fcf_pri->fcf_rec.priority > fcf_pri->fcf_rec.priority)
2115 			continue;
2116 
2117 	}
2118 	ret = 1;
2119 out:
2120 	/* we use = instead of |= to clear the FLOGI_FAILED flag. */
2121 	new_fcf_pri->fcf_rec.flag = LPFC_FCF_ON_PRI_LIST;
2122 	spin_unlock_irq(&phba->hbalock);
2123 	return ret;
2124 }
2125 
2126 /**
2127  * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
2128  * @phba: pointer to lpfc hba data structure.
2129  * @mboxq: pointer to mailbox object.
2130  *
2131  * This function iterates through all the fcf records available in the
2132  * HBA and chooses the optimal FCF record for discovery. After finding
2133  * the FCF for discovery it registers the FCF record and kick-starts
2134  * discovery.
2135  * If the FCF_IN_USE flag is set in the currently used FCF, the routine tries
2136  * to use an FCF record which matches the fabric name and mac address of the
2137  * currently used FCF record.
2138  * If the driver supports only one FCF, it will try to use the FCF record
2139  * used by BOOT_BIOS.
2140  */
2141 void
2142 lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2143 {
2144 	struct fcf_record *new_fcf_record;
2145 	uint32_t boot_flag, addr_mode;
2146 	uint16_t fcf_index, next_fcf_index;
2147 	struct lpfc_fcf_rec *fcf_rec = NULL;
2148 	uint16_t vlan_id;
2149 	uint32_t seed;
2150 	bool select_new_fcf;
2151 	int rc;
2152 
2153 	/* If there is a pending FCoE event, restart the FCF table scan */
2154 	if (lpfc_check_pending_fcoe_event(phba, LPFC_SKIP_UNREG_FCF)) {
2155 		lpfc_sli4_mbox_cmd_free(phba, mboxq);
2156 		return;
2157 	}
2158 
2159 	/* Parse the FCF record from the non-embedded mailbox command */
2160 	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
2161 						      &next_fcf_index);
2162 	if (!new_fcf_record) {
2163 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
2164 				"2765 Mailbox command READ_FCF_RECORD "
2165 				"failed to retrieve a FCF record.\n");
2166 		/* Let next new FCF event trigger fast failover */
2167 		spin_lock_irq(&phba->hbalock);
2168 		phba->hba_flag &= ~FCF_TS_INPROG;
2169 		spin_unlock_irq(&phba->hbalock);
2170 		lpfc_sli4_mbox_cmd_free(phba, mboxq);
2171 		return;
2172 	}
2173 
2174 	/* Check the FCF record against the connection list */
2175 	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
2176 				      &addr_mode, &vlan_id);
2177 
2178 	/* Log the FCF record information if turned on */
2179 	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
2180 				      next_fcf_index);
2181 
2182 	/*
2183 	 * If the fcf record does not match the connect list entries,
2184 	 * read the next entry; otherwise, this is an eligible FCF
2185 	 * record for roundrobin FCF failover.
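	 * (An ineligible record is also dropped from the fcf_pri list
	 * below, so the roundrobin failover bmask never revisits it.)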
2186 */ 2187 if (!rc) { 2188 lpfc_sli4_fcf_pri_list_del(phba, 2189 bf_get(lpfc_fcf_record_fcf_index, 2190 new_fcf_record)); 2191 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 2192 "2781 FCF (x%x) failed connection " 2193 "list check: (x%x/x%x/%x)\n", 2194 bf_get(lpfc_fcf_record_fcf_index, 2195 new_fcf_record), 2196 bf_get(lpfc_fcf_record_fcf_avail, 2197 new_fcf_record), 2198 bf_get(lpfc_fcf_record_fcf_valid, 2199 new_fcf_record), 2200 bf_get(lpfc_fcf_record_fcf_sol, 2201 new_fcf_record)); 2202 if ((phba->fcf.fcf_flag & FCF_IN_USE) && 2203 lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec, 2204 new_fcf_record, LPFC_FCOE_IGNORE_VID)) { 2205 if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) != 2206 phba->fcf.current_rec.fcf_indx) { 2207 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 2208 "2862 FCF (x%x) matches property " 2209 "of in-use FCF (x%x)\n", 2210 bf_get(lpfc_fcf_record_fcf_index, 2211 new_fcf_record), 2212 phba->fcf.current_rec.fcf_indx); 2213 goto read_next_fcf; 2214 } 2215 /* 2216 * In case the current in-use FCF record becomes 2217 * invalid/unavailable during FCF discovery that 2218 * was not triggered by fast FCF failover process, 2219 * treat it as fast FCF failover. 2220 */ 2221 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND) && 2222 !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) { 2223 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 2224 "2835 Invalid in-use FCF " 2225 "(x%x), enter FCF failover " 2226 "table scan.\n", 2227 phba->fcf.current_rec.fcf_indx); 2228 spin_lock_irq(&phba->hbalock); 2229 phba->fcf.fcf_flag |= FCF_REDISC_FOV; 2230 spin_unlock_irq(&phba->hbalock); 2231 lpfc_sli4_mbox_cmd_free(phba, mboxq); 2232 lpfc_sli4_fcf_scan_read_fcf_rec(phba, 2233 LPFC_FCOE_FCF_GET_FIRST); 2234 return; 2235 } 2236 } 2237 goto read_next_fcf; 2238 } else { 2239 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); 2240 rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, 2241 new_fcf_record); 2242 if (rc) 2243 goto read_next_fcf; 2244 } 2245 2246 /* 2247 * If this is not the first FCF discovery of the HBA, use last 2248 * FCF record for the discovery. The condition that a rescan 2249 * matches the in-use FCF record: fabric name, switch name, mac 2250 * address, and vlan_id. 2251 */ 2252 spin_lock_irq(&phba->hbalock); 2253 if (phba->fcf.fcf_flag & FCF_IN_USE) { 2254 if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV && 2255 lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec, 2256 new_fcf_record, vlan_id)) { 2257 if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) == 2258 phba->fcf.current_rec.fcf_indx) { 2259 phba->fcf.fcf_flag |= FCF_AVAILABLE; 2260 if (phba->fcf.fcf_flag & FCF_REDISC_PEND) 2261 /* Stop FCF redisc wait timer */ 2262 __lpfc_sli4_stop_fcf_redisc_wait_timer( 2263 phba); 2264 else if (phba->fcf.fcf_flag & FCF_REDISC_FOV) 2265 /* Fast failover, mark completed */ 2266 phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; 2267 spin_unlock_irq(&phba->hbalock); 2268 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2269 "2836 New FCF matches in-use " 2270 "FCF (x%x), port_state:x%x, " 2271 "fc_flag:x%x\n", 2272 phba->fcf.current_rec.fcf_indx, 2273 phba->pport->port_state, 2274 phba->pport->fc_flag); 2275 goto out; 2276 } else 2277 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 2278 "2863 New FCF (x%x) matches " 2279 "property of in-use FCF (x%x)\n", 2280 bf_get(lpfc_fcf_record_fcf_index, 2281 new_fcf_record), 2282 phba->fcf.current_rec.fcf_indx); 2283 } 2284 /* 2285 * Read next FCF record from HBA searching for the matching 2286 * with in-use record only if not during the fast failover 2287 * period. 
In case of fast failover period, it shall try to 2288 * determine whether the FCF record just read should be the 2289 * next candidate. 2290 */ 2291 if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) { 2292 spin_unlock_irq(&phba->hbalock); 2293 goto read_next_fcf; 2294 } 2295 } 2296 /* 2297 * Update on failover FCF record only if it's in FCF fast-failover 2298 * period; otherwise, update on current FCF record. 2299 */ 2300 if (phba->fcf.fcf_flag & FCF_REDISC_FOV) 2301 fcf_rec = &phba->fcf.failover_rec; 2302 else 2303 fcf_rec = &phba->fcf.current_rec; 2304 2305 if (phba->fcf.fcf_flag & FCF_AVAILABLE) { 2306 /* 2307 * If the driver FCF record does not have boot flag 2308 * set and new hba fcf record has boot flag set, use 2309 * the new hba fcf record. 2310 */ 2311 if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) { 2312 /* Choose this FCF record */ 2313 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2314 "2837 Update current FCF record " 2315 "(x%x) with new FCF record (x%x)\n", 2316 fcf_rec->fcf_indx, 2317 bf_get(lpfc_fcf_record_fcf_index, 2318 new_fcf_record)); 2319 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, 2320 addr_mode, vlan_id, BOOT_ENABLE); 2321 spin_unlock_irq(&phba->hbalock); 2322 goto read_next_fcf; 2323 } 2324 /* 2325 * If the driver FCF record has boot flag set and the 2326 * new hba FCF record does not have boot flag, read 2327 * the next FCF record. 2328 */ 2329 if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) { 2330 spin_unlock_irq(&phba->hbalock); 2331 goto read_next_fcf; 2332 } 2333 /* 2334 * If the new hba FCF record has lower priority value 2335 * than the driver FCF record, use the new record. 2336 */ 2337 if (new_fcf_record->fip_priority < fcf_rec->priority) { 2338 /* Choose the new FCF record with lower priority */ 2339 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2340 "2838 Update current FCF record " 2341 "(x%x) with new FCF record (x%x)\n", 2342 fcf_rec->fcf_indx, 2343 bf_get(lpfc_fcf_record_fcf_index, 2344 new_fcf_record)); 2345 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, 2346 addr_mode, vlan_id, 0); 2347 /* Reset running random FCF selection count */ 2348 phba->fcf.eligible_fcf_cnt = 1; 2349 } else if (new_fcf_record->fip_priority == fcf_rec->priority) { 2350 /* Update running random FCF selection count */ 2351 phba->fcf.eligible_fcf_cnt++; 2352 select_new_fcf = lpfc_sli4_new_fcf_random_select(phba, 2353 phba->fcf.eligible_fcf_cnt); 2354 if (select_new_fcf) { 2355 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2356 "2839 Update current FCF record " 2357 "(x%x) with new FCF record (x%x)\n", 2358 fcf_rec->fcf_indx, 2359 bf_get(lpfc_fcf_record_fcf_index, 2360 new_fcf_record)); 2361 /* Choose the new FCF by random selection */ 2362 __lpfc_update_fcf_record(phba, fcf_rec, 2363 new_fcf_record, 2364 addr_mode, vlan_id, 0); 2365 } 2366 } 2367 spin_unlock_irq(&phba->hbalock); 2368 goto read_next_fcf; 2369 } 2370 /* 2371 * This is the first suitable FCF record, choose this record for 2372 * initial best-fit FCF. 2373 */ 2374 if (fcf_rec) { 2375 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2376 "2840 Update initial FCF candidate " 2377 "with FCF (x%x)\n", 2378 bf_get(lpfc_fcf_record_fcf_index, 2379 new_fcf_record)); 2380 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, 2381 addr_mode, vlan_id, (boot_flag ? 
2382 					     BOOT_ENABLE : 0));
2383 		phba->fcf.fcf_flag |= FCF_AVAILABLE;
2384 		/* Setup initial running random FCF selection count */
2385 		phba->fcf.eligible_fcf_cnt = 1;
2386 		/* Seeding the random number generator for random selection */
2387 		seed = (uint32_t)(0xFFFFFFFF & jiffies);
2388 		prandom_seed(seed);
2389 	}
2390 	spin_unlock_irq(&phba->hbalock);
2391 	goto read_next_fcf;
2392 
2393 read_next_fcf:
2394 	lpfc_sli4_mbox_cmd_free(phba, mboxq);
2395 	if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) {
2396 		if (phba->fcf.fcf_flag & FCF_REDISC_FOV) {
2397 			/*
2398 			 * Case of FCF fast failover scan
2399 			 */
2400 
2401 			/*
2402 			 * It has not found any suitable FCF record; cancel
2403 			 * the FCF scan in progress, and do nothing
2404 			 */
2405 			if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) {
2406 				lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2407 					       "2782 No suitable FCF found: "
2408 					       "(x%x/x%x)\n",
2409 					       phba->fcoe_eventtag_at_fcf_scan,
2410 					       bf_get(lpfc_fcf_record_fcf_index,
2411 						      new_fcf_record));
2412 				spin_lock_irq(&phba->hbalock);
2413 				if (phba->hba_flag & HBA_DEVLOSS_TMO) {
2414 					phba->hba_flag &= ~FCF_TS_INPROG;
2415 					spin_unlock_irq(&phba->hbalock);
2416 					/* Unregister in-use FCF and rescan */
2417 					lpfc_printf_log(phba, KERN_INFO,
2418 							LOG_FIP,
2419 							"2864 On devloss tmo "
2420 							"unreg in-use FCF and "
2421 							"rescan FCF table\n");
2422 					lpfc_unregister_fcf_rescan(phba);
2423 					return;
2424 				}
2425 				/*
2426 				 * Let next new FCF event trigger fast failover
2427 				 */
2428 				phba->hba_flag &= ~FCF_TS_INPROG;
2429 				spin_unlock_irq(&phba->hbalock);
2430 				return;
2431 			}
2432 			/*
2433 			 * It has found a suitable FCF record that is not
2434 			 * the same as the in-use FCF record: unregister the
2435 			 * in-use FCF record, replace the in-use FCF record
2436 			 * with the new FCF record, mark FCF fast failover
2437 			 * completed, and then start registering the new FCF
2438 			 * record.
2439 			 */
2440 
2441 			/* Unregister the current in-use FCF record */
2442 			lpfc_unregister_fcf(phba);
2443 
2444 			/* Replace in-use record with the new record */
2445 			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2446 					"2842 Replace in-use FCF (x%x) "
2447 					"with failover FCF (x%x)\n",
2448 					phba->fcf.current_rec.fcf_indx,
2449 					phba->fcf.failover_rec.fcf_indx);
2450 			memcpy(&phba->fcf.current_rec,
2451 			       &phba->fcf.failover_rec,
2452 			       sizeof(struct lpfc_fcf_rec));
2453 			/*
2454 			 * Mark the fast FCF failover rediscovery completed
2455 			 * and the start of the first round of the roundrobin
2456 			 * FCF failover.
2457 			 */
2458 			spin_lock_irq(&phba->hbalock);
2459 			phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
2460 			spin_unlock_irq(&phba->hbalock);
2461 			/* Register to the new FCF record */
2462 			lpfc_register_fcf(phba);
2463 		} else {
2464 			/*
2465 			 * During the transition period to fast FCF failover,
2466 			 * do nothing when the search reaches the end of the
2467 			 * FCF table.
2468 			 */
2469 			if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) ||
2470 			    (phba->fcf.fcf_flag & FCF_REDISC_PEND))
2471 				return;
2472 
2473 			if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
2474 			    phba->fcf.fcf_flag & FCF_IN_USE) {
2475 				/*
2476 				 * If the current in-use FCF record no
2477 				 * longer exists during FCF discovery that
2478 				 * was not triggered by the fast FCF failover
2479 				 * process, treat it as fast FCF failover.
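				 * (This takes the same recovery path as the
				 * 2835 case earlier in this handler: set
				 * FCF_REDISC_FOV and restart the table scan
				 * from the first entry.)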
2479 */ 2480 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2481 "2841 In-use FCF record (x%x) " 2482 "not reported, entering fast " 2483 "FCF failover mode scanning.\n", 2484 phba->fcf.current_rec.fcf_indx); 2485 spin_lock_irq(&phba->hbalock); 2486 phba->fcf.fcf_flag |= FCF_REDISC_FOV; 2487 spin_unlock_irq(&phba->hbalock); 2488 lpfc_sli4_fcf_scan_read_fcf_rec(phba, 2489 LPFC_FCOE_FCF_GET_FIRST); 2490 return; 2491 } 2492 /* Register to the new FCF record */ 2493 lpfc_register_fcf(phba); 2494 } 2495 } else 2496 lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index); 2497 return; 2498 2499 out: 2500 lpfc_sli4_mbox_cmd_free(phba, mboxq); 2501 lpfc_register_fcf(phba); 2502 2503 return; 2504 } 2505 2506 /** 2507 * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf roundrobin read_fcf mbox cmpl hdler 2508 * @phba: pointer to lpfc hba data structure. 2509 * @mboxq: pointer to mailbox object. 2510 * 2511 * This is the callback function for FLOGI failure roundrobin FCF failover 2512 * read FCF record mailbox command from the eligible FCF record bmask for 2513 * performing the failover. If the FCF read back is not valid/available, it 2514 * fails through to retrying FLOGI to the currently registered FCF again. 2515 * Otherwise, if the FCF read back is valid and available, it will set the 2516 * newly read FCF record to the failover FCF record, unregister currently 2517 * registered FCF record, copy the failover FCF record to the current 2518 * FCF record, and then register the current FCF record before proceeding 2519 * to trying FLOGI on the new failover FCF. 2520 */ 2521 void 2522 lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 2523 { 2524 struct fcf_record *new_fcf_record; 2525 uint32_t boot_flag, addr_mode; 2526 uint16_t next_fcf_index, fcf_index; 2527 uint16_t current_fcf_index; 2528 uint16_t vlan_id; 2529 int rc; 2530 2531 /* If link state is not up, stop the roundrobin failover process */ 2532 if (phba->link_state < LPFC_LINK_UP) { 2533 spin_lock_irq(&phba->hbalock); 2534 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 2535 phba->hba_flag &= ~FCF_RR_INPROG; 2536 spin_unlock_irq(&phba->hbalock); 2537 goto out; 2538 } 2539 2540 /* Parse the FCF record from the non-embedded mailbox command */ 2541 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq, 2542 &next_fcf_index); 2543 if (!new_fcf_record) { 2544 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 2545 "2766 Mailbox command READ_FCF_RECORD " 2546 "failed to retrieve a FCF record. 
" 2547 "hba_flg x%x fcf_flg x%x\n", phba->hba_flag, 2548 phba->fcf.fcf_flag); 2549 lpfc_unregister_fcf_rescan(phba); 2550 goto out; 2551 } 2552 2553 /* Get the needed parameters from FCF record */ 2554 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, 2555 &addr_mode, &vlan_id); 2556 2557 /* Log the FCF record information if turned on */ 2558 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id, 2559 next_fcf_index); 2560 2561 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); 2562 if (!rc) { 2563 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2564 "2848 Remove ineligible FCF (x%x) from " 2565 "from roundrobin bmask\n", fcf_index); 2566 /* Clear roundrobin bmask bit for ineligible FCF */ 2567 lpfc_sli4_fcf_rr_index_clear(phba, fcf_index); 2568 /* Perform next round of roundrobin FCF failover */ 2569 fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba); 2570 rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index); 2571 if (rc) 2572 goto out; 2573 goto error_out; 2574 } 2575 2576 if (fcf_index == phba->fcf.current_rec.fcf_indx) { 2577 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2578 "2760 Perform FLOGI roundrobin FCF failover: " 2579 "FCF (x%x) back to FCF (x%x)\n", 2580 phba->fcf.current_rec.fcf_indx, fcf_index); 2581 /* Wait 500 ms before retrying FLOGI to current FCF */ 2582 msleep(500); 2583 lpfc_issue_init_vfi(phba->pport); 2584 goto out; 2585 } 2586 2587 /* Upload new FCF record to the failover FCF record */ 2588 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2589 "2834 Update current FCF (x%x) with new FCF (x%x)\n", 2590 phba->fcf.failover_rec.fcf_indx, fcf_index); 2591 spin_lock_irq(&phba->hbalock); 2592 __lpfc_update_fcf_record(phba, &phba->fcf.failover_rec, 2593 new_fcf_record, addr_mode, vlan_id, 2594 (boot_flag ? BOOT_ENABLE : 0)); 2595 spin_unlock_irq(&phba->hbalock); 2596 2597 current_fcf_index = phba->fcf.current_rec.fcf_indx; 2598 2599 /* Unregister the current in-use FCF record */ 2600 lpfc_unregister_fcf(phba); 2601 2602 /* Replace in-use record with the new record */ 2603 memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec, 2604 sizeof(struct lpfc_fcf_rec)); 2605 2606 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2607 "2783 Perform FLOGI roundrobin FCF failover: FCF " 2608 "(x%x) to FCF (x%x)\n", current_fcf_index, fcf_index); 2609 2610 error_out: 2611 lpfc_register_fcf(phba); 2612 out: 2613 lpfc_sli4_mbox_cmd_free(phba, mboxq); 2614 } 2615 2616 /** 2617 * lpfc_mbx_cmpl_read_fcf_rec - read fcf completion handler. 2618 * @phba: pointer to lpfc hba data structure. 2619 * @mboxq: pointer to mailbox object. 2620 * 2621 * This is the callback function of read FCF record mailbox command for 2622 * updating the eligible FCF bmask for FLOGI failure roundrobin FCF 2623 * failover when a new FCF event happened. If the FCF read back is 2624 * valid/available and it passes the connection list check, it updates 2625 * the bmask for the eligible FCF record for roundrobin failover. 
2626 */ 2627 void 2628 lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 2629 { 2630 struct fcf_record *new_fcf_record; 2631 uint32_t boot_flag, addr_mode; 2632 uint16_t fcf_index, next_fcf_index; 2633 uint16_t vlan_id; 2634 int rc; 2635 2636 /* If link state is not up, no need to proceed */ 2637 if (phba->link_state < LPFC_LINK_UP) 2638 goto out; 2639 2640 /* If FCF discovery period is over, no need to proceed */ 2641 if (!(phba->fcf.fcf_flag & FCF_DISCOVERY)) 2642 goto out; 2643 2644 /* Parse the FCF record from the non-embedded mailbox command */ 2645 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq, 2646 &next_fcf_index); 2647 if (!new_fcf_record) { 2648 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2649 "2767 Mailbox command READ_FCF_RECORD " 2650 "failed to retrieve a FCF record.\n"); 2651 goto out; 2652 } 2653 2654 /* Check the connection list for eligibility */ 2655 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, 2656 &addr_mode, &vlan_id); 2657 2658 /* Log the FCF record information if turned on */ 2659 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id, 2660 next_fcf_index); 2661 2662 if (!rc) 2663 goto out; 2664 2665 /* Update the eligible FCF record index bmask */ 2666 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); 2667 2668 rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, new_fcf_record); 2669 2670 out: 2671 lpfc_sli4_mbox_cmd_free(phba, mboxq); 2672 } 2673 2674 /** 2675 * lpfc_init_vfi_cmpl - Completion handler for init_vfi mbox command. 2676 * @phba: pointer to lpfc hba data structure. 2677 * @mboxq: pointer to mailbox data structure. 2678 * 2679 * This function handles completion of init vfi mailbox command. 2680 */ 2681 void 2682 lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 2683 { 2684 struct lpfc_vport *vport = mboxq->vport; 2685 2686 /* 2687 * VFI not supported on interface type 0, just do the flogi 2688 * Also continue if the VFI is in use - just use the same one. 2689 */ 2690 if (mboxq->u.mb.mbxStatus && 2691 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 2692 LPFC_SLI_INTF_IF_TYPE_0) && 2693 mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) { 2694 lpfc_printf_vlog(vport, KERN_ERR, 2695 LOG_MBOX, 2696 "2891 Init VFI mailbox failed 0x%x\n", 2697 mboxq->u.mb.mbxStatus); 2698 mempool_free(mboxq, phba->mbox_mem_pool); 2699 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 2700 return; 2701 } 2702 2703 lpfc_initial_flogi(vport); 2704 mempool_free(mboxq, phba->mbox_mem_pool); 2705 return; 2706 } 2707 2708 /** 2709 * lpfc_issue_init_vfi - Issue init_vfi mailbox command. 2710 * @vport: pointer to lpfc_vport data structure. 2711 * 2712 * This function issue a init_vfi mailbox command to initialize the VFI and 2713 * VPI for the physical port. 
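 *
 * Usage is fire-and-forget; the roundrobin failover path above invokes
 * it as:
 *
 *	lpfc_issue_init_vfi(phba->pport);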
2714 */ 2715 void 2716 lpfc_issue_init_vfi(struct lpfc_vport *vport) 2717 { 2718 LPFC_MBOXQ_t *mboxq; 2719 int rc; 2720 struct lpfc_hba *phba = vport->phba; 2721 2722 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2723 if (!mboxq) { 2724 lpfc_printf_vlog(vport, KERN_ERR, 2725 LOG_MBOX, "2892 Failed to allocate " 2726 "init_vfi mailbox\n"); 2727 return; 2728 } 2729 lpfc_init_vfi(mboxq, vport); 2730 mboxq->mbox_cmpl = lpfc_init_vfi_cmpl; 2731 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 2732 if (rc == MBX_NOT_FINISHED) { 2733 lpfc_printf_vlog(vport, KERN_ERR, 2734 LOG_MBOX, "2893 Failed to issue init_vfi mailbox\n"); 2735 mempool_free(mboxq, vport->phba->mbox_mem_pool); 2736 } 2737 } 2738 2739 /** 2740 * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command. 2741 * @phba: pointer to lpfc hba data structure. 2742 * @mboxq: pointer to mailbox data structure. 2743 * 2744 * This function handles completion of init vpi mailbox command. 2745 */ 2746 void 2747 lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 2748 { 2749 struct lpfc_vport *vport = mboxq->vport; 2750 struct lpfc_nodelist *ndlp; 2751 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2752 2753 if (mboxq->u.mb.mbxStatus) { 2754 lpfc_printf_vlog(vport, KERN_ERR, 2755 LOG_MBOX, 2756 "2609 Init VPI mailbox failed 0x%x\n", 2757 mboxq->u.mb.mbxStatus); 2758 mempool_free(mboxq, phba->mbox_mem_pool); 2759 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 2760 return; 2761 } 2762 spin_lock_irq(shost->host_lock); 2763 vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI; 2764 spin_unlock_irq(shost->host_lock); 2765 2766 /* If this port is physical port or FDISC is done, do reg_vpi */ 2767 if ((phba->pport == vport) || (vport->port_state == LPFC_FDISC)) { 2768 ndlp = lpfc_findnode_did(vport, Fabric_DID); 2769 if (!ndlp) 2770 lpfc_printf_vlog(vport, KERN_ERR, 2771 LOG_DISCOVERY, 2772 "2731 Cannot find fabric " 2773 "controller node\n"); 2774 else 2775 lpfc_register_new_vport(phba, vport, ndlp); 2776 mempool_free(mboxq, phba->mbox_mem_pool); 2777 return; 2778 } 2779 2780 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) 2781 lpfc_initial_fdisc(vport); 2782 else { 2783 lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP); 2784 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 2785 "2606 No NPIV Fabric support\n"); 2786 } 2787 mempool_free(mboxq, phba->mbox_mem_pool); 2788 return; 2789 } 2790 2791 /** 2792 * lpfc_issue_init_vpi - Issue init_vpi mailbox command. 2793 * @vport: pointer to lpfc_vport data structure. 2794 * 2795 * This function issue a init_vpi mailbox command to initialize 2796 * VPI for the vport. 
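 *
 * lpfc_start_fdiscs() below calls this for any vport still flagged
 * FC_VPORT_NEEDS_INIT_VPI before it will issue the FDISC.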
2797 */ 2798 void 2799 lpfc_issue_init_vpi(struct lpfc_vport *vport) 2800 { 2801 LPFC_MBOXQ_t *mboxq; 2802 int rc, vpi; 2803 2804 if ((vport->port_type != LPFC_PHYSICAL_PORT) && (!vport->vpi)) { 2805 vpi = lpfc_alloc_vpi(vport->phba); 2806 if (!vpi) { 2807 lpfc_printf_vlog(vport, KERN_ERR, 2808 LOG_MBOX, 2809 "3303 Failed to obtain vport vpi\n"); 2810 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 2811 return; 2812 } 2813 vport->vpi = vpi; 2814 } 2815 2816 mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL); 2817 if (!mboxq) { 2818 lpfc_printf_vlog(vport, KERN_ERR, 2819 LOG_MBOX, "2607 Failed to allocate " 2820 "init_vpi mailbox\n"); 2821 return; 2822 } 2823 lpfc_init_vpi(vport->phba, mboxq, vport->vpi); 2824 mboxq->vport = vport; 2825 mboxq->mbox_cmpl = lpfc_init_vpi_cmpl; 2826 rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT); 2827 if (rc == MBX_NOT_FINISHED) { 2828 lpfc_printf_vlog(vport, KERN_ERR, 2829 LOG_MBOX, "2608 Failed to issue init_vpi mailbox\n"); 2830 mempool_free(mboxq, vport->phba->mbox_mem_pool); 2831 } 2832 } 2833 2834 /** 2835 * lpfc_start_fdiscs - send fdiscs for each vports on this port. 2836 * @phba: pointer to lpfc hba data structure. 2837 * 2838 * This function loops through the list of vports on the @phba and issues an 2839 * FDISC if possible. 2840 */ 2841 void 2842 lpfc_start_fdiscs(struct lpfc_hba *phba) 2843 { 2844 struct lpfc_vport **vports; 2845 int i; 2846 2847 vports = lpfc_create_vport_work_array(phba); 2848 if (vports != NULL) { 2849 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2850 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) 2851 continue; 2852 /* There are no vpi for this vport */ 2853 if (vports[i]->vpi > phba->max_vpi) { 2854 lpfc_vport_set_state(vports[i], 2855 FC_VPORT_FAILED); 2856 continue; 2857 } 2858 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 2859 lpfc_vport_set_state(vports[i], 2860 FC_VPORT_LINKDOWN); 2861 continue; 2862 } 2863 if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) { 2864 lpfc_issue_init_vpi(vports[i]); 2865 continue; 2866 } 2867 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) 2868 lpfc_initial_fdisc(vports[i]); 2869 else { 2870 lpfc_vport_set_state(vports[i], 2871 FC_VPORT_NO_FABRIC_SUPP); 2872 lpfc_printf_vlog(vports[i], KERN_ERR, 2873 LOG_ELS, 2874 "0259 No NPIV " 2875 "Fabric support\n"); 2876 } 2877 } 2878 } 2879 lpfc_destroy_vport_work_array(phba, vports); 2880 } 2881 2882 void 2883 lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 2884 { 2885 struct lpfc_dmabuf *dmabuf = mboxq->context1; 2886 struct lpfc_vport *vport = mboxq->vport; 2887 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2888 2889 /* 2890 * VFI not supported for interface type 0, so ignore any mailbox 2891 * error (except VFI in use) and continue with the discovery. 
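	 * On loop topology a REG_VFI failure is treated as recoverable:
	 * the code below falls back to building a discovery list from the
	 * ALPA loop map and starting discovery rather than failing the
	 * vport.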
2892 	 */
2893 	if (mboxq->u.mb.mbxStatus &&
2894 	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
2895 			LPFC_SLI_INTF_IF_TYPE_0) &&
2896 	    mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
2897 		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
2898 				 "2018 REG_VFI mbxStatus error x%x "
2899 				 "HBA state x%x\n",
2900 				 mboxq->u.mb.mbxStatus, vport->port_state);
2901 		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
2902 			/* FLOGI failed, use loop map to make discovery list */
2903 			lpfc_disc_list_loopmap(vport);
2904 			/* Start discovery */
2905 			lpfc_disc_start(vport);
2906 			goto out_free_mem;
2907 		}
2908 		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
2909 		goto out_free_mem;
2910 	}
2911 
2912 	/* If the VFI is already registered, there is nothing else to do,
2913 	 * unless this was a VFI update and we are in PT2PT mode; then
2914 	 * we should drop through to set the port state to ready.
2915 	 */
2916 	if (vport->fc_flag & FC_VFI_REGISTERED)
2917 		if (!(phba->sli_rev == LPFC_SLI_REV4 &&
2918 		      vport->fc_flag & FC_PT2PT))
2919 			goto out_free_mem;
2920 
2921 	/* The VPI is implicitly registered when the VFI is registered */
2922 	spin_lock_irq(shost->host_lock);
2923 	vport->vpi_state |= LPFC_VPI_REGISTERED;
2924 	vport->fc_flag |= FC_VFI_REGISTERED;
2925 	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2926 	vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
2927 	spin_unlock_irq(shost->host_lock);
2928 
2929 	/* In the case of an SLI4 FC loopback test, we are ready */
2930 	if ((phba->sli_rev == LPFC_SLI_REV4) &&
2931 	    (phba->link_flag & LS_LOOPBACK_MODE)) {
2932 		phba->link_state = LPFC_HBA_READY;
2933 		goto out_free_mem;
2934 	}
2935 
2936 	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
2937 			 "3313 cmpl reg vfi  port_state:%x fc_flag:%x myDid:%x "
2938 			 "alpacnt:%d LinkState:%x topology:%x\n",
2939 			 vport->port_state, vport->fc_flag, vport->fc_myDID,
2940 			 vport->phba->alpa_map[0],
2941 			 phba->link_state, phba->fc_topology);
2942 
2943 	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
2944 		/*
2945 		 * For private loop or for NPort pt2pt,
2946 		 * just start discovery and we are done.
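		 * In the pt2pt case there is no loop map to walk, so the
		 * vport is moved straight to LPFC_VPORT_READY below.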
2947 */ 2948 if ((vport->fc_flag & FC_PT2PT) || 2949 ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) && 2950 !(vport->fc_flag & FC_PUBLIC_LOOP))) { 2951 2952 /* Use loop map to make discovery list */ 2953 lpfc_disc_list_loopmap(vport); 2954 /* Start discovery */ 2955 if (vport->fc_flag & FC_PT2PT) 2956 vport->port_state = LPFC_VPORT_READY; 2957 else 2958 lpfc_disc_start(vport); 2959 } else { 2960 lpfc_start_fdiscs(phba); 2961 lpfc_do_scr_ns_plogi(phba, vport); 2962 } 2963 } 2964 2965 out_free_mem: 2966 mempool_free(mboxq, phba->mbox_mem_pool); 2967 lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys); 2968 kfree(dmabuf); 2969 return; 2970 } 2971 2972 static void 2973 lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 2974 { 2975 MAILBOX_t *mb = &pmb->u.mb; 2976 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1; 2977 struct lpfc_vport *vport = pmb->vport; 2978 2979 2980 /* Check for error */ 2981 if (mb->mbxStatus) { 2982 /* READ_SPARAM mbox error <mbxStatus> state <hba_state> */ 2983 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 2984 "0319 READ_SPARAM mbxStatus error x%x " 2985 "hba state x%x>\n", 2986 mb->mbxStatus, vport->port_state); 2987 lpfc_linkdown(phba); 2988 goto out; 2989 } 2990 2991 memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt, 2992 sizeof (struct serv_parm)); 2993 lpfc_update_vport_wwn(vport); 2994 if (vport->port_type == LPFC_PHYSICAL_PORT) { 2995 memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn)); 2996 memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn)); 2997 } 2998 2999 lpfc_mbuf_free(phba, mp->virt, mp->phys); 3000 kfree(mp); 3001 mempool_free(pmb, phba->mbox_mem_pool); 3002 return; 3003 3004 out: 3005 pmb->context1 = NULL; 3006 lpfc_mbuf_free(phba, mp->virt, mp->phys); 3007 kfree(mp); 3008 lpfc_issue_clear_la(phba, vport); 3009 mempool_free(pmb, phba->mbox_mem_pool); 3010 return; 3011 } 3012 3013 static void 3014 lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) 3015 { 3016 struct lpfc_vport *vport = phba->pport; 3017 LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL; 3018 struct Scsi_Host *shost; 3019 int i; 3020 struct lpfc_dmabuf *mp; 3021 int rc; 3022 struct fcf_record *fcf_record; 3023 uint32_t fc_flags = 0; 3024 3025 spin_lock_irq(&phba->hbalock); 3026 switch (bf_get(lpfc_mbx_read_top_link_spd, la)) { 3027 case LPFC_LINK_SPEED_1GHZ: 3028 case LPFC_LINK_SPEED_2GHZ: 3029 case LPFC_LINK_SPEED_4GHZ: 3030 case LPFC_LINK_SPEED_8GHZ: 3031 case LPFC_LINK_SPEED_10GHZ: 3032 case LPFC_LINK_SPEED_16GHZ: 3033 phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la); 3034 break; 3035 default: 3036 phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN; 3037 break; 3038 } 3039 3040 if (phba->fc_topology && 3041 phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) { 3042 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 3043 "3314 Toplogy changed was 0x%x is 0x%x\n", 3044 phba->fc_topology, 3045 bf_get(lpfc_mbx_read_top_topology, la)); 3046 phba->fc_topology_changed = 1; 3047 } 3048 3049 phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la); 3050 phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED; 3051 3052 shost = lpfc_shost_from_vport(vport); 3053 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 3054 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED; 3055 3056 /* if npiv is enabled and this adapter supports npiv log 3057 * a message that npiv is not supported in this topology 3058 */ 3059 if (phba->cfg_enable_npiv && phba->max_vpi) 3060 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 3061 "1309 Link Up Event npiv not 
supported in loop " 3062 "topology\n"); 3063 /* Get Loop Map information */ 3064 if (bf_get(lpfc_mbx_read_top_il, la)) 3065 fc_flags |= FC_LBIT; 3066 3067 vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la); 3068 i = la->lilpBde64.tus.f.bdeSize; 3069 3070 if (i == 0) { 3071 phba->alpa_map[0] = 0; 3072 } else { 3073 if (vport->cfg_log_verbose & LOG_LINK_EVENT) { 3074 int numalpa, j, k; 3075 union { 3076 uint8_t pamap[16]; 3077 struct { 3078 uint32_t wd1; 3079 uint32_t wd2; 3080 uint32_t wd3; 3081 uint32_t wd4; 3082 } pa; 3083 } un; 3084 numalpa = phba->alpa_map[0]; 3085 j = 0; 3086 while (j < numalpa) { 3087 memset(un.pamap, 0, 16); 3088 for (k = 1; j < numalpa; k++) { 3089 un.pamap[k - 1] = 3090 phba->alpa_map[j + 1]; 3091 j++; 3092 if (k == 16) 3093 break; 3094 } 3095 /* Link Up Event ALPA map */ 3096 lpfc_printf_log(phba, 3097 KERN_WARNING, 3098 LOG_LINK_EVENT, 3099 "1304 Link Up Event " 3100 "ALPA map Data: x%x " 3101 "x%x x%x x%x\n", 3102 un.pa.wd1, un.pa.wd2, 3103 un.pa.wd3, un.pa.wd4); 3104 } 3105 } 3106 } 3107 } else { 3108 if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) { 3109 if (phba->max_vpi && phba->cfg_enable_npiv && 3110 (phba->sli_rev >= LPFC_SLI_REV3)) 3111 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; 3112 } 3113 vport->fc_myDID = phba->fc_pref_DID; 3114 fc_flags |= FC_LBIT; 3115 } 3116 spin_unlock_irq(&phba->hbalock); 3117 3118 if (fc_flags) { 3119 spin_lock_irq(shost->host_lock); 3120 vport->fc_flag |= fc_flags; 3121 spin_unlock_irq(shost->host_lock); 3122 } 3123 3124 lpfc_linkup(phba); 3125 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3126 if (!sparam_mbox) 3127 goto out; 3128 3129 rc = lpfc_read_sparam(phba, sparam_mbox, 0); 3130 if (rc) { 3131 mempool_free(sparam_mbox, phba->mbox_mem_pool); 3132 goto out; 3133 } 3134 sparam_mbox->vport = vport; 3135 sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam; 3136 rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT); 3137 if (rc == MBX_NOT_FINISHED) { 3138 mp = (struct lpfc_dmabuf *) sparam_mbox->context1; 3139 lpfc_mbuf_free(phba, mp->virt, mp->phys); 3140 kfree(mp); 3141 mempool_free(sparam_mbox, phba->mbox_mem_pool); 3142 goto out; 3143 } 3144 3145 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 3146 cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3147 if (!cfglink_mbox) 3148 goto out; 3149 vport->port_state = LPFC_LOCAL_CFG_LINK; 3150 lpfc_config_link(phba, cfglink_mbox); 3151 cfglink_mbox->vport = vport; 3152 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link; 3153 rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT); 3154 if (rc == MBX_NOT_FINISHED) { 3155 mempool_free(cfglink_mbox, phba->mbox_mem_pool); 3156 goto out; 3157 } 3158 } else { 3159 vport->port_state = LPFC_VPORT_UNKNOWN; 3160 /* 3161 * Add the driver's default FCF record at FCF index 0 now. This 3162 * is phase 1 implementation that support FCF index 0 and driver 3163 * defaults. 
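		 * (FIP-capable ports skip this manual default-record entry
		 * entirely and go straight to the FCF table scan started
		 * just below.)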
3164 */ 3165 if (!(phba->hba_flag & HBA_FIP_SUPPORT)) { 3166 fcf_record = kzalloc(sizeof(struct fcf_record), 3167 GFP_KERNEL); 3168 if (unlikely(!fcf_record)) { 3169 lpfc_printf_log(phba, KERN_ERR, 3170 LOG_MBOX | LOG_SLI, 3171 "2554 Could not allocate memory for " 3172 "fcf record\n"); 3173 rc = -ENODEV; 3174 goto out; 3175 } 3176 3177 lpfc_sli4_build_dflt_fcf_record(phba, fcf_record, 3178 LPFC_FCOE_FCF_DEF_INDEX); 3179 rc = lpfc_sli4_add_fcf_record(phba, fcf_record); 3180 if (unlikely(rc)) { 3181 lpfc_printf_log(phba, KERN_ERR, 3182 LOG_MBOX | LOG_SLI, 3183 "2013 Could not manually add FCF " 3184 "record 0, status %d\n", rc); 3185 rc = -ENODEV; 3186 kfree(fcf_record); 3187 goto out; 3188 } 3189 kfree(fcf_record); 3190 } 3191 /* 3192 * The driver is expected to do FIP/FCF. Call the port 3193 * and get the FCF Table. 3194 */ 3195 spin_lock_irq(&phba->hbalock); 3196 if (phba->hba_flag & FCF_TS_INPROG) { 3197 spin_unlock_irq(&phba->hbalock); 3198 return; 3199 } 3200 /* This is the initial FCF discovery scan */ 3201 phba->fcf.fcf_flag |= FCF_INIT_DISC; 3202 spin_unlock_irq(&phba->hbalock); 3203 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 3204 "2778 Start FCF table scan at linkup\n"); 3205 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, 3206 LPFC_FCOE_FCF_GET_FIRST); 3207 if (rc) { 3208 spin_lock_irq(&phba->hbalock); 3209 phba->fcf.fcf_flag &= ~FCF_INIT_DISC; 3210 spin_unlock_irq(&phba->hbalock); 3211 goto out; 3212 } 3213 /* Reset FCF roundrobin bmask for new discovery */ 3214 lpfc_sli4_clear_fcf_rr_bmask(phba); 3215 } 3216 3217 return; 3218 out: 3219 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 3220 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 3221 "0263 Discovery Mailbox error: state: 0x%x : %p %p\n", 3222 vport->port_state, sparam_mbox, cfglink_mbox); 3223 lpfc_issue_clear_la(phba, vport); 3224 return; 3225 } 3226 3227 static void 3228 lpfc_enable_la(struct lpfc_hba *phba) 3229 { 3230 uint32_t control; 3231 struct lpfc_sli *psli = &phba->sli; 3232 spin_lock_irq(&phba->hbalock); 3233 psli->sli_flag |= LPFC_PROCESS_LA; 3234 if (phba->sli_rev <= LPFC_SLI_REV3) { 3235 control = readl(phba->HCregaddr); 3236 control |= HC_LAINT_ENA; 3237 writel(control, phba->HCregaddr); 3238 readl(phba->HCregaddr); /* flush */ 3239 } 3240 spin_unlock_irq(&phba->hbalock); 3241 } 3242 3243 static void 3244 lpfc_mbx_issue_link_down(struct lpfc_hba *phba) 3245 { 3246 lpfc_linkdown(phba); 3247 lpfc_enable_la(phba); 3248 lpfc_unregister_unused_fcf(phba); 3249 /* turn on Link Attention interrupts - no CLEAR_LA needed */ 3250 } 3251 3252 3253 /* 3254 * This routine handles processing a READ_TOPOLOGY mailbox 3255 * command upon completion. It is setup in the LPFC_MBOXQ 3256 * as the completion routine when the command is 3257 * handed off to the SLI layer. 
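 * The attention type reported in the mailbox (link up vs. link down),
 * combined with the Menlo maintenance-mode flag, selects between the
 * lpfc_mbx_process_link_up() and lpfc_mbx_issue_link_down() paths below.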
3258 */ 3259 void 3260 lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 3261 { 3262 struct lpfc_vport *vport = pmb->vport; 3263 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3264 struct lpfc_mbx_read_top *la; 3265 MAILBOX_t *mb = &pmb->u.mb; 3266 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 3267 3268 /* Unblock ELS traffic */ 3269 phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT; 3270 /* Check for error */ 3271 if (mb->mbxStatus) { 3272 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 3273 "1307 READ_LA mbox error x%x state x%x\n", 3274 mb->mbxStatus, vport->port_state); 3275 lpfc_mbx_issue_link_down(phba); 3276 phba->link_state = LPFC_HBA_ERROR; 3277 goto lpfc_mbx_cmpl_read_topology_free_mbuf; 3278 } 3279 3280 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop; 3281 3282 memcpy(&phba->alpa_map[0], mp->virt, 128); 3283 3284 spin_lock_irq(shost->host_lock); 3285 if (bf_get(lpfc_mbx_read_top_pb, la)) 3286 vport->fc_flag |= FC_BYPASSED_MODE; 3287 else 3288 vport->fc_flag &= ~FC_BYPASSED_MODE; 3289 spin_unlock_irq(shost->host_lock); 3290 3291 if (phba->fc_eventTag <= la->eventTag) { 3292 phba->fc_stat.LinkMultiEvent++; 3293 if (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP) 3294 if (phba->fc_eventTag != 0) 3295 lpfc_linkdown(phba); 3296 } 3297 3298 phba->fc_eventTag = la->eventTag; 3299 if (phba->sli_rev < LPFC_SLI_REV4) { 3300 spin_lock_irq(&phba->hbalock); 3301 if (bf_get(lpfc_mbx_read_top_mm, la)) 3302 phba->sli.sli_flag |= LPFC_MENLO_MAINT; 3303 else 3304 phba->sli.sli_flag &= ~LPFC_MENLO_MAINT; 3305 spin_unlock_irq(&phba->hbalock); 3306 } 3307 3308 phba->link_events++; 3309 if ((bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP) && 3310 !(phba->sli.sli_flag & LPFC_MENLO_MAINT)) { 3311 phba->fc_stat.LinkUp++; 3312 if (phba->link_flag & LS_LOOPBACK_MODE) { 3313 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 3314 "1306 Link Up Event in loop back mode " 3315 "x%x received Data: x%x x%x x%x x%x\n", 3316 la->eventTag, phba->fc_eventTag, 3317 bf_get(lpfc_mbx_read_top_alpa_granted, 3318 la), 3319 bf_get(lpfc_mbx_read_top_link_spd, la), 3320 phba->alpa_map[0]); 3321 } else { 3322 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 3323 "1303 Link Up Event x%x received " 3324 "Data: x%x x%x x%x x%x x%x x%x %d\n", 3325 la->eventTag, phba->fc_eventTag, 3326 bf_get(lpfc_mbx_read_top_alpa_granted, 3327 la), 3328 bf_get(lpfc_mbx_read_top_link_spd, la), 3329 phba->alpa_map[0], 3330 bf_get(lpfc_mbx_read_top_mm, la), 3331 bf_get(lpfc_mbx_read_top_fa, la), 3332 phba->wait_4_mlo_maint_flg); 3333 } 3334 lpfc_mbx_process_link_up(phba, la); 3335 } else if (bf_get(lpfc_mbx_read_top_att_type, la) == 3336 LPFC_ATT_LINK_DOWN) { 3337 phba->fc_stat.LinkDown++; 3338 if (phba->link_flag & LS_LOOPBACK_MODE) 3339 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 3340 "1308 Link Down Event in loop back mode " 3341 "x%x received " 3342 "Data: x%x x%x x%x\n", 3343 la->eventTag, phba->fc_eventTag, 3344 phba->pport->port_state, vport->fc_flag); 3345 else 3346 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 3347 "1305 Link Down Event x%x received " 3348 "Data: x%x x%x x%x x%x x%x\n", 3349 la->eventTag, phba->fc_eventTag, 3350 phba->pport->port_state, vport->fc_flag, 3351 bf_get(lpfc_mbx_read_top_mm, la), 3352 bf_get(lpfc_mbx_read_top_fa, la)); 3353 lpfc_mbx_issue_link_down(phba); 3354 } 3355 if ((phba->sli.sli_flag & LPFC_MENLO_MAINT) && 3356 ((bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP))) { 3357 if (phba->link_state != 
LPFC_LINK_DOWN) { 3358 phba->fc_stat.LinkDown++; 3359 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 3360 "1312 Link Down Event x%x received " 3361 "Data: x%x x%x x%x\n", 3362 la->eventTag, phba->fc_eventTag, 3363 phba->pport->port_state, vport->fc_flag); 3364 lpfc_mbx_issue_link_down(phba); 3365 } else 3366 lpfc_enable_la(phba); 3367 3368 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 3369 "1310 Menlo Maint Mode Link up Event x%x rcvd " 3370 "Data: x%x x%x x%x\n", 3371 la->eventTag, phba->fc_eventTag, 3372 phba->pport->port_state, vport->fc_flag); 3373 /* 3374 * The cmnd that triggered this will be waiting for this 3375 * signal. 3376 */ 3377 /* WAKEUP for MENLO_SET_MODE or MENLO_RESET command. */ 3378 if (phba->wait_4_mlo_maint_flg) { 3379 phba->wait_4_mlo_maint_flg = 0; 3380 wake_up_interruptible(&phba->wait_4_mlo_m_q); 3381 } 3382 } 3383 3384 if ((phba->sli_rev < LPFC_SLI_REV4) && 3385 bf_get(lpfc_mbx_read_top_fa, la)) { 3386 if (phba->sli.sli_flag & LPFC_MENLO_MAINT) 3387 lpfc_issue_clear_la(phba, vport); 3388 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 3389 "1311 fa %d\n", 3390 bf_get(lpfc_mbx_read_top_fa, la)); 3391 } 3392 3393 lpfc_mbx_cmpl_read_topology_free_mbuf: 3394 lpfc_mbuf_free(phba, mp->virt, mp->phys); 3395 kfree(mp); 3396 mempool_free(pmb, phba->mbox_mem_pool); 3397 return; 3398 } 3399 3400 /* 3401 * This routine handles processing a REG_LOGIN mailbox 3402 * command upon completion. It is setup in the LPFC_MBOXQ 3403 * as the completion routine when the command is 3404 * handed off to the SLI layer. 3405 */ 3406 void 3407 lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 3408 { 3409 struct lpfc_vport *vport = pmb->vport; 3410 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 3411 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 3412 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3413 3414 pmb->context1 = NULL; 3415 pmb->context2 = NULL; 3416 3417 if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) 3418 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 3419 3420 if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL || 3421 ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) { 3422 /* We rcvd a rscn after issuing this 3423 * mbox reg login, we may have cycled 3424 * back through the state and be 3425 * back at reg login state so this 3426 * mbox needs to be ignored becase 3427 * there is another reg login in 3428 * process. 3429 */ 3430 spin_lock_irq(shost->host_lock); 3431 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 3432 spin_unlock_irq(shost->host_lock); 3433 } else 3434 /* Good status, call state machine */ 3435 lpfc_disc_state_machine(vport, ndlp, pmb, 3436 NLP_EVT_CMPL_REG_LOGIN); 3437 3438 lpfc_mbuf_free(phba, mp->virt, mp->phys); 3439 kfree(mp); 3440 mempool_free(pmb, phba->mbox_mem_pool); 3441 /* decrement the node reference count held for this callback 3442 * function. 
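	 * (The matching lpfc_nlp_get() is assumed to have been taken when
	 * the REG_LOGIN mailbox was set up.)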
3443 */ 3444 lpfc_nlp_put(ndlp); 3445 3446 return; 3447 } 3448 3449 static void 3450 lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 3451 { 3452 MAILBOX_t *mb = &pmb->u.mb; 3453 struct lpfc_vport *vport = pmb->vport; 3454 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3455 3456 switch (mb->mbxStatus) { 3457 case 0x0011: 3458 case 0x0020: 3459 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 3460 "0911 cmpl_unreg_vpi, mb status = 0x%x\n", 3461 mb->mbxStatus); 3462 break; 3463 /* If VPI is busy, reset the HBA */ 3464 case 0x9700: 3465 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3466 "2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n", 3467 vport->vpi, mb->mbxStatus); 3468 if (!(phba->pport->load_flag & FC_UNLOADING)) 3469 lpfc_workq_post_event(phba, NULL, NULL, 3470 LPFC_EVT_RESET_HBA); 3471 } 3472 spin_lock_irq(shost->host_lock); 3473 vport->vpi_state &= ~LPFC_VPI_REGISTERED; 3474 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 3475 spin_unlock_irq(shost->host_lock); 3476 vport->unreg_vpi_cmpl = VPORT_OK; 3477 mempool_free(pmb, phba->mbox_mem_pool); 3478 lpfc_cleanup_vports_rrqs(vport, NULL); 3479 /* 3480 * This shost reference might have been taken at the beginning of 3481 * lpfc_vport_delete() 3482 */ 3483 if ((vport->load_flag & FC_UNLOADING) && (vport != phba->pport)) 3484 scsi_host_put(shost); 3485 } 3486 3487 int 3488 lpfc_mbx_unreg_vpi(struct lpfc_vport *vport) 3489 { 3490 struct lpfc_hba *phba = vport->phba; 3491 LPFC_MBOXQ_t *mbox; 3492 int rc; 3493 3494 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3495 if (!mbox) 3496 return 1; 3497 3498 lpfc_unreg_vpi(phba, vport->vpi, mbox); 3499 mbox->vport = vport; 3500 mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi; 3501 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 3502 if (rc == MBX_NOT_FINISHED) { 3503 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT, 3504 "1800 Could not issue unreg_vpi\n"); 3505 mempool_free(mbox, phba->mbox_mem_pool); 3506 vport->unreg_vpi_cmpl = VPORT_ERROR; 3507 return rc; 3508 } 3509 return 0; 3510 } 3511 3512 static void 3513 lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 3514 { 3515 struct lpfc_vport *vport = pmb->vport; 3516 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3517 MAILBOX_t *mb = &pmb->u.mb; 3518 3519 switch (mb->mbxStatus) { 3520 case 0x0011: 3521 case 0x9601: 3522 case 0x9602: 3523 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 3524 "0912 cmpl_reg_vpi, mb status = 0x%x\n", 3525 mb->mbxStatus); 3526 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 3527 spin_lock_irq(shost->host_lock); 3528 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 3529 spin_unlock_irq(shost->host_lock); 3530 vport->fc_myDID = 0; 3531 goto out; 3532 } 3533 3534 spin_lock_irq(shost->host_lock); 3535 vport->vpi_state |= LPFC_VPI_REGISTERED; 3536 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 3537 spin_unlock_irq(shost->host_lock); 3538 vport->num_disc_nodes = 0; 3539 /* go thru NPR list and issue ELS PLOGIs */ 3540 if (vport->fc_npr_cnt) 3541 lpfc_els_disc_plogi(vport); 3542 3543 if (!vport->num_disc_nodes) { 3544 spin_lock_irq(shost->host_lock); 3545 vport->fc_flag &= ~FC_NDISC_ACTIVE; 3546 spin_unlock_irq(shost->host_lock); 3547 lpfc_can_disctmo(vport); 3548 } 3549 vport->port_state = LPFC_VPORT_READY; 3550 3551 out: 3552 mempool_free(pmb, phba->mbox_mem_pool); 3553 return; 3554 } 3555 3556 /** 3557 * lpfc_create_static_vport - Read HBA config region to create static vports. 3558 * @phba: pointer to lpfc hba data structure. 
3559 * 3560 * This routine issue a DUMP mailbox command for config region 22 to get 3561 * the list of static vports to be created. The function create vports 3562 * based on the information returned from the HBA. 3563 **/ 3564 void 3565 lpfc_create_static_vport(struct lpfc_hba *phba) 3566 { 3567 LPFC_MBOXQ_t *pmb = NULL; 3568 MAILBOX_t *mb; 3569 struct static_vport_info *vport_info; 3570 int mbx_wait_rc = 0, i; 3571 struct fc_vport_identifiers vport_id; 3572 struct fc_vport *new_fc_vport; 3573 struct Scsi_Host *shost; 3574 struct lpfc_vport *vport; 3575 uint16_t offset = 0; 3576 uint8_t *vport_buff; 3577 struct lpfc_dmabuf *mp; 3578 uint32_t byte_count = 0; 3579 3580 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3581 if (!pmb) { 3582 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3583 "0542 lpfc_create_static_vport failed to" 3584 " allocate mailbox memory\n"); 3585 return; 3586 } 3587 memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); 3588 mb = &pmb->u.mb; 3589 3590 vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL); 3591 if (!vport_info) { 3592 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3593 "0543 lpfc_create_static_vport failed to" 3594 " allocate vport_info\n"); 3595 mempool_free(pmb, phba->mbox_mem_pool); 3596 return; 3597 } 3598 3599 vport_buff = (uint8_t *) vport_info; 3600 do { 3601 /* free dma buffer from previous round */ 3602 if (pmb->context1) { 3603 mp = (struct lpfc_dmabuf *)pmb->context1; 3604 lpfc_mbuf_free(phba, mp->virt, mp->phys); 3605 kfree(mp); 3606 } 3607 if (lpfc_dump_static_vport(phba, pmb, offset)) 3608 goto out; 3609 3610 pmb->vport = phba->pport; 3611 mbx_wait_rc = lpfc_sli_issue_mbox_wait(phba, pmb, 3612 LPFC_MBOX_TMO); 3613 3614 if ((mbx_wait_rc != MBX_SUCCESS) || mb->mbxStatus) { 3615 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 3616 "0544 lpfc_create_static_vport failed to" 3617 " issue dump mailbox command ret 0x%x " 3618 "status 0x%x\n", 3619 mbx_wait_rc, mb->mbxStatus); 3620 goto out; 3621 } 3622 3623 if (phba->sli_rev == LPFC_SLI_REV4) { 3624 byte_count = pmb->u.mqe.un.mb_words[5]; 3625 mp = (struct lpfc_dmabuf *)pmb->context1; 3626 if (byte_count > sizeof(struct static_vport_info) - 3627 offset) 3628 byte_count = sizeof(struct static_vport_info) 3629 - offset; 3630 memcpy(vport_buff + offset, mp->virt, byte_count); 3631 offset += byte_count; 3632 } else { 3633 if (mb->un.varDmp.word_cnt > 3634 sizeof(struct static_vport_info) - offset) 3635 mb->un.varDmp.word_cnt = 3636 sizeof(struct static_vport_info) 3637 - offset; 3638 byte_count = mb->un.varDmp.word_cnt; 3639 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 3640 vport_buff + offset, 3641 byte_count); 3642 3643 offset += byte_count; 3644 } 3645 3646 } while (byte_count && 3647 offset < sizeof(struct static_vport_info)); 3648 3649 3650 if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) || 3651 ((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK) 3652 != VPORT_INFO_REV)) { 3653 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3654 "0545 lpfc_create_static_vport bad" 3655 " information header 0x%x 0x%x\n", 3656 le32_to_cpu(vport_info->signature), 3657 le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK); 3658 3659 goto out; 3660 } 3661 3662 shost = lpfc_shost_from_vport(phba->pport); 3663 3664 for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) { 3665 memset(&vport_id, 0, sizeof(vport_id)); 3666 vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn); 3667 vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn); 3668 if (!vport_id.port_name || !vport_id.node_name) 3669 continue; 
3670
3671 vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
3672 vport_id.vport_type = FC_PORTTYPE_NPIV;
3673 vport_id.disable = false;
3674 new_fc_vport = fc_vport_create(shost, 0, &vport_id);
3675
3676 if (!new_fc_vport) {
3677 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3678 "0546 lpfc_create_static_vport failed to"
3679 " create vport\n");
3680 continue;
3681 }
3682
3683 vport = *(struct lpfc_vport **)new_fc_vport->dd_data;
3684 vport->vport_flag |= STATIC_VPORT;
3685 }
3686
3687 out:
3688 kfree(vport_info);
3689 if (mbx_wait_rc != MBX_TIMEOUT) {
3690 if (pmb->context1) {
3691 mp = (struct lpfc_dmabuf *)pmb->context1;
3692 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3693 kfree(mp);
3694 }
3695 mempool_free(pmb, phba->mbox_mem_pool);
3696 }
3697
3698 return;
3699 }
3700
3701 /*
3702 * This routine handles processing a Fabric REG_LOGIN mailbox
3703 * command upon completion. It is setup in the LPFC_MBOXQ
3704 * as the completion routine when the command is
3705 * handed off to the SLI layer.
3706 */
3707 void
3708 lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3709 {
3710 struct lpfc_vport *vport = pmb->vport;
3711 MAILBOX_t *mb = &pmb->u.mb;
3712 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
3713 struct lpfc_nodelist *ndlp;
3714 struct Scsi_Host *shost;
3715
3716 ndlp = (struct lpfc_nodelist *) pmb->context2;
3717 pmb->context1 = NULL;
3718 pmb->context2 = NULL;
3719
3720 if (mb->mbxStatus) {
3721 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
3722 "0258 Register Fabric login error: 0x%x\n",
3723 mb->mbxStatus);
3724 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3725 kfree(mp);
3726 mempool_free(pmb, phba->mbox_mem_pool);
3727
3728 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
3729 /* FLOGI failed, use loop map to make discovery list */
3730 lpfc_disc_list_loopmap(vport);
3731
3732 /* Start discovery */
3733 lpfc_disc_start(vport);
3734 /* Decrement the reference count to ndlp after the
3735 * references to the ndlp are done.
3736 */
3737 lpfc_nlp_put(ndlp);
3738 return;
3739 }
3740
3741 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3742 /* Decrement the reference count to ndlp after the references
3743 * to the ndlp are done.
3744 */
3745 lpfc_nlp_put(ndlp);
3746 return;
3747 }
3748
3749 if (phba->sli_rev < LPFC_SLI_REV4)
3750 ndlp->nlp_rpi = mb->un.varWords[0];
3751 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
3752 ndlp->nlp_type |= NLP_FABRIC;
3753 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
3754
3755 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
3756 /* when the physical port receives a LOGO, do not start
3757 * vport discovery */
3758 if (!(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
3759 lpfc_start_fdiscs(phba);
3760 else {
3761 shost = lpfc_shost_from_vport(vport);
3762 spin_lock_irq(shost->host_lock);
3763 vport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG;
3764 spin_unlock_irq(shost->host_lock);
3765 }
3766 lpfc_do_scr_ns_plogi(phba, vport);
3767 }
3768
3769 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3770 kfree(mp);
3771 mempool_free(pmb, phba->mbox_mem_pool);
3772
3773 /* Drop the reference count from the mbox at the end after
3774 * all the current references to the ndlp have been done.
3775 */
3776 lpfc_nlp_put(ndlp);
3777 return;
3778 }
3779
3780 /*
3781 * This routine handles processing a NameServer REG_LOGIN mailbox
3782 * command upon completion. It is setup in the LPFC_MBOXQ
3783 * as the completion routine when the command is
3784 * handed off to the SLI layer.
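 * On success the NameServer node is placed in UNMAPPED state, the
 * driver registers its names and FC-4 types with the NameServer, and a
 * GID_FT query is issued to start fabric discovery.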
3785 */ 3786 void 3787 lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 3788 { 3789 MAILBOX_t *mb = &pmb->u.mb; 3790 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 3791 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 3792 struct lpfc_vport *vport = pmb->vport; 3793 3794 pmb->context1 = NULL; 3795 pmb->context2 = NULL; 3796 3797 if (mb->mbxStatus) { 3798 out: 3799 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 3800 "0260 Register NameServer error: 0x%x\n", 3801 mb->mbxStatus); 3802 /* decrement the node reference count held for this 3803 * callback function. 3804 */ 3805 lpfc_nlp_put(ndlp); 3806 lpfc_mbuf_free(phba, mp->virt, mp->phys); 3807 kfree(mp); 3808 mempool_free(pmb, phba->mbox_mem_pool); 3809 3810 /* If no other thread is using the ndlp, free it */ 3811 lpfc_nlp_not_used(ndlp); 3812 3813 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 3814 /* 3815 * RegLogin failed, use loop map to make discovery 3816 * list 3817 */ 3818 lpfc_disc_list_loopmap(vport); 3819 3820 /* Start discovery */ 3821 lpfc_disc_start(vport); 3822 return; 3823 } 3824 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 3825 return; 3826 } 3827 3828 if (phba->sli_rev < LPFC_SLI_REV4) 3829 ndlp->nlp_rpi = mb->un.varWords[0]; 3830 ndlp->nlp_flag |= NLP_RPI_REGISTERED; 3831 ndlp->nlp_type |= NLP_FABRIC; 3832 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 3833 3834 if (vport->port_state < LPFC_VPORT_READY) { 3835 /* Link up discovery requires Fabric registration. */ 3836 lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0); /* Do this first! */ 3837 lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0); 3838 lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0); 3839 lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0); 3840 lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0); 3841 3842 /* Issue SCR just before NameServer GID_FT Query */ 3843 lpfc_issue_els_scr(vport, SCR_DID, 0); 3844 } 3845 3846 vport->fc_ns_retry = 0; 3847 /* Good status, issue CT Request to NameServer */ 3848 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0)) { 3849 /* Cannot issue NameServer Query, so finish up discovery */ 3850 goto out; 3851 } 3852 3853 /* decrement the node reference count held for this 3854 * callback function. 3855 */ 3856 lpfc_nlp_put(ndlp); 3857 lpfc_mbuf_free(phba, mp->virt, mp->phys); 3858 kfree(mp); 3859 mempool_free(pmb, phba->mbox_mem_pool); 3860 3861 return; 3862 } 3863 3864 static void 3865 lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 3866 { 3867 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3868 struct fc_rport *rport; 3869 struct lpfc_rport_data *rdata; 3870 struct fc_rport_identifiers rport_ids; 3871 struct lpfc_hba *phba = vport->phba; 3872 3873 /* Remote port has reappeared. Re-register w/ FC transport */ 3874 rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn); 3875 rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn); 3876 rport_ids.port_id = ndlp->nlp_DID; 3877 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; 3878 3879 /* 3880 * We leave our node pointer in rport->dd_data when we unregister a 3881 * FCP target port. But fc_remote_port_add zeros the space to which 3882 * rport->dd_data points. So, if we're reusing a previously 3883 * registered port, drop the reference that we took the last time we 3884 * registered the port. 
3885 */ 3886 if (ndlp->rport && ndlp->rport->dd_data && 3887 ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp) 3888 lpfc_nlp_put(ndlp); 3889 3890 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, 3891 "rport add: did:x%x flg:x%x type x%x", 3892 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); 3893 3894 /* Don't add the remote port if unloading. */ 3895 if (vport->load_flag & FC_UNLOADING) 3896 return; 3897 3898 ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids); 3899 if (!rport || !get_device(&rport->dev)) { 3900 dev_printk(KERN_WARNING, &phba->pcidev->dev, 3901 "Warning: fc_remote_port_add failed\n"); 3902 return; 3903 } 3904 3905 /* initialize static port data */ 3906 rport->maxframe_size = ndlp->nlp_maxframe; 3907 rport->supported_classes = ndlp->nlp_class_sup; 3908 rdata = rport->dd_data; 3909 rdata->pnode = lpfc_nlp_get(ndlp); 3910 3911 if (ndlp->nlp_type & NLP_FCP_TARGET) 3912 rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET; 3913 if (ndlp->nlp_type & NLP_FCP_INITIATOR) 3914 rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR; 3915 3916 if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN) 3917 fc_remote_port_rolechg(rport, rport_ids.roles); 3918 3919 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, 3920 "3183 rport register x%06x, rport %p role x%x\n", 3921 ndlp->nlp_DID, rport, rport_ids.roles); 3922 3923 if ((rport->scsi_target_id != -1) && 3924 (rport->scsi_target_id < LPFC_MAX_TARGET)) { 3925 ndlp->nlp_sid = rport->scsi_target_id; 3926 } 3927 return; 3928 } 3929 3930 static void 3931 lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp) 3932 { 3933 struct fc_rport *rport = ndlp->rport; 3934 3935 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT, 3936 "rport delete: did:x%x flg:x%x type x%x", 3937 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); 3938 3939 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, 3940 "3184 rport unregister x%06x, rport %p\n", 3941 ndlp->nlp_DID, rport); 3942 3943 fc_remote_port_delete(rport); 3944 3945 return; 3946 } 3947 3948 static void 3949 lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count) 3950 { 3951 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3952 3953 spin_lock_irq(shost->host_lock); 3954 switch (state) { 3955 case NLP_STE_UNUSED_NODE: 3956 vport->fc_unused_cnt += count; 3957 break; 3958 case NLP_STE_PLOGI_ISSUE: 3959 vport->fc_plogi_cnt += count; 3960 break; 3961 case NLP_STE_ADISC_ISSUE: 3962 vport->fc_adisc_cnt += count; 3963 break; 3964 case NLP_STE_REG_LOGIN_ISSUE: 3965 vport->fc_reglogin_cnt += count; 3966 break; 3967 case NLP_STE_PRLI_ISSUE: 3968 vport->fc_prli_cnt += count; 3969 break; 3970 case NLP_STE_UNMAPPED_NODE: 3971 vport->fc_unmap_cnt += count; 3972 break; 3973 case NLP_STE_MAPPED_NODE: 3974 vport->fc_map_cnt += count; 3975 break; 3976 case NLP_STE_NPR_NODE: 3977 if (vport->fc_npr_cnt == 0 && count == -1) 3978 vport->fc_npr_cnt = 0; 3979 else 3980 vport->fc_npr_cnt += count; 3981 break; 3982 } 3983 spin_unlock_irq(shost->host_lock); 3984 } 3985 3986 static void 3987 lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 3988 int old_state, int new_state) 3989 { 3990 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3991 3992 if (new_state == NLP_STE_UNMAPPED_NODE) { 3993 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; 3994 ndlp->nlp_type |= NLP_FC_NODE; 3995 } 3996 if (new_state == NLP_STE_MAPPED_NODE) 3997 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; 3998 if (new_state == NLP_STE_NPR_NODE) 3999 ndlp->nlp_flag &= ~NLP_RCV_PLOGI; 4000 4001 /* Transport interface */ 4002 if 
(ndlp->rport && (old_state == NLP_STE_MAPPED_NODE ||
4003 old_state == NLP_STE_UNMAPPED_NODE)) {
4004 vport->phba->nport_event_cnt++;
4005 lpfc_unregister_remote_port(ndlp);
4006 }
4007
4008 if (new_state == NLP_STE_MAPPED_NODE ||
4009 new_state == NLP_STE_UNMAPPED_NODE) {
4010 vport->phba->nport_event_cnt++;
4011 /*
4012 * Tell the fc transport about the port, if we haven't
4013 * already. If we have, and it's a scsi entity, be
4014 * sure to unblock any attached scsi devices
4015 */
4016 lpfc_register_remote_port(vport, ndlp);
4017 }
4018 if ((new_state == NLP_STE_MAPPED_NODE) &&
4019 (vport->stat_data_enabled)) {
4020 /*
4021 * A new target has been discovered; if there is no buffer
4022 * for statistical data collection, allocate one.
4023 */
4024 ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
4025 sizeof(struct lpfc_scsicmd_bkt),
4026 GFP_KERNEL);
4027
4028 if (!ndlp->lat_data)
4029 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
4030 "0286 lpfc_nlp_state_cleanup failed to "
4031 "allocate statistical data buffer DID "
4032 "0x%x\n", ndlp->nlp_DID);
4033 }
4034 /*
4035 * If we added the node to the Mapped list but the remote port
4036 * registration failed or assigned a target id outside
4037 * our presentable range, move the node to the
4038 * Unmapped list.
4039 */
4040 if (new_state == NLP_STE_MAPPED_NODE &&
4041 (!ndlp->rport ||
4042 ndlp->rport->scsi_target_id == -1 ||
4043 ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
4044 spin_lock_irq(shost->host_lock);
4045 ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
4046 spin_unlock_irq(shost->host_lock);
4047 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
4048 }
4049 }
4050
4051 static char *
4052 lpfc_nlp_state_name(char *buffer, size_t size, int state)
4053 {
4054 static char *states[] = {
4055 [NLP_STE_UNUSED_NODE] = "UNUSED",
4056 [NLP_STE_PLOGI_ISSUE] = "PLOGI",
4057 [NLP_STE_ADISC_ISSUE] = "ADISC",
4058 [NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
4059 [NLP_STE_PRLI_ISSUE] = "PRLI",
4060 [NLP_STE_LOGO_ISSUE] = "LOGO",
4061 [NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
4062 [NLP_STE_MAPPED_NODE] = "MAPPED",
4063 [NLP_STE_NPR_NODE] = "NPR",
4064 };
4065
4066 if (state < NLP_STE_MAX_STATE && states[state])
4067 strlcpy(buffer, states[state], size);
4068 else
4069 snprintf(buffer, size, "unknown (%d)", state);
4070 return buffer;
4071 }
4072
4073 void
4074 lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4075 int state)
4076 {
4077 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4078 int old_state = ndlp->nlp_state;
4079 char name1[16], name2[16];
4080
4081 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
4082 "0904 NPort state transition x%06x, %s -> %s\n",
4083 ndlp->nlp_DID,
4084 lpfc_nlp_state_name(name1, sizeof(name1), old_state),
4085 lpfc_nlp_state_name(name2, sizeof(name2), state));
4086
4087 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
4088 "node statechg did:x%x old:%d ste:%d",
4089 ndlp->nlp_DID, old_state, state);
4090
4091 if (old_state == NLP_STE_NPR_NODE &&
4092 state != NLP_STE_NPR_NODE)
4093 lpfc_cancel_retry_delay_tmo(vport, ndlp);
4094 if (old_state == NLP_STE_UNMAPPED_NODE) {
4095 ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
4096 ndlp->nlp_type &= ~NLP_FC_NODE;
4097 }
4098
4099 if (list_empty(&ndlp->nlp_listp)) {
4100 spin_lock_irq(shost->host_lock);
4101 list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
4102 spin_unlock_irq(shost->host_lock);
4103 } else if (old_state)
4104 lpfc_nlp_counters(vport, old_state, -1);
4105
4106 ndlp->nlp_state = state;
4107 lpfc_nlp_counters(vport, state, 1);
4108
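/* Reconcile the FC transport registration (rport add/remove) and
 * related bookkeeping with the new state.
 */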
lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
4109 }
4110
4111 void
4112 lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4113 {
4114 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4115
4116 if (list_empty(&ndlp->nlp_listp)) {
4117 spin_lock_irq(shost->host_lock);
4118 list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
4119 spin_unlock_irq(shost->host_lock);
4120 }
4121 }
4122
4123 void
4124 lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4125 {
4126 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4127
4128 lpfc_cancel_retry_delay_tmo(vport, ndlp);
4129 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
4130 lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
4131 spin_lock_irq(shost->host_lock);
4132 list_del_init(&ndlp->nlp_listp);
4133 spin_unlock_irq(shost->host_lock);
4134 lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
4135 NLP_STE_UNUSED_NODE);
4136 }
4137
4138 static void
4139 lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4140 {
4141 lpfc_cancel_retry_delay_tmo(vport, ndlp);
4142 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
4143 lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
4144 lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
4145 NLP_STE_UNUSED_NODE);
4146 }
4147 /**
4148 * lpfc_initialize_node - Initialize all fields of node object
4149 * @vport: Pointer to Virtual Port object.
4150 * @ndlp: Pointer to FC node object.
4151 * @did: FC_ID of the node.
4152 *
4153 * This function is always called when a node object needs to be initialized.
4154 * It initializes all the fields of the node object. Although the reference
4155 * to phba from @ndlp can be obtained indirectly through its reference to
4156 * @vport, a direct reference to phba is taken here by @ndlp. This is
4157 * because the life-span of @ndlp may go beyond the existence of @vport,
4158 * as the final release of the ndlp is determined by its reference count,
4159 * and operations on @ndlp need the reference to phba.
4160 **/
4161 static inline void
4162 lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4163 uint32_t did)
4164 {
4165 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
4166 INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
4167 init_timer(&ndlp->nlp_delayfunc);
4168 ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
4169 ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
4170 ndlp->nlp_DID = did;
4171 ndlp->vport = vport;
4172 ndlp->phba = vport->phba;
4173 ndlp->nlp_sid = NLP_NO_SID;
4174 kref_init(&ndlp->kref);
4175 NLP_INT_NODE_ACT(ndlp);
4176 atomic_set(&ndlp->cmd_pending, 0);
4177 ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
4178 }
4179
4180 struct lpfc_nodelist *
4181 lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4182 int state)
4183 {
4184 struct lpfc_hba *phba = vport->phba;
4185 uint32_t did;
4186 unsigned long flags;
4187 unsigned long *active_rrqs_xri_bitmap = NULL;
4188
4189 if (!ndlp)
4190 return NULL;
4191
4192 spin_lock_irqsave(&phba->ndlp_lock, flags);
4193 /* The ndlp should not be in memory free mode */
4194 if (NLP_CHK_FREE_REQ(ndlp)) {
4195 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
4196 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
4197 "0277 lpfc_enable_node: ndlp:x%p "
4198 "usgmap:x%x refcnt:%d\n",
4199 (void *)ndlp, ndlp->nlp_usg_map,
4200 atomic_read(&ndlp->kref.refcount));
4201 return NULL;
4202 }
4203 /* The ndlp should not already be in active mode */
4204 if (NLP_CHK_NODE_ACT(ndlp)) {
4205 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
4206 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
4207 "0278 lpfc_enable_node: ndlp:x%p "
4208 "usgmap:x%x refcnt:%d\n",
4209 (void *)ndlp, ndlp->nlp_usg_map,
4210 atomic_read(&ndlp->kref.refcount));
4211 return NULL;
4212 }
4213
4214 /* Keep the original DID */
4215 did = ndlp->nlp_DID;
4216 if (phba->sli_rev == LPFC_SLI_REV4)
4217 active_rrqs_xri_bitmap = ndlp->active_rrqs_xri_bitmap;
4218
4219 /* re-initialize ndlp except for the ndlp linked list pointer */
4220 memset((((char *)ndlp) + sizeof (struct list_head)), 0,
4221 sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
4222 lpfc_initialize_node(vport, ndlp, did);
4223
4224 if (phba->sli_rev == LPFC_SLI_REV4)
4225 ndlp->active_rrqs_xri_bitmap = active_rrqs_xri_bitmap;
4226
4227 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
4228 if (vport->phba->sli_rev == LPFC_SLI_REV4)
4229 ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
4230
4231
4232 if (state != NLP_STE_UNUSED_NODE)
4233 lpfc_nlp_set_state(vport, ndlp, state);
4234
4235 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
4236 "node enable: did:x%x",
4237 ndlp->nlp_DID, 0, 0);
4238 return ndlp;
4239 }
4240
4241 void
4242 lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4243 {
4244 /*
4245 * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should
4246 * be used if we wish to issue the "last" lpfc_nlp_put() to remove
4247 * the ndlp from the vport. The ndlp stays marked as UNUSED on the
4248 * list until ALL other outstanding threads have completed. We check
4249 * that the ndlp is not already in the UNUSED state before we proceed.
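 * lpfc_nlp_set_state() moves the node to UNUSED and fixes up the
 * per-state counters; the final lpfc_nlp_put() below then drops the
 * driver's reference.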
4250 */
4251 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
4252 return;
4253 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
4254 if (vport->phba->sli_rev == LPFC_SLI_REV4)
4255 lpfc_cleanup_vports_rrqs(vport, ndlp);
4256 lpfc_nlp_put(ndlp);
4257 return;
4258 }
4259
4260 /*
4261 * Start / ReStart rescue timer for Discovery / RSCN handling
4262 */
4263 void
4264 lpfc_set_disctmo(struct lpfc_vport *vport)
4265 {
4266 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4267 struct lpfc_hba *phba = vport->phba;
4268 uint32_t tmo;
4269
4270 if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
4271 /* For FAN, timeout should be greater than edtov */
4272 tmo = (((phba->fc_edtov + 999) / 1000) + 1);
4273 } else {
4274 /* Normal discovery timeout should be greater than ELS/CT timeout
4275 * FC spec states we need 3 * ratov for CT requests
4276 */
4277 tmo = ((phba->fc_ratov * 3) + 3);
4278 }
4279
4280
4281 if (!timer_pending(&vport->fc_disctmo)) {
4282 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4283 "set disc timer: tmo:x%x state:x%x flg:x%x",
4284 tmo, vport->port_state, vport->fc_flag);
4285 }
4286
4287 mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo));
4288 spin_lock_irq(shost->host_lock);
4289 vport->fc_flag |= FC_DISC_TMO;
4290 spin_unlock_irq(shost->host_lock);
4291
4292 /* Start Discovery Timer state <hba_state> */
4293 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4294 "0247 Start Discovery Timer state x%x "
4295 "Data: x%x x%lx x%x x%x\n",
4296 vport->port_state, tmo,
4297 (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
4298 vport->fc_adisc_cnt);
4299
4300 return;
4301 }
4302
4303 /*
4304 * Cancel rescue timer for Discovery / RSCN handling
4305 */
4306 int
4307 lpfc_can_disctmo(struct lpfc_vport *vport)
4308 {
4309 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4310 unsigned long iflags;
4311
4312 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4313 "can disc timer: state:x%x rtry:x%x flg:x%x",
4314 vport->port_state, vport->fc_ns_retry, vport->fc_flag);
4315
4316 /* Turn off discovery timer if it's running */
4317 if (vport->fc_flag & FC_DISC_TMO) {
4318 spin_lock_irqsave(shost->host_lock, iflags);
4319 vport->fc_flag &= ~FC_DISC_TMO;
4320 spin_unlock_irqrestore(shost->host_lock, iflags);
4321 del_timer_sync(&vport->fc_disctmo);
4322 spin_lock_irqsave(&vport->work_port_lock, iflags);
4323 vport->work_port_events &= ~WORKER_DISC_TMO;
4324 spin_unlock_irqrestore(&vport->work_port_lock, iflags);
4325 }
4326
4327 /* Cancel Discovery Timer state <hba_state> */
4328 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4329 "0248 Cancel Discovery Timer state x%x "
4330 "Data: x%x x%x x%x\n",
4331 vport->port_state, vport->fc_flag,
4332 vport->fc_plogi_cnt, vport->fc_adisc_cnt);
4333 return 0;
4334 }
4335
4336 /*
4337 * Check specified ring for outstanding IOCB on the SLI queue
4338 * Return true if iocb matches the specified nport
4339 */
4340 int
4341 lpfc_check_sli_ndlp(struct lpfc_hba *phba,
4342 struct lpfc_sli_ring *pring,
4343 struct lpfc_iocbq *iocb,
4344 struct lpfc_nodelist *ndlp)
4345 {
4346 struct lpfc_sli *psli = &phba->sli;
4347 IOCB_t *icmd = &iocb->iocb;
4348 struct lpfc_vport *vport = ndlp->vport;
4349
4350 if (iocb->vport != vport)
4351 return 0;
4352
4353 if (pring->ringno == LPFC_ELS_RING) {
4354 switch (icmd->ulpCommand) {
4355 case CMD_GEN_REQUEST64_CR:
4356 if (iocb->context_un.ndlp == ndlp)
4357 return 1;
4358 case CMD_ELS_REQUEST64_CR:
4359 if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
4360 return 1;
4361 case
CMD_XMIT_ELS_RSP64_CX:
4362 if (iocb->context1 == (uint8_t *) ndlp)
4363 return 1;
4364 }
4365 } else if (pring->ringno == psli->extra_ring) {
4366
4367 } else if (pring->ringno == psli->fcp_ring) {
4368 /* Skip match check if waiting to relogin to FCP target */
4369 if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
4370 (ndlp->nlp_flag & NLP_DELAY_TMO)) {
4371 return 0;
4372 }
4373 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
4374 return 1;
4375 }
4376 } else if (pring->ringno == psli->next_ring) {
4377
4378 }
4379 return 0;
4380 }
4381
4382 /*
4383 * Free resources / clean up outstanding I/Os
4384 * associated with nlp_rpi in the LPFC_NODELIST entry.
4385 */
4386 static int
4387 lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
4388 {
4389 LIST_HEAD(completions);
4390 struct lpfc_sli *psli;
4391 struct lpfc_sli_ring *pring;
4392 struct lpfc_iocbq *iocb, *next_iocb;
4393 uint32_t i;
4394
4395 lpfc_fabric_abort_nport(ndlp);
4396
4397 /*
4398 * Everything that matches on txcmplq will be returned
4399 * by firmware with a no rpi error.
4400 */
4401 psli = &phba->sli;
4402 if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
4403 /* Now process each ring */
4404 for (i = 0; i < psli->num_rings; i++) {
4405 pring = &psli->ring[i];
4406
4407 spin_lock_irq(&phba->hbalock);
4408 list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
4409 list) {
4410 /*
4411 * Check to see if iocb matches the nport we are
4412 * looking for
4413 */
4414 if ((lpfc_check_sli_ndlp(phba, pring, iocb,
4415 ndlp))) {
4416 /* It matches, so dequeue it and call its
4417 completion with an error */
4418 list_move_tail(&iocb->list,
4419 &completions);
4420 }
4421 }
4422 spin_unlock_irq(&phba->hbalock);
4423 }
4424 }
4425
4426 /* Cancel all the IOCBs from the completions list */
4427 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
4428 IOERR_SLI_ABORTED);
4429
4430 return 0;
4431 }
4432
4433 /**
4434 * lpfc_nlp_logo_unreg - Unreg mailbox completion handler before LOGO
4435 * @phba: Pointer to HBA context object.
4436 * @pmb: Pointer to mailbox object.
4437 *
4438 * This function will issue an ELS LOGO command after completing
4439 * the UNREG_RPI.
4440 **/
4441 void
4442 lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4443 {
4444 struct lpfc_vport *vport = pmb->vport;
4445 struct lpfc_nodelist *ndlp;
4446
4447 ndlp = (struct lpfc_nodelist *)(pmb->context1);
4448 if (!ndlp)
4449 return;
4450 lpfc_issue_els_logo(vport, ndlp, 0);
4451 mempool_free(pmb, phba->mbox_mem_pool);
4452 }
4453
4454 /*
4455 * Free rpi associated with LPFC_NODELIST entry.
4456 * This routine is called from lpfc_freenode(), when we are removing
4457 * a LPFC_NODELIST entry. It is also called if the driver initiates a
4458 * LOGO that completes successfully, and we are waiting to PLOGI back
4459 * to the remote NPort. In addition, it is called after we receive
4460 * an unsolicited ELS cmd, send back a rsp, the rsp completes and
4461 * we are waiting to PLOGI back to the remote NPort.
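 * Returns 1 if the rpi was registered (or a REG_LOGIN was pending) and
 * cleanup was initiated, otherwise 0.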
4462 */ 4463 int 4464 lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 4465 { 4466 struct lpfc_hba *phba = vport->phba; 4467 LPFC_MBOXQ_t *mbox; 4468 int rc; 4469 uint16_t rpi; 4470 4471 if (ndlp->nlp_flag & NLP_RPI_REGISTERED || 4472 ndlp->nlp_flag & NLP_REG_LOGIN_SEND) { 4473 if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) 4474 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, 4475 "3366 RPI x%x needs to be " 4476 "unregistered nlp_flag x%x " 4477 "did x%x\n", 4478 ndlp->nlp_rpi, ndlp->nlp_flag, 4479 ndlp->nlp_DID); 4480 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4481 if (mbox) { 4482 /* SLI4 ports require the physical rpi value. */ 4483 rpi = ndlp->nlp_rpi; 4484 if (phba->sli_rev == LPFC_SLI_REV4) 4485 rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; 4486 4487 lpfc_unreg_login(phba, vport->vpi, rpi, mbox); 4488 mbox->vport = vport; 4489 if (ndlp->nlp_flag & NLP_ISSUE_LOGO) { 4490 mbox->context1 = ndlp; 4491 mbox->mbox_cmpl = lpfc_nlp_logo_unreg; 4492 } else { 4493 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4494 } 4495 4496 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 4497 if (rc == MBX_NOT_FINISHED) 4498 mempool_free(mbox, phba->mbox_mem_pool); 4499 } 4500 lpfc_no_rpi(phba, ndlp); 4501 4502 if (phba->sli_rev != LPFC_SLI_REV4) 4503 ndlp->nlp_rpi = 0; 4504 ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; 4505 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 4506 return 1; 4507 } 4508 return 0; 4509 } 4510 4511 /** 4512 * lpfc_unreg_hba_rpis - Unregister rpis registered to the hba. 4513 * @phba: pointer to lpfc hba data structure. 4514 * 4515 * This routine is invoked to unregister all the currently registered RPIs 4516 * to the HBA. 4517 **/ 4518 void 4519 lpfc_unreg_hba_rpis(struct lpfc_hba *phba) 4520 { 4521 struct lpfc_vport **vports; 4522 struct lpfc_nodelist *ndlp; 4523 struct Scsi_Host *shost; 4524 int i; 4525 4526 vports = lpfc_create_vport_work_array(phba); 4527 if (!vports) { 4528 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 4529 "2884 Vport array allocation failed \n"); 4530 return; 4531 } 4532 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 4533 shost = lpfc_shost_from_vport(vports[i]); 4534 spin_lock_irq(shost->host_lock); 4535 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) { 4536 if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { 4537 /* The mempool_alloc might sleep */ 4538 spin_unlock_irq(shost->host_lock); 4539 lpfc_unreg_rpi(vports[i], ndlp); 4540 spin_lock_irq(shost->host_lock); 4541 } 4542 } 4543 spin_unlock_irq(shost->host_lock); 4544 } 4545 lpfc_destroy_vport_work_array(phba, vports); 4546 } 4547 4548 void 4549 lpfc_unreg_all_rpis(struct lpfc_vport *vport) 4550 { 4551 struct lpfc_hba *phba = vport->phba; 4552 LPFC_MBOXQ_t *mbox; 4553 int rc; 4554 4555 if (phba->sli_rev == LPFC_SLI_REV4) { 4556 lpfc_sli4_unreg_all_rpis(vport); 4557 return; 4558 } 4559 4560 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4561 if (mbox) { 4562 lpfc_unreg_login(phba, vport->vpi, LPFC_UNREG_ALL_RPIS_VPORT, 4563 mbox); 4564 mbox->vport = vport; 4565 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4566 mbox->context1 = NULL; 4567 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); 4568 if (rc != MBX_TIMEOUT) 4569 mempool_free(mbox, phba->mbox_mem_pool); 4570 4571 if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED)) 4572 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT, 4573 "1836 Could not issue " 4574 "unreg_login(all_rpis) status %d\n", rc); 4575 } 4576 } 4577 4578 void 4579 lpfc_unreg_default_rpis(struct lpfc_vport *vport) 4580 { 4581 struct lpfc_hba *phba = 
vport->phba; 4582 LPFC_MBOXQ_t *mbox; 4583 int rc; 4584 4585 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4586 if (mbox) { 4587 lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS, 4588 mbox); 4589 mbox->vport = vport; 4590 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4591 mbox->context1 = NULL; 4592 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); 4593 if (rc != MBX_TIMEOUT) 4594 mempool_free(mbox, phba->mbox_mem_pool); 4595 4596 if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED)) 4597 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT, 4598 "1815 Could not issue " 4599 "unreg_did (default rpis) status %d\n", 4600 rc); 4601 } 4602 } 4603 4604 /* 4605 * Free resources associated with LPFC_NODELIST entry 4606 * so it can be freed. 4607 */ 4608 static int 4609 lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 4610 { 4611 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4612 struct lpfc_hba *phba = vport->phba; 4613 LPFC_MBOXQ_t *mb, *nextmb; 4614 struct lpfc_dmabuf *mp; 4615 4616 /* Cleanup node for NPort <nlp_DID> */ 4617 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 4618 "0900 Cleanup node for NPort x%x " 4619 "Data: x%x x%x x%x\n", 4620 ndlp->nlp_DID, ndlp->nlp_flag, 4621 ndlp->nlp_state, ndlp->nlp_rpi); 4622 if (NLP_CHK_FREE_REQ(ndlp)) { 4623 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, 4624 "0280 lpfc_cleanup_node: ndlp:x%p " 4625 "usgmap:x%x refcnt:%d\n", 4626 (void *)ndlp, ndlp->nlp_usg_map, 4627 atomic_read(&ndlp->kref.refcount)); 4628 lpfc_dequeue_node(vport, ndlp); 4629 } else { 4630 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, 4631 "0281 lpfc_cleanup_node: ndlp:x%p " 4632 "usgmap:x%x refcnt:%d\n", 4633 (void *)ndlp, ndlp->nlp_usg_map, 4634 atomic_read(&ndlp->kref.refcount)); 4635 lpfc_disable_node(vport, ndlp); 4636 } 4637 4638 4639 /* Don't need to clean up REG_LOGIN64 cmds for Default RPI cleanup */ 4640 4641 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ 4642 if ((mb = phba->sli.mbox_active)) { 4643 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && 4644 !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) && 4645 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 4646 mb->context2 = NULL; 4647 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4648 } 4649 } 4650 4651 spin_lock_irq(&phba->hbalock); 4652 /* Cleanup REG_LOGIN completions which are not yet processed */ 4653 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) { 4654 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) || 4655 (mb->mbox_flag & LPFC_MBX_IMED_UNREG) || 4656 (ndlp != (struct lpfc_nodelist *) mb->context2)) 4657 continue; 4658 4659 mb->context2 = NULL; 4660 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4661 } 4662 4663 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 4664 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && 4665 !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) && 4666 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 4667 mp = (struct lpfc_dmabuf *) (mb->context1); 4668 if (mp) { 4669 __lpfc_mbuf_free(phba, mp->virt, mp->phys); 4670 kfree(mp); 4671 } 4672 list_del(&mb->list); 4673 mempool_free(mb, phba->mbox_mem_pool); 4674 /* We shall not invoke the lpfc_nlp_put to decrement 4675 * the ndlp reference count as we are in the process 4676 * of lpfc_nlp_release. 
4677 */
4678 }
4679 }
4680 spin_unlock_irq(&phba->hbalock);
4681
4682 lpfc_els_abort(phba, ndlp);
4683
4684 spin_lock_irq(shost->host_lock);
4685 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
4686 spin_unlock_irq(shost->host_lock);
4687
4688 ndlp->nlp_last_elscmd = 0;
4689 del_timer_sync(&ndlp->nlp_delayfunc);
4690
4691 list_del_init(&ndlp->els_retry_evt.evt_listp);
4692 list_del_init(&ndlp->dev_loss_evt.evt_listp);
4693 lpfc_cleanup_vports_rrqs(vport, ndlp);
4694 lpfc_unreg_rpi(vport, ndlp);
4695
4696 return 0;
4697 }
4698
4699 /*
4700 * Check to see if we can free the nlp back to the freelist.
4701 * If we are in the middle of using the nlp in the discovery state
4702 * machine, defer the free till we reach the end of the state machine.
4703 */
4704 static void
4705 lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4706 {
4707 struct lpfc_hba *phba = vport->phba;
4708 struct lpfc_rport_data *rdata;
4709 LPFC_MBOXQ_t *mbox;
4710 int rc;
4711
4712 lpfc_cancel_retry_delay_tmo(vport, ndlp);
4713 if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
4714 !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
4715 !(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
4716 /* For this case we need to cleanup the default rpi
4717 * allocated by the firmware.
4718 */
4719 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
4720 != NULL) {
4721 rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
4722 (uint8_t *) &vport->fc_sparam, mbox, ndlp->nlp_rpi);
4723 if (rc) {
4724 mempool_free(mbox, phba->mbox_mem_pool);
4725 }
4726 else {
4727 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
4728 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
4729 mbox->vport = vport;
4730 mbox->context2 = ndlp;
4731 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
4732 if (rc == MBX_NOT_FINISHED) {
4733 mempool_free(mbox, phba->mbox_mem_pool);
4734 }
4735 }
4736 }
4737 }
4738 lpfc_cleanup_node(vport, ndlp);
4739
4740 /*
4741 * We can get here with a non-NULL ndlp->rport because when we
4742 * unregister a rport we don't break the rport/node linkage. So if we
4743 * do, make sure we don't leave any dangling pointers behind.
4744 */ 4745 if (ndlp->rport) { 4746 rdata = ndlp->rport->dd_data; 4747 rdata->pnode = NULL; 4748 ndlp->rport = NULL; 4749 } 4750 } 4751 4752 static int 4753 lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 4754 uint32_t did) 4755 { 4756 D_ID mydid, ndlpdid, matchdid; 4757 4758 if (did == Bcast_DID) 4759 return 0; 4760 4761 /* First check for Direct match */ 4762 if (ndlp->nlp_DID == did) 4763 return 1; 4764 4765 /* Next check for area/domain identically equals 0 match */ 4766 mydid.un.word = vport->fc_myDID; 4767 if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) { 4768 return 0; 4769 } 4770 4771 matchdid.un.word = did; 4772 ndlpdid.un.word = ndlp->nlp_DID; 4773 if (matchdid.un.b.id == ndlpdid.un.b.id) { 4774 if ((mydid.un.b.domain == matchdid.un.b.domain) && 4775 (mydid.un.b.area == matchdid.un.b.area)) { 4776 if ((ndlpdid.un.b.domain == 0) && 4777 (ndlpdid.un.b.area == 0)) { 4778 if (ndlpdid.un.b.id) 4779 return 1; 4780 } 4781 return 0; 4782 } 4783 4784 matchdid.un.word = ndlp->nlp_DID; 4785 if ((mydid.un.b.domain == ndlpdid.un.b.domain) && 4786 (mydid.un.b.area == ndlpdid.un.b.area)) { 4787 if ((matchdid.un.b.domain == 0) && 4788 (matchdid.un.b.area == 0)) { 4789 if (matchdid.un.b.id) 4790 return 1; 4791 } 4792 } 4793 } 4794 return 0; 4795 } 4796 4797 /* Search for a nodelist entry */ 4798 static struct lpfc_nodelist * 4799 __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did) 4800 { 4801 struct lpfc_nodelist *ndlp; 4802 uint32_t data1; 4803 4804 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 4805 if (lpfc_matchdid(vport, ndlp, did)) { 4806 data1 = (((uint32_t) ndlp->nlp_state << 24) | 4807 ((uint32_t) ndlp->nlp_xri << 16) | 4808 ((uint32_t) ndlp->nlp_type << 8) | 4809 ((uint32_t) ndlp->nlp_rpi & 0xff)); 4810 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 4811 "0929 FIND node DID " 4812 "Data: x%p x%x x%x x%x %p\n", 4813 ndlp, ndlp->nlp_DID, 4814 ndlp->nlp_flag, data1, 4815 ndlp->active_rrqs_xri_bitmap); 4816 return ndlp; 4817 } 4818 } 4819 4820 /* FIND node did <did> NOT FOUND */ 4821 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 4822 "0932 FIND node did x%x NOT FOUND.\n", did); 4823 return NULL; 4824 } 4825 4826 struct lpfc_nodelist * 4827 lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did) 4828 { 4829 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4830 struct lpfc_nodelist *ndlp; 4831 unsigned long iflags; 4832 4833 spin_lock_irqsave(shost->host_lock, iflags); 4834 ndlp = __lpfc_findnode_did(vport, did); 4835 spin_unlock_irqrestore(shost->host_lock, iflags); 4836 return ndlp; 4837 } 4838 4839 struct lpfc_nodelist * 4840 lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) 4841 { 4842 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4843 struct lpfc_nodelist *ndlp; 4844 4845 ndlp = lpfc_findnode_did(vport, did); 4846 if (!ndlp) { 4847 if ((vport->fc_flag & FC_RSCN_MODE) != 0 && 4848 lpfc_rscn_payload_check(vport, did) == 0) 4849 return NULL; 4850 ndlp = (struct lpfc_nodelist *) 4851 mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL); 4852 if (!ndlp) 4853 return NULL; 4854 lpfc_nlp_init(vport, ndlp, did); 4855 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 4856 spin_lock_irq(shost->host_lock); 4857 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 4858 spin_unlock_irq(shost->host_lock); 4859 return ndlp; 4860 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 4861 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE); 4862 if (!ndlp) 4863 return NULL; 4864 spin_lock_irq(shost->host_lock); 4865 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 4866 
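/* Mark the re-enabled node for discovery, just as is done for a
 * newly allocated node above.
 */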
spin_unlock_irq(shost->host_lock);
4867 return ndlp;
4868 }
4869
4870 if ((vport->fc_flag & FC_RSCN_MODE) &&
4871 !(vport->fc_flag & FC_NDISC_ACTIVE)) {
4872 if (lpfc_rscn_payload_check(vport, did)) {
4873 /* If we've already received a PLOGI from this NPort
4874 * we don't need to try to discover it again.
4875 */
4876 if (ndlp->nlp_flag & NLP_RCV_PLOGI)
4877 return NULL;
4878
4879 /* Since this node is marked for discovery,
4880 * delay timeout is not needed.
4881 */
4882 lpfc_cancel_retry_delay_tmo(vport, ndlp);
4883 spin_lock_irq(shost->host_lock);
4884 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
4885 spin_unlock_irq(shost->host_lock);
4886 } else
4887 ndlp = NULL;
4888 } else {
4889 /* If we've already received a PLOGI from this NPort,
4890 * or we are already in the process of discovery on it,
4891 * we don't need to try to discover it again.
4892 */
4893 if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
4894 ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
4895 ndlp->nlp_flag & NLP_RCV_PLOGI)
4896 return NULL;
4897 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
4898 spin_lock_irq(shost->host_lock);
4899 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
4900 spin_unlock_irq(shost->host_lock);
4901 }
4902 return ndlp;
4903 }
4904
4905 /* Build a list of nodes to discover based on the loopmap */
4906 void
4907 lpfc_disc_list_loopmap(struct lpfc_vport *vport)
4908 {
4909 struct lpfc_hba *phba = vport->phba;
4910 int j;
4911 uint32_t alpa, index;
4912
4913 if (!lpfc_is_link_up(phba))
4914 return;
4915
4916 if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
4917 return;
4918
4919 /* Check for loop map present or not */
4920 if (phba->alpa_map[0]) {
4921 for (j = 1; j <= phba->alpa_map[0]; j++) {
4922 alpa = phba->alpa_map[j];
4923 if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
4924 continue;
4925 lpfc_setup_disc_node(vport, alpa);
4926 }
4927 } else {
4928 /* No alpa map, so try all ALPAs */
4929 for (j = 0; j < FC_MAXLOOP; j++) {
4930 /* If cfg_scan_down is set, start from highest
4931 * ALPA (0xef) to lowest (0x1).
4932 */
4933 if (vport->cfg_scan_down)
4934 index = j;
4935 else
4936 index = FC_MAXLOOP - j - 1;
4937 alpa = lpfcAlpaArray[index];
4938 if ((vport->fc_myDID & 0xff) == alpa)
4939 continue;
4940 lpfc_setup_disc_node(vport, alpa);
4941 }
4942 }
4943 return;
4944 }
4945
4946 void
4947 lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
4948 {
4949 LPFC_MBOXQ_t *mbox;
4950 struct lpfc_sli *psli = &phba->sli;
4951 struct lpfc_sli_ring *extra_ring = &psli->ring[psli->extra_ring];
4952 struct lpfc_sli_ring *fcp_ring = &psli->ring[psli->fcp_ring];
4953 struct lpfc_sli_ring *next_ring = &psli->ring[psli->next_ring];
4954 int rc;
4955
4956 /*
4957 * if it's not a physical port or if we already sent
4958 * clear_la then don't send it.
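 * SLI4 ports never issue CLEAR_LA either, which the sli_rev check
 * below enforces.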
4959 */ 4960 if ((phba->link_state >= LPFC_CLEAR_LA) || 4961 (vport->port_type != LPFC_PHYSICAL_PORT) || 4962 (phba->sli_rev == LPFC_SLI_REV4)) 4963 return; 4964 4965 /* Link up discovery */ 4966 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) { 4967 phba->link_state = LPFC_CLEAR_LA; 4968 lpfc_clear_la(phba, mbox); 4969 mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la; 4970 mbox->vport = vport; 4971 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 4972 if (rc == MBX_NOT_FINISHED) { 4973 mempool_free(mbox, phba->mbox_mem_pool); 4974 lpfc_disc_flush_list(vport); 4975 extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT; 4976 fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT; 4977 next_ring->flag &= ~LPFC_STOP_IOCB_EVENT; 4978 phba->link_state = LPFC_HBA_ERROR; 4979 } 4980 } 4981 } 4982 4983 /* Reg_vpi to tell firmware to resume normal operations */ 4984 void 4985 lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport) 4986 { 4987 LPFC_MBOXQ_t *regvpimbox; 4988 4989 regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4990 if (regvpimbox) { 4991 lpfc_reg_vpi(vport, regvpimbox); 4992 regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi; 4993 regvpimbox->vport = vport; 4994 if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT) 4995 == MBX_NOT_FINISHED) { 4996 mempool_free(regvpimbox, phba->mbox_mem_pool); 4997 } 4998 } 4999 } 5000 5001 /* Start Link up / RSCN discovery on NPR nodes */ 5002 void 5003 lpfc_disc_start(struct lpfc_vport *vport) 5004 { 5005 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 5006 struct lpfc_hba *phba = vport->phba; 5007 uint32_t num_sent; 5008 uint32_t clear_la_pending; 5009 int did_changed; 5010 5011 if (!lpfc_is_link_up(phba)) { 5012 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, 5013 "3315 Link is not up %x\n", 5014 phba->link_state); 5015 return; 5016 } 5017 5018 if (phba->link_state == LPFC_CLEAR_LA) 5019 clear_la_pending = 1; 5020 else 5021 clear_la_pending = 0; 5022 5023 if (vport->port_state < LPFC_VPORT_READY) 5024 vport->port_state = LPFC_DISC_AUTH; 5025 5026 lpfc_set_disctmo(vport); 5027 5028 if (vport->fc_prevDID == vport->fc_myDID) 5029 did_changed = 0; 5030 else 5031 did_changed = 1; 5032 5033 vport->fc_prevDID = vport->fc_myDID; 5034 vport->num_disc_nodes = 0; 5035 5036 /* Start Discovery state <hba_state> */ 5037 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 5038 "0202 Start Discovery hba state x%x " 5039 "Data: x%x x%x x%x\n", 5040 vport->port_state, vport->fc_flag, vport->fc_plogi_cnt, 5041 vport->fc_adisc_cnt); 5042 5043 /* First do ADISCs - if any */ 5044 num_sent = lpfc_els_disc_adisc(vport); 5045 5046 if (num_sent) 5047 return; 5048 5049 /* Register the VPI for SLI3, NPIV only. */ 5050 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 5051 !(vport->fc_flag & FC_PT2PT) && 5052 !(vport->fc_flag & FC_RSCN_MODE) && 5053 (phba->sli_rev < LPFC_SLI_REV4)) { 5054 if (vport->port_type == LPFC_PHYSICAL_PORT) 5055 lpfc_issue_clear_la(phba, vport); 5056 lpfc_issue_reg_vpi(phba, vport); 5057 return; 5058 } 5059 5060 /* 5061 * For SLI2, we need to set port_state to READY and continue 5062 * discovery. 
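 * (On SLI3 with NPIV, the REG_VPI completion handler,
 * lpfc_mbx_cmpl_reg_vpi, is what moves the port to READY instead.)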
5063 */
5064 if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
5065 /* If we get here, there is nothing to ADISC */
5066 if (vport->port_type == LPFC_PHYSICAL_PORT)
5067 lpfc_issue_clear_la(phba, vport);
5068
5069 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
5070 vport->num_disc_nodes = 0;
5071 /* go thru NPR nodes and issue ELS PLOGIs */
5072 if (vport->fc_npr_cnt)
5073 lpfc_els_disc_plogi(vport);
5074
5075 if (!vport->num_disc_nodes) {
5076 spin_lock_irq(shost->host_lock);
5077 vport->fc_flag &= ~FC_NDISC_ACTIVE;
5078 spin_unlock_irq(shost->host_lock);
5079 lpfc_can_disctmo(vport);
5080 }
5081 }
5082 vport->port_state = LPFC_VPORT_READY;
5083 } else {
5084 /* Next do PLOGIs - if any */
5085 num_sent = lpfc_els_disc_plogi(vport);
5086
5087 if (num_sent)
5088 return;
5089
5090 if (vport->fc_flag & FC_RSCN_MODE) {
5091 /* Check to see if more RSCNs came in while we
5092 * were processing this one.
5093 */
5094 if ((vport->fc_rscn_id_cnt == 0) &&
5095 (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
5096 spin_lock_irq(shost->host_lock);
5097 vport->fc_flag &= ~FC_RSCN_MODE;
5098 spin_unlock_irq(shost->host_lock);
5099 lpfc_can_disctmo(vport);
5100 } else
5101 lpfc_els_handle_rscn(vport);
5102 }
5103 }
5104 return;
5105 }
5106
5107 /*
5108 * Ignore completion for all IOCBs on the tx and txcmpl queues for the
5109 * ELS ring that match the specified nodelist.
5110 */
5111 static void
5112 lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
5113 {
5114 LIST_HEAD(completions);
5115 struct lpfc_sli *psli;
5116 IOCB_t *icmd;
5117 struct lpfc_iocbq *iocb, *next_iocb;
5118 struct lpfc_sli_ring *pring;
5119
5120 psli = &phba->sli;
5121 pring = &psli->ring[LPFC_ELS_RING];
5122
5123 /* Error out matching iocbs on the txq or txcmplq.
5124 * First check the txq.
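 * txq entries have not been issued to the HBA yet, so they can simply
 * be failed; txcmplq entries are outstanding and must be aborted via
 * lpfc_sli_issue_abort_iotag().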
5125 */ 5126 spin_lock_irq(&phba->hbalock); 5127 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { 5128 if (iocb->context1 != ndlp) { 5129 continue; 5130 } 5131 icmd = &iocb->iocb; 5132 if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) || 5133 (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) { 5134 5135 list_move_tail(&iocb->list, &completions); 5136 } 5137 } 5138 5139 /* Next check the txcmplq */ 5140 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { 5141 if (iocb->context1 != ndlp) { 5142 continue; 5143 } 5144 icmd = &iocb->iocb; 5145 if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR || 5146 icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) { 5147 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 5148 } 5149 } 5150 spin_unlock_irq(&phba->hbalock); 5151 5152 /* Cancel all the IOCBs from the completions list */ 5153 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 5154 IOERR_SLI_ABORTED); 5155 } 5156 5157 static void 5158 lpfc_disc_flush_list(struct lpfc_vport *vport) 5159 { 5160 struct lpfc_nodelist *ndlp, *next_ndlp; 5161 struct lpfc_hba *phba = vport->phba; 5162 5163 if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) { 5164 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, 5165 nlp_listp) { 5166 if (!NLP_CHK_NODE_ACT(ndlp)) 5167 continue; 5168 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || 5169 ndlp->nlp_state == NLP_STE_ADISC_ISSUE) { 5170 lpfc_free_tx(phba, ndlp); 5171 } 5172 } 5173 } 5174 } 5175 5176 void 5177 lpfc_cleanup_discovery_resources(struct lpfc_vport *vport) 5178 { 5179 lpfc_els_flush_rscn(vport); 5180 lpfc_els_flush_cmd(vport); 5181 lpfc_disc_flush_list(vport); 5182 } 5183 5184 /*****************************************************************************/ 5185 /* 5186 * NAME: lpfc_disc_timeout 5187 * 5188 * FUNCTION: Fibre Channel driver discovery timeout routine. 
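 *           Runs in timer context, so it only posts WORKER_DISC_TMO
 *           and wakes the worker thread; the real work is done in
 *           process context by lpfc_disc_timeout_handler().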
5189 * 5190 * EXECUTION ENVIRONMENT: interrupt only 5191 * 5192 * CALLED FROM: 5193 * Timer function 5194 * 5195 * RETURNS: 5196 * none 5197 */ 5198 /*****************************************************************************/ 5199 void 5200 lpfc_disc_timeout(unsigned long ptr) 5201 { 5202 struct lpfc_vport *vport = (struct lpfc_vport *) ptr; 5203 struct lpfc_hba *phba = vport->phba; 5204 uint32_t tmo_posted; 5205 unsigned long flags = 0; 5206 5207 if (unlikely(!phba)) 5208 return; 5209 5210 spin_lock_irqsave(&vport->work_port_lock, flags); 5211 tmo_posted = vport->work_port_events & WORKER_DISC_TMO; 5212 if (!tmo_posted) 5213 vport->work_port_events |= WORKER_DISC_TMO; 5214 spin_unlock_irqrestore(&vport->work_port_lock, flags); 5215 5216 if (!tmo_posted) 5217 lpfc_worker_wake_up(phba); 5218 return; 5219 } 5220 5221 static void 5222 lpfc_disc_timeout_handler(struct lpfc_vport *vport) 5223 { 5224 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 5225 struct lpfc_hba *phba = vport->phba; 5226 struct lpfc_sli *psli = &phba->sli; 5227 struct lpfc_nodelist *ndlp, *next_ndlp; 5228 LPFC_MBOXQ_t *initlinkmbox; 5229 int rc, clrlaerr = 0; 5230 5231 if (!(vport->fc_flag & FC_DISC_TMO)) 5232 return; 5233 5234 spin_lock_irq(shost->host_lock); 5235 vport->fc_flag &= ~FC_DISC_TMO; 5236 spin_unlock_irq(shost->host_lock); 5237 5238 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 5239 "disc timeout: state:x%x rtry:x%x flg:x%x", 5240 vport->port_state, vport->fc_ns_retry, vport->fc_flag); 5241 5242 switch (vport->port_state) { 5243 5244 case LPFC_LOCAL_CFG_LINK: 5245 /* port_state is identically LPFC_LOCAL_CFG_LINK while waiting for 5246 * FAN 5247 */ 5248 /* FAN timeout */ 5249 lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY, 5250 "0221 FAN timeout\n"); 5251 /* Start discovery by sending FLOGI, clean up old rpis */ 5252 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, 5253 nlp_listp) { 5254 if (!NLP_CHK_NODE_ACT(ndlp)) 5255 continue; 5256 if (ndlp->nlp_state != NLP_STE_NPR_NODE) 5257 continue; 5258 if (ndlp->nlp_type & NLP_FABRIC) { 5259 /* Clean up the ndlp on Fabric connections */ 5260 lpfc_drop_node(vport, ndlp); 5261 5262 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { 5263 /* Fail outstanding IO now since device 5264 * is marked for PLOGI. 5265 */ 5266 lpfc_unreg_rpi(vport, ndlp); 5267 } 5268 } 5269 if (vport->port_state != LPFC_FLOGI) { 5270 if (phba->sli_rev <= LPFC_SLI_REV3) 5271 lpfc_initial_flogi(vport); 5272 else 5273 lpfc_issue_init_vfi(vport); 5274 return; 5275 } 5276 break; 5277 5278 case LPFC_FDISC: 5279 case LPFC_FLOGI: 5280 /* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */ 5281 /* Initial FLOGI timeout */ 5282 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 5283 "0222 Initial %s timeout\n", 5284 vport->vpi ? "FDISC" : "FLOGI"); 5285 5286 /* Assume no Fabric and go on with discovery. 5287 * Check for outstanding ELS FLOGI to abort. 
static void
lpfc_disc_timeout_handler(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	LPFC_MBOXQ_t *initlinkmbox;
	int rc, clrlaerr = 0;

	if (!(vport->fc_flag & FC_DISC_TMO))
		return;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_DISC_TMO;
	spin_unlock_irq(shost->host_lock);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"disc timeout: state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	switch (vport->port_state) {

	case LPFC_LOCAL_CFG_LINK:
	/* port_state is identically LPFC_LOCAL_CFG_LINK while waiting for
	 * FAN
	 */
		/* FAN timeout */
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
				 "0221 FAN timeout\n");
		/* Start discovery by sending FLOGI, clean up old rpis */
		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
					 nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (ndlp->nlp_state != NLP_STE_NPR_NODE)
				continue;
			if (ndlp->nlp_type & NLP_FABRIC) {
				/* Clean up the ndlp on Fabric connections */
				lpfc_drop_node(vport, ndlp);

			} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
				/* Fail outstanding IO now since device
				 * is marked for PLOGI.
				 */
				lpfc_unreg_rpi(vport, ndlp);
			}
		}
		if (vport->port_state != LPFC_FLOGI) {
			if (phba->sli_rev <= LPFC_SLI_REV3)
				lpfc_initial_flogi(vport);
			else
				lpfc_issue_init_vfi(vport);
			return;
		}
		break;

	case LPFC_FDISC:
	case LPFC_FLOGI:
	/* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
		/* Initial FLOGI timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0222 Initial %s timeout\n",
				 vport->vpi ? "FDISC" : "FLOGI");

		/* Assume no Fabric and go on with discovery.
		 * Check for outstanding ELS FLOGI to abort.
		 */

		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(vport);

		/* Start discovery */
		lpfc_disc_start(vport);
		break;

	case LPFC_FABRIC_CFG_LINK:
	/* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
	   NameServer login */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0223 Timeout while waiting for "
				 "NameServer login\n");
		/* Next look for NameServer ndlp */
		ndlp = lpfc_findnode_did(vport, NameServer_DID);
		if (ndlp && NLP_CHK_NODE_ACT(ndlp))
			lpfc_els_abort(phba, ndlp);

		/* ReStart discovery */
		goto restart_disc;

	case LPFC_NS_QRY:
	/* Check for wait for NameServer Rsp timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0224 NameServer Query timeout "
				 "Data: x%x x%x\n",
				 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);

		if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
			/* Try it one more time */
			vport->fc_ns_retry++;
			rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
					 vport->fc_ns_retry, 0);
			if (rc == 0)
				break;
		}
		vport->fc_ns_retry = 0;

restart_disc:
		/*
		 * Discovery is over.
		 * set port_state to PORT_READY if SLI2.
		 * cmpl_reg_vpi will set port_state to READY for SLI3.
		 */
		if (phba->sli_rev < LPFC_SLI_REV4) {
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				lpfc_issue_reg_vpi(phba, vport);
			else {
				lpfc_issue_clear_la(phba, vport);
				vport->port_state = LPFC_VPORT_READY;
			}
		}

		/* Setup and issue mailbox INITIALIZE LINK command */
		initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!initlinkmbox) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
					 "0206 Device Discovery "
					 "completion error\n");
			phba->link_state = LPFC_HBA_ERROR;
			break;
		}

		lpfc_linkdown(phba);
		lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
			       phba->cfg_link_speed);
		initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
		initlinkmbox->vport = vport;
		initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
		lpfc_set_loopback_flag(phba);
		if (rc == MBX_NOT_FINISHED)
			mempool_free(initlinkmbox, phba->mbox_mem_pool);

		break;

	case LPFC_DISC_AUTH:
	/* Node Authentication timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0227 Node Authentication timeout\n");
		lpfc_disc_flush_list(vport);

		/*
		 * set port_state to PORT_READY if SLI2.
		 * cmpl_reg_vpi will set port_state to READY for SLI3.
		 */
		if (phba->sli_rev < LPFC_SLI_REV4) {
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				lpfc_issue_reg_vpi(phba, vport);
			else {	/* NPIV Not enabled */
				lpfc_issue_clear_la(phba, vport);
				vport->port_state = LPFC_VPORT_READY;
			}
		}
		break;

	case LPFC_VPORT_READY:
		if (vport->fc_flag & FC_RSCN_MODE) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
					 "0231 RSCN timeout Data: x%x "
					 "x%x\n",
					 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);

			/* Cleanup any outstanding ELS commands */
			lpfc_els_flush_cmd(vport);

			lpfc_els_flush_rscn(vport);
			lpfc_disc_flush_list(vport);
		}
		break;

	default:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0273 Unexpected discovery timeout, "
				 "vport State x%x\n", vport->port_state);
		break;
	}

	switch (phba->link_state) {
	case LPFC_CLEAR_LA:
		/* CLEAR LA timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0228 CLEAR LA timeout\n");
		clrlaerr = 1;
		break;

	case LPFC_LINK_UP:
		lpfc_issue_clear_la(phba, vport);
		/* Drop thru */
	case LPFC_LINK_UNKNOWN:
	case LPFC_WARM_START:
	case LPFC_INIT_START:
	case LPFC_INIT_MBX_CMDS:
	case LPFC_LINK_DOWN:
	case LPFC_HBA_ERROR:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0230 Unexpected timeout, hba link "
				 "state x%x\n", phba->link_state);
		clrlaerr = 1;
		break;

	case LPFC_HBA_READY:
		break;
	}

	if (clrlaerr) {
		lpfc_disc_flush_list(vport);
		psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		vport->port_state = LPFC_VPORT_READY;
	}

	return;
}
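
/*
 * The restart_disc path above uses the driver's usual fire-and-forget
 * mailbox sequence; a minimal sketch (illustrative only, mirroring the
 * INIT_LINK case):
 *
 *	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (!mbox)
 *		goto err;			// e.g. mark LPFC_HBA_ERROR
 *	lpfc_init_link(phba, mbox, phba->cfg_topology,
 *		       phba->cfg_link_speed);
 *	mbox->vport = vport;
 *	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;	// cmpl frees mbox
 *	if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED)
 *		mempool_free(mbox, phba->mbox_mem_pool);
 */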
/*
 * This routine handles processing the FDMI REG_LOGIN mailbox command upon
 * completion.  It is set up in the LPFC_MBOXQ as the completion routine
 * when the command is handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
	struct lpfc_vport *vport = pmb->vport;

	pmb->context1 = NULL;
	pmb->context2 = NULL;

	if (phba->sli_rev < LPFC_SLI_REV4)
		ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

	/*
	 * Start issuing the Fabric-Device Management Interface (FDMI)
	 * command to 0xfffffa (the FDMI well-known port), or delay issuing
	 * the FDMI command if fdmi-on=2 (supporting RPA/hostname).
	 */
	if (vport->cfg_fdmi_on == 1)
		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
	else
		mod_timer(&vport->fc_fdmitmo,
			  jiffies + msecs_to_jiffies(1000 * 60));

	/* decrement the node reference count held for this callback
	 * function.
	 */
	lpfc_nlp_put(ndlp);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}
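
/*
 * Note the deferred-FDMI arming above: the vport FDMI timer is pushed out
 * by one minute, with the delay given in milliseconds and converted to
 * jiffies so the code stays HZ-independent.  Worked conversion
 * (illustrative):
 *
 *	mod_timer(&vport->fc_fdmitmo,
 *		  jiffies + msecs_to_jiffies(1000 * 60));	// 60,000 ms
 */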
static int
lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
{
	uint16_t *rpi = param;

	/* check for active node */
	if (!NLP_CHK_NODE_ACT(ndlp))
		return 0;

	return ndlp->nlp_rpi == *rpi;
}

static int
lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
{
	return memcmp(&ndlp->nlp_portname, param,
		      sizeof(ndlp->nlp_portname)) == 0;
}

static struct lpfc_nodelist *
__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (filter(ndlp, param)) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
					 "3185 FIND node filter %p DID "
					 "Data: x%p x%x x%x\n",
					 filter, ndlp, ndlp->nlp_DID,
					 ndlp->nlp_flag);
			return ndlp;
		}
	}
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "3186 FIND node filter %p NOT FOUND.\n", filter);
	return NULL;
}

/*
 * This routine looks up the ndlp lists for the given RPI.  If the RPI is
 * found, it returns the node list element pointer; otherwise it returns
 * NULL.
 */
struct lpfc_nodelist *
__lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
	return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
}

/*
 * This routine looks up the ndlp lists for the given WWPN.  If the WWPN is
 * found, it returns the node list element pointer; otherwise it returns
 * NULL.
 */
struct lpfc_nodelist *
lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	spin_lock_irq(shost->host_lock);
	ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
	spin_unlock_irq(shost->host_lock);
	return ndlp;
}

/*
 * This routine looks up the ndlp lists for the given RPI.  If the RPI is
 * found, the routine returns the node list element pointer; otherwise it
 * returns NULL.
 */
struct lpfc_nodelist *
lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	spin_lock_irq(shost->host_lock);
	ndlp = __lpfc_findnode_rpi(vport, rpi);
	spin_unlock_irq(shost->host_lock);
	return ndlp;
}
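
/*
 * __lpfc_find_node() factors node lookup into a generic walk plus a
 * node_filter predicate, so a new lookup only needs a small filter.
 * A hypothetical example (not driver code; the driver's real DID lookup,
 * lpfc_findnode_did(), is implemented elsewhere):
 *
 *	static int lpfc_filter_by_did(struct lpfc_nodelist *ndlp, void *param)
 *	{
 *		uint32_t *did = param;
 *
 *		return NLP_CHK_NODE_ACT(ndlp) && ndlp->nlp_DID == *did;
 *	}
 *
 *	ndlp = __lpfc_find_node(vport, lpfc_filter_by_did, &did);
 */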
/**
 * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier
 * @phba: pointer to lpfc hba data structure.
 * @vpi: the physical host virtual N_Port identifier.
 *
 * This routine finds a vport on a HBA (referred by @phba) through a
 * @vpi. The function walks the HBA's vport list and returns the address
 * of the vport with the matching @vpi.
 *
 * Return code
 *    NULL - No vport with the matching @vpi found
 *    Otherwise - Address to the vport with the matching @vpi.
 **/
struct lpfc_vport *
lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
{
	struct lpfc_vport *vport;
	unsigned long flags;
	int i = 0;

	/* The physical ports are always vpi 0 - translation is unnecessary. */
	if (vpi > 0) {
		/*
		 * Translate the physical vpi to the logical vpi.  The
		 * vport stores the logical vpi.
		 */
		for (i = 0; i < phba->max_vpi; i++) {
			if (vpi == phba->vpi_ids[i])
				break;
		}

		if (i >= phba->max_vpi) {
			lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
					"2936 Could not find Vport mapped "
					"to vpi %d\n", vpi);
			return NULL;
		}
	}

	spin_lock_irqsave(&phba->hbalock, flags);
	list_for_each_entry(vport, &phba->port_list, listentry) {
		if (vport->vpi == i) {
			spin_unlock_irqrestore(&phba->hbalock, flags);
			return vport;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return NULL;
}

void
lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      uint32_t did)
{
	memset(ndlp, 0, sizeof (struct lpfc_nodelist));

	lpfc_initialize_node(vport, ndlp, did);
	INIT_LIST_HEAD(&ndlp->nlp_listp);
	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
		ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
		ndlp->active_rrqs_xri_bitmap =
				mempool_alloc(vport->phba->active_rrq_pool,
					      GFP_KERNEL);
		if (ndlp->active_rrqs_xri_bitmap)
			memset(ndlp->active_rrqs_xri_bitmap, 0,
			       ndlp->phba->cfg_rrq_xri_bitmap_sz);
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node init: did:x%x",
		ndlp->nlp_DID, 0, 0);

	return;
}
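
/*
 * A hedged usage sketch for lpfc_nlp_init() (error handling elided;
 * callers in the discovery code follow roughly this construction
 * sequence, though the exact follow-up state varies by call site):
 *
 *	ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
 *	if (ndlp) {
 *		lpfc_nlp_init(vport, ndlp, did);
 *		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
 *	}
 */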
/* This routine releases all resources associated with a specific NPort's
 * ndlp and mempool_free's the nodelist.
 */
static void
lpfc_nlp_release(struct kref *kref)
{
	struct lpfc_hba *phba;
	unsigned long flags;
	struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
						  kref);

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node release: did:x%x flg:x%x type:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			"0279 lpfc_nlp_release: ndlp:x%p did %x "
			"usgmap:x%x refcnt:%d\n",
			(void *)ndlp, ndlp->nlp_DID, ndlp->nlp_usg_map,
			atomic_read(&ndlp->kref.refcount));

	/* remove ndlp from action. */
	lpfc_nlp_remove(ndlp->vport, ndlp);

	/* clear the ndlp active flag for all release cases */
	phba = ndlp->phba;
	spin_lock_irqsave(&phba->ndlp_lock, flags);
	NLP_CLR_NODE_ACT(ndlp);
	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	if (phba->sli_rev == LPFC_SLI_REV4)
		lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);

	/* free ndlp memory for final ndlp release */
	if (NLP_CHK_FREE_REQ(ndlp)) {
		kfree(ndlp->lat_data);
		if (phba->sli_rev == LPFC_SLI_REV4)
			mempool_free(ndlp->active_rrqs_xri_bitmap,
				     ndlp->phba->active_rrq_pool);
		mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
	}
}

/* This routine bumps the reference count for a ndlp structure to ensure
 * that one discovery thread won't free a ndlp while another discovery thread
 * is using it.
 */
struct lpfc_nodelist *
lpfc_nlp_get(struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba;
	unsigned long flags;

	if (ndlp) {
		lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
			"node get: did:x%x flg:x%x refcnt:x%x",
			ndlp->nlp_DID, ndlp->nlp_flag,
			atomic_read(&ndlp->kref.refcount));
		/* Check the ndlp usage flags to prevent incrementing the
		 * reference count of an ndlp that is in the process of
		 * being released.
		 */
		phba = ndlp->phba;
		spin_lock_irqsave(&phba->ndlp_lock, flags);
		if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
			spin_unlock_irqrestore(&phba->ndlp_lock, flags);
			lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0276 lpfc_nlp_get: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
			return NULL;
		} else
			kref_get(&ndlp->kref);
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	}
	return ndlp;
}
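
/*
 * Every successful lpfc_nlp_get() must be balanced by an lpfc_nlp_put().
 * A sketch of holding a node across asynchronous work (illustrative;
 * queue_node_work() is a placeholder, not driver API):
 *
 *	if (lpfc_nlp_get(ndlp))		// NULL means node is being released
 *		queue_node_work(ndlp);
 *
 *	// later, in the work handler, once ndlp is no longer needed:
 *	lpfc_nlp_put(ndlp);
 */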
/* This routine decrements the reference count for a ndlp structure.  If the
 * count goes to 0, this indicates that the associated nodelist should be
 * freed.  Returning 1 indicates the ndlp resource has been released; on the
 * other hand, returning 0 indicates the ndlp resource has not been released
 * yet.
 */
int
lpfc_nlp_put(struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba;
	unsigned long flags;

	if (!ndlp)
		return 1;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node put: did:x%x flg:x%x refcnt:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag,
		atomic_read(&ndlp->kref.refcount));
	phba = ndlp->phba;
	spin_lock_irqsave(&phba->ndlp_lock, flags);
	/* Check the ndlp memory free acknowledge flag to avoid the
	 * possible race condition that kref_put got invoked again
	 * after the previous one has done the ndlp memory free.
	 */
	if (NLP_CHK_FREE_ACK(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0274 lpfc_nlp_put: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return 1;
	}
	/* Check the ndlp inactivate log flag to avoid the possible
	 * race condition that kref_put got invoked again after ndlp
	 * is already in inactivating state.
	 */
	if (NLP_CHK_IACT_REQ(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0275 lpfc_nlp_put: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return 1;
	}
	/* For the last put, mark the ndlp usage flags to make sure no
	 * other kref_get and kref_put on the same ndlp shall get
	 * in between the process when the final kref_put has been
	 * invoked on this ndlp.
	 */
	if (atomic_read(&ndlp->kref.refcount) == 1) {
		/* Indicate ndlp is put to inactive state. */
		NLP_SET_IACT_REQ(ndlp);
		/* Acknowledge ndlp memory free has been seen. */
		if (NLP_CHK_FREE_REQ(ndlp))
			NLP_SET_FREE_ACK(ndlp);
	}
	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	/* Note: kref_put() returns 1 when it drops the last reference
	 * and invokes the release callback; otherwise it just decrements
	 * the reference count and returns 0.
	 */
	return kref_put(&ndlp->kref, lpfc_nlp_release);
}

/* This routine frees the specified nodelist if it is not in use
 * by any other discovery thread.  This routine returns 1 if the
 * ndlp has been freed; a return value of 0 indicates the ndlp has
 * not yet been released.
 */
int
lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
{
	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node not used: did:x%x flg:x%x refcnt:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag,
		atomic_read(&ndlp->kref.refcount));
	if (atomic_read(&ndlp->kref.refcount) == 1)
		if (lpfc_nlp_put(ndlp))
			return 1;
	return 0;
}
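
/*
 * Taken together, lpfc_nlp_get()/lpfc_nlp_put()/lpfc_nlp_release()
 * implement a standard kref life cycle.  Worked example of the reference
 * counting (assumed flow):
 *
 *	refcount 2 --lpfc_nlp_put()--> refcount 1, returns 0 (still held)
 *	refcount 1 --lpfc_nlp_put()--> lpfc_nlp_release() runs, returns 1
 */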
/**
 * lpfc_fcf_inuse - Check if FCF can be unregistered.
 * @phba: Pointer to hba context object.
 *
 * This function iterates through all FC nodes associated
 * with all vports to check if there is any node with
 * fc_rports associated with it. If there is an fc_rport
 * associated with the node, then the node is either in
 * discovered state or its devloss_timer is pending.
 */
static int
lpfc_fcf_inuse(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i, ret = 0;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;

	vports = lpfc_create_vport_work_array(phba);

	/* If driver cannot allocate memory, indicate fcf is in use */
	if (!vports)
		return 1;

	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
		shost = lpfc_shost_from_vport(vports[i]);
		spin_lock_irq(shost->host_lock);
		/*
		 * If the CVL_RCVD bit is not set then we have sent the
		 * flogi.
		 * If dev_loss fires while we are waiting we do not want to
		 * unreg the fcf.
		 */
		if (!(vports[i]->fc_flag & FC_VPORT_CVL_RCVD)) {
			spin_unlock_irq(shost->host_lock);
			ret = 1;
			goto out;
		}
		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
			if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
			    (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
				ret = 1;
				spin_unlock_irq(shost->host_lock);
				goto out;
			} else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
				ret = 1;
				lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
						"2624 RPI %x DID %x flag %x "
						"still logged in\n",
						ndlp->nlp_rpi, ndlp->nlp_DID,
						ndlp->nlp_flag);
			}
		}
		spin_unlock_irq(shost->host_lock);
	}
out:
	lpfc_destroy_vport_work_array(phba, vports);
	return ret;
}
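
/*
 * lpfc_fcf_inuse() shows the driver's standard vport iteration idiom:
 * snapshot the active vports into a work array, walk the array, then
 * release it.  Sketch (illustrative only):
 *
 *	vports = lpfc_create_vport_work_array(phba);
 *	if (vports)
 *		for (i = 0; i <= phba->max_vports && vports[i]; i++)
 *			... inspect vports[i] under its host_lock ...
 *	lpfc_destroy_vport_work_array(phba, vports);
 */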
/**
 * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi.
 * @phba: Pointer to hba context object.
 * @mboxq: Pointer to mailbox object.
 *
 * This function frees memory associated with the mailbox command.
 */
void
lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2555 UNREG_VFI mbxStatus error x%x "
				"HBA state x%x\n",
				mboxq->u.mb.mbxStatus, vport->port_state);
	}
	spin_lock_irq(shost->host_lock);
	phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
	spin_unlock_irq(shost->host_lock);
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi.
 * @phba: Pointer to hba context object.
 * @mboxq: Pointer to mailbox object.
 *
 * This function frees memory associated with the mailbox command.
 */
static void
lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2550 UNREG_FCFI mbxStatus error x%x "
				"HBA state x%x\n",
				mboxq->u.mb.mbxStatus, vport->port_state);
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_unregister_fcf_prep - Unregister fcf record preparation
 * @phba: Pointer to hba context object.
 *
 * This function prepares the HBA for unregistering the currently registered
 * FCF.  It unregisters, in order, the RPIs, the VPIs, and finally the VFI,
 * i.e. the reverse of the order in which the fabric bindings were built up.
 */
int
lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	int i = 0, rc;

	/* Unregister RPIs */
	if (lpfc_fcf_inuse(phba))
		lpfc_unreg_hba_rpis(phba);

	/* At this point, all discovery is aborted */
	phba->pport->port_state = LPFC_VPORT_UNKNOWN;

	/* Unregister VPIs */
	vports = lpfc_create_vport_work_array(phba);
	if (vports && (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			/* Stop FLOGI/FDISC retries */
			ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
			if (ndlp)
				lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
			lpfc_cleanup_pending_mbox(vports[i]);
			if (phba->sli_rev == LPFC_SLI_REV4)
				lpfc_sli4_unreg_all_rpis(vports[i]);
			lpfc_mbx_unreg_vpi(vports[i]);
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
			spin_unlock_irq(shost->host_lock);
		}
	lpfc_destroy_vport_work_array(phba, vports);
	if (i == 0 && (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))) {
		ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
		if (ndlp)
			lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
		lpfc_cleanup_pending_mbox(phba->pport);
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_unreg_all_rpis(phba->pport);
		lpfc_mbx_unreg_vpi(phba->pport);
		shost = lpfc_shost_from_vport(phba->pport);
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
		phba->pport->vpi_state &= ~LPFC_VPI_REGISTERED;
		spin_unlock_irq(shost->host_lock);
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Unregister the physical port VFI */
	rc = lpfc_issue_unreg_vfi(phba->pport);
	return rc;
}

/**
 * lpfc_sli4_unregister_fcf - Unregister currently registered FCF record
 * @phba: Pointer to hba context object.
 *
 * This function issues an UNREG_FCFI mailbox command to the HBA to
 * unregister the currently registered FCF record; the command is issued
 * without waiting, and completion is handled by lpfc_unregister_fcfi_cmpl().
 * The driver does not reset the driver FCF usage state flags.
 *
 * Return 0 if successfully issued, non-zero otherwise.
 */
int
lpfc_sli4_unregister_fcf(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2551 UNREG_FCFI mbox allocation failed "
				"HBA state x%x\n", phba->pport->port_state);
		return -ENOMEM;
	}
	lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);

	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2552 Unregister FCFI command failed rc x%x "
				"HBA state x%x\n",
				rc, phba->pport->port_state);
		/* mailbox was not queued, so the caller still owns it */
		mempool_free(mbox, phba->mbox_mem_pool);
		return -EINVAL;
	}
	return 0;
}
/**
 * lpfc_unregister_fcf_rescan - Unregister currently registered fcf and rescan
 * @phba: Pointer to hba context object.
 *
 * This function unregisters the currently registered FCF.  It also tries
 * to find another FCF for discovery by rescanning the HBA FCF table.
 */
void
lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
{
	int rc;

	/* Preparation for unregistering fcf */
	rc = lpfc_unregister_fcf_prep(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"2748 Failed to prepare for unregistering "
				"HBA's FCF record: rc=%d\n", rc);
		return;
	}

	/* Now, unregister FCF record and reset HBA FCF state */
	rc = lpfc_sli4_unregister_fcf(phba);
	if (rc)
		return;
	/* Reset HBA FCF states after successful unregister FCF */
	phba->fcf.fcf_flag = 0;
	phba->fcf.current_rec.flag = 0;

	/*
	 * If the driver is not unloading, check if there is any other
	 * FCF record that can be used for discovery.
	 */
	if ((phba->pport->load_flag & FC_UNLOADING) ||
	    (phba->link_state < LPFC_LINK_UP))
		return;

	/* This is considered as the initial FCF discovery scan */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= FCF_INIT_DISC;
	spin_unlock_irq(&phba->hbalock);

	/* Reset FCF roundrobin bmask for new discovery */
	lpfc_sli4_clear_fcf_rr_bmask(phba);

	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);

	if (rc) {
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2553 lpfc_unregister_unused_fcf failed "
				"to read FCF record HBA state x%x\n",
				phba->pport->port_state);
	}
}

/**
 * lpfc_unregister_fcf - Unregister the currently registered fcf record
 * @phba: Pointer to hba context object.
 *
 * This function just unregisters the currently registered FCF.  It does not
 * try to find another FCF for discovery.
 */
void
lpfc_unregister_fcf(struct lpfc_hba *phba)
{
	int rc;

	/* Preparation for unregistering fcf */
	rc = lpfc_unregister_fcf_prep(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"2749 Failed to prepare for unregistering "
				"HBA's FCF record: rc=%d\n", rc);
		return;
	}

	/* Now, unregister FCF record and reset HBA FCF state */
	rc = lpfc_sli4_unregister_fcf(phba);
	if (rc)
		return;
	/* Set proper HBA FCF states after successful unregister FCF */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_REGISTERED;
	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
 * @phba: Pointer to hba context object.
 *
 * This function checks whether there are any connected remote ports for the
 * FCF; if all the devices are disconnected, it unregisters the FCFI.
 * It also tries to use another FCF for discovery.
 */
void
lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
{
	/*
	 * If the HBA is not running in FIP mode, if the HBA does not support
	 * FCoE, if FCF discovery is ongoing, or if FCF has not been
	 * registered, do nothing.
	 */
	spin_lock_irq(&phba->hbalock);
	if (!(phba->hba_flag & HBA_FCOE_MODE) ||
	    !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
	    !(phba->hba_flag & HBA_FIP_SUPPORT) ||
	    (phba->fcf.fcf_flag & FCF_DISCOVERY) ||
	    (phba->pport->port_state == LPFC_FLOGI)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	spin_unlock_irq(&phba->hbalock);

	if (lpfc_fcf_inuse(phba))
		return;

	lpfc_unregister_fcf_rescan(phba);
}

/**
 * lpfc_read_fcf_conn_tbl - Create driver FCF connection table.
 * @phba: Pointer to hba context object.
 * @buff: Buffer containing the FCF connection table as in the config
 *	region.
 * This function creates the driver data structures for the FCF connection
 * record table read from config region 23.
 */
static void
lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
	uint8_t *buff)
{
	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
	struct lpfc_fcf_conn_hdr *conn_hdr;
	struct lpfc_fcf_conn_rec *conn_rec;
	uint32_t record_count;
	int i;

	/* Free the current connect table */
	list_for_each_entry_safe(conn_entry, next_conn_entry,
				 &phba->fcf_conn_rec_list, list) {
		list_del_init(&conn_entry->list);
		kfree(conn_entry);
	}

	conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
	record_count = conn_hdr->length * sizeof(uint32_t) /
		sizeof(struct lpfc_fcf_conn_rec);

	conn_rec = (struct lpfc_fcf_conn_rec *)
		(buff + sizeof(struct lpfc_fcf_conn_hdr));

	for (i = 0; i < record_count; i++) {
		if (!(conn_rec[i].flags & FCFCNCT_VALID))
			continue;
		conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
				     GFP_KERNEL);
		if (!conn_entry) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2566 Failed to allocate connection"
					" table entry\n");
			return;
		}

		memcpy(&conn_entry->conn_rec, &conn_rec[i],
		       sizeof(struct lpfc_fcf_conn_rec));
		list_add_tail(&conn_entry->list,
			      &phba->fcf_conn_rec_list);
	}

	if (!list_empty(&phba->fcf_conn_rec_list)) {
		i = 0;
		list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list,
				    list) {
			conn_rec = &conn_entry->conn_rec;
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3345 FCF connection list rec[%02d]: "
					"flags:x%04x, vtag:x%04x, "
					"fabric_name:x%02x:%02x:%02x:%02x:"
					"%02x:%02x:%02x:%02x, "
					"switch_name:x%02x:%02x:%02x:%02x:"
					"%02x:%02x:%02x:%02x\n", i++,
					conn_rec->flags, conn_rec->vlan_tag,
					conn_rec->fabric_name[0],
					conn_rec->fabric_name[1],
					conn_rec->fabric_name[2],
					conn_rec->fabric_name[3],
					conn_rec->fabric_name[4],
					conn_rec->fabric_name[5],
					conn_rec->fabric_name[6],
					conn_rec->fabric_name[7],
					conn_rec->switch_name[0],
					conn_rec->switch_name[1],
					conn_rec->switch_name[2],
					conn_rec->switch_name[3],
					conn_rec->switch_name[4],
					conn_rec->switch_name[5],
					conn_rec->switch_name[6],
					conn_rec->switch_name[7]);
		}
	}
}
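
/*
 * In lpfc_read_fcf_conn_tbl() above, conn_hdr->length counts 32-bit
 * payload words while the records are sizeof(struct lpfc_fcf_conn_rec)
 * bytes each, hence the conversion:
 *
 *	record_count = conn_hdr->length * sizeof(uint32_t) /
 *			sizeof(struct lpfc_fcf_conn_rec);
 *
 * Worked example (record size assumed for illustration): a header length
 * of 24 words with 32-byte records gives 24 * 4 / 32 = 3 records.
 */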
/**
 * lpfc_read_fcoe_param - Read FCoE parameters from config region 23.
 * @phba: Pointer to hba context object.
 * @buff: Buffer containing the FCoE parameter data structure.
 *
 * This function updates the driver data structure with the FCoE
 * parameters read from config region 23.
 */
static void
lpfc_read_fcoe_param(struct lpfc_hba *phba,
	uint8_t *buff)
{
	struct lpfc_fip_param_hdr *fcoe_param_hdr;
	struct lpfc_fcoe_params *fcoe_param;

	fcoe_param_hdr = (struct lpfc_fip_param_hdr *)
		buff;
	fcoe_param = (struct lpfc_fcoe_params *)
		(buff + sizeof(struct lpfc_fip_param_hdr));

	if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
	    (fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
		return;

	if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
		phba->valid_vlan = 1;
		phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &
			0xFFF;
	}

	phba->fc_map[0] = fcoe_param->fc_map[0];
	phba->fc_map[1] = fcoe_param->fc_map[1];
	phba->fc_map[2] = fcoe_param->fc_map[2];
	return;
}

/**
 * lpfc_get_rec_conf23 - Get a record type in config region data.
 * @buff: Buffer containing config region 23 data.
 * @size: Size of the data buffer.
 * @rec_type: Record type to be searched.
 *
 * This function searches the config region data to find the beginning
 * of the record specified by record_type.  If the record is found, this
 * function returns a pointer to the record; otherwise it returns NULL.
 */
static uint8_t *
lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type)
{
	uint32_t offset = 0, rec_length;

	if ((buff[0] == LPFC_REGION23_LAST_REC) ||
	    (size < sizeof(uint32_t)))
		return NULL;

	rec_length = buff[offset + 1];

	/*
	 * One TLV record has a one-word header and the number of data words
	 * specified in the rec_length field of the record header.
	 */
	while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t))
	       <= size) {
		if (buff[offset] == rec_type)
			return &buff[offset];

		if (buff[offset] == LPFC_REGION23_LAST_REC)
			return NULL;

		offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);
		rec_length = buff[offset + 1];
	}
	return NULL;
}

/**
 * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23.
 * @phba: Pointer to lpfc_hba data structure.
 * @buff: Buffer containing config region 23 data.
 * @size: Size of the data buffer.
 *
 * This function parses the FCoE config parameters in config region 23 and
 * populates the driver data structure with the parameters.
 */
void
lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
	uint8_t *buff,
	uint32_t size)
{
	uint32_t offset = 0, rec_length;
	uint8_t *rec_ptr;

	/*
	 * If the data size is less than 2 words, the signature and version
	 * cannot be verified.
	 */
	if (size < 2 * sizeof(uint32_t))
		return;

	/* Check the region signature first */
	if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2567 Config region 23 has bad signature\n");
		return;
	}

	offset += 4;

	/* Check the data structure version */
	if (buff[offset] != LPFC_REGION23_VERSION) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2568 Config region 23 has bad version\n");
		return;
	}
	offset += 4;

	rec_length = buff[offset + 1];

	/* Read FCoE param record */
	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
				      size - offset, FCOE_PARAM_TYPE);
	if (rec_ptr)
		lpfc_read_fcoe_param(phba, rec_ptr);

	/* Read FCF connection table */
	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
				      size - offset, FCOE_CONN_TBL_TYPE);
	if (rec_ptr)
		lpfc_read_fcf_conn_tbl(phba, rec_ptr);
}
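
/*
 * Config region 23 layout, as implied by the checks in
 * lpfc_parse_fcoe_conf() and lpfc_get_rec_conf23() above (byte offsets;
 * sketch for reference only):
 *
 *	+0	4-byte region signature (LPFC_REGION23_SIGNATURE)
 *	+4	version byte in buff[4] (LPFC_REGION23_VERSION), padded
 *		to a 4-byte word
 *	+8	TLV records: byte 0 = record type, byte 1 = length in
 *		32-bit data words; each record is one header word plus
 *		'length' data words.  A type byte equal to
 *		LPFC_REGION23_LAST_REC terminates the region.
 */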