1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term * 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 * EMULEX and SLI are trademarks of Emulex. * 8 * www.broadcom.com * 9 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 10 * * 11 * This program is free software; you can redistribute it and/or * 12 * modify it under the terms of version 2 of the GNU General * 13 * Public License as published by the Free Software Foundation. * 14 * This program is distributed in the hope that it will be useful. * 15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 19 * TO BE LEGALLY INVALID. See the GNU General Public License for * 20 * more details, a copy of which can be found in the file COPYING * 21 * included with this package. * 22 *******************************************************************/ 23 24 #include <linux/blkdev.h> 25 #include <linux/delay.h> 26 #include <linux/dma-mapping.h> 27 #include <linux/idr.h> 28 #include <linux/interrupt.h> 29 #include <linux/module.h> 30 #include <linux/kthread.h> 31 #include <linux/pci.h> 32 #include <linux/spinlock.h> 33 #include <linux/ctype.h> 34 #include <linux/aer.h> 35 #include <linux/slab.h> 36 #include <linux/firmware.h> 37 #include <linux/miscdevice.h> 38 #include <linux/percpu.h> 39 #include <linux/irq.h> 40 #include <linux/bitops.h> 41 #include <linux/crash_dump.h> 42 #include <linux/cpu.h> 43 #include <linux/cpuhotplug.h> 44 45 #include <scsi/scsi.h> 46 #include <scsi/scsi_device.h> 47 #include <scsi/scsi_host.h> 48 #include <scsi/scsi_transport_fc.h> 49 #include <scsi/scsi_tcq.h> 50 #include <scsi/fc/fc_fs.h> 51 52 #include "lpfc_hw4.h" 53 #include "lpfc_hw.h" 54 #include "lpfc_sli.h" 55 #include "lpfc_sli4.h" 56 #include "lpfc_nl.h" 57 #include "lpfc_disc.h" 58 #include "lpfc.h" 59 #include "lpfc_scsi.h" 60 #include "lpfc_nvme.h" 61 #include "lpfc_logmsg.h" 62 #include "lpfc_crtn.h" 63 #include "lpfc_vport.h" 64 #include "lpfc_version.h" 65 #include "lpfc_ids.h" 66 67 static enum cpuhp_state lpfc_cpuhp_state; 68 /* Used when mapping IRQ vectors in a driver centric manner */ 69 static uint32_t lpfc_present_cpu; 70 static bool lpfc_pldv_detect; 71 72 static void __lpfc_cpuhp_remove(struct lpfc_hba *phba); 73 static void lpfc_cpuhp_remove(struct lpfc_hba *phba); 74 static void lpfc_cpuhp_add(struct lpfc_hba *phba); 75 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); 76 static int lpfc_post_rcv_buf(struct lpfc_hba *); 77 static int lpfc_sli4_queue_verify(struct lpfc_hba *); 78 static int lpfc_create_bootstrap_mbox(struct lpfc_hba *); 79 static int lpfc_setup_endian_order(struct lpfc_hba *); 80 static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *); 81 static void lpfc_free_els_sgl_list(struct lpfc_hba *); 82 static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *); 83 static void lpfc_init_sgl_list(struct lpfc_hba *); 84 static int lpfc_init_active_sgl_array(struct lpfc_hba *); 85 static void lpfc_free_active_sgl(struct lpfc_hba *); 86 static int lpfc_hba_down_post_s3(struct lpfc_hba *phba); 87 static int lpfc_hba_down_post_s4(struct 
lpfc_hba *phba); 88 static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *); 89 static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *); 90 static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *); 91 static void lpfc_sli4_disable_intr(struct lpfc_hba *); 92 static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t); 93 static void lpfc_sli4_oas_verify(struct lpfc_hba *phba); 94 static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int); 95 static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *); 96 static int lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *); 97 static void lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba); 98 99 static struct scsi_transport_template *lpfc_transport_template = NULL; 100 static struct scsi_transport_template *lpfc_vport_transport_template = NULL; 101 static DEFINE_IDR(lpfc_hba_index); 102 #define LPFC_NVMET_BUF_POST 254 103 static int lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport); 104 105 /** 106 * lpfc_config_port_prep - Perform lpfc initialization prior to config port 107 * @phba: pointer to lpfc hba data structure. 108 * 109 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT 110 * mailbox command. It retrieves the revision information from the HBA and 111 * collects the Vital Product Data (VPD) about the HBA for preparing the 112 * configuration of the HBA. 113 * 114 * Return codes: 115 * 0 - success. 116 * -ERESTART - requests the SLI layer to reset the HBA and try again. 117 * Any other value - indicates an error. 118 **/ 119 int 120 lpfc_config_port_prep(struct lpfc_hba *phba) 121 { 122 lpfc_vpd_t *vp = &phba->vpd; 123 int i = 0, rc; 124 LPFC_MBOXQ_t *pmb; 125 MAILBOX_t *mb; 126 char *lpfc_vpd_data = NULL; 127 uint16_t offset = 0; 128 static char licensed[56] = 129 "key unlock for use with gnu public licensed code only\0"; 130 static int init_key = 1; 131 132 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 133 if (!pmb) { 134 phba->link_state = LPFC_HBA_ERROR; 135 return -ENOMEM; 136 } 137 138 mb = &pmb->u.mb; 139 phba->link_state = LPFC_INIT_MBX_CMDS; 140 141 if (lpfc_is_LC_HBA(phba->pcidev->device)) { 142 if (init_key) { 143 uint32_t *ptext = (uint32_t *) licensed; 144 145 for (i = 0; i < 56; i += sizeof (uint32_t), ptext++) 146 *ptext = cpu_to_be32(*ptext); 147 init_key = 0; 148 } 149 150 lpfc_read_nv(phba, pmb); 151 memset((char*)mb->un.varRDnvp.rsvd3, 0, 152 sizeof (mb->un.varRDnvp.rsvd3)); 153 memcpy((char*)mb->un.varRDnvp.rsvd3, licensed, 154 sizeof (licensed)); 155 156 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 157 158 if (rc != MBX_SUCCESS) { 159 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 160 "0324 Config Port initialization " 161 "error, mbxCmd x%x READ_NVPARM, " 162 "mbxStatus x%x\n", 163 mb->mbxCommand, mb->mbxStatus); 164 mempool_free(pmb, phba->mbox_mem_pool); 165 return -ERESTART; 166 } 167 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename, 168 sizeof(phba->wwnn)); 169 memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname, 170 sizeof(phba->wwpn)); 171 } 172 173 /* 174 * Clear all option bits except LPFC_SLI3_BG_ENABLED, 175 * which was already set in lpfc_get_cfgparam() 176 */ 177 phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED; 178 179 /* Setup and issue mailbox READ REV command */ 180 lpfc_read_rev(phba, pmb); 181 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 182 if (rc != MBX_SUCCESS) { 183 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 184 "0439 Adapter failed to init, mbxCmd x%x " 185 "READ_REV, mbxStatus x%x\n", 186 
mb->mbxCommand, mb->mbxStatus); 187 mempool_free( pmb, phba->mbox_mem_pool); 188 return -ERESTART; 189 } 190 191 192 /* 193 * The value of rr must be 1 since the driver set the cv field to 1. 194 * This setting requires the FW to set all revision fields. 195 */ 196 if (mb->un.varRdRev.rr == 0) { 197 vp->rev.rBit = 0; 198 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 199 "0440 Adapter failed to init, READ_REV has " 200 "missing revision information.\n"); 201 mempool_free(pmb, phba->mbox_mem_pool); 202 return -ERESTART; 203 } 204 205 if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) { 206 mempool_free(pmb, phba->mbox_mem_pool); 207 return -EINVAL; 208 } 209 210 /* Save information as VPD data */ 211 vp->rev.rBit = 1; 212 memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t)); 213 vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev; 214 memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16); 215 vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev; 216 memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16); 217 vp->rev.biuRev = mb->un.varRdRev.biuRev; 218 vp->rev.smRev = mb->un.varRdRev.smRev; 219 vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev; 220 vp->rev.endecRev = mb->un.varRdRev.endecRev; 221 vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh; 222 vp->rev.fcphLow = mb->un.varRdRev.fcphLow; 223 vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh; 224 vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow; 225 vp->rev.postKernRev = mb->un.varRdRev.postKernRev; 226 vp->rev.opFwRev = mb->un.varRdRev.opFwRev; 227 228 /* If the sli feature level is less then 9, we must 229 * tear down all RPIs and VPIs on link down if NPIV 230 * is enabled. 231 */ 232 if (vp->rev.feaLevelHigh < 9) 233 phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN; 234 235 if (lpfc_is_LC_HBA(phba->pcidev->device)) 236 memcpy(phba->RandomData, (char *)&mb->un.varWords[24], 237 sizeof (phba->RandomData)); 238 239 /* Get adapter VPD information */ 240 lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL); 241 if (!lpfc_vpd_data) 242 goto out_free_mbox; 243 do { 244 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD); 245 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 246 247 if (rc != MBX_SUCCESS) { 248 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 249 "0441 VPD not present on adapter, " 250 "mbxCmd x%x DUMP VPD, mbxStatus x%x\n", 251 mb->mbxCommand, mb->mbxStatus); 252 mb->un.varDmp.word_cnt = 0; 253 } 254 /* dump mem may return a zero when finished or we got a 255 * mailbox error, either way we are done. 256 */ 257 if (mb->un.varDmp.word_cnt == 0) 258 break; 259 260 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset) 261 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset; 262 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 263 lpfc_vpd_data + offset, 264 mb->un.varDmp.word_cnt); 265 offset += mb->un.varDmp.word_cnt; 266 } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE); 267 268 lpfc_parse_vpd(phba, lpfc_vpd_data, offset); 269 270 kfree(lpfc_vpd_data); 271 out_free_mbox: 272 mempool_free(pmb, phba->mbox_mem_pool); 273 return 0; 274 } 275 276 /** 277 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd 278 * @phba: pointer to lpfc hba data structure. 279 * @pmboxq: pointer to the driver internal queue element for mailbox command. 280 * 281 * This is the completion handler for driver's configuring asynchronous event 282 * mailbox command to the device. 
If the mailbox command returns successfully, 283 * it will set internal async event support flag to 1; otherwise, it will 284 * set internal async event support flag to 0. 285 **/ 286 static void 287 lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 288 { 289 if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS) 290 phba->temp_sensor_support = 1; 291 else 292 phba->temp_sensor_support = 0; 293 mempool_free(pmboxq, phba->mbox_mem_pool); 294 return; 295 } 296 297 /** 298 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler 299 * @phba: pointer to lpfc hba data structure. 300 * @pmboxq: pointer to the driver internal queue element for mailbox command. 301 * 302 * This is the completion handler for dump mailbox command for getting 303 * wake up parameters. When this command complete, the response contain 304 * Option rom version of the HBA. This function translate the version number 305 * into a human readable string and store it in OptionROMVersion. 306 **/ 307 static void 308 lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 309 { 310 struct prog_id *prg; 311 uint32_t prog_id_word; 312 char dist = ' '; 313 /* character array used for decoding dist type. */ 314 char dist_char[] = "nabx"; 315 316 if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) { 317 mempool_free(pmboxq, phba->mbox_mem_pool); 318 return; 319 } 320 321 prg = (struct prog_id *) &prog_id_word; 322 323 /* word 7 contain option rom version */ 324 prog_id_word = pmboxq->u.mb.un.varWords[7]; 325 326 /* Decode the Option rom version word to a readable string */ 327 dist = dist_char[prg->dist]; 328 329 if ((prg->dist == 3) && (prg->num == 0)) 330 snprintf(phba->OptionROMVersion, 32, "%d.%d%d", 331 prg->ver, prg->rev, prg->lev); 332 else 333 snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d", 334 prg->ver, prg->rev, prg->lev, 335 dist, prg->num); 336 mempool_free(pmboxq, phba->mbox_mem_pool); 337 return; 338 } 339 340 /** 341 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname, 342 * @vport: pointer to lpfc vport data structure. 343 * 344 * 345 * Return codes 346 * None. 
347 **/ 348 void 349 lpfc_update_vport_wwn(struct lpfc_vport *vport) 350 { 351 struct lpfc_hba *phba = vport->phba; 352 353 /* 354 * If the name is empty or there exists a soft name 355 * then copy the service params name, otherwise use the fc name 356 */ 357 if (vport->fc_nodename.u.wwn[0] == 0) 358 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, 359 sizeof(struct lpfc_name)); 360 else 361 memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename, 362 sizeof(struct lpfc_name)); 363 364 /* 365 * If the port name has changed, then set the Param changes flag 366 * to unreg the login 367 */ 368 if (vport->fc_portname.u.wwn[0] != 0 && 369 memcmp(&vport->fc_portname, &vport->fc_sparam.portName, 370 sizeof(struct lpfc_name))) { 371 vport->vport_flag |= FAWWPN_PARAM_CHG; 372 373 if (phba->sli_rev == LPFC_SLI_REV4 && 374 vport->port_type == LPFC_PHYSICAL_PORT && 375 phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_FABRIC) { 376 if (!(phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG)) 377 phba->sli4_hba.fawwpn_flag &= 378 ~LPFC_FAWWPN_FABRIC; 379 lpfc_printf_log(phba, KERN_INFO, 380 LOG_SLI | LOG_DISCOVERY | LOG_ELS, 381 "2701 FA-PWWN change WWPN from %llx to " 382 "%llx: vflag x%x fawwpn_flag x%x\n", 383 wwn_to_u64(vport->fc_portname.u.wwn), 384 wwn_to_u64 385 (vport->fc_sparam.portName.u.wwn), 386 vport->vport_flag, 387 phba->sli4_hba.fawwpn_flag); 388 memcpy(&vport->fc_portname, &vport->fc_sparam.portName, 389 sizeof(struct lpfc_name)); 390 } 391 } 392 393 if (vport->fc_portname.u.wwn[0] == 0) 394 memcpy(&vport->fc_portname, &vport->fc_sparam.portName, 395 sizeof(struct lpfc_name)); 396 else 397 memcpy(&vport->fc_sparam.portName, &vport->fc_portname, 398 sizeof(struct lpfc_name)); 399 } 400 401 /** 402 * lpfc_config_port_post - Perform lpfc initialization after config port 403 * @phba: pointer to lpfc hba data structure. 404 * 405 * This routine will do LPFC initialization after the CONFIG_PORT mailbox 406 * command call. It performs all internal resource and state setups on the 407 * port: post IOCB buffers, enable appropriate host interrupt attentions, 408 * ELS ring timers, etc. 409 * 410 * Return codes 411 * 0 - success. 412 * Any other value - error. 413 **/ 414 int 415 lpfc_config_port_post(struct lpfc_hba *phba) 416 { 417 struct lpfc_vport *vport = phba->pport; 418 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 419 LPFC_MBOXQ_t *pmb; 420 MAILBOX_t *mb; 421 struct lpfc_dmabuf *mp; 422 struct lpfc_sli *psli = &phba->sli; 423 uint32_t status, timeout; 424 int i, j; 425 int rc; 426 427 spin_lock_irq(&phba->hbalock); 428 /* 429 * If the Config port completed correctly the HBA is not 430 * over heated any more. 431 */ 432 if (phba->over_temp_state == HBA_OVER_TEMP) 433 phba->over_temp_state = HBA_NORMAL_TEMP; 434 spin_unlock_irq(&phba->hbalock); 435 436 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 437 if (!pmb) { 438 phba->link_state = LPFC_HBA_ERROR; 439 return -ENOMEM; 440 } 441 mb = &pmb->u.mb; 442 443 /* Get login parameters for NID. 
*/ 444 rc = lpfc_read_sparam(phba, pmb, 0); 445 if (rc) { 446 mempool_free(pmb, phba->mbox_mem_pool); 447 return -ENOMEM; 448 } 449 450 pmb->vport = vport; 451 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 452 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 453 "0448 Adapter failed init, mbxCmd x%x " 454 "READ_SPARM mbxStatus x%x\n", 455 mb->mbxCommand, mb->mbxStatus); 456 phba->link_state = LPFC_HBA_ERROR; 457 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); 458 return -EIO; 459 } 460 461 mp = (struct lpfc_dmabuf *)pmb->ctx_buf; 462 463 /* This dmabuf was allocated by lpfc_read_sparam. The dmabuf is no 464 * longer needed. Prevent unintended ctx_buf access as the mbox is 465 * reused. 466 */ 467 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm)); 468 lpfc_mbuf_free(phba, mp->virt, mp->phys); 469 kfree(mp); 470 pmb->ctx_buf = NULL; 471 lpfc_update_vport_wwn(vport); 472 473 /* Update the fc_host data structures with new wwn. */ 474 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 475 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 476 fc_host_max_npiv_vports(shost) = phba->max_vpi; 477 478 /* If no serial number in VPD data, use low 6 bytes of WWNN */ 479 /* This should be consolidated into parse_vpd ? - mr */ 480 if (phba->SerialNumber[0] == 0) { 481 uint8_t *outptr; 482 483 outptr = &vport->fc_nodename.u.s.IEEE[0]; 484 for (i = 0; i < 12; i++) { 485 status = *outptr++; 486 j = ((status & 0xf0) >> 4); 487 if (j <= 9) 488 phba->SerialNumber[i] = 489 (char)((uint8_t) 0x30 + (uint8_t) j); 490 else 491 phba->SerialNumber[i] = 492 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 493 i++; 494 j = (status & 0xf); 495 if (j <= 9) 496 phba->SerialNumber[i] = 497 (char)((uint8_t) 0x30 + (uint8_t) j); 498 else 499 phba->SerialNumber[i] = 500 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 501 } 502 } 503 504 lpfc_read_config(phba, pmb); 505 pmb->vport = vport; 506 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 507 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 508 "0453 Adapter failed to init, mbxCmd x%x " 509 "READ_CONFIG, mbxStatus x%x\n", 510 mb->mbxCommand, mb->mbxStatus); 511 phba->link_state = LPFC_HBA_ERROR; 512 mempool_free( pmb, phba->mbox_mem_pool); 513 return -EIO; 514 } 515 516 /* Check if the port is disabled */ 517 lpfc_sli_read_link_ste(phba); 518 519 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 520 if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) { 521 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 522 "3359 HBA queue depth changed from %d to %d\n", 523 phba->cfg_hba_queue_depth, 524 mb->un.varRdConfig.max_xri); 525 phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri; 526 } 527 528 phba->lmt = mb->un.varRdConfig.lmt; 529 530 /* Get the default values for Model Name and Description */ 531 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 532 533 phba->link_state = LPFC_LINK_DOWN; 534 535 /* Only process IOCBs on ELS ring till hba_state is READY */ 536 if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr) 537 psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT; 538 if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr) 539 psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT; 540 541 /* Post receive buffers for desired rings */ 542 if (phba->sli_rev != 3) 543 lpfc_post_rcv_buf(phba); 544 545 /* 546 * Configure HBA MSI-X attention conditions to messages if MSI-X mode 547 */ 548 if (phba->intr_type == MSIX) { 549 rc = lpfc_config_msi(phba, pmb); 550 if (rc) 
{ 551 mempool_free(pmb, phba->mbox_mem_pool); 552 return -EIO; 553 } 554 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 555 if (rc != MBX_SUCCESS) { 556 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 557 "0352 Config MSI mailbox command " 558 "failed, mbxCmd x%x, mbxStatus x%x\n", 559 pmb->u.mb.mbxCommand, 560 pmb->u.mb.mbxStatus); 561 mempool_free(pmb, phba->mbox_mem_pool); 562 return -EIO; 563 } 564 } 565 566 spin_lock_irq(&phba->hbalock); 567 /* Initialize ERATT handling flag */ 568 phba->hba_flag &= ~HBA_ERATT_HANDLED; 569 570 /* Enable appropriate host interrupts */ 571 if (lpfc_readl(phba->HCregaddr, &status)) { 572 spin_unlock_irq(&phba->hbalock); 573 return -EIO; 574 } 575 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; 576 if (psli->num_rings > 0) 577 status |= HC_R0INT_ENA; 578 if (psli->num_rings > 1) 579 status |= HC_R1INT_ENA; 580 if (psli->num_rings > 2) 581 status |= HC_R2INT_ENA; 582 if (psli->num_rings > 3) 583 status |= HC_R3INT_ENA; 584 585 if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) && 586 (phba->cfg_poll & DISABLE_FCP_RING_INT)) 587 status &= ~(HC_R0INT_ENA); 588 589 writel(status, phba->HCregaddr); 590 readl(phba->HCregaddr); /* flush */ 591 spin_unlock_irq(&phba->hbalock); 592 593 /* Set up ring-0 (ELS) timer */ 594 timeout = phba->fc_ratov * 2; 595 mod_timer(&vport->els_tmofunc, 596 jiffies + msecs_to_jiffies(1000 * timeout)); 597 /* Set up heart beat (HB) timer */ 598 mod_timer(&phba->hb_tmofunc, 599 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 600 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO); 601 phba->last_completion_time = jiffies; 602 /* Set up error attention (ERATT) polling timer */ 603 mod_timer(&phba->eratt_poll, 604 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); 605 606 if (phba->hba_flag & LINK_DISABLED) { 607 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 608 "2598 Adapter Link is disabled.\n"); 609 lpfc_down_link(phba, pmb); 610 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 611 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 612 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 613 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 614 "2599 Adapter failed to issue DOWN_LINK" 615 " mbox command rc 0x%x\n", rc); 616 617 mempool_free(pmb, phba->mbox_mem_pool); 618 return -EIO; 619 } 620 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { 621 mempool_free(pmb, phba->mbox_mem_pool); 622 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); 623 if (rc) 624 return rc; 625 } 626 /* MBOX buffer will be freed in mbox compl */ 627 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 628 if (!pmb) { 629 phba->link_state = LPFC_HBA_ERROR; 630 return -ENOMEM; 631 } 632 633 lpfc_config_async(phba, pmb, LPFC_ELS_RING); 634 pmb->mbox_cmpl = lpfc_config_async_cmpl; 635 pmb->vport = phba->pport; 636 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 637 638 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 639 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 640 "0456 Adapter failed to issue " 641 "ASYNCEVT_ENABLE mbox status x%x\n", 642 rc); 643 mempool_free(pmb, phba->mbox_mem_pool); 644 } 645 646 /* Get Option rom version */ 647 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 648 if (!pmb) { 649 phba->link_state = LPFC_HBA_ERROR; 650 return -ENOMEM; 651 } 652 653 lpfc_dump_wakeup_param(phba, pmb); 654 pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl; 655 pmb->vport = phba->pport; 656 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 657 658 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 659 lpfc_printf_log(phba, KERN_ERR, 
LOG_TRACE_EVENT, 660 "0435 Adapter failed " 661 "to get Option ROM version status x%x\n", rc); 662 mempool_free(pmb, phba->mbox_mem_pool); 663 } 664 665 return 0; 666 } 667 668 /** 669 * lpfc_sli4_refresh_params - update driver copy of params. 670 * @phba: Pointer to HBA context object. 671 * 672 * This is called to refresh driver copy of dynamic fields from the 673 * common_get_sli4_parameters descriptor. 674 **/ 675 int 676 lpfc_sli4_refresh_params(struct lpfc_hba *phba) 677 { 678 LPFC_MBOXQ_t *mboxq; 679 struct lpfc_mqe *mqe; 680 struct lpfc_sli4_parameters *mbx_sli4_parameters; 681 int length, rc; 682 683 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 684 if (!mboxq) 685 return -ENOMEM; 686 687 mqe = &mboxq->u.mqe; 688 /* Read the port's SLI4 Config Parameters */ 689 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) - 690 sizeof(struct lpfc_sli4_cfg_mhdr)); 691 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 692 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS, 693 length, LPFC_SLI4_MBX_EMBED); 694 695 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 696 if (unlikely(rc)) { 697 mempool_free(mboxq, phba->mbox_mem_pool); 698 return rc; 699 } 700 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters; 701 phba->sli4_hba.pc_sli4_params.mi_cap = 702 bf_get(cfg_mi_ver, mbx_sli4_parameters); 703 704 /* Are we forcing MI off via module parameter? */ 705 if (phba->cfg_enable_mi) 706 phba->sli4_hba.pc_sli4_params.mi_ver = 707 bf_get(cfg_mi_ver, mbx_sli4_parameters); 708 else 709 phba->sli4_hba.pc_sli4_params.mi_ver = 0; 710 711 phba->sli4_hba.pc_sli4_params.cmf = 712 bf_get(cfg_cmf, mbx_sli4_parameters); 713 phba->sli4_hba.pc_sli4_params.pls = 714 bf_get(cfg_pvl, mbx_sli4_parameters); 715 716 mempool_free(mboxq, phba->mbox_mem_pool); 717 return rc; 718 } 719 720 /** 721 * lpfc_hba_init_link - Initialize the FC link 722 * @phba: pointer to lpfc hba data structure. 723 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 724 * 725 * This routine will issue the INIT_LINK mailbox command call. 726 * It is available to other drivers through the lpfc_hba data 727 * structure for use as a delayed link up mechanism with the 728 * module parameter lpfc_suppress_link_up. 729 * 730 * Return code 731 * 0 - success 732 * Any other value - error 733 **/ 734 static int 735 lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag) 736 { 737 return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag); 738 } 739 740 /** 741 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology 742 * @phba: pointer to lpfc hba data structure. 743 * @fc_topology: desired fc topology. 744 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 745 * 746 * This routine will issue the INIT_LINK mailbox command call. 747 * It is available to other drivers through the lpfc_hba data 748 * structure for use as a delayed link up mechanism with the 749 * module parameter lpfc_suppress_link_up. 
750 * 751 * Return code 752 * 0 - success 753 * Any other value - error 754 **/ 755 int 756 lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology, 757 uint32_t flag) 758 { 759 struct lpfc_vport *vport = phba->pport; 760 LPFC_MBOXQ_t *pmb; 761 MAILBOX_t *mb; 762 int rc; 763 764 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 765 if (!pmb) { 766 phba->link_state = LPFC_HBA_ERROR; 767 return -ENOMEM; 768 } 769 mb = &pmb->u.mb; 770 pmb->vport = vport; 771 772 if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) || 773 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) && 774 !(phba->lmt & LMT_1Gb)) || 775 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) && 776 !(phba->lmt & LMT_2Gb)) || 777 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) && 778 !(phba->lmt & LMT_4Gb)) || 779 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) && 780 !(phba->lmt & LMT_8Gb)) || 781 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) && 782 !(phba->lmt & LMT_10Gb)) || 783 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) && 784 !(phba->lmt & LMT_16Gb)) || 785 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) && 786 !(phba->lmt & LMT_32Gb)) || 787 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) && 788 !(phba->lmt & LMT_64Gb))) { 789 /* Reset link speed to auto */ 790 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 791 "1302 Invalid speed for this board:%d " 792 "Reset link speed to auto.\n", 793 phba->cfg_link_speed); 794 phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO; 795 } 796 lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed); 797 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 798 if (phba->sli_rev < LPFC_SLI_REV4) 799 lpfc_set_loopback_flag(phba); 800 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 801 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 802 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 803 "0498 Adapter failed to init, mbxCmd x%x " 804 "INIT_LINK, mbxStatus x%x\n", 805 mb->mbxCommand, mb->mbxStatus); 806 if (phba->sli_rev <= LPFC_SLI_REV3) { 807 /* Clear all interrupt enable conditions */ 808 writel(0, phba->HCregaddr); 809 readl(phba->HCregaddr); /* flush */ 810 /* Clear all pending interrupts */ 811 writel(0xffffffff, phba->HAregaddr); 812 readl(phba->HAregaddr); /* flush */ 813 } 814 phba->link_state = LPFC_HBA_ERROR; 815 if (rc != MBX_BUSY || flag == MBX_POLL) 816 mempool_free(pmb, phba->mbox_mem_pool); 817 return -EIO; 818 } 819 phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK; 820 if (flag == MBX_POLL) 821 mempool_free(pmb, phba->mbox_mem_pool); 822 823 return 0; 824 } 825 826 /** 827 * lpfc_hba_down_link - this routine downs the FC link 828 * @phba: pointer to lpfc hba data structure. 829 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 830 * 831 * This routine will issue the DOWN_LINK mailbox command call. 832 * It is available to other drivers through the lpfc_hba data 833 * structure for use to stop the link. 
834 * 835 * Return code 836 * 0 - success 837 * Any other value - error 838 **/ 839 static int 840 lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag) 841 { 842 LPFC_MBOXQ_t *pmb; 843 int rc; 844 845 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 846 if (!pmb) { 847 phba->link_state = LPFC_HBA_ERROR; 848 return -ENOMEM; 849 } 850 851 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 852 "0491 Adapter Link is disabled.\n"); 853 lpfc_down_link(phba, pmb); 854 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 855 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 856 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 857 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 858 "2522 Adapter failed to issue DOWN_LINK" 859 " mbox command rc 0x%x\n", rc); 860 861 mempool_free(pmb, phba->mbox_mem_pool); 862 return -EIO; 863 } 864 if (flag == MBX_POLL) 865 mempool_free(pmb, phba->mbox_mem_pool); 866 867 return 0; 868 } 869 870 /** 871 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset 872 * @phba: pointer to lpfc HBA data structure. 873 * 874 * This routine will do LPFC uninitialization before the HBA is reset when 875 * bringing down the SLI Layer. 876 * 877 * Return codes 878 * 0 - success. 879 * Any other value - error. 880 **/ 881 int 882 lpfc_hba_down_prep(struct lpfc_hba *phba) 883 { 884 struct lpfc_vport **vports; 885 int i; 886 887 if (phba->sli_rev <= LPFC_SLI_REV3) { 888 /* Disable interrupts */ 889 writel(0, phba->HCregaddr); 890 readl(phba->HCregaddr); /* flush */ 891 } 892 893 if (phba->pport->load_flag & FC_UNLOADING) 894 lpfc_cleanup_discovery_resources(phba->pport); 895 else { 896 vports = lpfc_create_vport_work_array(phba); 897 if (vports != NULL) 898 for (i = 0; i <= phba->max_vports && 899 vports[i] != NULL; i++) 900 lpfc_cleanup_discovery_resources(vports[i]); 901 lpfc_destroy_vport_work_array(phba, vports); 902 } 903 return 0; 904 } 905 906 /** 907 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free 908 * rspiocb which got deferred 909 * 910 * @phba: pointer to lpfc HBA data structure. 911 * 912 * This routine will cleanup completed slow path events after HBA is reset 913 * when bringing down the SLI Layer. 914 * 915 * 916 * Return codes 917 * void. 918 **/ 919 static void 920 lpfc_sli4_free_sp_events(struct lpfc_hba *phba) 921 { 922 struct lpfc_iocbq *rspiocbq; 923 struct hbq_dmabuf *dmabuf; 924 struct lpfc_cq_event *cq_event; 925 926 spin_lock_irq(&phba->hbalock); 927 phba->hba_flag &= ~HBA_SP_QUEUE_EVT; 928 spin_unlock_irq(&phba->hbalock); 929 930 while (!list_empty(&phba->sli4_hba.sp_queue_event)) { 931 /* Get the response iocb from the head of work queue */ 932 spin_lock_irq(&phba->hbalock); 933 list_remove_head(&phba->sli4_hba.sp_queue_event, 934 cq_event, struct lpfc_cq_event, list); 935 spin_unlock_irq(&phba->hbalock); 936 937 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { 938 case CQE_CODE_COMPL_WQE: 939 rspiocbq = container_of(cq_event, struct lpfc_iocbq, 940 cq_event); 941 lpfc_sli_release_iocbq(phba, rspiocbq); 942 break; 943 case CQE_CODE_RECEIVE: 944 case CQE_CODE_RECEIVE_V1: 945 dmabuf = container_of(cq_event, struct hbq_dmabuf, 946 cq_event); 947 lpfc_in_buf_free(phba, &dmabuf->dbuf); 948 } 949 } 950 } 951 952 /** 953 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset 954 * @phba: pointer to lpfc HBA data structure. 955 * 956 * This routine will cleanup posted ELS buffers after the HBA is reset 957 * when bringing down the SLI Layer. 958 * 959 * 960 * Return codes 961 * void. 
962 **/ 963 static void 964 lpfc_hba_free_post_buf(struct lpfc_hba *phba) 965 { 966 struct lpfc_sli *psli = &phba->sli; 967 struct lpfc_sli_ring *pring; 968 struct lpfc_dmabuf *mp, *next_mp; 969 LIST_HEAD(buflist); 970 int count; 971 972 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) 973 lpfc_sli_hbqbuf_free_all(phba); 974 else { 975 /* Cleanup preposted buffers on the ELS ring */ 976 pring = &psli->sli3_ring[LPFC_ELS_RING]; 977 spin_lock_irq(&phba->hbalock); 978 list_splice_init(&pring->postbufq, &buflist); 979 spin_unlock_irq(&phba->hbalock); 980 981 count = 0; 982 list_for_each_entry_safe(mp, next_mp, &buflist, list) { 983 list_del(&mp->list); 984 count++; 985 lpfc_mbuf_free(phba, mp->virt, mp->phys); 986 kfree(mp); 987 } 988 989 spin_lock_irq(&phba->hbalock); 990 pring->postbufq_cnt -= count; 991 spin_unlock_irq(&phba->hbalock); 992 } 993 } 994 995 /** 996 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset 997 * @phba: pointer to lpfc HBA data structure. 998 * 999 * This routine will cleanup the txcmplq after the HBA is reset when bringing 1000 * down the SLI Layer. 1001 * 1002 * Return codes 1003 * void 1004 **/ 1005 static void 1006 lpfc_hba_clean_txcmplq(struct lpfc_hba *phba) 1007 { 1008 struct lpfc_sli *psli = &phba->sli; 1009 struct lpfc_queue *qp = NULL; 1010 struct lpfc_sli_ring *pring; 1011 LIST_HEAD(completions); 1012 int i; 1013 struct lpfc_iocbq *piocb, *next_iocb; 1014 1015 if (phba->sli_rev != LPFC_SLI_REV4) { 1016 for (i = 0; i < psli->num_rings; i++) { 1017 pring = &psli->sli3_ring[i]; 1018 spin_lock_irq(&phba->hbalock); 1019 /* At this point in time the HBA is either reset or DOA 1020 * Nothing should be on txcmplq as it will 1021 * NEVER complete. 1022 */ 1023 list_splice_init(&pring->txcmplq, &completions); 1024 pring->txcmplq_cnt = 0; 1025 spin_unlock_irq(&phba->hbalock); 1026 1027 lpfc_sli_abort_iocb_ring(phba, pring); 1028 } 1029 /* Cancel all the IOCBs from the completions list */ 1030 lpfc_sli_cancel_iocbs(phba, &completions, 1031 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); 1032 return; 1033 } 1034 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 1035 pring = qp->pring; 1036 if (!pring) 1037 continue; 1038 spin_lock_irq(&pring->ring_lock); 1039 list_for_each_entry_safe(piocb, next_iocb, 1040 &pring->txcmplq, list) 1041 piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ; 1042 list_splice_init(&pring->txcmplq, &completions); 1043 pring->txcmplq_cnt = 0; 1044 spin_unlock_irq(&pring->ring_lock); 1045 lpfc_sli_abort_iocb_ring(phba, pring); 1046 } 1047 /* Cancel all the IOCBs from the completions list */ 1048 lpfc_sli_cancel_iocbs(phba, &completions, 1049 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); 1050 } 1051 1052 /** 1053 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset 1054 * @phba: pointer to lpfc HBA data structure. 1055 * 1056 * This routine will do uninitialization after the HBA is reset when bring 1057 * down the SLI Layer. 1058 * 1059 * Return codes 1060 * 0 - success. 1061 * Any other value - error. 1062 **/ 1063 static int 1064 lpfc_hba_down_post_s3(struct lpfc_hba *phba) 1065 { 1066 lpfc_hba_free_post_buf(phba); 1067 lpfc_hba_clean_txcmplq(phba); 1068 return 0; 1069 } 1070 1071 /** 1072 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset 1073 * @phba: pointer to lpfc HBA data structure. 1074 * 1075 * This routine will do uninitialization after the HBA is reset when bring 1076 * down the SLI Layer. 1077 * 1078 * Return codes 1079 * 0 - success. 1080 * Any other value - error. 
1081 **/ 1082 static int 1083 lpfc_hba_down_post_s4(struct lpfc_hba *phba) 1084 { 1085 struct lpfc_io_buf *psb, *psb_next; 1086 struct lpfc_async_xchg_ctx *ctxp, *ctxp_next; 1087 struct lpfc_sli4_hdw_queue *qp; 1088 LIST_HEAD(aborts); 1089 LIST_HEAD(nvme_aborts); 1090 LIST_HEAD(nvmet_aborts); 1091 struct lpfc_sglq *sglq_entry = NULL; 1092 int cnt, idx; 1093 1094 1095 lpfc_sli_hbqbuf_free_all(phba); 1096 lpfc_hba_clean_txcmplq(phba); 1097 1098 /* At this point in time the HBA is either reset or DOA. Either 1099 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be 1100 * on the lpfc_els_sgl_list so that it can either be freed if the 1101 * driver is unloading or reposted if the driver is restarting 1102 * the port. 1103 */ 1104 1105 /* sgl_list_lock required because worker thread uses this 1106 * list. 1107 */ 1108 spin_lock_irq(&phba->sli4_hba.sgl_list_lock); 1109 list_for_each_entry(sglq_entry, 1110 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) 1111 sglq_entry->state = SGL_FREED; 1112 1113 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list, 1114 &phba->sli4_hba.lpfc_els_sgl_list); 1115 1116 1117 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock); 1118 1119 /* abts_xxxx_buf_list_lock required because worker thread uses this 1120 * list. 1121 */ 1122 spin_lock_irq(&phba->hbalock); 1123 cnt = 0; 1124 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 1125 qp = &phba->sli4_hba.hdwq[idx]; 1126 1127 spin_lock(&qp->abts_io_buf_list_lock); 1128 list_splice_init(&qp->lpfc_abts_io_buf_list, 1129 &aborts); 1130 1131 list_for_each_entry_safe(psb, psb_next, &aborts, list) { 1132 psb->pCmd = NULL; 1133 psb->status = IOSTAT_SUCCESS; 1134 cnt++; 1135 } 1136 spin_lock(&qp->io_buf_list_put_lock); 1137 list_splice_init(&aborts, &qp->lpfc_io_buf_list_put); 1138 qp->put_io_bufs += qp->abts_scsi_io_bufs; 1139 qp->put_io_bufs += qp->abts_nvme_io_bufs; 1140 qp->abts_scsi_io_bufs = 0; 1141 qp->abts_nvme_io_bufs = 0; 1142 spin_unlock(&qp->io_buf_list_put_lock); 1143 spin_unlock(&qp->abts_io_buf_list_lock); 1144 } 1145 spin_unlock_irq(&phba->hbalock); 1146 1147 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 1148 spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock); 1149 list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list, 1150 &nvmet_aborts); 1151 spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock); 1152 list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) { 1153 ctxp->flag &= ~(LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP); 1154 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); 1155 } 1156 } 1157 1158 lpfc_sli4_free_sp_events(phba); 1159 return cnt; 1160 } 1161 1162 /** 1163 * lpfc_hba_down_post - Wrapper func for hba down post routine 1164 * @phba: pointer to lpfc HBA data structure. 1165 * 1166 * This routine wraps the actual SLI3 or SLI4 routine for performing 1167 * uninitialization after the HBA is reset when bring down the SLI Layer. 1168 * 1169 * Return codes 1170 * 0 - success. 1171 * Any other value - error. 1172 **/ 1173 int 1174 lpfc_hba_down_post(struct lpfc_hba *phba) 1175 { 1176 return (*phba->lpfc_hba_down_post)(phba); 1177 } 1178 1179 /** 1180 * lpfc_hb_timeout - The HBA-timer timeout handler 1181 * @t: timer context used to obtain the pointer to lpfc hba data structure. 1182 * 1183 * This is the HBA-timer timeout handler registered to the lpfc driver. When 1184 * this timer fires, a HBA timeout event shall be posted to the lpfc driver 1185 * work-port-events bitmap and the worker thread is notified. 
This timeout 1186 * event will be used by the worker thread to invoke the actual timeout 1187 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will 1188 * be performed in the timeout handler and the HBA timeout event bit shall 1189 * be cleared by the worker thread after it has taken the event bitmap out. 1190 **/ 1191 static void 1192 lpfc_hb_timeout(struct timer_list *t) 1193 { 1194 struct lpfc_hba *phba; 1195 uint32_t tmo_posted; 1196 unsigned long iflag; 1197 1198 phba = from_timer(phba, t, hb_tmofunc); 1199 1200 /* Check for heart beat timeout conditions */ 1201 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 1202 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO; 1203 if (!tmo_posted) 1204 phba->pport->work_port_events |= WORKER_HB_TMO; 1205 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 1206 1207 /* Tell the worker thread there is work to do */ 1208 if (!tmo_posted) 1209 lpfc_worker_wake_up(phba); 1210 return; 1211 } 1212 1213 /** 1214 * lpfc_rrq_timeout - The RRQ-timer timeout handler 1215 * @t: timer context used to obtain the pointer to lpfc hba data structure. 1216 * 1217 * This is the RRQ-timer timeout handler registered to the lpfc driver. When 1218 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver 1219 * work-port-events bitmap and the worker thread is notified. This timeout 1220 * event will be used by the worker thread to invoke the actual timeout 1221 * handler routine, lpfc_rrq_handler. Any periodical operations will 1222 * be performed in the timeout handler and the RRQ timeout event bit shall 1223 * be cleared by the worker thread after it has taken the event bitmap out. 1224 **/ 1225 static void 1226 lpfc_rrq_timeout(struct timer_list *t) 1227 { 1228 struct lpfc_hba *phba; 1229 unsigned long iflag; 1230 1231 phba = from_timer(phba, t, rrq_tmr); 1232 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 1233 if (!(phba->pport->load_flag & FC_UNLOADING)) 1234 phba->hba_flag |= HBA_RRQ_ACTIVE; 1235 else 1236 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 1237 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 1238 1239 if (!(phba->pport->load_flag & FC_UNLOADING)) 1240 lpfc_worker_wake_up(phba); 1241 } 1242 1243 /** 1244 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function 1245 * @phba: pointer to lpfc hba data structure. 1246 * @pmboxq: pointer to the driver internal queue element for mailbox command. 1247 * 1248 * This is the callback function to the lpfc heart-beat mailbox command. 1249 * If configured, the lpfc driver issues the heart-beat mailbox command to 1250 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the 1251 * heart-beat mailbox command is issued, the driver shall set up heart-beat 1252 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks 1253 * heart-beat outstanding state. Once the mailbox command comes back and 1254 * no error conditions detected, the heart-beat mailbox command timer is 1255 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding 1256 * state is cleared for the next heart-beat. If the timer expired with the 1257 * heart-beat outstanding state set, the driver will put the HBA offline. 
1258 **/ 1259 static void 1260 lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 1261 { 1262 unsigned long drvr_flag; 1263 1264 spin_lock_irqsave(&phba->hbalock, drvr_flag); 1265 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO); 1266 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 1267 1268 /* Check and reset heart-beat timer if necessary */ 1269 mempool_free(pmboxq, phba->mbox_mem_pool); 1270 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) && 1271 !(phba->link_state == LPFC_HBA_ERROR) && 1272 !(phba->pport->load_flag & FC_UNLOADING)) 1273 mod_timer(&phba->hb_tmofunc, 1274 jiffies + 1275 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 1276 return; 1277 } 1278 1279 /* 1280 * lpfc_idle_stat_delay_work - idle_stat tracking 1281 * 1282 * This routine tracks per-cq idle_stat and determines polling decisions. 1283 * 1284 * Return codes: 1285 * None 1286 **/ 1287 static void 1288 lpfc_idle_stat_delay_work(struct work_struct *work) 1289 { 1290 struct lpfc_hba *phba = container_of(to_delayed_work(work), 1291 struct lpfc_hba, 1292 idle_stat_delay_work); 1293 struct lpfc_queue *cq; 1294 struct lpfc_sli4_hdw_queue *hdwq; 1295 struct lpfc_idle_stat *idle_stat; 1296 u32 i, idle_percent; 1297 u64 wall, wall_idle, diff_wall, diff_idle, busy_time; 1298 1299 if (phba->pport->load_flag & FC_UNLOADING) 1300 return; 1301 1302 if (phba->link_state == LPFC_HBA_ERROR || 1303 phba->pport->fc_flag & FC_OFFLINE_MODE || 1304 phba->cmf_active_mode != LPFC_CFG_OFF) 1305 goto requeue; 1306 1307 for_each_present_cpu(i) { 1308 hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq]; 1309 cq = hdwq->io_cq; 1310 1311 /* Skip if we've already handled this cq's primary CPU */ 1312 if (cq->chann != i) 1313 continue; 1314 1315 idle_stat = &phba->sli4_hba.idle_stat[i]; 1316 1317 /* get_cpu_idle_time returns values as running counters. Thus, 1318 * to know the amount for this period, the prior counter values 1319 * need to be subtracted from the current counter values. 1320 * From there, the idle time stat can be calculated as a 1321 * percentage of 100 - the sum of the other consumption times. 
1322 */ 1323 wall_idle = get_cpu_idle_time(i, &wall, 1); 1324 diff_idle = wall_idle - idle_stat->prev_idle; 1325 diff_wall = wall - idle_stat->prev_wall; 1326 1327 if (diff_wall <= diff_idle) 1328 busy_time = 0; 1329 else 1330 busy_time = diff_wall - diff_idle; 1331 1332 idle_percent = div64_u64(100 * busy_time, diff_wall); 1333 idle_percent = 100 - idle_percent; 1334 1335 if (idle_percent < 15) 1336 cq->poll_mode = LPFC_QUEUE_WORK; 1337 else 1338 cq->poll_mode = LPFC_IRQ_POLL; 1339 1340 idle_stat->prev_idle = wall_idle; 1341 idle_stat->prev_wall = wall; 1342 } 1343 1344 requeue: 1345 schedule_delayed_work(&phba->idle_stat_delay_work, 1346 msecs_to_jiffies(LPFC_IDLE_STAT_DELAY)); 1347 } 1348 1349 static void 1350 lpfc_hb_eq_delay_work(struct work_struct *work) 1351 { 1352 struct lpfc_hba *phba = container_of(to_delayed_work(work), 1353 struct lpfc_hba, eq_delay_work); 1354 struct lpfc_eq_intr_info *eqi, *eqi_new; 1355 struct lpfc_queue *eq, *eq_next; 1356 unsigned char *ena_delay = NULL; 1357 uint32_t usdelay; 1358 int i; 1359 1360 if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING) 1361 return; 1362 1363 if (phba->link_state == LPFC_HBA_ERROR || 1364 phba->pport->fc_flag & FC_OFFLINE_MODE) 1365 goto requeue; 1366 1367 ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay), 1368 GFP_KERNEL); 1369 if (!ena_delay) 1370 goto requeue; 1371 1372 for (i = 0; i < phba->cfg_irq_chann; i++) { 1373 /* Get the EQ corresponding to the IRQ vector */ 1374 eq = phba->sli4_hba.hba_eq_hdl[i].eq; 1375 if (!eq) 1376 continue; 1377 if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) { 1378 eq->q_flag &= ~HBA_EQ_DELAY_CHK; 1379 ena_delay[eq->last_cpu] = 1; 1380 } 1381 } 1382 1383 for_each_present_cpu(i) { 1384 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i); 1385 if (ena_delay[i]) { 1386 usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP; 1387 if (usdelay > LPFC_MAX_AUTO_EQ_DELAY) 1388 usdelay = LPFC_MAX_AUTO_EQ_DELAY; 1389 } else { 1390 usdelay = 0; 1391 } 1392 1393 eqi->icnt = 0; 1394 1395 list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) { 1396 if (unlikely(eq->last_cpu != i)) { 1397 eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info, 1398 eq->last_cpu); 1399 list_move_tail(&eq->cpu_list, &eqi_new->list); 1400 continue; 1401 } 1402 if (usdelay != eq->q_mode) 1403 lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1, 1404 usdelay); 1405 } 1406 } 1407 1408 kfree(ena_delay); 1409 1410 requeue: 1411 queue_delayed_work(phba->wq, &phba->eq_delay_work, 1412 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS)); 1413 } 1414 1415 /** 1416 * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution 1417 * @phba: pointer to lpfc hba data structure. 1418 * 1419 * For each heartbeat, this routine does some heuristic methods to adjust 1420 * XRI distribution. The goal is to fully utilize free XRIs. 1421 **/ 1422 static void lpfc_hb_mxp_handler(struct lpfc_hba *phba) 1423 { 1424 u32 i; 1425 u32 hwq_count; 1426 1427 hwq_count = phba->cfg_hdw_queue; 1428 for (i = 0; i < hwq_count; i++) { 1429 /* Adjust XRIs in private pool */ 1430 lpfc_adjust_pvt_pool_count(phba, i); 1431 1432 /* Adjust high watermark */ 1433 lpfc_adjust_high_watermark(phba, i); 1434 1435 #ifdef LPFC_MXP_STAT 1436 /* Snapshot pbl, pvt and busy count */ 1437 lpfc_snapshot_mxp(phba, i); 1438 #endif 1439 } 1440 } 1441 1442 /** 1443 * lpfc_issue_hb_mbox - Issues heart-beat mailbox command 1444 * @phba: pointer to lpfc hba data structure. 
1445 * 1446 * If a HB mbox is not already in progrees, this routine will allocate 1447 * a LPFC_MBOXQ_t, populate it with a MBX_HEARTBEAT (0x31) command, 1448 * and issue it. The HBA_HBEAT_INP flag means the command is in progress. 1449 **/ 1450 int 1451 lpfc_issue_hb_mbox(struct lpfc_hba *phba) 1452 { 1453 LPFC_MBOXQ_t *pmboxq; 1454 int retval; 1455 1456 /* Is a Heartbeat mbox already in progress */ 1457 if (phba->hba_flag & HBA_HBEAT_INP) 1458 return 0; 1459 1460 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1461 if (!pmboxq) 1462 return -ENOMEM; 1463 1464 lpfc_heart_beat(phba, pmboxq); 1465 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl; 1466 pmboxq->vport = phba->pport; 1467 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); 1468 1469 if (retval != MBX_BUSY && retval != MBX_SUCCESS) { 1470 mempool_free(pmboxq, phba->mbox_mem_pool); 1471 return -ENXIO; 1472 } 1473 phba->hba_flag |= HBA_HBEAT_INP; 1474 1475 return 0; 1476 } 1477 1478 /** 1479 * lpfc_issue_hb_tmo - Signals heartbeat timer to issue mbox command 1480 * @phba: pointer to lpfc hba data structure. 1481 * 1482 * The heartbeat timer (every 5 sec) will fire. If the HBA_HBEAT_TMO 1483 * flag is set, it will force a MBX_HEARTBEAT mbox command, regardless 1484 * of the value of lpfc_enable_hba_heartbeat. 1485 * If lpfc_enable_hba_heartbeat is set, the timeout routine will always 1486 * try to issue a MBX_HEARTBEAT mbox command. 1487 **/ 1488 void 1489 lpfc_issue_hb_tmo(struct lpfc_hba *phba) 1490 { 1491 if (phba->cfg_enable_hba_heartbeat) 1492 return; 1493 phba->hba_flag |= HBA_HBEAT_TMO; 1494 } 1495 1496 /** 1497 * lpfc_hb_timeout_handler - The HBA-timer timeout handler 1498 * @phba: pointer to lpfc hba data structure. 1499 * 1500 * This is the actual HBA-timer timeout handler to be invoked by the worker 1501 * thread whenever the HBA timer fired and HBA-timeout event posted. This 1502 * handler performs any periodic operations needed for the device. If such 1503 * periodic event has already been attended to either in the interrupt handler 1504 * or by processing slow-ring or fast-ring events within the HBA-timer 1505 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets 1506 * the timer for the next timeout period. If lpfc heart-beat mailbox command 1507 * is configured and there is no heart-beat mailbox command outstanding, a 1508 * heart-beat mailbox is issued and timer set properly. Otherwise, if there 1509 * has been a heart-beat mailbox command outstanding, the HBA shall be put 1510 * to offline. 
1511 **/ 1512 void 1513 lpfc_hb_timeout_handler(struct lpfc_hba *phba) 1514 { 1515 struct lpfc_vport **vports; 1516 struct lpfc_dmabuf *buf_ptr; 1517 int retval = 0; 1518 int i, tmo; 1519 struct lpfc_sli *psli = &phba->sli; 1520 LIST_HEAD(completions); 1521 1522 if (phba->cfg_xri_rebalancing) { 1523 /* Multi-XRI pools handler */ 1524 lpfc_hb_mxp_handler(phba); 1525 } 1526 1527 vports = lpfc_create_vport_work_array(phba); 1528 if (vports != NULL) 1529 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 1530 lpfc_rcv_seq_check_edtov(vports[i]); 1531 lpfc_fdmi_change_check(vports[i]); 1532 } 1533 lpfc_destroy_vport_work_array(phba, vports); 1534 1535 if ((phba->link_state == LPFC_HBA_ERROR) || 1536 (phba->pport->load_flag & FC_UNLOADING) || 1537 (phba->pport->fc_flag & FC_OFFLINE_MODE)) 1538 return; 1539 1540 if (phba->elsbuf_cnt && 1541 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) { 1542 spin_lock_irq(&phba->hbalock); 1543 list_splice_init(&phba->elsbuf, &completions); 1544 phba->elsbuf_cnt = 0; 1545 phba->elsbuf_prev_cnt = 0; 1546 spin_unlock_irq(&phba->hbalock); 1547 1548 while (!list_empty(&completions)) { 1549 list_remove_head(&completions, buf_ptr, 1550 struct lpfc_dmabuf, list); 1551 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 1552 kfree(buf_ptr); 1553 } 1554 } 1555 phba->elsbuf_prev_cnt = phba->elsbuf_cnt; 1556 1557 /* If there is no heart beat outstanding, issue a heartbeat command */ 1558 if (phba->cfg_enable_hba_heartbeat) { 1559 /* If IOs are completing, no need to issue a MBX_HEARTBEAT */ 1560 spin_lock_irq(&phba->pport->work_port_lock); 1561 if (time_after(phba->last_completion_time + 1562 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL), 1563 jiffies)) { 1564 spin_unlock_irq(&phba->pport->work_port_lock); 1565 if (phba->hba_flag & HBA_HBEAT_INP) 1566 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT); 1567 else 1568 tmo = (1000 * LPFC_HB_MBOX_INTERVAL); 1569 goto out; 1570 } 1571 spin_unlock_irq(&phba->pport->work_port_lock); 1572 1573 /* Check if a MBX_HEARTBEAT is already in progress */ 1574 if (phba->hba_flag & HBA_HBEAT_INP) { 1575 /* 1576 * If heart beat timeout called with HBA_HBEAT_INP set 1577 * we need to give the hb mailbox cmd a chance to 1578 * complete or TMO. 
1579 */ 1580 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 1581 "0459 Adapter heartbeat still outstanding: " 1582 "last compl time was %d ms.\n", 1583 jiffies_to_msecs(jiffies 1584 - phba->last_completion_time)); 1585 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT); 1586 } else { 1587 if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) && 1588 (list_empty(&psli->mboxq))) { 1589 1590 retval = lpfc_issue_hb_mbox(phba); 1591 if (retval) { 1592 tmo = (1000 * LPFC_HB_MBOX_INTERVAL); 1593 goto out; 1594 } 1595 phba->skipped_hb = 0; 1596 } else if (time_before_eq(phba->last_completion_time, 1597 phba->skipped_hb)) { 1598 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 1599 "2857 Last completion time not " 1600 " updated in %d ms\n", 1601 jiffies_to_msecs(jiffies 1602 - phba->last_completion_time)); 1603 } else 1604 phba->skipped_hb = jiffies; 1605 1606 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT); 1607 goto out; 1608 } 1609 } else { 1610 /* Check to see if we want to force a MBX_HEARTBEAT */ 1611 if (phba->hba_flag & HBA_HBEAT_TMO) { 1612 retval = lpfc_issue_hb_mbox(phba); 1613 if (retval) 1614 tmo = (1000 * LPFC_HB_MBOX_INTERVAL); 1615 else 1616 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT); 1617 goto out; 1618 } 1619 tmo = (1000 * LPFC_HB_MBOX_INTERVAL); 1620 } 1621 out: 1622 mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(tmo)); 1623 } 1624 1625 /** 1626 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention 1627 * @phba: pointer to lpfc hba data structure. 1628 * 1629 * This routine is called to bring the HBA offline when HBA hardware error 1630 * other than Port Error 6 has been detected. 1631 **/ 1632 static void 1633 lpfc_offline_eratt(struct lpfc_hba *phba) 1634 { 1635 struct lpfc_sli *psli = &phba->sli; 1636 1637 spin_lock_irq(&phba->hbalock); 1638 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1639 spin_unlock_irq(&phba->hbalock); 1640 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1641 1642 lpfc_offline(phba); 1643 lpfc_reset_barrier(phba); 1644 spin_lock_irq(&phba->hbalock); 1645 lpfc_sli_brdreset(phba); 1646 spin_unlock_irq(&phba->hbalock); 1647 lpfc_hba_down_post(phba); 1648 lpfc_sli_brdready(phba, HS_MBRDY); 1649 lpfc_unblock_mgmt_io(phba); 1650 phba->link_state = LPFC_HBA_ERROR; 1651 return; 1652 } 1653 1654 /** 1655 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention 1656 * @phba: pointer to lpfc hba data structure. 1657 * 1658 * This routine is called to bring a SLI4 HBA offline when HBA hardware error 1659 * other than Port Error 6 has been detected. 1660 **/ 1661 void 1662 lpfc_sli4_offline_eratt(struct lpfc_hba *phba) 1663 { 1664 spin_lock_irq(&phba->hbalock); 1665 if (phba->link_state == LPFC_HBA_ERROR && 1666 test_bit(HBA_PCI_ERR, &phba->bit_flags)) { 1667 spin_unlock_irq(&phba->hbalock); 1668 return; 1669 } 1670 phba->link_state = LPFC_HBA_ERROR; 1671 spin_unlock_irq(&phba->hbalock); 1672 1673 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1674 lpfc_sli_flush_io_rings(phba); 1675 lpfc_offline(phba); 1676 lpfc_hba_down_post(phba); 1677 lpfc_unblock_mgmt_io(phba); 1678 } 1679 1680 /** 1681 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler 1682 * @phba: pointer to lpfc hba data structure. 1683 * 1684 * This routine is invoked to handle the deferred HBA hardware error 1685 * conditions. This type of error is indicated by HBA by setting ER1 1686 * and another ER bit in the host status register. The driver will 1687 * wait until the ER1 bit clears before handling the error condition. 
1688 **/ 1689 static void 1690 lpfc_handle_deferred_eratt(struct lpfc_hba *phba) 1691 { 1692 uint32_t old_host_status = phba->work_hs; 1693 struct lpfc_sli *psli = &phba->sli; 1694 1695 /* If the pci channel is offline, ignore possible errors, 1696 * since we cannot communicate with the pci card anyway. 1697 */ 1698 if (pci_channel_offline(phba->pcidev)) { 1699 spin_lock_irq(&phba->hbalock); 1700 phba->hba_flag &= ~DEFER_ERATT; 1701 spin_unlock_irq(&phba->hbalock); 1702 return; 1703 } 1704 1705 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1706 "0479 Deferred Adapter Hardware Error " 1707 "Data: x%x x%x x%x\n", 1708 phba->work_hs, phba->work_status[0], 1709 phba->work_status[1]); 1710 1711 spin_lock_irq(&phba->hbalock); 1712 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1713 spin_unlock_irq(&phba->hbalock); 1714 1715 1716 /* 1717 * Firmware stops when it triggred erratt. That could cause the I/Os 1718 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the 1719 * SCSI layer retry it after re-establishing link. 1720 */ 1721 lpfc_sli_abort_fcp_rings(phba); 1722 1723 /* 1724 * There was a firmware error. Take the hba offline and then 1725 * attempt to restart it. 1726 */ 1727 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 1728 lpfc_offline(phba); 1729 1730 /* Wait for the ER1 bit to clear.*/ 1731 while (phba->work_hs & HS_FFER1) { 1732 msleep(100); 1733 if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) { 1734 phba->work_hs = UNPLUG_ERR ; 1735 break; 1736 } 1737 /* If driver is unloading let the worker thread continue */ 1738 if (phba->pport->load_flag & FC_UNLOADING) { 1739 phba->work_hs = 0; 1740 break; 1741 } 1742 } 1743 1744 /* 1745 * This is to ptrotect against a race condition in which 1746 * first write to the host attention register clear the 1747 * host status register. 1748 */ 1749 if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING))) 1750 phba->work_hs = old_host_status & ~HS_FFER1; 1751 1752 spin_lock_irq(&phba->hbalock); 1753 phba->hba_flag &= ~DEFER_ERATT; 1754 spin_unlock_irq(&phba->hbalock); 1755 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8); 1756 phba->work_status[1] = readl(phba->MBslimaddr + 0xac); 1757 } 1758 1759 static void 1760 lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba) 1761 { 1762 struct lpfc_board_event_header board_event; 1763 struct Scsi_Host *shost; 1764 1765 board_event.event_type = FC_REG_BOARD_EVENT; 1766 board_event.subcategory = LPFC_EVENT_PORTINTERR; 1767 shost = lpfc_shost_from_vport(phba->pport); 1768 fc_host_post_vendor_event(shost, fc_get_event_number(), 1769 sizeof(board_event), 1770 (char *) &board_event, 1771 LPFC_NL_VENDOR_ID); 1772 } 1773 1774 /** 1775 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler 1776 * @phba: pointer to lpfc hba data structure. 1777 * 1778 * This routine is invoked to handle the following HBA hardware error 1779 * conditions: 1780 * 1 - HBA error attention interrupt 1781 * 2 - DMA ring index out of range 1782 * 3 - Mailbox command came back as unknown 1783 **/ 1784 static void 1785 lpfc_handle_eratt_s3(struct lpfc_hba *phba) 1786 { 1787 struct lpfc_vport *vport = phba->pport; 1788 struct lpfc_sli *psli = &phba->sli; 1789 uint32_t event_data; 1790 unsigned long temperature; 1791 struct temp_event temp_event_data; 1792 struct Scsi_Host *shost; 1793 1794 /* If the pci channel is offline, ignore possible errors, 1795 * since we cannot communicate with the pci card anyway. 
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it has triggered an erratt with
		 * HS_FFER6, which can cause I/Os to be dropped by the
		 * firmware. Error out the iocbs (I/Os) on the txcmplq and
		 * let the SCSI layer retry them after the link is
		 * re-established.
		 */
		lpfc_sli_abort_fcp_rings(phba);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
1883 */ 1884 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1885 "0457 Adapter Hardware Error " 1886 "Data: x%x x%x x%x\n", 1887 phba->work_hs, 1888 phba->work_status[0], phba->work_status[1]); 1889 1890 event_data = FC_REG_DUMP_EVENT; 1891 shost = lpfc_shost_from_vport(vport); 1892 fc_host_post_vendor_event(shost, fc_get_event_number(), 1893 sizeof(event_data), (char *) &event_data, 1894 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 1895 1896 lpfc_offline_eratt(phba); 1897 } 1898 return; 1899 } 1900 1901 /** 1902 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg 1903 * @phba: pointer to lpfc hba data structure. 1904 * @mbx_action: flag for mailbox shutdown action. 1905 * @en_rn_msg: send reset/port recovery message. 1906 * This routine is invoked to perform an SLI4 port PCI function reset in 1907 * response to port status register polling attention. It waits for port 1908 * status register (ERR, RDY, RN) bits before proceeding with function reset. 1909 * During this process, interrupt vectors are freed and later requested 1910 * for handling possible port resource change. 1911 **/ 1912 static int 1913 lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action, 1914 bool en_rn_msg) 1915 { 1916 int rc; 1917 uint32_t intr_mode; 1918 LPFC_MBOXQ_t *mboxq; 1919 1920 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= 1921 LPFC_SLI_INTF_IF_TYPE_2) { 1922 /* 1923 * On error status condition, driver need to wait for port 1924 * ready before performing reset. 1925 */ 1926 rc = lpfc_sli4_pdev_status_reg_wait(phba); 1927 if (rc) 1928 return rc; 1929 } 1930 1931 /* need reset: attempt for port recovery */ 1932 if (en_rn_msg) 1933 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 1934 "2887 Reset Needed: Attempting Port " 1935 "Recovery...\n"); 1936 1937 /* If we are no wait, the HBA has been reset and is not 1938 * functional, thus we should clear 1939 * (LPFC_SLI_ACTIVE | LPFC_SLI_MBOX_ACTIVE) flags. 1940 */ 1941 if (mbx_action == LPFC_MBX_NO_WAIT) { 1942 spin_lock_irq(&phba->hbalock); 1943 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE; 1944 if (phba->sli.mbox_active) { 1945 mboxq = phba->sli.mbox_active; 1946 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 1947 __lpfc_mbox_cmpl_put(phba, mboxq); 1948 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 1949 phba->sli.mbox_active = NULL; 1950 } 1951 spin_unlock_irq(&phba->hbalock); 1952 } 1953 1954 lpfc_offline_prep(phba, mbx_action); 1955 lpfc_sli_flush_io_rings(phba); 1956 lpfc_offline(phba); 1957 /* release interrupt for possible resource change */ 1958 lpfc_sli4_disable_intr(phba); 1959 rc = lpfc_sli_brdrestart(phba); 1960 if (rc) { 1961 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1962 "6309 Failed to restart board\n"); 1963 return rc; 1964 } 1965 /* request and enable interrupt */ 1966 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 1967 if (intr_mode == LPFC_INTR_ERROR) { 1968 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1969 "3175 Failed to enable interrupt\n"); 1970 return -EIO; 1971 } 1972 phba->intr_mode = intr_mode; 1973 rc = lpfc_online(phba); 1974 if (rc == 0) 1975 lpfc_unblock_mgmt_io(phba); 1976 1977 return rc; 1978 } 1979 1980 /** 1981 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler 1982 * @phba: pointer to lpfc hba data structure. 1983 * 1984 * This routine is invoked to handle the SLI4 HBA hardware error attention 1985 * conditions. 
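 *
 * The recovery path is chosen from the SLI_INTF register; for reference,
 * the interface type is read as in the body below:
 *
 *   if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
 *
 * IF_TYPE_0 waits for a recoverable-UE semaphore state, while IF_TYPE_2/6
 * decode the port status and error registers before requesting a PCI
 * function reset.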
1986 **/ 1987 static void 1988 lpfc_handle_eratt_s4(struct lpfc_hba *phba) 1989 { 1990 struct lpfc_vport *vport = phba->pport; 1991 uint32_t event_data; 1992 struct Scsi_Host *shost; 1993 uint32_t if_type; 1994 struct lpfc_register portstat_reg = {0}; 1995 uint32_t reg_err1, reg_err2; 1996 uint32_t uerrlo_reg, uemasklo_reg; 1997 uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2; 1998 bool en_rn_msg = true; 1999 struct temp_event temp_event_data; 2000 struct lpfc_register portsmphr_reg; 2001 int rc, i; 2002 2003 /* If the pci channel is offline, ignore possible errors, since 2004 * we cannot communicate with the pci card anyway. 2005 */ 2006 if (pci_channel_offline(phba->pcidev)) { 2007 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2008 "3166 pci channel is offline\n"); 2009 lpfc_sli_flush_io_rings(phba); 2010 return; 2011 } 2012 2013 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg)); 2014 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 2015 switch (if_type) { 2016 case LPFC_SLI_INTF_IF_TYPE_0: 2017 pci_rd_rc1 = lpfc_readl( 2018 phba->sli4_hba.u.if_type0.UERRLOregaddr, 2019 &uerrlo_reg); 2020 pci_rd_rc2 = lpfc_readl( 2021 phba->sli4_hba.u.if_type0.UEMASKLOregaddr, 2022 &uemasklo_reg); 2023 /* consider PCI bus read error as pci_channel_offline */ 2024 if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO) 2025 return; 2026 if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) { 2027 lpfc_sli4_offline_eratt(phba); 2028 return; 2029 } 2030 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2031 "7623 Checking UE recoverable"); 2032 2033 for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) { 2034 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 2035 &portsmphr_reg.word0)) 2036 continue; 2037 2038 smphr_port_status = bf_get(lpfc_port_smphr_port_status, 2039 &portsmphr_reg); 2040 if ((smphr_port_status & LPFC_PORT_SEM_MASK) == 2041 LPFC_PORT_SEM_UE_RECOVERABLE) 2042 break; 2043 /*Sleep for 1Sec, before checking SEMAPHORE */ 2044 msleep(1000); 2045 } 2046 2047 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2048 "4827 smphr_port_status x%x : Waited %dSec", 2049 smphr_port_status, i); 2050 2051 /* Recoverable UE, reset the HBA device */ 2052 if ((smphr_port_status & LPFC_PORT_SEM_MASK) == 2053 LPFC_PORT_SEM_UE_RECOVERABLE) { 2054 for (i = 0; i < 20; i++) { 2055 msleep(1000); 2056 if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 2057 &portsmphr_reg.word0) && 2058 (LPFC_POST_STAGE_PORT_READY == 2059 bf_get(lpfc_port_smphr_port_status, 2060 &portsmphr_reg))) { 2061 rc = lpfc_sli4_port_sta_fn_reset(phba, 2062 LPFC_MBX_NO_WAIT, en_rn_msg); 2063 if (rc == 0) 2064 return; 2065 lpfc_printf_log(phba, KERN_ERR, 2066 LOG_TRACE_EVENT, 2067 "4215 Failed to recover UE"); 2068 break; 2069 } 2070 } 2071 } 2072 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2073 "7624 Firmware not ready: Failing UE recovery," 2074 " waited %dSec", i); 2075 phba->link_state = LPFC_HBA_ERROR; 2076 break; 2077 2078 case LPFC_SLI_INTF_IF_TYPE_2: 2079 case LPFC_SLI_INTF_IF_TYPE_6: 2080 pci_rd_rc1 = lpfc_readl( 2081 phba->sli4_hba.u.if_type2.STATUSregaddr, 2082 &portstat_reg.word0); 2083 /* consider PCI bus read error as pci_channel_offline */ 2084 if (pci_rd_rc1 == -EIO) { 2085 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2086 "3151 PCI bus read access failure: x%x\n", 2087 readl(phba->sli4_hba.u.if_type2.STATUSregaddr)); 2088 lpfc_sli4_offline_eratt(phba); 2089 return; 2090 } 2091 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr); 2092 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr); 2093 if 
(bf_get(lpfc_sliport_status_oti, &portstat_reg)) { 2094 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2095 "2889 Port Overtemperature event, " 2096 "taking port offline Data: x%x x%x\n", 2097 reg_err1, reg_err2); 2098 2099 phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; 2100 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 2101 temp_event_data.event_code = LPFC_CRIT_TEMP; 2102 temp_event_data.data = 0xFFFFFFFF; 2103 2104 shost = lpfc_shost_from_vport(phba->pport); 2105 fc_host_post_vendor_event(shost, fc_get_event_number(), 2106 sizeof(temp_event_data), 2107 (char *)&temp_event_data, 2108 SCSI_NL_VID_TYPE_PCI 2109 | PCI_VENDOR_ID_EMULEX); 2110 2111 spin_lock_irq(&phba->hbalock); 2112 phba->over_temp_state = HBA_OVER_TEMP; 2113 spin_unlock_irq(&phba->hbalock); 2114 lpfc_sli4_offline_eratt(phba); 2115 return; 2116 } 2117 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 2118 reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) { 2119 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2120 "3143 Port Down: Firmware Update " 2121 "Detected\n"); 2122 en_rn_msg = false; 2123 } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 2124 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) 2125 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2126 "3144 Port Down: Debug Dump\n"); 2127 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 2128 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON) 2129 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2130 "3145 Port Down: Provisioning\n"); 2131 2132 /* If resets are disabled then leave the HBA alone and return */ 2133 if (!phba->cfg_enable_hba_reset) 2134 return; 2135 2136 /* Check port status register for function reset */ 2137 rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT, 2138 en_rn_msg); 2139 if (rc == 0) { 2140 /* don't report event on forced debug dump */ 2141 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 2142 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) 2143 return; 2144 else 2145 break; 2146 } 2147 /* fall through for not able to recover */ 2148 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2149 "3152 Unrecoverable error\n"); 2150 phba->link_state = LPFC_HBA_ERROR; 2151 break; 2152 case LPFC_SLI_INTF_IF_TYPE_1: 2153 default: 2154 break; 2155 } 2156 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2157 "3123 Report dump event to upper layer\n"); 2158 /* Send an internal error event to mgmt application */ 2159 lpfc_board_errevt_to_mgmt(phba); 2160 2161 event_data = FC_REG_DUMP_EVENT; 2162 shost = lpfc_shost_from_vport(vport); 2163 fc_host_post_vendor_event(shost, fc_get_event_number(), 2164 sizeof(event_data), (char *) &event_data, 2165 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 2166 } 2167 2168 /** 2169 * lpfc_handle_eratt - Wrapper func for handling hba error attention 2170 * @phba: pointer to lpfc HBA data structure. 2171 * 2172 * This routine wraps the actual SLI3 or SLI4 hba error attention handling 2173 * routine from the API jump table function pointer from the lpfc_hba struct. 2174 * 2175 * Return codes 2176 * 0 - success. 2177 * Any other value - error. 2178 **/ 2179 void 2180 lpfc_handle_eratt(struct lpfc_hba *phba) 2181 { 2182 (*phba->lpfc_handle_eratt)(phba); 2183 } 2184 2185 /** 2186 * lpfc_handle_latt - The HBA link event handler 2187 * @phba: pointer to lpfc hba data structure. 2188 * 2189 * This routine is invoked from the worker thread to handle a HBA host 2190 * attention link event. SLI3 only. 
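 *
 * On success the link attention is acknowledged by writing HA_LATT back to
 * the host attention register, e.g. (pattern used in the body below):
 *
 *   writel(HA_LATT, phba->HAregaddr);
 *   readl(phba->HAregaddr);    // flush the posted write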
2191 **/ 2192 void 2193 lpfc_handle_latt(struct lpfc_hba *phba) 2194 { 2195 struct lpfc_vport *vport = phba->pport; 2196 struct lpfc_sli *psli = &phba->sli; 2197 LPFC_MBOXQ_t *pmb; 2198 volatile uint32_t control; 2199 int rc = 0; 2200 2201 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2202 if (!pmb) { 2203 rc = 1; 2204 goto lpfc_handle_latt_err_exit; 2205 } 2206 2207 rc = lpfc_mbox_rsrc_prep(phba, pmb); 2208 if (rc) { 2209 rc = 2; 2210 mempool_free(pmb, phba->mbox_mem_pool); 2211 goto lpfc_handle_latt_err_exit; 2212 } 2213 2214 /* Cleanup any outstanding ELS commands */ 2215 lpfc_els_flush_all_cmd(phba); 2216 psli->slistat.link_event++; 2217 lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf); 2218 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 2219 pmb->vport = vport; 2220 /* Block ELS IOCBs until we have processed this mbox command */ 2221 phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 2222 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT); 2223 if (rc == MBX_NOT_FINISHED) { 2224 rc = 4; 2225 goto lpfc_handle_latt_free_mbuf; 2226 } 2227 2228 /* Clear Link Attention in HA REG */ 2229 spin_lock_irq(&phba->hbalock); 2230 writel(HA_LATT, phba->HAregaddr); 2231 readl(phba->HAregaddr); /* flush */ 2232 spin_unlock_irq(&phba->hbalock); 2233 2234 return; 2235 2236 lpfc_handle_latt_free_mbuf: 2237 phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT; 2238 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); 2239 lpfc_handle_latt_err_exit: 2240 /* Enable Link attention interrupts */ 2241 spin_lock_irq(&phba->hbalock); 2242 psli->sli_flag |= LPFC_PROCESS_LA; 2243 control = readl(phba->HCregaddr); 2244 control |= HC_LAINT_ENA; 2245 writel(control, phba->HCregaddr); 2246 readl(phba->HCregaddr); /* flush */ 2247 2248 /* Clear Link Attention in HA REG */ 2249 writel(HA_LATT, phba->HAregaddr); 2250 readl(phba->HAregaddr); /* flush */ 2251 spin_unlock_irq(&phba->hbalock); 2252 lpfc_linkdown(phba); 2253 phba->link_state = LPFC_HBA_ERROR; 2254 2255 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2256 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc); 2257 2258 return; 2259 } 2260 2261 static void 2262 lpfc_fill_vpd(struct lpfc_hba *phba, uint8_t *vpd, int length, int *pindex) 2263 { 2264 int i, j; 2265 2266 while (length > 0) { 2267 /* Look for Serial Number */ 2268 if ((vpd[*pindex] == 'S') && (vpd[*pindex + 1] == 'N')) { 2269 *pindex += 2; 2270 i = vpd[*pindex]; 2271 *pindex += 1; 2272 j = 0; 2273 length -= (3+i); 2274 while (i--) { 2275 phba->SerialNumber[j++] = vpd[(*pindex)++]; 2276 if (j == 31) 2277 break; 2278 } 2279 phba->SerialNumber[j] = 0; 2280 continue; 2281 } else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '1')) { 2282 phba->vpd_flag |= VPD_MODEL_DESC; 2283 *pindex += 2; 2284 i = vpd[*pindex]; 2285 *pindex += 1; 2286 j = 0; 2287 length -= (3+i); 2288 while (i--) { 2289 phba->ModelDesc[j++] = vpd[(*pindex)++]; 2290 if (j == 255) 2291 break; 2292 } 2293 phba->ModelDesc[j] = 0; 2294 continue; 2295 } else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '2')) { 2296 phba->vpd_flag |= VPD_MODEL_NAME; 2297 *pindex += 2; 2298 i = vpd[*pindex]; 2299 *pindex += 1; 2300 j = 0; 2301 length -= (3+i); 2302 while (i--) { 2303 phba->ModelName[j++] = vpd[(*pindex)++]; 2304 if (j == 79) 2305 break; 2306 } 2307 phba->ModelName[j] = 0; 2308 continue; 2309 } else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '3')) { 2310 phba->vpd_flag |= VPD_PROGRAM_TYPE; 2311 *pindex += 2; 2312 i = vpd[*pindex]; 2313 *pindex += 1; 2314 j = 0; 2315 
length -= (3+i); 2316 while (i--) { 2317 phba->ProgramType[j++] = vpd[(*pindex)++]; 2318 if (j == 255) 2319 break; 2320 } 2321 phba->ProgramType[j] = 0; 2322 continue; 2323 } else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '4')) { 2324 phba->vpd_flag |= VPD_PORT; 2325 *pindex += 2; 2326 i = vpd[*pindex]; 2327 *pindex += 1; 2328 j = 0; 2329 length -= (3 + i); 2330 while (i--) { 2331 if ((phba->sli_rev == LPFC_SLI_REV4) && 2332 (phba->sli4_hba.pport_name_sta == 2333 LPFC_SLI4_PPNAME_GET)) { 2334 j++; 2335 (*pindex)++; 2336 } else 2337 phba->Port[j++] = vpd[(*pindex)++]; 2338 if (j == 19) 2339 break; 2340 } 2341 if ((phba->sli_rev != LPFC_SLI_REV4) || 2342 (phba->sli4_hba.pport_name_sta == 2343 LPFC_SLI4_PPNAME_NON)) 2344 phba->Port[j] = 0; 2345 continue; 2346 } else { 2347 *pindex += 2; 2348 i = vpd[*pindex]; 2349 *pindex += 1; 2350 *pindex += i; 2351 length -= (3 + i); 2352 } 2353 } 2354 } 2355 2356 /** 2357 * lpfc_parse_vpd - Parse VPD (Vital Product Data) 2358 * @phba: pointer to lpfc hba data structure. 2359 * @vpd: pointer to the vital product data. 2360 * @len: length of the vital product data in bytes. 2361 * 2362 * This routine parses the Vital Product Data (VPD). The VPD is treated as 2363 * an array of characters. In this routine, the ModelName, ProgramType, and 2364 * ModelDesc, etc. fields of the phba data structure will be populated. 2365 * 2366 * Return codes 2367 * 0 - pointer to the VPD passed in is NULL 2368 * 1 - success 2369 **/ 2370 int 2371 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) 2372 { 2373 uint8_t lenlo, lenhi; 2374 int Length; 2375 int i; 2376 int finished = 0; 2377 int index = 0; 2378 2379 if (!vpd) 2380 return 0; 2381 2382 /* Vital Product */ 2383 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2384 "0455 Vital Product Data: x%x x%x x%x x%x\n", 2385 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2], 2386 (uint32_t) vpd[3]); 2387 while (!finished && (index < (len - 4))) { 2388 switch (vpd[index]) { 2389 case 0x82: 2390 case 0x91: 2391 index += 1; 2392 lenlo = vpd[index]; 2393 index += 1; 2394 lenhi = vpd[index]; 2395 index += 1; 2396 i = ((((unsigned short)lenhi) << 8) + lenlo); 2397 index += i; 2398 break; 2399 case 0x90: 2400 index += 1; 2401 lenlo = vpd[index]; 2402 index += 1; 2403 lenhi = vpd[index]; 2404 index += 1; 2405 Length = ((((unsigned short)lenhi) << 8) + lenlo); 2406 if (Length > len - index) 2407 Length = len - index; 2408 2409 lpfc_fill_vpd(phba, vpd, Length, &index); 2410 finished = 0; 2411 break; 2412 case 0x78: 2413 finished = 1; 2414 break; 2415 default: 2416 index ++; 2417 break; 2418 } 2419 } 2420 2421 return(1); 2422 } 2423 2424 /** 2425 * lpfc_get_atto_model_desc - Retrieve ATTO HBA device model name and description 2426 * @phba: pointer to lpfc hba data structure. 2427 * @mdp: pointer to the data structure to hold the derived model name. 2428 * @descp: pointer to the data structure to hold the derived description. 2429 * 2430 * This routine retrieves HBA's description based on its registered PCI device 2431 * ID. The @descp passed into this function points to an array of 256 chars. It 2432 * shall be returned with the model name, maximum speed, and the host bus type. 2433 * The @mdp passed into this function points to an array of 80 chars. When the 2434 * function returns, the @mdp will be filled with the model name. 
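 *
 * The model string is selected from the PCI subsystem device ID; ThunderLink
 * parts additionally pick the "ThunderLink FC " prefix for the description.
 * Illustrative call only (buffer sizes are the caller's responsibility):
 *
 *   uint8_t model[80] = "", desc[256] = "";
 *   lpfc_get_atto_model_desc(phba, model, desc);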
2435 **/ 2436 static void 2437 lpfc_get_atto_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) 2438 { 2439 uint16_t sub_dev_id = phba->pcidev->subsystem_device; 2440 char *model = "<Unknown>"; 2441 int tbolt = 0; 2442 2443 switch (sub_dev_id) { 2444 case PCI_DEVICE_ID_CLRY_161E: 2445 model = "161E"; 2446 break; 2447 case PCI_DEVICE_ID_CLRY_162E: 2448 model = "162E"; 2449 break; 2450 case PCI_DEVICE_ID_CLRY_164E: 2451 model = "164E"; 2452 break; 2453 case PCI_DEVICE_ID_CLRY_161P: 2454 model = "161P"; 2455 break; 2456 case PCI_DEVICE_ID_CLRY_162P: 2457 model = "162P"; 2458 break; 2459 case PCI_DEVICE_ID_CLRY_164P: 2460 model = "164P"; 2461 break; 2462 case PCI_DEVICE_ID_CLRY_321E: 2463 model = "321E"; 2464 break; 2465 case PCI_DEVICE_ID_CLRY_322E: 2466 model = "322E"; 2467 break; 2468 case PCI_DEVICE_ID_CLRY_324E: 2469 model = "324E"; 2470 break; 2471 case PCI_DEVICE_ID_CLRY_321P: 2472 model = "321P"; 2473 break; 2474 case PCI_DEVICE_ID_CLRY_322P: 2475 model = "322P"; 2476 break; 2477 case PCI_DEVICE_ID_CLRY_324P: 2478 model = "324P"; 2479 break; 2480 case PCI_DEVICE_ID_TLFC_2XX2: 2481 model = "2XX2"; 2482 tbolt = 1; 2483 break; 2484 case PCI_DEVICE_ID_TLFC_3162: 2485 model = "3162"; 2486 tbolt = 1; 2487 break; 2488 case PCI_DEVICE_ID_TLFC_3322: 2489 model = "3322"; 2490 tbolt = 1; 2491 break; 2492 default: 2493 model = "Unknown"; 2494 break; 2495 } 2496 2497 if (mdp && mdp[0] == '\0') 2498 snprintf(mdp, 79, "%s", model); 2499 2500 if (descp && descp[0] == '\0') 2501 snprintf(descp, 255, 2502 "ATTO %s%s, Fibre Channel Adapter Initiator, Port %s", 2503 (tbolt) ? "ThunderLink FC " : "Celerity FC-", 2504 model, 2505 phba->Port); 2506 } 2507 2508 /** 2509 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description 2510 * @phba: pointer to lpfc hba data structure. 2511 * @mdp: pointer to the data structure to hold the derived model name. 2512 * @descp: pointer to the data structure to hold the derived description. 2513 * 2514 * This routine retrieves HBA's description based on its registered PCI device 2515 * ID. The @descp passed into this function points to an array of 256 chars. It 2516 * shall be returned with the model name, maximum speed, and the host bus type. 2517 * The @mdp passed into this function points to an array of 80 chars. When the 2518 * function returns, the @mdp will be filled with the model name. 
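 *
 * Both buffers are only filled in when they arrive empty, so repeated calls
 * do not overwrite an already-derived model name or description.
 * Illustrative use (sketch only, sizes per the note above):
 *
 *   uint8_t model[80] = "", desc[256] = "";
 *   lpfc_get_hba_model_desc(phba, model, desc);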
2519 **/ 2520 static void 2521 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) 2522 { 2523 lpfc_vpd_t *vp; 2524 uint16_t dev_id = phba->pcidev->device; 2525 int max_speed; 2526 int GE = 0; 2527 int oneConnect = 0; /* default is not a oneConnect */ 2528 struct { 2529 char *name; 2530 char *bus; 2531 char *function; 2532 } m = {"<Unknown>", "", ""}; 2533 2534 if (mdp && mdp[0] != '\0' 2535 && descp && descp[0] != '\0') 2536 return; 2537 2538 if (phba->pcidev->vendor == PCI_VENDOR_ID_ATTO) { 2539 lpfc_get_atto_model_desc(phba, mdp, descp); 2540 return; 2541 } 2542 2543 if (phba->lmt & LMT_64Gb) 2544 max_speed = 64; 2545 else if (phba->lmt & LMT_32Gb) 2546 max_speed = 32; 2547 else if (phba->lmt & LMT_16Gb) 2548 max_speed = 16; 2549 else if (phba->lmt & LMT_10Gb) 2550 max_speed = 10; 2551 else if (phba->lmt & LMT_8Gb) 2552 max_speed = 8; 2553 else if (phba->lmt & LMT_4Gb) 2554 max_speed = 4; 2555 else if (phba->lmt & LMT_2Gb) 2556 max_speed = 2; 2557 else if (phba->lmt & LMT_1Gb) 2558 max_speed = 1; 2559 else 2560 max_speed = 0; 2561 2562 vp = &phba->vpd; 2563 2564 switch (dev_id) { 2565 case PCI_DEVICE_ID_FIREFLY: 2566 m = (typeof(m)){"LP6000", "PCI", 2567 "Obsolete, Unsupported Fibre Channel Adapter"}; 2568 break; 2569 case PCI_DEVICE_ID_SUPERFLY: 2570 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) 2571 m = (typeof(m)){"LP7000", "PCI", ""}; 2572 else 2573 m = (typeof(m)){"LP7000E", "PCI", ""}; 2574 m.function = "Obsolete, Unsupported Fibre Channel Adapter"; 2575 break; 2576 case PCI_DEVICE_ID_DRAGONFLY: 2577 m = (typeof(m)){"LP8000", "PCI", 2578 "Obsolete, Unsupported Fibre Channel Adapter"}; 2579 break; 2580 case PCI_DEVICE_ID_CENTAUR: 2581 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) 2582 m = (typeof(m)){"LP9002", "PCI", ""}; 2583 else 2584 m = (typeof(m)){"LP9000", "PCI", ""}; 2585 m.function = "Obsolete, Unsupported Fibre Channel Adapter"; 2586 break; 2587 case PCI_DEVICE_ID_RFLY: 2588 m = (typeof(m)){"LP952", "PCI", 2589 "Obsolete, Unsupported Fibre Channel Adapter"}; 2590 break; 2591 case PCI_DEVICE_ID_PEGASUS: 2592 m = (typeof(m)){"LP9802", "PCI-X", 2593 "Obsolete, Unsupported Fibre Channel Adapter"}; 2594 break; 2595 case PCI_DEVICE_ID_THOR: 2596 m = (typeof(m)){"LP10000", "PCI-X", 2597 "Obsolete, Unsupported Fibre Channel Adapter"}; 2598 break; 2599 case PCI_DEVICE_ID_VIPER: 2600 m = (typeof(m)){"LPX1000", "PCI-X", 2601 "Obsolete, Unsupported Fibre Channel Adapter"}; 2602 break; 2603 case PCI_DEVICE_ID_PFLY: 2604 m = (typeof(m)){"LP982", "PCI-X", 2605 "Obsolete, Unsupported Fibre Channel Adapter"}; 2606 break; 2607 case PCI_DEVICE_ID_TFLY: 2608 m = (typeof(m)){"LP1050", "PCI-X", 2609 "Obsolete, Unsupported Fibre Channel Adapter"}; 2610 break; 2611 case PCI_DEVICE_ID_HELIOS: 2612 m = (typeof(m)){"LP11000", "PCI-X2", 2613 "Obsolete, Unsupported Fibre Channel Adapter"}; 2614 break; 2615 case PCI_DEVICE_ID_HELIOS_SCSP: 2616 m = (typeof(m)){"LP11000-SP", "PCI-X2", 2617 "Obsolete, Unsupported Fibre Channel Adapter"}; 2618 break; 2619 case PCI_DEVICE_ID_HELIOS_DCSP: 2620 m = (typeof(m)){"LP11002-SP", "PCI-X2", 2621 "Obsolete, Unsupported Fibre Channel Adapter"}; 2622 break; 2623 case PCI_DEVICE_ID_NEPTUNE: 2624 m = (typeof(m)){"LPe1000", "PCIe", 2625 "Obsolete, Unsupported Fibre Channel Adapter"}; 2626 break; 2627 case PCI_DEVICE_ID_NEPTUNE_SCSP: 2628 m = (typeof(m)){"LPe1000-SP", "PCIe", 2629 "Obsolete, Unsupported Fibre Channel Adapter"}; 2630 break; 2631 case PCI_DEVICE_ID_NEPTUNE_DCSP: 2632 m = (typeof(m)){"LPe1002-SP", "PCIe", 2633 "Obsolete, 
Unsupported Fibre Channel Adapter"}; 2634 break; 2635 case PCI_DEVICE_ID_BMID: 2636 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"}; 2637 break; 2638 case PCI_DEVICE_ID_BSMB: 2639 m = (typeof(m)){"LP111", "PCI-X2", 2640 "Obsolete, Unsupported Fibre Channel Adapter"}; 2641 break; 2642 case PCI_DEVICE_ID_ZEPHYR: 2643 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 2644 break; 2645 case PCI_DEVICE_ID_ZEPHYR_SCSP: 2646 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 2647 break; 2648 case PCI_DEVICE_ID_ZEPHYR_DCSP: 2649 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"}; 2650 GE = 1; 2651 break; 2652 case PCI_DEVICE_ID_ZMID: 2653 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"}; 2654 break; 2655 case PCI_DEVICE_ID_ZSMB: 2656 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"}; 2657 break; 2658 case PCI_DEVICE_ID_LP101: 2659 m = (typeof(m)){"LP101", "PCI-X", 2660 "Obsolete, Unsupported Fibre Channel Adapter"}; 2661 break; 2662 case PCI_DEVICE_ID_LP10000S: 2663 m = (typeof(m)){"LP10000-S", "PCI", 2664 "Obsolete, Unsupported Fibre Channel Adapter"}; 2665 break; 2666 case PCI_DEVICE_ID_LP11000S: 2667 m = (typeof(m)){"LP11000-S", "PCI-X2", 2668 "Obsolete, Unsupported Fibre Channel Adapter"}; 2669 break; 2670 case PCI_DEVICE_ID_LPE11000S: 2671 m = (typeof(m)){"LPe11000-S", "PCIe", 2672 "Obsolete, Unsupported Fibre Channel Adapter"}; 2673 break; 2674 case PCI_DEVICE_ID_SAT: 2675 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; 2676 break; 2677 case PCI_DEVICE_ID_SAT_MID: 2678 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"}; 2679 break; 2680 case PCI_DEVICE_ID_SAT_SMB: 2681 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"}; 2682 break; 2683 case PCI_DEVICE_ID_SAT_DCSP: 2684 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"}; 2685 break; 2686 case PCI_DEVICE_ID_SAT_SCSP: 2687 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"}; 2688 break; 2689 case PCI_DEVICE_ID_SAT_S: 2690 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"}; 2691 break; 2692 case PCI_DEVICE_ID_PROTEUS_VF: 2693 m = (typeof(m)){"LPev12000", "PCIe IOV", 2694 "Obsolete, Unsupported Fibre Channel Adapter"}; 2695 break; 2696 case PCI_DEVICE_ID_PROTEUS_PF: 2697 m = (typeof(m)){"LPev12000", "PCIe IOV", 2698 "Obsolete, Unsupported Fibre Channel Adapter"}; 2699 break; 2700 case PCI_DEVICE_ID_PROTEUS_S: 2701 m = (typeof(m)){"LPemv12002-S", "PCIe IOV", 2702 "Obsolete, Unsupported Fibre Channel Adapter"}; 2703 break; 2704 case PCI_DEVICE_ID_TIGERSHARK: 2705 oneConnect = 1; 2706 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"}; 2707 break; 2708 case PCI_DEVICE_ID_TOMCAT: 2709 oneConnect = 1; 2710 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"}; 2711 break; 2712 case PCI_DEVICE_ID_FALCON: 2713 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe", 2714 "EmulexSecure Fibre"}; 2715 break; 2716 case PCI_DEVICE_ID_BALIUS: 2717 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O", 2718 "Obsolete, Unsupported Fibre Channel Adapter"}; 2719 break; 2720 case PCI_DEVICE_ID_LANCER_FC: 2721 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"}; 2722 break; 2723 case PCI_DEVICE_ID_LANCER_FC_VF: 2724 m = (typeof(m)){"LPe16000", "PCIe", 2725 "Obsolete, Unsupported Fibre Channel Adapter"}; 2726 break; 2727 case PCI_DEVICE_ID_LANCER_FCOE: 2728 oneConnect = 1; 2729 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"}; 2730 break; 2731 case PCI_DEVICE_ID_LANCER_FCOE_VF: 2732 oneConnect = 1; 2733 m = (typeof(m)){"OCe15100", "PCIe", 2734 "Obsolete, Unsupported FCoE"}; 2735 
break; 2736 case PCI_DEVICE_ID_LANCER_G6_FC: 2737 m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"}; 2738 break; 2739 case PCI_DEVICE_ID_LANCER_G7_FC: 2740 m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"}; 2741 break; 2742 case PCI_DEVICE_ID_LANCER_G7P_FC: 2743 m = (typeof(m)){"LPe38000", "PCIe", "Fibre Channel Adapter"}; 2744 break; 2745 case PCI_DEVICE_ID_SKYHAWK: 2746 case PCI_DEVICE_ID_SKYHAWK_VF: 2747 oneConnect = 1; 2748 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"}; 2749 break; 2750 default: 2751 m = (typeof(m)){"Unknown", "", ""}; 2752 break; 2753 } 2754 2755 if (mdp && mdp[0] == '\0') 2756 snprintf(mdp, 79,"%s", m.name); 2757 /* 2758 * oneConnect hba requires special processing, they are all initiators 2759 * and we put the port number on the end 2760 */ 2761 if (descp && descp[0] == '\0') { 2762 if (oneConnect) 2763 snprintf(descp, 255, 2764 "Emulex OneConnect %s, %s Initiator %s", 2765 m.name, m.function, 2766 phba->Port); 2767 else if (max_speed == 0) 2768 snprintf(descp, 255, 2769 "Emulex %s %s %s", 2770 m.name, m.bus, m.function); 2771 else 2772 snprintf(descp, 255, 2773 "Emulex %s %d%s %s %s", 2774 m.name, max_speed, (GE) ? "GE" : "Gb", 2775 m.bus, m.function); 2776 } 2777 } 2778 2779 /** 2780 * lpfc_sli3_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring 2781 * @phba: pointer to lpfc hba data structure. 2782 * @pring: pointer to a IOCB ring. 2783 * @cnt: the number of IOCBs to be posted to the IOCB ring. 2784 * 2785 * This routine posts a given number of IOCBs with the associated DMA buffer 2786 * descriptors specified by the cnt argument to the given IOCB ring. 2787 * 2788 * Return codes 2789 * The number of IOCBs NOT able to be posted to the IOCB ring. 2790 **/ 2791 int 2792 lpfc_sli3_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) 2793 { 2794 IOCB_t *icmd; 2795 struct lpfc_iocbq *iocb; 2796 struct lpfc_dmabuf *mp1, *mp2; 2797 2798 cnt += pring->missbufcnt; 2799 2800 /* While there are buffers to post */ 2801 while (cnt > 0) { 2802 /* Allocate buffer for command iocb */ 2803 iocb = lpfc_sli_get_iocbq(phba); 2804 if (iocb == NULL) { 2805 pring->missbufcnt = cnt; 2806 return cnt; 2807 } 2808 icmd = &iocb->iocb; 2809 2810 /* 2 buffers can be posted per command */ 2811 /* Allocate buffer to post */ 2812 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2813 if (mp1) 2814 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys); 2815 if (!mp1 || !mp1->virt) { 2816 kfree(mp1); 2817 lpfc_sli_release_iocbq(phba, iocb); 2818 pring->missbufcnt = cnt; 2819 return cnt; 2820 } 2821 2822 INIT_LIST_HEAD(&mp1->list); 2823 /* Allocate buffer to post */ 2824 if (cnt > 1) { 2825 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2826 if (mp2) 2827 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 2828 &mp2->phys); 2829 if (!mp2 || !mp2->virt) { 2830 kfree(mp2); 2831 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2832 kfree(mp1); 2833 lpfc_sli_release_iocbq(phba, iocb); 2834 pring->missbufcnt = cnt; 2835 return cnt; 2836 } 2837 2838 INIT_LIST_HEAD(&mp2->list); 2839 } else { 2840 mp2 = NULL; 2841 } 2842 2843 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys); 2844 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys); 2845 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE; 2846 icmd->ulpBdeCount = 1; 2847 cnt--; 2848 if (mp2) { 2849 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys); 2850 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys); 2851 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE; 2852 cnt--; 2853 icmd->ulpBdeCount = 
2;
		}

		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
		icmd->ulpLe = 1;

		if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
		    IOCB_ERROR) {
			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
			kfree(mp1);
			cnt++;
			if (mp2) {
				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
				kfree(mp2);
				cnt++;
			}
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}
		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
		if (mp2)
			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
	}
	pring->missbufcnt = 0;
	return 0;
}

/**
 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine posts initial receive IOCB buffers to the ELS ring. The
 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
 * set to 64 IOCBs. SLI3 only.
 *
 * Return codes
 *   0 - success (currently always success)
 **/
static int
lpfc_post_rcv_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	/* Ring 0, ELS / CT buffers */
	lpfc_sli3_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
	/* Ring 2 - FCP no buffers needed */

	return 0;
}

#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))

/**
 * lpfc_sha_init - Set up initial array of hash table entries
 * @HashResultPointer: pointer to an array as hash table.
 *
 * This routine sets up the initial values to the array of hash table entries
 * for the LC HBAs.
 **/
static void
lpfc_sha_init(uint32_t * HashResultPointer)
{
	HashResultPointer[0] = 0x67452301;
	HashResultPointer[1] = 0xEFCDAB89;
	HashResultPointer[2] = 0x98BADCFE;
	HashResultPointer[3] = 0x10325476;
	HashResultPointer[4] = 0xC3D2E1F0;
}

/**
 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
 * @HashResultPointer: pointer to an initial/result hash table.
 * @HashWorkingPointer: pointer to a working hash table.
 *
 * This routine iterates an initial hash table pointed to by
 * @HashResultPointer with the values from the working hash table pointed to
 * by @HashWorkingPointer. The results are put back into the initial hash
 * table, returned through @HashResultPointer as the result hash table.
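 *
 * S(N, V) above is a 32-bit rotate-left of V by N bits, so for example
 * S(5, 0x80000000) == 0x00000010. The constants 0x5A827999, 0x6ED9EBA1,
 * 0x8F1BBCDC and 0xCA62C1D6 are the standard SHA-1 round constants; the
 * loop below runs the usual 80 rounds over the 80-entry working array.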
2932 **/ 2933 static void 2934 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer) 2935 { 2936 int t; 2937 uint32_t TEMP; 2938 uint32_t A, B, C, D, E; 2939 t = 16; 2940 do { 2941 HashWorkingPointer[t] = 2942 S(1, 2943 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 2944 8] ^ 2945 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]); 2946 } while (++t <= 79); 2947 t = 0; 2948 A = HashResultPointer[0]; 2949 B = HashResultPointer[1]; 2950 C = HashResultPointer[2]; 2951 D = HashResultPointer[3]; 2952 E = HashResultPointer[4]; 2953 2954 do { 2955 if (t < 20) { 2956 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999; 2957 } else if (t < 40) { 2958 TEMP = (B ^ C ^ D) + 0x6ED9EBA1; 2959 } else if (t < 60) { 2960 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC; 2961 } else { 2962 TEMP = (B ^ C ^ D) + 0xCA62C1D6; 2963 } 2964 TEMP += S(5, A) + E + HashWorkingPointer[t]; 2965 E = D; 2966 D = C; 2967 C = S(30, B); 2968 B = A; 2969 A = TEMP; 2970 } while (++t <= 79); 2971 2972 HashResultPointer[0] += A; 2973 HashResultPointer[1] += B; 2974 HashResultPointer[2] += C; 2975 HashResultPointer[3] += D; 2976 HashResultPointer[4] += E; 2977 2978 } 2979 2980 /** 2981 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA 2982 * @RandomChallenge: pointer to the entry of host challenge random number array. 2983 * @HashWorking: pointer to the entry of the working hash array. 2984 * 2985 * This routine calculates the working hash array referred by @HashWorking 2986 * from the challenge random numbers associated with the host, referred by 2987 * @RandomChallenge. The result is put into the entry of the working hash 2988 * array and returned by reference through @HashWorking. 2989 **/ 2990 static void 2991 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking) 2992 { 2993 *HashWorking = (*RandomChallenge ^ *HashWorking); 2994 } 2995 2996 /** 2997 * lpfc_hba_init - Perform special handling for LC HBA initialization 2998 * @phba: pointer to lpfc hba data structure. 2999 * @hbainit: pointer to an array of unsigned 32-bit integers. 3000 * 3001 * This routine performs the special handling for LC HBA initialization. 3002 **/ 3003 void 3004 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) 3005 { 3006 int t; 3007 uint32_t *HashWorking; 3008 uint32_t *pwwnn = (uint32_t *) phba->wwnn; 3009 3010 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL); 3011 if (!HashWorking) 3012 return; 3013 3014 HashWorking[0] = HashWorking[78] = *pwwnn++; 3015 HashWorking[1] = HashWorking[79] = *pwwnn; 3016 3017 for (t = 0; t < 7; t++) 3018 lpfc_challenge_key(phba->RandomData + t, HashWorking + t); 3019 3020 lpfc_sha_init(hbainit); 3021 lpfc_sha_iterate(hbainit, HashWorking); 3022 kfree(HashWorking); 3023 } 3024 3025 /** 3026 * lpfc_cleanup - Performs vport cleanups before deleting a vport 3027 * @vport: pointer to a virtual N_Port data structure. 3028 * 3029 * This routine performs the necessary cleanups before deleting the @vport. 3030 * It invokes the discovery state machine to perform necessary state 3031 * transitions and to release the ndlps associated with the @vport. Note, 3032 * the physical port is treated as @vport 0. 
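 *
 * The node list drain at the end of this routine is bounded: it polls every
 * 10 ms and gives up after roughly 3000 iterations (about 30 seconds),
 * logging any ndlps that are still outstanding before breaking out.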
3033 **/ 3034 void 3035 lpfc_cleanup(struct lpfc_vport *vport) 3036 { 3037 struct lpfc_hba *phba = vport->phba; 3038 struct lpfc_nodelist *ndlp, *next_ndlp; 3039 int i = 0; 3040 3041 if (phba->link_state > LPFC_LINK_DOWN) 3042 lpfc_port_link_failure(vport); 3043 3044 /* Clean up VMID resources */ 3045 if (lpfc_is_vmid_enabled(phba)) 3046 lpfc_vmid_vport_cleanup(vport); 3047 3048 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 3049 if (vport->port_type != LPFC_PHYSICAL_PORT && 3050 ndlp->nlp_DID == Fabric_DID) { 3051 /* Just free up ndlp with Fabric_DID for vports */ 3052 lpfc_nlp_put(ndlp); 3053 continue; 3054 } 3055 3056 if (ndlp->nlp_DID == Fabric_Cntl_DID && 3057 ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 3058 lpfc_nlp_put(ndlp); 3059 continue; 3060 } 3061 3062 /* Fabric Ports not in UNMAPPED state are cleaned up in the 3063 * DEVICE_RM event. 3064 */ 3065 if (ndlp->nlp_type & NLP_FABRIC && 3066 ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) 3067 lpfc_disc_state_machine(vport, ndlp, NULL, 3068 NLP_EVT_DEVICE_RECOVERY); 3069 3070 if (!(ndlp->fc4_xpt_flags & (NVME_XPT_REGD|SCSI_XPT_REGD))) 3071 lpfc_disc_state_machine(vport, ndlp, NULL, 3072 NLP_EVT_DEVICE_RM); 3073 } 3074 3075 /* This is a special case flush to return all 3076 * IOs before entering this loop. There are 3077 * two points in the code where a flush is 3078 * avoided if the FC_UNLOADING flag is set. 3079 * one is in the multipool destroy, 3080 * (this prevents a crash) and the other is 3081 * in the nvme abort handler, ( also prevents 3082 * a crash). Both of these exceptions are 3083 * cases where the slot is still accessible. 3084 * The flush here is only when the pci slot 3085 * is offline. 3086 */ 3087 if (vport->load_flag & FC_UNLOADING && 3088 pci_channel_offline(phba->pcidev)) 3089 lpfc_sli_flush_io_rings(vport->phba); 3090 3091 /* At this point, ALL ndlp's should be gone 3092 * because of the previous NLP_EVT_DEVICE_RM. 3093 * Lets wait for this to happen, if needed. 3094 */ 3095 while (!list_empty(&vport->fc_nodes)) { 3096 if (i++ > 3000) { 3097 lpfc_printf_vlog(vport, KERN_ERR, 3098 LOG_TRACE_EVENT, 3099 "0233 Nodelist not empty\n"); 3100 list_for_each_entry_safe(ndlp, next_ndlp, 3101 &vport->fc_nodes, nlp_listp) { 3102 lpfc_printf_vlog(ndlp->vport, KERN_ERR, 3103 LOG_DISCOVERY, 3104 "0282 did:x%x ndlp:x%px " 3105 "refcnt:%d xflags x%x nflag x%x\n", 3106 ndlp->nlp_DID, (void *)ndlp, 3107 kref_read(&ndlp->kref), 3108 ndlp->fc4_xpt_flags, 3109 ndlp->nlp_flag); 3110 } 3111 break; 3112 } 3113 3114 /* Wait for any activity on ndlps to settle */ 3115 msleep(10); 3116 } 3117 lpfc_cleanup_vports_rrqs(vport, NULL); 3118 } 3119 3120 /** 3121 * lpfc_stop_vport_timers - Stop all the timers associated with a vport 3122 * @vport: pointer to a virtual N_Port data structure. 3123 * 3124 * This routine stops all the timers associated with a @vport. This function 3125 * is invoked before disabling or deleting a @vport. Note that the physical 3126 * port is treated as @vport 0. 3127 **/ 3128 void 3129 lpfc_stop_vport_timers(struct lpfc_vport *vport) 3130 { 3131 del_timer_sync(&vport->els_tmofunc); 3132 del_timer_sync(&vport->delayed_disc_tmo); 3133 lpfc_can_disctmo(vport); 3134 return; 3135 } 3136 3137 /** 3138 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 3139 * @phba: pointer to lpfc hba data structure. 3140 * 3141 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The 3142 * caller of this routine should already hold the host lock. 
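 *
 * Expected calling pattern (sketch; the locked wrapper below follows it):
 *
 *   spin_lock_irq(&phba->hbalock);
 *   __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
 *   spin_unlock_irq(&phba->hbalock);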
3143 **/ 3144 void 3145 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 3146 { 3147 /* Clear pending FCF rediscovery wait flag */ 3148 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 3149 3150 /* Now, try to stop the timer */ 3151 del_timer(&phba->fcf.redisc_wait); 3152 } 3153 3154 /** 3155 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 3156 * @phba: pointer to lpfc hba data structure. 3157 * 3158 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It 3159 * checks whether the FCF rediscovery wait timer is pending with the host 3160 * lock held before proceeding with disabling the timer and clearing the 3161 * wait timer pendig flag. 3162 **/ 3163 void 3164 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 3165 { 3166 spin_lock_irq(&phba->hbalock); 3167 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 3168 /* FCF rediscovery timer already fired or stopped */ 3169 spin_unlock_irq(&phba->hbalock); 3170 return; 3171 } 3172 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 3173 /* Clear failover in progress flags */ 3174 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC); 3175 spin_unlock_irq(&phba->hbalock); 3176 } 3177 3178 /** 3179 * lpfc_cmf_stop - Stop CMF processing 3180 * @phba: pointer to lpfc hba data structure. 3181 * 3182 * This is called when the link goes down or if CMF mode is turned OFF. 3183 * It is also called when going offline or unloaded just before the 3184 * congestion info buffer is unregistered. 3185 **/ 3186 void 3187 lpfc_cmf_stop(struct lpfc_hba *phba) 3188 { 3189 int cpu; 3190 struct lpfc_cgn_stat *cgs; 3191 3192 /* We only do something if CMF is enabled */ 3193 if (!phba->sli4_hba.pc_sli4_params.cmf) 3194 return; 3195 3196 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 3197 "6221 Stop CMF / Cancel Timer\n"); 3198 3199 /* Cancel the CMF timer */ 3200 hrtimer_cancel(&phba->cmf_timer); 3201 3202 /* Zero CMF counters */ 3203 atomic_set(&phba->cmf_busy, 0); 3204 for_each_present_cpu(cpu) { 3205 cgs = per_cpu_ptr(phba->cmf_stat, cpu); 3206 atomic64_set(&cgs->total_bytes, 0); 3207 atomic64_set(&cgs->rcv_bytes, 0); 3208 atomic_set(&cgs->rx_io_cnt, 0); 3209 atomic64_set(&cgs->rx_latency, 0); 3210 } 3211 atomic_set(&phba->cmf_bw_wait, 0); 3212 3213 /* Resume any blocked IO - Queue unblock on workqueue */ 3214 queue_work(phba->wq, &phba->unblock_request_work); 3215 } 3216 3217 static inline uint64_t 3218 lpfc_get_max_line_rate(struct lpfc_hba *phba) 3219 { 3220 uint64_t rate = lpfc_sli_port_speed_get(phba); 3221 3222 return ((((unsigned long)rate) * 1024 * 1024) / 10); 3223 } 3224 3225 void 3226 lpfc_cmf_signal_init(struct lpfc_hba *phba) 3227 { 3228 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 3229 "6223 Signal CMF init\n"); 3230 3231 /* Use the new fc_linkspeed to recalculate */ 3232 phba->cmf_interval_rate = LPFC_CMF_INTERVAL; 3233 phba->cmf_max_line_rate = lpfc_get_max_line_rate(phba); 3234 phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate * 3235 phba->cmf_interval_rate, 1000); 3236 phba->cmf_max_bytes_per_interval = phba->cmf_link_byte_count; 3237 3238 /* This is a signal to firmware to sync up CMF BW with link speed */ 3239 lpfc_issue_cmf_sync_wqe(phba, 0, 0); 3240 } 3241 3242 /** 3243 * lpfc_cmf_start - Start CMF processing 3244 * @phba: pointer to lpfc hba data structure. 3245 * 3246 * This is called when the link comes up or if CMF mode is turned OFF 3247 * to Monitor or Managed. 
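 *
 * For reference, the CMF timer is armed as a relative hrtimer with an
 * LPFC_CMF_INTERVAL millisecond period, as in the body below:
 *
 *   hrtimer_start(&phba->cmf_timer,
 *                 ktime_set(0, LPFC_CMF_INTERVAL * 1000000),
 *                 HRTIMER_MODE_REL);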
3248 **/ 3249 void 3250 lpfc_cmf_start(struct lpfc_hba *phba) 3251 { 3252 struct lpfc_cgn_stat *cgs; 3253 int cpu; 3254 3255 /* We only do something if CMF is enabled */ 3256 if (!phba->sli4_hba.pc_sli4_params.cmf || 3257 phba->cmf_active_mode == LPFC_CFG_OFF) 3258 return; 3259 3260 /* Reinitialize congestion buffer info */ 3261 lpfc_init_congestion_buf(phba); 3262 3263 atomic_set(&phba->cgn_fabric_warn_cnt, 0); 3264 atomic_set(&phba->cgn_fabric_alarm_cnt, 0); 3265 atomic_set(&phba->cgn_sync_alarm_cnt, 0); 3266 atomic_set(&phba->cgn_sync_warn_cnt, 0); 3267 3268 atomic_set(&phba->cmf_busy, 0); 3269 for_each_present_cpu(cpu) { 3270 cgs = per_cpu_ptr(phba->cmf_stat, cpu); 3271 atomic64_set(&cgs->total_bytes, 0); 3272 atomic64_set(&cgs->rcv_bytes, 0); 3273 atomic_set(&cgs->rx_io_cnt, 0); 3274 atomic64_set(&cgs->rx_latency, 0); 3275 } 3276 phba->cmf_latency.tv_sec = 0; 3277 phba->cmf_latency.tv_nsec = 0; 3278 3279 lpfc_cmf_signal_init(phba); 3280 3281 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 3282 "6222 Start CMF / Timer\n"); 3283 3284 phba->cmf_timer_cnt = 0; 3285 hrtimer_start(&phba->cmf_timer, 3286 ktime_set(0, LPFC_CMF_INTERVAL * 1000000), 3287 HRTIMER_MODE_REL); 3288 /* Setup for latency check in IO cmpl routines */ 3289 ktime_get_real_ts64(&phba->cmf_latency); 3290 3291 atomic_set(&phba->cmf_bw_wait, 0); 3292 atomic_set(&phba->cmf_stop_io, 0); 3293 } 3294 3295 /** 3296 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA 3297 * @phba: pointer to lpfc hba data structure. 3298 * 3299 * This routine stops all the timers associated with a HBA. This function is 3300 * invoked before either putting a HBA offline or unloading the driver. 3301 **/ 3302 void 3303 lpfc_stop_hba_timers(struct lpfc_hba *phba) 3304 { 3305 if (phba->pport) 3306 lpfc_stop_vport_timers(phba->pport); 3307 cancel_delayed_work_sync(&phba->eq_delay_work); 3308 cancel_delayed_work_sync(&phba->idle_stat_delay_work); 3309 del_timer_sync(&phba->sli.mbox_tmo); 3310 del_timer_sync(&phba->fabric_block_timer); 3311 del_timer_sync(&phba->eratt_poll); 3312 del_timer_sync(&phba->hb_tmofunc); 3313 if (phba->sli_rev == LPFC_SLI_REV4) { 3314 del_timer_sync(&phba->rrq_tmr); 3315 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 3316 } 3317 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO); 3318 3319 switch (phba->pci_dev_grp) { 3320 case LPFC_PCI_DEV_LP: 3321 /* Stop any LightPulse device specific driver timers */ 3322 del_timer_sync(&phba->fcp_poll_timer); 3323 break; 3324 case LPFC_PCI_DEV_OC: 3325 /* Stop any OneConnect device specific driver timers */ 3326 lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 3327 break; 3328 default: 3329 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3330 "0297 Invalid device group (x%x)\n", 3331 phba->pci_dev_grp); 3332 break; 3333 } 3334 return; 3335 } 3336 3337 /** 3338 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked 3339 * @phba: pointer to lpfc hba data structure. 3340 * @mbx_action: flag for mailbox no wait action. 3341 * 3342 * This routine marks a HBA's management interface as blocked. Once the HBA's 3343 * management interface is marked as blocked, all the user space access to 3344 * the HBA, whether they are from sysfs interface or libdfc interface will 3345 * all be blocked. The HBA is set to block the management interface when the 3346 * driver prepares the HBA interface for online or offline. 
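 *
 * Callers pair this with lpfc_unblock_mgmt_io() once the transition is
 * complete, e.g. (sketch of the pattern used by lpfc_online()):
 *
 *   lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
 *   ...bring the HBA online or offline...
 *   lpfc_unblock_mgmt_io(phba);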
 **/
static void
lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
{
	unsigned long iflag;
	uint8_t actcmd = MBX_HEARTBEAT;
	unsigned long timeout;

	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	if (mbx_action == LPFC_MBX_NO_WAIT)
		return;
	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
	spin_lock_irqsave(&phba->hbalock, iflag);
	if (phba->sli.mbox_active) {
		actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
		/* Determine how long we might wait for the active mailbox
		 * command to be gracefully completed by firmware.
		 */
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
				phba->sli.mbox_active) * 1000) + jiffies;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* Wait for the outstanding mailbox command to complete */
	while (phba->sli.mbox_active) {
		/* Check active mailbox complete status every 2ms */
		msleep(2);
		if (time_after(jiffies, timeout)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2813 Mgmt IO is Blocked %x "
					"- mbox cmd %x still active\n",
					phba->sli.sli_flag, actcmd);
			break;
		}
	}
}

/**
 * lpfc_sli4_node_prep - Assign RPIs for active nodes.
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocate RPIs for all active remote nodes. This is needed whenever
 * an SLI4 adapter is reset and the driver is not unloading. Its purpose
 * is to fixup the temporary rpi assignments.
 **/
void
lpfc_sli4_node_prep(struct lpfc_hba *phba)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;
	struct lpfc_vport **vports;
	int i, rpi;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;

	vports = lpfc_create_vport_work_array(phba);
	if (vports == NULL)
		return;

	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
		if (vports[i]->load_flag & FC_UNLOADING)
			continue;

		list_for_each_entry_safe(ndlp, next_ndlp,
					 &vports[i]->fc_nodes,
					 nlp_listp) {
			rpi = lpfc_sli4_alloc_rpi(phba);
			if (rpi == LPFC_RPI_ALLOC_ERROR) {
				/* TODO print log? */
				continue;
			}
			ndlp->nlp_rpi = rpi;
			lpfc_printf_vlog(ndlp->vport, KERN_INFO,
					 LOG_NODE | LOG_DISCOVERY,
					 "0009 Assign RPI x%x to ndlp x%px "
					 "DID:x%06x flg:x%x\n",
					 ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
					 ndlp->nlp_flag);
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_create_expedite_pool - create expedite pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine moves a batch of XRIs from lpfc_io_buf_list_put of HWQ 0
 * to the expedite pool and marks them as expedite.
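 *
 * At most XRI_BATCH buffers are moved; note the lock order below, where the
 * HWQ 0 put-list lock is taken before the expedite pool lock:
 *
 *   spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
 *   spin_lock(&epd_pool->lock);
 *   ...move up to XRI_BATCH buffers...
 *   spin_unlock(&epd_pool->lock);
 *   spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);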
 **/
static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	struct lpfc_epd_pool *epd_pool;
	unsigned long iflag;

	epd_pool = &phba->epd_pool;
	qp = &phba->sli4_hba.hdwq[0];

	spin_lock_init(&epd_pool->lock);
	spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
	spin_lock(&epd_pool->lock);
	INIT_LIST_HEAD(&epd_pool->list);
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
				 &qp->lpfc_io_buf_list_put, list) {
		list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
		lpfc_ncmd->expedite = true;
		qp->put_io_bufs--;
		epd_pool->count++;
		if (epd_pool->count >= XRI_BATCH)
			break;
	}
	spin_unlock(&epd_pool->lock);
	spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
}

/**
 * lpfc_destroy_expedite_pool - destroy expedite pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine returns XRIs from the expedite pool to lpfc_io_buf_list_put
 * of HWQ 0 and clears the expedite mark.
 **/
static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	struct lpfc_epd_pool *epd_pool;
	unsigned long iflag;

	epd_pool = &phba->epd_pool;
	qp = &phba->sli4_hba.hdwq[0];

	spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
	spin_lock(&epd_pool->lock);
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
				 &epd_pool->list, list) {
		list_move_tail(&lpfc_ncmd->list,
			       &qp->lpfc_io_buf_list_put);
		lpfc_ncmd->flags = false;
		qp->put_io_bufs++;
		epd_pool->count--;
	}
	spin_unlock(&epd_pool->lock);
	spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
}

/**
 * lpfc_create_multixri_pools - create multi-XRI pools
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine initializes the public and private pools per HWQ, then moves
 * XRIs from lpfc_io_buf_list_put to the public pool. The high and low
 * watermarks are also initialized.
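 *
 * Each HWQ's xri_limit is the common XRI count divided by the number of
 * hardware queues; the private pool's high watermark is half of that limit
 * and its low watermark is XRI_BATCH. As a purely illustrative example,
 * 2048 common XRIs spread over 16 HWQs would give xri_limit = 128 and a
 * private-pool high watermark of 64.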
3506 **/ 3507 void lpfc_create_multixri_pools(struct lpfc_hba *phba) 3508 { 3509 u32 i, j; 3510 u32 hwq_count; 3511 u32 count_per_hwq; 3512 struct lpfc_io_buf *lpfc_ncmd; 3513 struct lpfc_io_buf *lpfc_ncmd_next; 3514 unsigned long iflag; 3515 struct lpfc_sli4_hdw_queue *qp; 3516 struct lpfc_multixri_pool *multixri_pool; 3517 struct lpfc_pbl_pool *pbl_pool; 3518 struct lpfc_pvt_pool *pvt_pool; 3519 3520 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3521 "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n", 3522 phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu, 3523 phba->sli4_hba.io_xri_cnt); 3524 3525 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3526 lpfc_create_expedite_pool(phba); 3527 3528 hwq_count = phba->cfg_hdw_queue; 3529 count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count; 3530 3531 for (i = 0; i < hwq_count; i++) { 3532 multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL); 3533 3534 if (!multixri_pool) { 3535 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3536 "1238 Failed to allocate memory for " 3537 "multixri_pool\n"); 3538 3539 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3540 lpfc_destroy_expedite_pool(phba); 3541 3542 j = 0; 3543 while (j < i) { 3544 qp = &phba->sli4_hba.hdwq[j]; 3545 kfree(qp->p_multixri_pool); 3546 j++; 3547 } 3548 phba->cfg_xri_rebalancing = 0; 3549 return; 3550 } 3551 3552 qp = &phba->sli4_hba.hdwq[i]; 3553 qp->p_multixri_pool = multixri_pool; 3554 3555 multixri_pool->xri_limit = count_per_hwq; 3556 multixri_pool->rrb_next_hwqid = i; 3557 3558 /* Deal with public free xri pool */ 3559 pbl_pool = &multixri_pool->pbl_pool; 3560 spin_lock_init(&pbl_pool->lock); 3561 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); 3562 spin_lock(&pbl_pool->lock); 3563 INIT_LIST_HEAD(&pbl_pool->list); 3564 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3565 &qp->lpfc_io_buf_list_put, list) { 3566 list_move_tail(&lpfc_ncmd->list, &pbl_pool->list); 3567 qp->put_io_bufs--; 3568 pbl_pool->count++; 3569 } 3570 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3571 "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n", 3572 pbl_pool->count, i); 3573 spin_unlock(&pbl_pool->lock); 3574 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); 3575 3576 /* Deal with private free xri pool */ 3577 pvt_pool = &multixri_pool->pvt_pool; 3578 pvt_pool->high_watermark = multixri_pool->xri_limit / 2; 3579 pvt_pool->low_watermark = XRI_BATCH; 3580 spin_lock_init(&pvt_pool->lock); 3581 spin_lock_irqsave(&pvt_pool->lock, iflag); 3582 INIT_LIST_HEAD(&pvt_pool->list); 3583 pvt_pool->count = 0; 3584 spin_unlock_irqrestore(&pvt_pool->lock, iflag); 3585 } 3586 } 3587 3588 /** 3589 * lpfc_destroy_multixri_pools - destroy multi-XRI pools 3590 * @phba: pointer to lpfc hba data structure. 3591 * 3592 * This routine returns XRIs from public/private to lpfc_io_buf_list_put. 
3593 **/ 3594 static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba) 3595 { 3596 u32 i; 3597 u32 hwq_count; 3598 struct lpfc_io_buf *lpfc_ncmd; 3599 struct lpfc_io_buf *lpfc_ncmd_next; 3600 unsigned long iflag; 3601 struct lpfc_sli4_hdw_queue *qp; 3602 struct lpfc_multixri_pool *multixri_pool; 3603 struct lpfc_pbl_pool *pbl_pool; 3604 struct lpfc_pvt_pool *pvt_pool; 3605 3606 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3607 lpfc_destroy_expedite_pool(phba); 3608 3609 if (!(phba->pport->load_flag & FC_UNLOADING)) 3610 lpfc_sli_flush_io_rings(phba); 3611 3612 hwq_count = phba->cfg_hdw_queue; 3613 3614 for (i = 0; i < hwq_count; i++) { 3615 qp = &phba->sli4_hba.hdwq[i]; 3616 multixri_pool = qp->p_multixri_pool; 3617 if (!multixri_pool) 3618 continue; 3619 3620 qp->p_multixri_pool = NULL; 3621 3622 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); 3623 3624 /* Deal with public free xri pool */ 3625 pbl_pool = &multixri_pool->pbl_pool; 3626 spin_lock(&pbl_pool->lock); 3627 3628 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3629 "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n", 3630 pbl_pool->count, i); 3631 3632 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3633 &pbl_pool->list, list) { 3634 list_move_tail(&lpfc_ncmd->list, 3635 &qp->lpfc_io_buf_list_put); 3636 qp->put_io_bufs++; 3637 pbl_pool->count--; 3638 } 3639 3640 INIT_LIST_HEAD(&pbl_pool->list); 3641 pbl_pool->count = 0; 3642 3643 spin_unlock(&pbl_pool->lock); 3644 3645 /* Deal with private free xri pool */ 3646 pvt_pool = &multixri_pool->pvt_pool; 3647 spin_lock(&pvt_pool->lock); 3648 3649 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3650 "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n", 3651 pvt_pool->count, i); 3652 3653 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3654 &pvt_pool->list, list) { 3655 list_move_tail(&lpfc_ncmd->list, 3656 &qp->lpfc_io_buf_list_put); 3657 qp->put_io_bufs++; 3658 pvt_pool->count--; 3659 } 3660 3661 INIT_LIST_HEAD(&pvt_pool->list); 3662 pvt_pool->count = 0; 3663 3664 spin_unlock(&pvt_pool->lock); 3665 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); 3666 3667 kfree(multixri_pool); 3668 } 3669 } 3670 3671 /** 3672 * lpfc_online - Initialize and bring a HBA online 3673 * @phba: pointer to lpfc hba data structure. 3674 * 3675 * This routine initializes the HBA and brings a HBA online. During this 3676 * process, the management interface is blocked to prevent user space access 3677 * to the HBA interfering with the driver initialization. 3678 * 3679 * Return codes 3680 * 0 - successful 3681 * 1 - failed 3682 **/ 3683 int 3684 lpfc_online(struct lpfc_hba *phba) 3685 { 3686 struct lpfc_vport *vport; 3687 struct lpfc_vport **vports; 3688 int i, error = 0; 3689 bool vpis_cleared = false; 3690 3691 if (!phba) 3692 return 0; 3693 vport = phba->pport; 3694 3695 if (!(vport->fc_flag & FC_OFFLINE_MODE)) 3696 return 0; 3697 3698 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 3699 "0458 Bring Adapter online\n"); 3700 3701 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); 3702 3703 if (phba->sli_rev == LPFC_SLI_REV4) { 3704 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */ 3705 lpfc_unblock_mgmt_io(phba); 3706 return 1; 3707 } 3708 spin_lock_irq(&phba->hbalock); 3709 if (!phba->sli4_hba.max_cfg_param.vpi_used) 3710 vpis_cleared = true; 3711 spin_unlock_irq(&phba->hbalock); 3712 3713 /* Reestablish the local initiator port. 3714 * The offline process destroyed the previous lport. 
3715 */ 3716 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME && 3717 !phba->nvmet_support) { 3718 error = lpfc_nvme_create_localport(phba->pport); 3719 if (error) 3720 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3721 "6132 NVME restore reg failed " 3722 "on nvmei error x%x\n", error); 3723 } 3724 } else { 3725 lpfc_sli_queue_init(phba); 3726 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ 3727 lpfc_unblock_mgmt_io(phba); 3728 return 1; 3729 } 3730 } 3731 3732 vports = lpfc_create_vport_work_array(phba); 3733 if (vports != NULL) { 3734 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3735 struct Scsi_Host *shost; 3736 shost = lpfc_shost_from_vport(vports[i]); 3737 spin_lock_irq(shost->host_lock); 3738 vports[i]->fc_flag &= ~FC_OFFLINE_MODE; 3739 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 3740 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 3741 if (phba->sli_rev == LPFC_SLI_REV4) { 3742 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 3743 if ((vpis_cleared) && 3744 (vports[i]->port_type != 3745 LPFC_PHYSICAL_PORT)) 3746 vports[i]->vpi = 0; 3747 } 3748 spin_unlock_irq(shost->host_lock); 3749 } 3750 } 3751 lpfc_destroy_vport_work_array(phba, vports); 3752 3753 if (phba->cfg_xri_rebalancing) 3754 lpfc_create_multixri_pools(phba); 3755 3756 lpfc_cpuhp_add(phba); 3757 3758 lpfc_unblock_mgmt_io(phba); 3759 return 0; 3760 } 3761 3762 /** 3763 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked 3764 * @phba: pointer to lpfc hba data structure. 3765 * 3766 * This routine marks a HBA's management interface as not blocked. Once the 3767 * HBA's management interface is marked as not blocked, all the user space 3768 * access to the HBA, whether they are from sysfs interface or libdfc 3769 * interface will be allowed. The HBA is set to block the management interface 3770 * when the driver prepares the HBA interface for online or offline and then 3771 * set to unblock the management interface afterwards. 3772 **/ 3773 void 3774 lpfc_unblock_mgmt_io(struct lpfc_hba * phba) 3775 { 3776 unsigned long iflag; 3777 3778 spin_lock_irqsave(&phba->hbalock, iflag); 3779 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; 3780 spin_unlock_irqrestore(&phba->hbalock, iflag); 3781 } 3782 3783 /** 3784 * lpfc_offline_prep - Prepare a HBA to be brought offline 3785 * @phba: pointer to lpfc hba data structure. 3786 * @mbx_action: flag for mailbox shutdown action. 3787 * 3788 * This routine is invoked to prepare a HBA to be brought offline. It performs 3789 * unregistration login to all the nodes on all vports and flushes the mailbox 3790 * queue to make it ready to be brought offline. 
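 *
 * Callers typically pair this routine with lpfc_offline(), for example
 * (illustrative sequence only):
 *
 *   lpfc_offline_prep(phba, LPFC_MBX_WAIT);
 *   lpfc_offline(phba);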
3791 **/ 3792 void 3793 lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action) 3794 { 3795 struct lpfc_vport *vport = phba->pport; 3796 struct lpfc_nodelist *ndlp, *next_ndlp; 3797 struct lpfc_vport **vports; 3798 struct Scsi_Host *shost; 3799 int i; 3800 int offline; 3801 bool hba_pci_err; 3802 3803 if (vport->fc_flag & FC_OFFLINE_MODE) 3804 return; 3805 3806 lpfc_block_mgmt_io(phba, mbx_action); 3807 3808 lpfc_linkdown(phba); 3809 3810 offline = pci_channel_offline(phba->pcidev); 3811 hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags); 3812 3813 /* Issue an unreg_login to all nodes on all vports */ 3814 vports = lpfc_create_vport_work_array(phba); 3815 if (vports != NULL) { 3816 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3817 if (vports[i]->load_flag & FC_UNLOADING) 3818 continue; 3819 shost = lpfc_shost_from_vport(vports[i]); 3820 spin_lock_irq(shost->host_lock); 3821 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; 3822 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 3823 vports[i]->fc_flag &= ~FC_VFI_REGISTERED; 3824 spin_unlock_irq(shost->host_lock); 3825 3826 shost = lpfc_shost_from_vport(vports[i]); 3827 list_for_each_entry_safe(ndlp, next_ndlp, 3828 &vports[i]->fc_nodes, 3829 nlp_listp) { 3830 3831 spin_lock_irq(&ndlp->lock); 3832 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 3833 spin_unlock_irq(&ndlp->lock); 3834 3835 if (offline || hba_pci_err) { 3836 spin_lock_irq(&ndlp->lock); 3837 ndlp->nlp_flag &= ~(NLP_UNREG_INP | 3838 NLP_RPI_REGISTERED); 3839 spin_unlock_irq(&ndlp->lock); 3840 if (phba->sli_rev == LPFC_SLI_REV4) 3841 lpfc_sli_rpi_release(vports[i], 3842 ndlp); 3843 } else { 3844 lpfc_unreg_rpi(vports[i], ndlp); 3845 } 3846 /* 3847 * Whenever an SLI4 port goes offline, free the 3848 * RPI. Get a new RPI when the adapter port 3849 * comes back online. 3850 */ 3851 if (phba->sli_rev == LPFC_SLI_REV4) { 3852 lpfc_printf_vlog(vports[i], KERN_INFO, 3853 LOG_NODE | LOG_DISCOVERY, 3854 "0011 Free RPI x%x on " 3855 "ndlp: x%px did x%x\n", 3856 ndlp->nlp_rpi, ndlp, 3857 ndlp->nlp_DID); 3858 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); 3859 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; 3860 } 3861 3862 if (ndlp->nlp_type & NLP_FABRIC) { 3863 lpfc_disc_state_machine(vports[i], ndlp, 3864 NULL, NLP_EVT_DEVICE_RECOVERY); 3865 3866 /* Don't remove the node unless the node 3867 * has been unregistered with the 3868 * transport, and we're not in recovery 3869 * before dev_loss_tmo triggered. 3870 * Otherwise, let dev_loss take care of 3871 * the node. 3872 */ 3873 if (!(ndlp->save_flags & 3874 NLP_IN_RECOV_POST_DEV_LOSS) && 3875 !(ndlp->fc4_xpt_flags & 3876 (NVME_XPT_REGD | SCSI_XPT_REGD))) 3877 lpfc_disc_state_machine 3878 (vports[i], ndlp, 3879 NULL, 3880 NLP_EVT_DEVICE_RM); 3881 } 3882 } 3883 } 3884 } 3885 lpfc_destroy_vport_work_array(phba, vports); 3886 3887 lpfc_sli_mbox_sys_shutdown(phba, mbx_action); 3888 3889 if (phba->wq) 3890 flush_workqueue(phba->wq); 3891 } 3892 3893 /** 3894 * lpfc_offline - Bring a HBA offline 3895 * @phba: pointer to lpfc hba data structure. 3896 * 3897 * This routine actually brings a HBA offline. It stops all the timers 3898 * associated with the HBA, brings down the SLI layer, and eventually 3899 * marks the HBA as in offline state for the upper layer protocol. 
3900 **/ 3901 void 3902 lpfc_offline(struct lpfc_hba *phba) 3903 { 3904 struct Scsi_Host *shost; 3905 struct lpfc_vport **vports; 3906 int i; 3907 3908 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 3909 return; 3910 3911 /* stop port and all timers associated with this hba */ 3912 lpfc_stop_port(phba); 3913 3914 /* Tear down the local and target port registrations. The 3915 * nvme transports need to cleanup. 3916 */ 3917 lpfc_nvmet_destroy_targetport(phba); 3918 lpfc_nvme_destroy_localport(phba->pport); 3919 3920 vports = lpfc_create_vport_work_array(phba); 3921 if (vports != NULL) 3922 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 3923 lpfc_stop_vport_timers(vports[i]); 3924 lpfc_destroy_vport_work_array(phba, vports); 3925 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 3926 "0460 Bring Adapter offline\n"); 3927 /* Bring down the SLI Layer and cleanup. The HBA is offline 3928 now. */ 3929 lpfc_sli_hba_down(phba); 3930 spin_lock_irq(&phba->hbalock); 3931 phba->work_ha = 0; 3932 spin_unlock_irq(&phba->hbalock); 3933 vports = lpfc_create_vport_work_array(phba); 3934 if (vports != NULL) 3935 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3936 shost = lpfc_shost_from_vport(vports[i]); 3937 spin_lock_irq(shost->host_lock); 3938 vports[i]->work_port_events = 0; 3939 vports[i]->fc_flag |= FC_OFFLINE_MODE; 3940 spin_unlock_irq(shost->host_lock); 3941 } 3942 lpfc_destroy_vport_work_array(phba, vports); 3943 /* If OFFLINE flag is clear (i.e. unloading), cpuhp removal is handled 3944 * in hba_unset 3945 */ 3946 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 3947 __lpfc_cpuhp_remove(phba); 3948 3949 if (phba->cfg_xri_rebalancing) 3950 lpfc_destroy_multixri_pools(phba); 3951 } 3952 3953 /** 3954 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists 3955 * @phba: pointer to lpfc hba data structure. 3956 * 3957 * This routine is to free all the SCSI buffers and IOCBs from the driver 3958 * list back to kernel. It is called from lpfc_pci_remove_one to free 3959 * the internal resources before the device is removed from the system. 3960 **/ 3961 static void 3962 lpfc_scsi_free(struct lpfc_hba *phba) 3963 { 3964 struct lpfc_io_buf *sb, *sb_next; 3965 3966 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 3967 return; 3968 3969 spin_lock_irq(&phba->hbalock); 3970 3971 /* Release all the lpfc_scsi_bufs maintained by this host. */ 3972 3973 spin_lock(&phba->scsi_buf_list_put_lock); 3974 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put, 3975 list) { 3976 list_del(&sb->list); 3977 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, 3978 sb->dma_handle); 3979 kfree(sb); 3980 phba->total_scsi_bufs--; 3981 } 3982 spin_unlock(&phba->scsi_buf_list_put_lock); 3983 3984 spin_lock(&phba->scsi_buf_list_get_lock); 3985 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get, 3986 list) { 3987 list_del(&sb->list); 3988 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, 3989 sb->dma_handle); 3990 kfree(sb); 3991 phba->total_scsi_bufs--; 3992 } 3993 spin_unlock(&phba->scsi_buf_list_get_lock); 3994 spin_unlock_irq(&phba->hbalock); 3995 } 3996 3997 /** 3998 * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists 3999 * @phba: pointer to lpfc hba data structure. 4000 * 4001 * This routine is to free all the IO buffers and IOCBs from the driver 4002 * list back to kernel. It is called from lpfc_pci_remove_one to free 4003 * the internal resources before the device is removed from the system. 
4004 **/
4005 void
4006 lpfc_io_free(struct lpfc_hba *phba)
4007 {
4008 	struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
4009 	struct lpfc_sli4_hdw_queue *qp;
4010 	int idx;
4011 
4012 	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4013 		qp = &phba->sli4_hba.hdwq[idx];
4014 		/* Release all the lpfc_nvme_bufs maintained by this host. */
4015 		spin_lock(&qp->io_buf_list_put_lock);
4016 		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4017 					 &qp->lpfc_io_buf_list_put,
4018 					 list) {
4019 			list_del(&lpfc_ncmd->list);
4020 			qp->put_io_bufs--;
4021 			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4022 				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4023 			if (phba->cfg_xpsgl && !phba->nvmet_support)
4024 				lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
4025 			lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
4026 			kfree(lpfc_ncmd);
4027 			qp->total_io_bufs--;
4028 		}
4029 		spin_unlock(&qp->io_buf_list_put_lock);
4030 
4031 		spin_lock(&qp->io_buf_list_get_lock);
4032 		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4033 					 &qp->lpfc_io_buf_list_get,
4034 					 list) {
4035 			list_del(&lpfc_ncmd->list);
4036 			qp->get_io_bufs--;
4037 			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4038 				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4039 			if (phba->cfg_xpsgl && !phba->nvmet_support)
4040 				lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
4041 			lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
4042 			kfree(lpfc_ncmd);
4043 			qp->total_io_bufs--;
4044 		}
4045 		spin_unlock(&qp->io_buf_list_get_lock);
4046 	}
4047 }
4048 
4049 /**
4050  * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
4051  * @phba: pointer to lpfc hba data structure.
4052  *
4053  * This routine first calculates the sizes of the current els and allocated
4054  * scsi sgl lists, and then goes through all sgls to update the physical
4055  * XRIs assigned due to port function reset. During port initialization, the
4056  * current els and allocated scsi sgl lists are 0s.
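 *
 * For example (figures illustrative only), if a port function reset grows
 * the ELS IOCB count from 256 to 512, this routine allocates 256 new sglq
 * entries, splices them onto lpfc_els_sgl_list, and then walks the whole
 * list assigning a fresh XRI to every entry.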
4057 * 4058 * Return codes 4059 * 0 - successful (for now, it always returns 0) 4060 **/ 4061 int 4062 lpfc_sli4_els_sgl_update(struct lpfc_hba *phba) 4063 { 4064 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; 4065 uint16_t i, lxri, xri_cnt, els_xri_cnt; 4066 LIST_HEAD(els_sgl_list); 4067 int rc; 4068 4069 /* 4070 * update on pci function's els xri-sgl list 4071 */ 4072 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 4073 4074 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) { 4075 /* els xri-sgl expanded */ 4076 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt; 4077 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4078 "3157 ELS xri-sgl count increased from " 4079 "%d to %d\n", phba->sli4_hba.els_xri_cnt, 4080 els_xri_cnt); 4081 /* allocate the additional els sgls */ 4082 for (i = 0; i < xri_cnt; i++) { 4083 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), 4084 GFP_KERNEL); 4085 if (sglq_entry == NULL) { 4086 lpfc_printf_log(phba, KERN_ERR, 4087 LOG_TRACE_EVENT, 4088 "2562 Failure to allocate an " 4089 "ELS sgl entry:%d\n", i); 4090 rc = -ENOMEM; 4091 goto out_free_mem; 4092 } 4093 sglq_entry->buff_type = GEN_BUFF_TYPE; 4094 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, 4095 &sglq_entry->phys); 4096 if (sglq_entry->virt == NULL) { 4097 kfree(sglq_entry); 4098 lpfc_printf_log(phba, KERN_ERR, 4099 LOG_TRACE_EVENT, 4100 "2563 Failure to allocate an " 4101 "ELS mbuf:%d\n", i); 4102 rc = -ENOMEM; 4103 goto out_free_mem; 4104 } 4105 sglq_entry->sgl = sglq_entry->virt; 4106 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE); 4107 sglq_entry->state = SGL_FREED; 4108 list_add_tail(&sglq_entry->list, &els_sgl_list); 4109 } 4110 spin_lock_irq(&phba->sli4_hba.sgl_list_lock); 4111 list_splice_init(&els_sgl_list, 4112 &phba->sli4_hba.lpfc_els_sgl_list); 4113 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock); 4114 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) { 4115 /* els xri-sgl shrinked */ 4116 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt; 4117 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4118 "3158 ELS xri-sgl count decreased from " 4119 "%d to %d\n", phba->sli4_hba.els_xri_cnt, 4120 els_xri_cnt); 4121 spin_lock_irq(&phba->sli4_hba.sgl_list_lock); 4122 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, 4123 &els_sgl_list); 4124 /* release extra els sgls from list */ 4125 for (i = 0; i < xri_cnt; i++) { 4126 list_remove_head(&els_sgl_list, 4127 sglq_entry, struct lpfc_sglq, list); 4128 if (sglq_entry) { 4129 __lpfc_mbuf_free(phba, sglq_entry->virt, 4130 sglq_entry->phys); 4131 kfree(sglq_entry); 4132 } 4133 } 4134 list_splice_init(&els_sgl_list, 4135 &phba->sli4_hba.lpfc_els_sgl_list); 4136 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock); 4137 } else 4138 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4139 "3163 ELS xri-sgl count unchanged: %d\n", 4140 els_xri_cnt); 4141 phba->sli4_hba.els_xri_cnt = els_xri_cnt; 4142 4143 /* update xris to els sgls on the list */ 4144 sglq_entry = NULL; 4145 sglq_entry_next = NULL; 4146 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 4147 &phba->sli4_hba.lpfc_els_sgl_list, list) { 4148 lxri = lpfc_sli4_next_xritag(phba); 4149 if (lxri == NO_XRI) { 4150 lpfc_printf_log(phba, KERN_ERR, 4151 LOG_TRACE_EVENT, 4152 "2400 Failed to allocate xri for " 4153 "ELS sgl\n"); 4154 rc = -ENOMEM; 4155 goto out_free_mem; 4156 } 4157 sglq_entry->sli4_lxritag = lxri; 4158 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 4159 } 4160 return 0; 4161 4162 out_free_mem: 4163 lpfc_free_els_sgl_list(phba); 4164 return rc; 4165 } 4166 4167 /** 4168 * lpfc_sli4_nvmet_sgl_update - 
update xri-sgl sizing and mapping 4169 * @phba: pointer to lpfc hba data structure. 4170 * 4171 * This routine first calculates the sizes of the current els and allocated 4172 * scsi sgl lists, and then goes through all sgls to updates the physical 4173 * XRIs assigned due to port function reset. During port initialization, the 4174 * current els and allocated scsi sgl lists are 0s. 4175 * 4176 * Return codes 4177 * 0 - successful (for now, it always returns 0) 4178 **/ 4179 int 4180 lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba) 4181 { 4182 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; 4183 uint16_t i, lxri, xri_cnt, els_xri_cnt; 4184 uint16_t nvmet_xri_cnt; 4185 LIST_HEAD(nvmet_sgl_list); 4186 int rc; 4187 4188 /* 4189 * update on pci function's nvmet xri-sgl list 4190 */ 4191 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 4192 4193 /* For NVMET, ALL remaining XRIs are dedicated for IO processing */ 4194 nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; 4195 if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) { 4196 /* els xri-sgl expanded */ 4197 xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt; 4198 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4199 "6302 NVMET xri-sgl cnt grew from %d to %d\n", 4200 phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt); 4201 /* allocate the additional nvmet sgls */ 4202 for (i = 0; i < xri_cnt; i++) { 4203 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), 4204 GFP_KERNEL); 4205 if (sglq_entry == NULL) { 4206 lpfc_printf_log(phba, KERN_ERR, 4207 LOG_TRACE_EVENT, 4208 "6303 Failure to allocate an " 4209 "NVMET sgl entry:%d\n", i); 4210 rc = -ENOMEM; 4211 goto out_free_mem; 4212 } 4213 sglq_entry->buff_type = NVMET_BUFF_TYPE; 4214 sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0, 4215 &sglq_entry->phys); 4216 if (sglq_entry->virt == NULL) { 4217 kfree(sglq_entry); 4218 lpfc_printf_log(phba, KERN_ERR, 4219 LOG_TRACE_EVENT, 4220 "6304 Failure to allocate an " 4221 "NVMET buf:%d\n", i); 4222 rc = -ENOMEM; 4223 goto out_free_mem; 4224 } 4225 sglq_entry->sgl = sglq_entry->virt; 4226 memset(sglq_entry->sgl, 0, 4227 phba->cfg_sg_dma_buf_size); 4228 sglq_entry->state = SGL_FREED; 4229 list_add_tail(&sglq_entry->list, &nvmet_sgl_list); 4230 } 4231 spin_lock_irq(&phba->hbalock); 4232 spin_lock(&phba->sli4_hba.sgl_list_lock); 4233 list_splice_init(&nvmet_sgl_list, 4234 &phba->sli4_hba.lpfc_nvmet_sgl_list); 4235 spin_unlock(&phba->sli4_hba.sgl_list_lock); 4236 spin_unlock_irq(&phba->hbalock); 4237 } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) { 4238 /* nvmet xri-sgl shrunk */ 4239 xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt; 4240 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4241 "6305 NVMET xri-sgl count decreased from " 4242 "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt, 4243 nvmet_xri_cnt); 4244 spin_lock_irq(&phba->hbalock); 4245 spin_lock(&phba->sli4_hba.sgl_list_lock); 4246 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, 4247 &nvmet_sgl_list); 4248 /* release extra nvmet sgls from list */ 4249 for (i = 0; i < xri_cnt; i++) { 4250 list_remove_head(&nvmet_sgl_list, 4251 sglq_entry, struct lpfc_sglq, list); 4252 if (sglq_entry) { 4253 lpfc_nvmet_buf_free(phba, sglq_entry->virt, 4254 sglq_entry->phys); 4255 kfree(sglq_entry); 4256 } 4257 } 4258 list_splice_init(&nvmet_sgl_list, 4259 &phba->sli4_hba.lpfc_nvmet_sgl_list); 4260 spin_unlock(&phba->sli4_hba.sgl_list_lock); 4261 spin_unlock_irq(&phba->hbalock); 4262 } else 4263 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4264 "6306 NVMET xri-sgl count unchanged: %d\n", 4265 
nvmet_xri_cnt); 4266 phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt; 4267 4268 /* update xris to nvmet sgls on the list */ 4269 sglq_entry = NULL; 4270 sglq_entry_next = NULL; 4271 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 4272 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) { 4273 lxri = lpfc_sli4_next_xritag(phba); 4274 if (lxri == NO_XRI) { 4275 lpfc_printf_log(phba, KERN_ERR, 4276 LOG_TRACE_EVENT, 4277 "6307 Failed to allocate xri for " 4278 "NVMET sgl\n"); 4279 rc = -ENOMEM; 4280 goto out_free_mem; 4281 } 4282 sglq_entry->sli4_lxritag = lxri; 4283 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 4284 } 4285 return 0; 4286 4287 out_free_mem: 4288 lpfc_free_nvmet_sgl_list(phba); 4289 return rc; 4290 } 4291 4292 int 4293 lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf) 4294 { 4295 LIST_HEAD(blist); 4296 struct lpfc_sli4_hdw_queue *qp; 4297 struct lpfc_io_buf *lpfc_cmd; 4298 struct lpfc_io_buf *iobufp, *prev_iobufp; 4299 int idx, cnt, xri, inserted; 4300 4301 cnt = 0; 4302 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 4303 qp = &phba->sli4_hba.hdwq[idx]; 4304 spin_lock_irq(&qp->io_buf_list_get_lock); 4305 spin_lock(&qp->io_buf_list_put_lock); 4306 4307 /* Take everything off the get and put lists */ 4308 list_splice_init(&qp->lpfc_io_buf_list_get, &blist); 4309 list_splice(&qp->lpfc_io_buf_list_put, &blist); 4310 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get); 4311 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); 4312 cnt += qp->get_io_bufs + qp->put_io_bufs; 4313 qp->get_io_bufs = 0; 4314 qp->put_io_bufs = 0; 4315 qp->total_io_bufs = 0; 4316 spin_unlock(&qp->io_buf_list_put_lock); 4317 spin_unlock_irq(&qp->io_buf_list_get_lock); 4318 } 4319 4320 /* 4321 * Take IO buffers off blist and put on cbuf sorted by XRI. 4322 * This is because POST_SGL takes a sequential range of XRIs 4323 * to post to the firmware. 4324 */ 4325 for (idx = 0; idx < cnt; idx++) { 4326 list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list); 4327 if (!lpfc_cmd) 4328 return cnt; 4329 if (idx == 0) { 4330 list_add_tail(&lpfc_cmd->list, cbuf); 4331 continue; 4332 } 4333 xri = lpfc_cmd->cur_iocbq.sli4_xritag; 4334 inserted = 0; 4335 prev_iobufp = NULL; 4336 list_for_each_entry(iobufp, cbuf, list) { 4337 if (xri < iobufp->cur_iocbq.sli4_xritag) { 4338 if (prev_iobufp) 4339 list_add(&lpfc_cmd->list, 4340 &prev_iobufp->list); 4341 else 4342 list_add(&lpfc_cmd->list, cbuf); 4343 inserted = 1; 4344 break; 4345 } 4346 prev_iobufp = iobufp; 4347 } 4348 if (!inserted) 4349 list_add_tail(&lpfc_cmd->list, cbuf); 4350 } 4351 return cnt; 4352 } 4353 4354 int 4355 lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf) 4356 { 4357 struct lpfc_sli4_hdw_queue *qp; 4358 struct lpfc_io_buf *lpfc_cmd; 4359 int idx, cnt; 4360 4361 qp = phba->sli4_hba.hdwq; 4362 cnt = 0; 4363 while (!list_empty(cbuf)) { 4364 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 4365 list_remove_head(cbuf, lpfc_cmd, 4366 struct lpfc_io_buf, list); 4367 if (!lpfc_cmd) 4368 return cnt; 4369 cnt++; 4370 qp = &phba->sli4_hba.hdwq[idx]; 4371 lpfc_cmd->hdwq_no = idx; 4372 lpfc_cmd->hdwq = qp; 4373 lpfc_cmd->cur_iocbq.cmd_cmpl = NULL; 4374 spin_lock(&qp->io_buf_list_put_lock); 4375 list_add_tail(&lpfc_cmd->list, 4376 &qp->lpfc_io_buf_list_put); 4377 qp->put_io_bufs++; 4378 qp->total_io_bufs++; 4379 spin_unlock(&qp->io_buf_list_put_lock); 4380 } 4381 } 4382 return cnt; 4383 } 4384 4385 /** 4386 * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping 4387 * @phba: pointer to lpfc hba data structure. 
4388  *
4389  * This routine first calculates the sizes of the current els and allocated
4390  * scsi sgl lists, and then goes through all sgls to update the physical
4391  * XRIs assigned due to port function reset. During port initialization, the
4392  * current els and allocated scsi sgl lists are 0s.
4393  *
4394  * Return codes
4395  *   0 - successful (for now, it always returns 0)
4396  **/
4397 int
4398 lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
4399 {
4400 	struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
4401 	uint16_t i, lxri, els_xri_cnt;
4402 	uint16_t io_xri_cnt, io_xri_max;
4403 	LIST_HEAD(io_sgl_list);
4404 	int rc, cnt;
4405 
4406 	/*
4407 	 * update on pci function's allocated nvme xri-sgl list
4408 	 */
4409 
4410 	/* maximum number of xris available for nvme buffers */
4411 	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4412 	io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4413 	phba->sli4_hba.io_xri_max = io_xri_max;
4414 
4415 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4416 			"6074 Current allocated XRI sgl count:%d, "
4417 			"maximum XRI count:%d els_xri_cnt:%d\n\n",
4418 			phba->sli4_hba.io_xri_cnt,
4419 			phba->sli4_hba.io_xri_max,
4420 			els_xri_cnt);
4421 
4422 	cnt = lpfc_io_buf_flush(phba, &io_sgl_list);
4423 
4424 	if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
4425 		/* max nvme xri shrunk below the allocated nvme buffers */
4426 		io_xri_cnt = phba->sli4_hba.io_xri_cnt -
4427 					phba->sli4_hba.io_xri_max;
4428 		/* release the extra allocated nvme buffers */
4429 		for (i = 0; i < io_xri_cnt; i++) {
4430 			list_remove_head(&io_sgl_list, lpfc_ncmd,
4431 					 struct lpfc_io_buf, list);
4432 			if (lpfc_ncmd) {
4433 				dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4434 					      lpfc_ncmd->data,
4435 					      lpfc_ncmd->dma_handle);
4436 				kfree(lpfc_ncmd);
4437 			}
4438 		}
4439 		phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
4440 	}
4441 
4442 	/* update xris associated to remaining allocated nvme buffers */
4443 	lpfc_ncmd = NULL;
4444 	lpfc_ncmd_next = NULL;
4445 	phba->sli4_hba.io_xri_cnt = cnt;
4446 	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4447 				 &io_sgl_list, list) {
4448 		lxri = lpfc_sli4_next_xritag(phba);
4449 		if (lxri == NO_XRI) {
4450 			lpfc_printf_log(phba, KERN_ERR,
4451 					LOG_TRACE_EVENT,
4452 					"6075 Failed to allocate xri for "
4453 					"nvme buffer\n");
4454 			rc = -ENOMEM;
4455 			goto out_free_mem;
4456 		}
4457 		lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
4458 		lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4459 	}
4460 	cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
4461 	return 0;
4462 
4463 out_free_mem:
4464 	lpfc_io_free(phba);
4465 	return rc;
4466 }
4467 
4468 /**
4469  * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec
4470  * @phba: Pointer to lpfc hba data structure.
4471  * @num_to_alloc: The requested number of buffers to allocate.
4472  *
4473  * This routine allocates nvme buffers for a device with the SLI-4 interface
4474  * spec; each buffer contains all the necessary information needed to
4475  * initiate an I/O. After allocating up to @num_to_alloc IO buffers and
4476  * putting them on a list, it posts them to the port by using SGL block post.
4477  *
4478  * Return codes:
4479  *   int - number of IO buffers that were allocated and posted.
4480  *   0 = failure, less than num_to_alloc is a partial failure.
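 *
 * Illustrative call only (the count actually requested is determined
 * during HBA setup from the port's XRI resources):
 *
 *   posted = lpfc_new_io_buf(phba, phba->sli4_hba.io_xri_max);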
4481 **/ 4482 int 4483 lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc) 4484 { 4485 struct lpfc_io_buf *lpfc_ncmd; 4486 struct lpfc_iocbq *pwqeq; 4487 uint16_t iotag, lxri = 0; 4488 int bcnt, num_posted; 4489 LIST_HEAD(prep_nblist); 4490 LIST_HEAD(post_nblist); 4491 LIST_HEAD(nvme_nblist); 4492 4493 phba->sli4_hba.io_xri_cnt = 0; 4494 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { 4495 lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL); 4496 if (!lpfc_ncmd) 4497 break; 4498 /* 4499 * Get memory from the pci pool to map the virt space to 4500 * pci bus space for an I/O. The DMA buffer includes the 4501 * number of SGE's necessary to support the sg_tablesize. 4502 */ 4503 lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool, 4504 GFP_KERNEL, 4505 &lpfc_ncmd->dma_handle); 4506 if (!lpfc_ncmd->data) { 4507 kfree(lpfc_ncmd); 4508 break; 4509 } 4510 4511 if (phba->cfg_xpsgl && !phba->nvmet_support) { 4512 INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list); 4513 } else { 4514 /* 4515 * 4K Page alignment is CRITICAL to BlockGuard, double 4516 * check to be sure. 4517 */ 4518 if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) && 4519 (((unsigned long)(lpfc_ncmd->data) & 4520 (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) { 4521 lpfc_printf_log(phba, KERN_ERR, 4522 LOG_TRACE_EVENT, 4523 "3369 Memory alignment err: " 4524 "addr=%lx\n", 4525 (unsigned long)lpfc_ncmd->data); 4526 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4527 lpfc_ncmd->data, 4528 lpfc_ncmd->dma_handle); 4529 kfree(lpfc_ncmd); 4530 break; 4531 } 4532 } 4533 4534 INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list); 4535 4536 lxri = lpfc_sli4_next_xritag(phba); 4537 if (lxri == NO_XRI) { 4538 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4539 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 4540 kfree(lpfc_ncmd); 4541 break; 4542 } 4543 pwqeq = &lpfc_ncmd->cur_iocbq; 4544 4545 /* Allocate iotag for lpfc_ncmd->cur_iocbq. */ 4546 iotag = lpfc_sli_next_iotag(phba, pwqeq); 4547 if (iotag == 0) { 4548 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4549 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 4550 kfree(lpfc_ncmd); 4551 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4552 "6121 Failed to allocate IOTAG for" 4553 " XRI:0x%x\n", lxri); 4554 lpfc_sli4_free_xri(phba, lxri); 4555 break; 4556 } 4557 pwqeq->sli4_lxritag = lxri; 4558 pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 4559 4560 /* Initialize local short-hand pointers. 
*/ 4561 lpfc_ncmd->dma_sgl = lpfc_ncmd->data; 4562 lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle; 4563 lpfc_ncmd->cur_iocbq.io_buf = lpfc_ncmd; 4564 spin_lock_init(&lpfc_ncmd->buf_lock); 4565 4566 /* add the nvme buffer to a post list */ 4567 list_add_tail(&lpfc_ncmd->list, &post_nblist); 4568 phba->sli4_hba.io_xri_cnt++; 4569 } 4570 lpfc_printf_log(phba, KERN_INFO, LOG_NVME, 4571 "6114 Allocate %d out of %d requested new NVME " 4572 "buffers of size x%zu bytes\n", bcnt, num_to_alloc, 4573 sizeof(*lpfc_ncmd)); 4574 4575 4576 /* post the list of nvme buffer sgls to port if available */ 4577 if (!list_empty(&post_nblist)) 4578 num_posted = lpfc_sli4_post_io_sgl_list( 4579 phba, &post_nblist, bcnt); 4580 else 4581 num_posted = 0; 4582 4583 return num_posted; 4584 } 4585 4586 static uint64_t 4587 lpfc_get_wwpn(struct lpfc_hba *phba) 4588 { 4589 uint64_t wwn; 4590 int rc; 4591 LPFC_MBOXQ_t *mboxq; 4592 MAILBOX_t *mb; 4593 4594 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 4595 GFP_KERNEL); 4596 if (!mboxq) 4597 return (uint64_t)-1; 4598 4599 /* First get WWN of HBA instance */ 4600 lpfc_read_nv(phba, mboxq); 4601 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4602 if (rc != MBX_SUCCESS) { 4603 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4604 "6019 Mailbox failed , mbxCmd x%x " 4605 "READ_NV, mbxStatus x%x\n", 4606 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 4607 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 4608 mempool_free(mboxq, phba->mbox_mem_pool); 4609 return (uint64_t) -1; 4610 } 4611 mb = &mboxq->u.mb; 4612 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t)); 4613 /* wwn is WWPN of HBA instance */ 4614 mempool_free(mboxq, phba->mbox_mem_pool); 4615 if (phba->sli_rev == LPFC_SLI_REV4) 4616 return be64_to_cpu(wwn); 4617 else 4618 return rol64(wwn, 32); 4619 } 4620 4621 static unsigned short lpfc_get_sg_tablesize(struct lpfc_hba *phba) 4622 { 4623 if (phba->sli_rev == LPFC_SLI_REV4) 4624 if (phba->cfg_xpsgl && !phba->nvmet_support) 4625 return LPFC_MAX_SG_TABLESIZE; 4626 else 4627 return phba->cfg_scsi_seg_cnt; 4628 else 4629 return phba->cfg_sg_seg_cnt; 4630 } 4631 4632 /** 4633 * lpfc_vmid_res_alloc - Allocates resources for VMID 4634 * @phba: pointer to lpfc hba data structure. 4635 * @vport: pointer to vport data structure 4636 * 4637 * This routine allocated the resources needed for the VMID. 
4638 * 4639 * Return codes 4640 * 0 on Success 4641 * Non-0 on Failure 4642 */ 4643 static int 4644 lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport) 4645 { 4646 /* VMID feature is supported only on SLI4 */ 4647 if (phba->sli_rev == LPFC_SLI_REV3) { 4648 phba->cfg_vmid_app_header = 0; 4649 phba->cfg_vmid_priority_tagging = 0; 4650 } 4651 4652 if (lpfc_is_vmid_enabled(phba)) { 4653 vport->vmid = 4654 kcalloc(phba->cfg_max_vmid, sizeof(struct lpfc_vmid), 4655 GFP_KERNEL); 4656 if (!vport->vmid) 4657 return -ENOMEM; 4658 4659 rwlock_init(&vport->vmid_lock); 4660 4661 /* Set the VMID parameters for the vport */ 4662 vport->vmid_priority_tagging = phba->cfg_vmid_priority_tagging; 4663 vport->vmid_inactivity_timeout = 4664 phba->cfg_vmid_inactivity_timeout; 4665 vport->max_vmid = phba->cfg_max_vmid; 4666 vport->cur_vmid_cnt = 0; 4667 4668 vport->vmid_priority_range = bitmap_zalloc 4669 (LPFC_VMID_MAX_PRIORITY_RANGE, GFP_KERNEL); 4670 4671 if (!vport->vmid_priority_range) { 4672 kfree(vport->vmid); 4673 return -ENOMEM; 4674 } 4675 4676 hash_init(vport->hash_table); 4677 } 4678 return 0; 4679 } 4680 4681 /** 4682 * lpfc_create_port - Create an FC port 4683 * @phba: pointer to lpfc hba data structure. 4684 * @instance: a unique integer ID to this FC port. 4685 * @dev: pointer to the device data structure. 4686 * 4687 * This routine creates a FC port for the upper layer protocol. The FC port 4688 * can be created on top of either a physical port or a virtual port provided 4689 * by the HBA. This routine also allocates a SCSI host data structure (shost) 4690 * and associates the FC port created before adding the shost into the SCSI 4691 * layer. 4692 * 4693 * Return codes 4694 * @vport - pointer to the virtual N_Port data structure. 4695 * NULL - port create failed. 
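 *
 * Illustrative use only (the real instance and device come from the
 * driver's probe and vport-create paths):
 *
 *   vport = lpfc_create_port(phba, lpfc_get_instance(), &phba->pcidev->dev);
 *   if (!vport)
 *           return NULL;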
4696 **/ 4697 struct lpfc_vport * 4698 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) 4699 { 4700 struct lpfc_vport *vport; 4701 struct Scsi_Host *shost = NULL; 4702 struct scsi_host_template *template; 4703 int error = 0; 4704 int i; 4705 uint64_t wwn; 4706 bool use_no_reset_hba = false; 4707 int rc; 4708 4709 if (lpfc_no_hba_reset_cnt) { 4710 if (phba->sli_rev < LPFC_SLI_REV4 && 4711 dev == &phba->pcidev->dev) { 4712 /* Reset the port first */ 4713 lpfc_sli_brdrestart(phba); 4714 rc = lpfc_sli_chipset_init(phba); 4715 if (rc) 4716 return NULL; 4717 } 4718 wwn = lpfc_get_wwpn(phba); 4719 } 4720 4721 for (i = 0; i < lpfc_no_hba_reset_cnt; i++) { 4722 if (wwn == lpfc_no_hba_reset[i]) { 4723 lpfc_printf_log(phba, KERN_ERR, 4724 LOG_TRACE_EVENT, 4725 "6020 Setting use_no_reset port=%llx\n", 4726 wwn); 4727 use_no_reset_hba = true; 4728 break; 4729 } 4730 } 4731 4732 /* Seed template for SCSI host registration */ 4733 if (dev == &phba->pcidev->dev) { 4734 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 4735 /* Seed physical port template */ 4736 template = &lpfc_template; 4737 4738 if (use_no_reset_hba) 4739 /* template is for a no reset SCSI Host */ 4740 template->eh_host_reset_handler = NULL; 4741 4742 /* Seed updated value of sg_tablesize */ 4743 template->sg_tablesize = lpfc_get_sg_tablesize(phba); 4744 } else { 4745 /* NVMET is for physical port only */ 4746 template = &lpfc_template_nvme; 4747 } 4748 } else { 4749 /* Seed vport template */ 4750 template = &lpfc_vport_template; 4751 4752 /* Seed updated value of sg_tablesize */ 4753 template->sg_tablesize = lpfc_get_sg_tablesize(phba); 4754 } 4755 4756 shost = scsi_host_alloc(template, sizeof(struct lpfc_vport)); 4757 if (!shost) 4758 goto out; 4759 4760 vport = (struct lpfc_vport *) shost->hostdata; 4761 vport->phba = phba; 4762 vport->load_flag |= FC_LOADING; 4763 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 4764 vport->fc_rscn_flush = 0; 4765 lpfc_get_vport_cfgparam(vport); 4766 4767 /* Adjust value in vport */ 4768 vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type; 4769 4770 shost->unique_id = instance; 4771 shost->max_id = LPFC_MAX_TARGET; 4772 shost->max_lun = vport->cfg_max_luns; 4773 shost->this_id = -1; 4774 shost->max_cmd_len = 16; 4775 4776 if (phba->sli_rev == LPFC_SLI_REV4) { 4777 if (!phba->cfg_fcp_mq_threshold || 4778 phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue) 4779 phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue; 4780 4781 shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(), 4782 phba->cfg_fcp_mq_threshold); 4783 4784 shost->dma_boundary = 4785 phba->sli4_hba.pc_sli4_params.sge_supp_len-1; 4786 } else 4787 /* SLI-3 has a limited number of hardware queues (3), 4788 * thus there is only one for FCP processing. 4789 */ 4790 shost->nr_hw_queues = 1; 4791 4792 /* 4793 * Set initial can_queue value since 0 is no longer supported and 4794 * scsi_add_host will fail. This will be adjusted later based on the 4795 * max xri value determined in hba setup. 
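 * For example, a cfg_hba_queue_depth of 8192 (value illustrative only)
 * seeds can_queue to 8192 - 10 = 8182 here, before it is refined against
 * the port's reported XRI resources.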
4796 */ 4797 shost->can_queue = phba->cfg_hba_queue_depth - 10; 4798 if (dev != &phba->pcidev->dev) { 4799 shost->transportt = lpfc_vport_transport_template; 4800 vport->port_type = LPFC_NPIV_PORT; 4801 } else { 4802 shost->transportt = lpfc_transport_template; 4803 vport->port_type = LPFC_PHYSICAL_PORT; 4804 } 4805 4806 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 4807 "9081 CreatePort TMPLATE type %x TBLsize %d " 4808 "SEGcnt %d/%d\n", 4809 vport->port_type, shost->sg_tablesize, 4810 phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt); 4811 4812 /* Allocate the resources for VMID */ 4813 rc = lpfc_vmid_res_alloc(phba, vport); 4814 4815 if (rc) 4816 goto out_put_shost; 4817 4818 /* Initialize all internally managed lists. */ 4819 INIT_LIST_HEAD(&vport->fc_nodes); 4820 INIT_LIST_HEAD(&vport->rcv_buffer_list); 4821 spin_lock_init(&vport->work_port_lock); 4822 4823 timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0); 4824 4825 timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0); 4826 4827 timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0); 4828 4829 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) 4830 lpfc_setup_bg(phba, shost); 4831 4832 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); 4833 if (error) 4834 goto out_free_vmid; 4835 4836 spin_lock_irq(&phba->port_list_lock); 4837 list_add_tail(&vport->listentry, &phba->port_list); 4838 spin_unlock_irq(&phba->port_list_lock); 4839 return vport; 4840 4841 out_free_vmid: 4842 kfree(vport->vmid); 4843 bitmap_free(vport->vmid_priority_range); 4844 out_put_shost: 4845 scsi_host_put(shost); 4846 out: 4847 return NULL; 4848 } 4849 4850 /** 4851 * destroy_port - destroy an FC port 4852 * @vport: pointer to an lpfc virtual N_Port data structure. 4853 * 4854 * This routine destroys a FC port from the upper layer protocol. All the 4855 * resources associated with the port are released. 4856 **/ 4857 void 4858 destroy_port(struct lpfc_vport *vport) 4859 { 4860 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4861 struct lpfc_hba *phba = vport->phba; 4862 4863 lpfc_debugfs_terminate(vport); 4864 fc_remove_host(shost); 4865 scsi_remove_host(shost); 4866 4867 spin_lock_irq(&phba->port_list_lock); 4868 list_del_init(&vport->listentry); 4869 spin_unlock_irq(&phba->port_list_lock); 4870 4871 lpfc_cleanup(vport); 4872 return; 4873 } 4874 4875 /** 4876 * lpfc_get_instance - Get a unique integer ID 4877 * 4878 * This routine allocates a unique integer ID from lpfc_hba_index pool. It 4879 * uses the kernel idr facility to perform the task. 4880 * 4881 * Return codes: 4882 * instance - a unique integer ID allocated as the new instance. 4883 * -1 - lpfc get instance failed. 4884 **/ 4885 int 4886 lpfc_get_instance(void) 4887 { 4888 int ret; 4889 4890 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL); 4891 return ret < 0 ? -1 : ret; 4892 } 4893 4894 /** 4895 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done 4896 * @shost: pointer to SCSI host data structure. 4897 * @time: elapsed time of the scan in jiffies. 4898 * 4899 * This routine is called by the SCSI layer with a SCSI host to determine 4900 * whether the scan host is finished. 4901 * 4902 * Note: there is no scan_start function as adapter initialization will have 4903 * asynchronously kicked off the link initialization. 4904 * 4905 * Return codes 4906 * 0 - SCSI host scan is not over yet. 4907 * 1 - SCSI host scan is over. 
4908 **/ 4909 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) 4910 { 4911 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4912 struct lpfc_hba *phba = vport->phba; 4913 int stat = 0; 4914 4915 spin_lock_irq(shost->host_lock); 4916 4917 if (vport->load_flag & FC_UNLOADING) { 4918 stat = 1; 4919 goto finished; 4920 } 4921 if (time >= msecs_to_jiffies(30 * 1000)) { 4922 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4923 "0461 Scanning longer than 30 " 4924 "seconds. Continuing initialization\n"); 4925 stat = 1; 4926 goto finished; 4927 } 4928 if (time >= msecs_to_jiffies(15 * 1000) && 4929 phba->link_state <= LPFC_LINK_DOWN) { 4930 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4931 "0465 Link down longer than 15 " 4932 "seconds. Continuing initialization\n"); 4933 stat = 1; 4934 goto finished; 4935 } 4936 4937 if (vport->port_state != LPFC_VPORT_READY) 4938 goto finished; 4939 if (vport->num_disc_nodes || vport->fc_prli_sent) 4940 goto finished; 4941 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000)) 4942 goto finished; 4943 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) 4944 goto finished; 4945 4946 stat = 1; 4947 4948 finished: 4949 spin_unlock_irq(shost->host_lock); 4950 return stat; 4951 } 4952 4953 static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost) 4954 { 4955 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; 4956 struct lpfc_hba *phba = vport->phba; 4957 4958 fc_host_supported_speeds(shost) = 0; 4959 /* 4960 * Avoid reporting supported link speed for FCoE as it can't be 4961 * controlled via FCoE. 4962 */ 4963 if (phba->hba_flag & HBA_FCOE_MODE) 4964 return; 4965 4966 if (phba->lmt & LMT_256Gb) 4967 fc_host_supported_speeds(shost) |= FC_PORTSPEED_256GBIT; 4968 if (phba->lmt & LMT_128Gb) 4969 fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT; 4970 if (phba->lmt & LMT_64Gb) 4971 fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT; 4972 if (phba->lmt & LMT_32Gb) 4973 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT; 4974 if (phba->lmt & LMT_16Gb) 4975 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT; 4976 if (phba->lmt & LMT_10Gb) 4977 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; 4978 if (phba->lmt & LMT_8Gb) 4979 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT; 4980 if (phba->lmt & LMT_4Gb) 4981 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; 4982 if (phba->lmt & LMT_2Gb) 4983 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT; 4984 if (phba->lmt & LMT_1Gb) 4985 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; 4986 } 4987 4988 /** 4989 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port 4990 * @shost: pointer to SCSI host data structure. 4991 * 4992 * This routine initializes a given SCSI host attributes on a FC port. The 4993 * SCSI host can be either on top of a physical port or a virtual port. 4994 **/ 4995 void lpfc_host_attrib_init(struct Scsi_Host *shost) 4996 { 4997 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4998 struct lpfc_hba *phba = vport->phba; 4999 /* 5000 * Set fixed host attributes. Must done after lpfc_sli_hba_setup(). 
5001 */ 5002 5003 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 5004 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 5005 fc_host_supported_classes(shost) = FC_COS_CLASS3; 5006 5007 memset(fc_host_supported_fc4s(shost), 0, 5008 sizeof(fc_host_supported_fc4s(shost))); 5009 fc_host_supported_fc4s(shost)[2] = 1; 5010 fc_host_supported_fc4s(shost)[7] = 1; 5011 5012 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), 5013 sizeof fc_host_symbolic_name(shost)); 5014 5015 lpfc_host_supported_speeds_set(shost); 5016 5017 fc_host_maxframe_size(shost) = 5018 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | 5019 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; 5020 5021 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo; 5022 5023 /* This value is also unchanging */ 5024 memset(fc_host_active_fc4s(shost), 0, 5025 sizeof(fc_host_active_fc4s(shost))); 5026 fc_host_active_fc4s(shost)[2] = 1; 5027 fc_host_active_fc4s(shost)[7] = 1; 5028 5029 fc_host_max_npiv_vports(shost) = phba->max_vpi; 5030 spin_lock_irq(shost->host_lock); 5031 vport->load_flag &= ~FC_LOADING; 5032 spin_unlock_irq(shost->host_lock); 5033 } 5034 5035 /** 5036 * lpfc_stop_port_s3 - Stop SLI3 device port 5037 * @phba: pointer to lpfc hba data structure. 5038 * 5039 * This routine is invoked to stop an SLI3 device port, it stops the device 5040 * from generating interrupts and stops the device driver's timers for the 5041 * device. 5042 **/ 5043 static void 5044 lpfc_stop_port_s3(struct lpfc_hba *phba) 5045 { 5046 /* Clear all interrupt enable conditions */ 5047 writel(0, phba->HCregaddr); 5048 readl(phba->HCregaddr); /* flush */ 5049 /* Clear all pending interrupts */ 5050 writel(0xffffffff, phba->HAregaddr); 5051 readl(phba->HAregaddr); /* flush */ 5052 5053 /* Reset some HBA SLI setup states */ 5054 lpfc_stop_hba_timers(phba); 5055 phba->pport->work_port_events = 0; 5056 } 5057 5058 /** 5059 * lpfc_stop_port_s4 - Stop SLI4 device port 5060 * @phba: pointer to lpfc hba data structure. 5061 * 5062 * This routine is invoked to stop an SLI4 device port, it stops the device 5063 * from generating interrupts and stops the device driver's timers for the 5064 * device. 5065 **/ 5066 static void 5067 lpfc_stop_port_s4(struct lpfc_hba *phba) 5068 { 5069 /* Reset some HBA SLI4 setup states */ 5070 lpfc_stop_hba_timers(phba); 5071 if (phba->pport) 5072 phba->pport->work_port_events = 0; 5073 phba->sli4_hba.intr_enable = 0; 5074 } 5075 5076 /** 5077 * lpfc_stop_port - Wrapper function for stopping hba port 5078 * @phba: Pointer to HBA context object. 5079 * 5080 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from 5081 * the API jump table function pointer from the lpfc_hba struct. 5082 **/ 5083 void 5084 lpfc_stop_port(struct lpfc_hba *phba) 5085 { 5086 phba->lpfc_stop_port(phba); 5087 5088 if (phba->wq) 5089 flush_workqueue(phba->wq); 5090 } 5091 5092 /** 5093 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer 5094 * @phba: Pointer to hba for which this call is being executed. 5095 * 5096 * This routine starts the timer waiting for the FCF rediscovery to complete. 
5097 **/
5098 void
5099 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
5100 {
5101 	unsigned long fcf_redisc_wait_tmo =
5102 		(jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
5103 	/* Start fcf rediscovery wait period timer */
5104 	mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
5105 	spin_lock_irq(&phba->hbalock);
5106 	/* Allow action to new fcf asynchronous event */
5107 	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
5108 	/* Mark the FCF rediscovery pending state */
5109 	phba->fcf.fcf_flag |= FCF_REDISC_PEND;
5110 	spin_unlock_irq(&phba->hbalock);
5111 }
5112 
5113 /**
5114  * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
5115  * @t: Timer context used to obtain the pointer to lpfc hba data structure.
5116  *
5117  * This routine is invoked when the wait for FCF table rediscovery has
5118  * timed out. If new FCF record(s) have been discovered during the
5119  * wait period, a new FCF event shall be added to the FCoE async event
5120  * list, and the worker thread shall be woken up to process it from the
5121  * worker thread context.
5122  **/
5123 static void
5124 lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
5125 {
5126 	struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);
5127 
5128 	/* Don't send FCF rediscovery event if timer cancelled */
5129 	spin_lock_irq(&phba->hbalock);
5130 	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
5131 		spin_unlock_irq(&phba->hbalock);
5132 		return;
5133 	}
5134 	/* Clear FCF rediscovery timer pending flag */
5135 	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
5136 	/* FCF rediscovery event to worker thread */
5137 	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
5138 	spin_unlock_irq(&phba->hbalock);
5139 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
5140 			"2776 FCF rediscover quiescent timer expired\n");
5141 	/* wake up worker thread */
5142 	lpfc_worker_wake_up(phba);
5143 }
5144 
5145 /**
5146  * lpfc_vmid_poll - VMID timeout detection
5147  * @t: Timer context used to obtain the pointer to lpfc hba data structure.
5148  *
5149  * This routine is invoked when a VM has issued no I/O for the specified
5150  * amount of time. When this situation is detected, the VMID has to be
5151  * deregistered from the switch and all the local resources freed. The VMID
5152  * will be reassigned to the VM once the I/O begins.
5153  **/
5154 static void
5155 lpfc_vmid_poll(struct timer_list *t)
5156 {
5157 	struct lpfc_hba *phba = from_timer(phba, t, inactive_vmid_poll);
5158 	u32 wake_up = 0;
5159 
5160 	/* check if there is a need to issue QFPA */
5161 	if (phba->pport->vmid_priority_tagging) {
5162 		wake_up = 1;
5163 		phba->pport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
5164 	}
5165 
5166 	/* Is the vmid inactivity timer enabled? */
5167 	if (phba->pport->vmid_inactivity_timeout ||
5168 	    phba->pport->load_flag & FC_DEREGISTER_ALL_APP_ID) {
5169 		wake_up = 1;
5170 		phba->pport->work_port_events |= WORKER_CHECK_INACTIVE_VMID;
5171 	}
5172 
5173 	if (wake_up)
5174 		lpfc_worker_wake_up(phba);
5175 
5176 	/* restart the timer for the next iteration */
5177 	mod_timer(&phba->inactive_vmid_poll, jiffies + msecs_to_jiffies(1000 *
5178 							LPFC_VMID_TIMER));
5179 }
5180 
5181 /**
5182  * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
5183  * @phba: pointer to lpfc hba data structure.
5184  * @acqe_link: pointer to the async link completion queue entry.
5185  *
5186  * This routine is to parse the SLI4 link-attention link fault code.
5187 **/ 5188 static void 5189 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba, 5190 struct lpfc_acqe_link *acqe_link) 5191 { 5192 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) { 5193 case LPFC_ASYNC_LINK_FAULT_NONE: 5194 case LPFC_ASYNC_LINK_FAULT_LOCAL: 5195 case LPFC_ASYNC_LINK_FAULT_REMOTE: 5196 case LPFC_ASYNC_LINK_FAULT_LR_LRR: 5197 break; 5198 default: 5199 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5200 "0398 Unknown link fault code: x%x\n", 5201 bf_get(lpfc_acqe_link_fault, acqe_link)); 5202 break; 5203 } 5204 } 5205 5206 /** 5207 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type 5208 * @phba: pointer to lpfc hba data structure. 5209 * @acqe_link: pointer to the async link completion queue entry. 5210 * 5211 * This routine is to parse the SLI4 link attention type and translate it 5212 * into the base driver's link attention type coding. 5213 * 5214 * Return: Link attention type in terms of base driver's coding. 5215 **/ 5216 static uint8_t 5217 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba, 5218 struct lpfc_acqe_link *acqe_link) 5219 { 5220 uint8_t att_type; 5221 5222 switch (bf_get(lpfc_acqe_link_status, acqe_link)) { 5223 case LPFC_ASYNC_LINK_STATUS_DOWN: 5224 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN: 5225 att_type = LPFC_ATT_LINK_DOWN; 5226 break; 5227 case LPFC_ASYNC_LINK_STATUS_UP: 5228 /* Ignore physical link up events - wait for logical link up */ 5229 att_type = LPFC_ATT_RESERVED; 5230 break; 5231 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP: 5232 att_type = LPFC_ATT_LINK_UP; 5233 break; 5234 default: 5235 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5236 "0399 Invalid link attention type: x%x\n", 5237 bf_get(lpfc_acqe_link_status, acqe_link)); 5238 att_type = LPFC_ATT_RESERVED; 5239 break; 5240 } 5241 return att_type; 5242 } 5243 5244 /** 5245 * lpfc_sli_port_speed_get - Get sli3 link speed code to link speed 5246 * @phba: pointer to lpfc hba data structure. 5247 * 5248 * This routine is to get an SLI3 FC port's link speed in Mbps. 5249 * 5250 * Return: link speed in terms of Mbps. 5251 **/ 5252 uint32_t 5253 lpfc_sli_port_speed_get(struct lpfc_hba *phba) 5254 { 5255 uint32_t link_speed; 5256 5257 if (!lpfc_is_link_up(phba)) 5258 return 0; 5259 5260 if (phba->sli_rev <= LPFC_SLI_REV3) { 5261 switch (phba->fc_linkspeed) { 5262 case LPFC_LINK_SPEED_1GHZ: 5263 link_speed = 1000; 5264 break; 5265 case LPFC_LINK_SPEED_2GHZ: 5266 link_speed = 2000; 5267 break; 5268 case LPFC_LINK_SPEED_4GHZ: 5269 link_speed = 4000; 5270 break; 5271 case LPFC_LINK_SPEED_8GHZ: 5272 link_speed = 8000; 5273 break; 5274 case LPFC_LINK_SPEED_10GHZ: 5275 link_speed = 10000; 5276 break; 5277 case LPFC_LINK_SPEED_16GHZ: 5278 link_speed = 16000; 5279 break; 5280 default: 5281 link_speed = 0; 5282 } 5283 } else { 5284 if (phba->sli4_hba.link_state.logical_speed) 5285 link_speed = 5286 phba->sli4_hba.link_state.logical_speed; 5287 else 5288 link_speed = phba->sli4_hba.link_state.speed; 5289 } 5290 return link_speed; 5291 } 5292 5293 /** 5294 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed 5295 * @phba: pointer to lpfc hba data structure. 5296 * @evt_code: asynchronous event code. 5297 * @speed_code: asynchronous event link speed code. 5298 * 5299 * This routine is to parse the giving SLI4 async event link speed code into 5300 * value of Mbps for the link speed. 5301 * 5302 * Return: link speed in terms of Mbps. 
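 *
 * For example, evt_code LPFC_TRAILER_CODE_FC with speed_code
 * LPFC_FC_LA_SPEED_32G translates to 32000 Mbps, while an unrecognized
 * speed code translates to 0.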
5303 **/ 5304 static uint32_t 5305 lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code, 5306 uint8_t speed_code) 5307 { 5308 uint32_t port_speed; 5309 5310 switch (evt_code) { 5311 case LPFC_TRAILER_CODE_LINK: 5312 switch (speed_code) { 5313 case LPFC_ASYNC_LINK_SPEED_ZERO: 5314 port_speed = 0; 5315 break; 5316 case LPFC_ASYNC_LINK_SPEED_10MBPS: 5317 port_speed = 10; 5318 break; 5319 case LPFC_ASYNC_LINK_SPEED_100MBPS: 5320 port_speed = 100; 5321 break; 5322 case LPFC_ASYNC_LINK_SPEED_1GBPS: 5323 port_speed = 1000; 5324 break; 5325 case LPFC_ASYNC_LINK_SPEED_10GBPS: 5326 port_speed = 10000; 5327 break; 5328 case LPFC_ASYNC_LINK_SPEED_20GBPS: 5329 port_speed = 20000; 5330 break; 5331 case LPFC_ASYNC_LINK_SPEED_25GBPS: 5332 port_speed = 25000; 5333 break; 5334 case LPFC_ASYNC_LINK_SPEED_40GBPS: 5335 port_speed = 40000; 5336 break; 5337 case LPFC_ASYNC_LINK_SPEED_100GBPS: 5338 port_speed = 100000; 5339 break; 5340 default: 5341 port_speed = 0; 5342 } 5343 break; 5344 case LPFC_TRAILER_CODE_FC: 5345 switch (speed_code) { 5346 case LPFC_FC_LA_SPEED_UNKNOWN: 5347 port_speed = 0; 5348 break; 5349 case LPFC_FC_LA_SPEED_1G: 5350 port_speed = 1000; 5351 break; 5352 case LPFC_FC_LA_SPEED_2G: 5353 port_speed = 2000; 5354 break; 5355 case LPFC_FC_LA_SPEED_4G: 5356 port_speed = 4000; 5357 break; 5358 case LPFC_FC_LA_SPEED_8G: 5359 port_speed = 8000; 5360 break; 5361 case LPFC_FC_LA_SPEED_10G: 5362 port_speed = 10000; 5363 break; 5364 case LPFC_FC_LA_SPEED_16G: 5365 port_speed = 16000; 5366 break; 5367 case LPFC_FC_LA_SPEED_32G: 5368 port_speed = 32000; 5369 break; 5370 case LPFC_FC_LA_SPEED_64G: 5371 port_speed = 64000; 5372 break; 5373 case LPFC_FC_LA_SPEED_128G: 5374 port_speed = 128000; 5375 break; 5376 case LPFC_FC_LA_SPEED_256G: 5377 port_speed = 256000; 5378 break; 5379 default: 5380 port_speed = 0; 5381 } 5382 break; 5383 default: 5384 port_speed = 0; 5385 } 5386 return port_speed; 5387 } 5388 5389 /** 5390 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event 5391 * @phba: pointer to lpfc hba data structure. 5392 * @acqe_link: pointer to the async link completion queue entry. 5393 * 5394 * This routine is to handle the SLI4 asynchronous FCoE link event. 
5395 **/ 5396 static void 5397 lpfc_sli4_async_link_evt(struct lpfc_hba *phba, 5398 struct lpfc_acqe_link *acqe_link) 5399 { 5400 LPFC_MBOXQ_t *pmb; 5401 MAILBOX_t *mb; 5402 struct lpfc_mbx_read_top *la; 5403 uint8_t att_type; 5404 int rc; 5405 5406 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link); 5407 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP) 5408 return; 5409 phba->fcoe_eventtag = acqe_link->event_tag; 5410 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5411 if (!pmb) { 5412 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5413 "0395 The mboxq allocation failed\n"); 5414 return; 5415 } 5416 5417 rc = lpfc_mbox_rsrc_prep(phba, pmb); 5418 if (rc) { 5419 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5420 "0396 mailbox allocation failed\n"); 5421 goto out_free_pmb; 5422 } 5423 5424 /* Cleanup any outstanding ELS commands */ 5425 lpfc_els_flush_all_cmd(phba); 5426 5427 /* Block ELS IOCBs until we have done process link event */ 5428 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT; 5429 5430 /* Update link event statistics */ 5431 phba->sli.slistat.link_event++; 5432 5433 /* Create lpfc_handle_latt mailbox command from link ACQE */ 5434 lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf); 5435 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 5436 pmb->vport = phba->pport; 5437 5438 /* Keep the link status for extra SLI4 state machine reference */ 5439 phba->sli4_hba.link_state.speed = 5440 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK, 5441 bf_get(lpfc_acqe_link_speed, acqe_link)); 5442 phba->sli4_hba.link_state.duplex = 5443 bf_get(lpfc_acqe_link_duplex, acqe_link); 5444 phba->sli4_hba.link_state.status = 5445 bf_get(lpfc_acqe_link_status, acqe_link); 5446 phba->sli4_hba.link_state.type = 5447 bf_get(lpfc_acqe_link_type, acqe_link); 5448 phba->sli4_hba.link_state.number = 5449 bf_get(lpfc_acqe_link_number, acqe_link); 5450 phba->sli4_hba.link_state.fault = 5451 bf_get(lpfc_acqe_link_fault, acqe_link); 5452 phba->sli4_hba.link_state.logical_speed = 5453 bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10; 5454 5455 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5456 "2900 Async FC/FCoE Link event - Speed:%dGBit " 5457 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d " 5458 "Logical speed:%dMbps Fault:%d\n", 5459 phba->sli4_hba.link_state.speed, 5460 phba->sli4_hba.link_state.topology, 5461 phba->sli4_hba.link_state.status, 5462 phba->sli4_hba.link_state.type, 5463 phba->sli4_hba.link_state.number, 5464 phba->sli4_hba.link_state.logical_speed, 5465 phba->sli4_hba.link_state.fault); 5466 /* 5467 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch 5468 * topology info. Note: Optional for non FC-AL ports. 5469 */ 5470 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 5471 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 5472 if (rc == MBX_NOT_FINISHED) 5473 goto out_free_pmb; 5474 return; 5475 } 5476 /* 5477 * For FCoE Mode: fill in all the topology information we need and call 5478 * the READ_TOPOLOGY completion routine to continue without actually 5479 * sending the READ_TOPOLOGY mailbox command to the port. 
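 * The synthesized completion below reports MBX_SUCCESS and fakes a point-to-point topology so that lpfc_mbx_cmpl_read_topology() runs the same path as a real mailbox completion.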
5480 */ 5481 /* Initialize completion status */ 5482 mb = &pmb->u.mb; 5483 mb->mbxStatus = MBX_SUCCESS; 5484 5485 /* Parse port fault information field */ 5486 lpfc_sli4_parse_latt_fault(phba, acqe_link); 5487 5488 /* Parse and translate link attention fields */ 5489 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop; 5490 la->eventTag = acqe_link->event_tag; 5491 bf_set(lpfc_mbx_read_top_att_type, la, att_type); 5492 bf_set(lpfc_mbx_read_top_link_spd, la, 5493 (bf_get(lpfc_acqe_link_speed, acqe_link))); 5494 5495 /* Fake the following irrelevant fields */ 5496 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT); 5497 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0); 5498 bf_set(lpfc_mbx_read_top_il, la, 0); 5499 bf_set(lpfc_mbx_read_top_pb, la, 0); 5500 bf_set(lpfc_mbx_read_top_fa, la, 0); 5501 bf_set(lpfc_mbx_read_top_mm, la, 0); 5502 5503 /* Invoke the lpfc_handle_latt mailbox command callback function */ 5504 lpfc_mbx_cmpl_read_topology(phba, pmb); 5505 5506 return; 5507 5508 out_free_pmb: 5509 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); 5510 } 5511 5512 /** 5513 * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read 5514 * topology. 5515 * @phba: pointer to lpfc hba data structure. 5516 * @speed_code: asynchronous event link speed code. 5517 * 5518 * This routine is to parse the given SLI4 async event link speed code into 5519 * value of Read topology link speed. 5520 * 5521 * Return: link speed in terms of Read topology. 5522 **/ 5523 static uint8_t 5524 lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code) 5525 { 5526 uint8_t port_speed; 5527 5528 switch (speed_code) { 5529 case LPFC_FC_LA_SPEED_1G: 5530 port_speed = LPFC_LINK_SPEED_1GHZ; 5531 break; 5532 case LPFC_FC_LA_SPEED_2G: 5533 port_speed = LPFC_LINK_SPEED_2GHZ; 5534 break; 5535 case LPFC_FC_LA_SPEED_4G: 5536 port_speed = LPFC_LINK_SPEED_4GHZ; 5537 break; 5538 case LPFC_FC_LA_SPEED_8G: 5539 port_speed = LPFC_LINK_SPEED_8GHZ; 5540 break; 5541 case LPFC_FC_LA_SPEED_16G: 5542 port_speed = LPFC_LINK_SPEED_16GHZ; 5543 break; 5544 case LPFC_FC_LA_SPEED_32G: 5545 port_speed = LPFC_LINK_SPEED_32GHZ; 5546 break; 5547 case LPFC_FC_LA_SPEED_64G: 5548 port_speed = LPFC_LINK_SPEED_64GHZ; 5549 break; 5550 case LPFC_FC_LA_SPEED_128G: 5551 port_speed = LPFC_LINK_SPEED_128GHZ; 5552 break; 5553 case LPFC_FC_LA_SPEED_256G: 5554 port_speed = LPFC_LINK_SPEED_256GHZ; 5555 break; 5556 default: 5557 port_speed = 0; 5558 break; 5559 } 5560 5561 return port_speed; 5562 } 5563 5564 void 5565 lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba) 5566 { 5567 if (!phba->rx_monitor) { 5568 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 5569 "4411 Rx Monitor Info is empty.\n"); 5570 } else { 5571 lpfc_rx_monitor_report(phba, phba->rx_monitor, NULL, 0, 5572 LPFC_MAX_RXMONITOR_DUMP); 5573 } 5574 } 5575 5576 /** 5577 * lpfc_cgn_update_stat - Save data into congestion stats buffer 5578 * @phba: pointer to lpfc hba data structure. 5579 * @dtag: FPIN descriptor received 5580 * 5581 * Increment the FPIN received counter/time when it happens.
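 * Each descriptor tag (link integrity, delivery, peer congestion and congestion) has its own counter and last-seen timestamp in the congestion info buffer; the buffer CRC is recomputed after every update.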
5582 */ 5583 void 5584 lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag) 5585 { 5586 struct lpfc_cgn_info *cp; 5587 struct tm broken; 5588 struct timespec64 cur_time; 5589 u32 cnt; 5590 u32 value; 5591 5592 /* Make sure we have a congestion info buffer */ 5593 if (!phba->cgn_i) 5594 return; 5595 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; 5596 ktime_get_real_ts64(&cur_time); 5597 time64_to_tm(cur_time.tv_sec, 0, &broken); 5598 5599 /* Update congestion statistics */ 5600 switch (dtag) { 5601 case ELS_DTAG_LNK_INTEGRITY: 5602 cnt = le32_to_cpu(cp->link_integ_notification); 5603 cnt++; 5604 cp->link_integ_notification = cpu_to_le32(cnt); 5605 5606 cp->cgn_stat_lnk_month = broken.tm_mon + 1; 5607 cp->cgn_stat_lnk_day = broken.tm_mday; 5608 cp->cgn_stat_lnk_year = broken.tm_year - 100; 5609 cp->cgn_stat_lnk_hour = broken.tm_hour; 5610 cp->cgn_stat_lnk_min = broken.tm_min; 5611 cp->cgn_stat_lnk_sec = broken.tm_sec; 5612 break; 5613 case ELS_DTAG_DELIVERY: 5614 cnt = le32_to_cpu(cp->delivery_notification); 5615 cnt++; 5616 cp->delivery_notification = cpu_to_le32(cnt); 5617 5618 cp->cgn_stat_del_month = broken.tm_mon + 1; 5619 cp->cgn_stat_del_day = broken.tm_mday; 5620 cp->cgn_stat_del_year = broken.tm_year - 100; 5621 cp->cgn_stat_del_hour = broken.tm_hour; 5622 cp->cgn_stat_del_min = broken.tm_min; 5623 cp->cgn_stat_del_sec = broken.tm_sec; 5624 break; 5625 case ELS_DTAG_PEER_CONGEST: 5626 cnt = le32_to_cpu(cp->cgn_peer_notification); 5627 cnt++; 5628 cp->cgn_peer_notification = cpu_to_le32(cnt); 5629 5630 cp->cgn_stat_peer_month = broken.tm_mon + 1; 5631 cp->cgn_stat_peer_day = broken.tm_mday; 5632 cp->cgn_stat_peer_year = broken.tm_year - 100; 5633 cp->cgn_stat_peer_hour = broken.tm_hour; 5634 cp->cgn_stat_peer_min = broken.tm_min; 5635 cp->cgn_stat_peer_sec = broken.tm_sec; 5636 break; 5637 case ELS_DTAG_CONGESTION: 5638 cnt = le32_to_cpu(cp->cgn_notification); 5639 cnt++; 5640 cp->cgn_notification = cpu_to_le32(cnt); 5641 5642 cp->cgn_stat_cgn_month = broken.tm_mon + 1; 5643 cp->cgn_stat_cgn_day = broken.tm_mday; 5644 cp->cgn_stat_cgn_year = broken.tm_year - 100; 5645 cp->cgn_stat_cgn_hour = broken.tm_hour; 5646 cp->cgn_stat_cgn_min = broken.tm_min; 5647 cp->cgn_stat_cgn_sec = broken.tm_sec; 5648 } 5649 if (phba->cgn_fpin_frequency && 5650 phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) { 5651 value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency; 5652 cp->cgn_stat_npm = value; 5653 } 5654 value = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, 5655 LPFC_CGN_CRC32_SEED); 5656 cp->cgn_info_crc = cpu_to_le32(value); 5657 } 5658 5659 /** 5660 * lpfc_cgn_save_evt_cnt - Save data into registered congestion buffer 5661 * @phba: pointer to lpfc hba data structure. 5662 * 5663 * Save the congestion event data every minute. 5664 * On the hour collapse all the minute data into hour data. Every day 5665 * collapse all the hour data into daily data. Separate driver 5666 * and fabric congestion event counters that will be saved out 5667 * to the registered congestion buffer every minute.
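 * Rollup layout: each minute is written to a per-minute slot; every LPFC_MIN_HOUR minutes the per-minute slots are collapsed (summed, with latency and bandwidth averaged) into an hourly slot, and every LPFC_MIN_DAY minutes the hourly slots are collapsed into a daily slot, with daily slots rotating after LPFC_MAX_CGN_DAYS.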
5668 */ 5669 static void 5670 lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba) 5671 { 5672 struct lpfc_cgn_info *cp; 5673 struct tm broken; 5674 struct timespec64 cur_time; 5675 uint32_t i, index; 5676 uint16_t value, mvalue; 5677 uint64_t bps; 5678 uint32_t mbps; 5679 uint32_t dvalue, wvalue, lvalue, avalue; 5680 uint64_t latsum; 5681 __le16 *ptr; 5682 __le32 *lptr; 5683 __le16 *mptr; 5684 5685 /* Make sure we have a congestion info buffer */ 5686 if (!phba->cgn_i) 5687 return; 5688 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; 5689 5690 if (time_before(jiffies, phba->cgn_evt_timestamp)) 5691 return; 5692 phba->cgn_evt_timestamp = jiffies + 5693 msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN); 5694 phba->cgn_evt_minute++; 5695 5696 /* We should get to this point in the routine on 1 minute intervals */ 5697 5698 ktime_get_real_ts64(&cur_time); 5699 time64_to_tm(cur_time.tv_sec, 0, &broken); 5700 5701 if (phba->cgn_fpin_frequency && 5702 phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) { 5703 value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency; 5704 cp->cgn_stat_npm = value; 5705 } 5706 5707 /* Read and clear the latency counters for this minute */ 5708 lvalue = atomic_read(&phba->cgn_latency_evt_cnt); 5709 latsum = atomic64_read(&phba->cgn_latency_evt); 5710 atomic_set(&phba->cgn_latency_evt_cnt, 0); 5711 atomic64_set(&phba->cgn_latency_evt, 0); 5712 5713 /* We need to store MB/sec bandwidth in the congestion information. 5714 * block_cnt is count of 512 byte blocks for the entire minute, 5715 * bps will get bytes per sec before finally converting to MB/sec. 5716 */ 5717 bps = div_u64(phba->rx_block_cnt, LPFC_SEC_MIN) * 512; 5718 phba->rx_block_cnt = 0; 5719 mvalue = bps / (1024 * 1024); /* convert to MB/sec */ 5720 5721 /* Every minute */ 5722 /* cgn parameters */ 5723 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode; 5724 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0; 5725 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1; 5726 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2; 5727 5728 /* Fill in default LUN qdepth */ 5729 value = (uint16_t)(phba->pport->cfg_lun_queue_depth); 5730 cp->cgn_lunq = cpu_to_le16(value); 5731 5732 /* Record congestion buffer info - every minute 5733 * cgn_driver_evt_cnt (Driver events) 5734 * cgn_fabric_warn_cnt (Congestion Warnings) 5735 * cgn_latency_evt_cnt / cgn_latency_evt (IO Latency) 5736 * cgn_fabric_alarm_cnt (Congestion Alarms) 5737 */ 5738 index = ++cp->cgn_index_minute; 5739 if (cp->cgn_index_minute == LPFC_MIN_HOUR) { 5740 cp->cgn_index_minute = 0; 5741 index = 0; 5742 } 5743 5744 /* Get the number of driver events in this sample and reset counter */ 5745 dvalue = atomic_read(&phba->cgn_driver_evt_cnt); 5746 atomic_set(&phba->cgn_driver_evt_cnt, 0); 5747 5748 /* Get the number of warning events - FPIN and Signal for this minute */ 5749 wvalue = 0; 5750 if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) || 5751 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY || 5752 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) 5753 wvalue = atomic_read(&phba->cgn_fabric_warn_cnt); 5754 atomic_set(&phba->cgn_fabric_warn_cnt, 0); 5755 5756 /* Get the number of alarm events - FPIN and Signal for this minute */ 5757 avalue = 0; 5758 if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) || 5759 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) 5760 avalue = atomic_read(&phba->cgn_fabric_alarm_cnt); 5761 atomic_set(&phba->cgn_fabric_alarm_cnt, 0); 5762 5763 /* Collect the driver, warning, alarm and latency counts for this 5764 * minute into the driver congestion buffer. 
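 * The latency slot stores the average per-IO latency for the minute (latsum / lvalue, or 0 when no IO completed) and the bandwidth slot stores the MB/sec value computed above.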
5765 */ 5766 ptr = &cp->cgn_drvr_min[index]; 5767 value = (uint16_t)dvalue; 5768 *ptr = cpu_to_le16(value); 5769 5770 ptr = &cp->cgn_warn_min[index]; 5771 value = (uint16_t)wvalue; 5772 *ptr = cpu_to_le16(value); 5773 5774 ptr = &cp->cgn_alarm_min[index]; 5775 value = (uint16_t)avalue; 5776 *ptr = cpu_to_le16(value); 5777 5778 lptr = &cp->cgn_latency_min[index]; 5779 if (lvalue) { 5780 lvalue = (uint32_t)div_u64(latsum, lvalue); 5781 *lptr = cpu_to_le32(lvalue); 5782 } else { 5783 *lptr = 0; 5784 } 5785 5786 /* Collect the bandwidth value into the driver's congestion buffer. */ 5787 mptr = &cp->cgn_bw_min[index]; 5788 *mptr = cpu_to_le16(mvalue); 5789 5790 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 5791 "2418 Congestion Info - minute (%d): %d %d %d %d %d\n", 5792 index, dvalue, wvalue, *lptr, mvalue, avalue); 5793 5794 /* Every hour */ 5795 if ((phba->cgn_evt_minute % LPFC_MIN_HOUR) == 0) { 5796 /* Record congestion buffer info - every hour 5797 * Collapse all minutes into an hour 5798 */ 5799 index = ++cp->cgn_index_hour; 5800 if (cp->cgn_index_hour == LPFC_HOUR_DAY) { 5801 cp->cgn_index_hour = 0; 5802 index = 0; 5803 } 5804 5805 dvalue = 0; 5806 wvalue = 0; 5807 lvalue = 0; 5808 avalue = 0; 5809 mvalue = 0; 5810 mbps = 0; 5811 for (i = 0; i < LPFC_MIN_HOUR; i++) { 5812 dvalue += le16_to_cpu(cp->cgn_drvr_min[i]); 5813 wvalue += le16_to_cpu(cp->cgn_warn_min[i]); 5814 lvalue += le32_to_cpu(cp->cgn_latency_min[i]); 5815 mbps += le16_to_cpu(cp->cgn_bw_min[i]); 5816 avalue += le16_to_cpu(cp->cgn_alarm_min[i]); 5817 } 5818 if (lvalue) /* Avg of latency averages */ 5819 lvalue /= LPFC_MIN_HOUR; 5820 if (mbps) /* Avg of Bandwidth averages */ 5821 mvalue = mbps / LPFC_MIN_HOUR; 5822 5823 lptr = &cp->cgn_drvr_hr[index]; 5824 *lptr = cpu_to_le32(dvalue); 5825 lptr = &cp->cgn_warn_hr[index]; 5826 *lptr = cpu_to_le32(wvalue); 5827 lptr = &cp->cgn_latency_hr[index]; 5828 *lptr = cpu_to_le32(lvalue); 5829 mptr = &cp->cgn_bw_hr[index]; 5830 *mptr = cpu_to_le16(mvalue); 5831 lptr = &cp->cgn_alarm_hr[index]; 5832 *lptr = cpu_to_le32(avalue); 5833 5834 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 5835 "2419 Congestion Info - hour " 5836 "(%d): %d %d %d %d %d\n", 5837 index, dvalue, wvalue, lvalue, mvalue, avalue); 5838 } 5839 5840 /* Every day */ 5841 if ((phba->cgn_evt_minute % LPFC_MIN_DAY) == 0) { 5842 /* Record congestion buffer info - every day 5843 * Collapse all hours into a day. Rotate days 5844 * after LPFC_MAX_CGN_DAYS. 5845 */ 5846 index = ++cp->cgn_index_day; 5847 if (cp->cgn_index_day == LPFC_MAX_CGN_DAYS) { 5848 cp->cgn_index_day = 0; 5849 index = 0; 5850 } 5851 5852 /* Anytime we overwrite daily index 0, after we wrap, 5853 * we will be overwriting the oldest day, so we must 5854 * update the congestion data start time for that day. 5855 * That start time should have previously been saved after 5856 * we wrote the last day's worth of data.
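 * HBA_CGN_DAY_WRAP and cgn_daily_ts are set at the bottom of this block, when the last daily slot (LPFC_MAX_CGN_DAYS - 1) is written.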
5857 */ 5858 if ((phba->hba_flag & HBA_CGN_DAY_WRAP) && index == 0) { 5859 time64_to_tm(phba->cgn_daily_ts.tv_sec, 0, &broken); 5860 5861 cp->cgn_info_month = broken.tm_mon + 1; 5862 cp->cgn_info_day = broken.tm_mday; 5863 cp->cgn_info_year = broken.tm_year - 100; 5864 cp->cgn_info_hour = broken.tm_hour; 5865 cp->cgn_info_minute = broken.tm_min; 5866 cp->cgn_info_second = broken.tm_sec; 5867 5868 lpfc_printf_log 5869 (phba, KERN_INFO, LOG_CGN_MGMT, 5870 "2646 CGNInfo idx0 Start Time: " 5871 "%d/%d/%d %d:%d:%d\n", 5872 cp->cgn_info_day, cp->cgn_info_month, 5873 cp->cgn_info_year, cp->cgn_info_hour, 5874 cp->cgn_info_minute, cp->cgn_info_second); 5875 } 5876 5877 dvalue = 0; 5878 wvalue = 0; 5879 lvalue = 0; 5880 mvalue = 0; 5881 mbps = 0; 5882 avalue = 0; 5883 for (i = 0; i < LPFC_HOUR_DAY; i++) { 5884 dvalue += le32_to_cpu(cp->cgn_drvr_hr[i]); 5885 wvalue += le32_to_cpu(cp->cgn_warn_hr[i]); 5886 lvalue += le32_to_cpu(cp->cgn_latency_hr[i]); 5887 mbps += le16_to_cpu(cp->cgn_bw_hr[i]); 5888 avalue += le32_to_cpu(cp->cgn_alarm_hr[i]); 5889 } 5890 if (lvalue) /* Avg of latency averages */ 5891 lvalue /= LPFC_HOUR_DAY; 5892 if (mbps) /* Avg of Bandwidth averages */ 5893 mvalue = mbps / LPFC_HOUR_DAY; 5894 5895 lptr = &cp->cgn_drvr_day[index]; 5896 *lptr = cpu_to_le32(dvalue); 5897 lptr = &cp->cgn_warn_day[index]; 5898 *lptr = cpu_to_le32(wvalue); 5899 lptr = &cp->cgn_latency_day[index]; 5900 *lptr = cpu_to_le32(lvalue); 5901 mptr = &cp->cgn_bw_day[index]; 5902 *mptr = cpu_to_le16(mvalue); 5903 lptr = &cp->cgn_alarm_day[index]; 5904 *lptr = cpu_to_le32(avalue); 5905 5906 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 5907 "2420 Congestion Info - daily (%d): " 5908 "%d %d %d %d %d\n", 5909 index, dvalue, wvalue, lvalue, mvalue, avalue); 5910 5911 /* We just wrote LPFC_MAX_CGN_DAYS of data, 5912 * so we are wrapped on any data after this. 5913 * Save this as the start time for the next day. 5914 */ 5915 if (index == (LPFC_MAX_CGN_DAYS - 1)) { 5916 phba->hba_flag |= HBA_CGN_DAY_WRAP; 5917 ktime_get_real_ts64(&phba->cgn_daily_ts); 5918 } 5919 } 5920 5921 /* Use the frequency found in the last rcv'ed FPIN */ 5922 value = phba->cgn_fpin_frequency; 5923 cp->cgn_warn_freq = cpu_to_le16(value); 5924 cp->cgn_alarm_freq = cpu_to_le16(value); 5925 5926 lvalue = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, 5927 LPFC_CGN_CRC32_SEED); 5928 cp->cgn_info_crc = cpu_to_le32(lvalue); 5929 } 5930 5931 /** 5932 * lpfc_calc_cmf_latency - latency from start of rx rate timer interval 5933 * @phba: The Hba for which this call is being executed. 5934 * 5935 * The routine calculates the latency from the beginning of the CMF timer 5936 * interval to the current point in time. It is called from IO completion 5937 * when we exceed our bandwidth limitation for the time interval. 5938 */ 5939 uint32_t 5940 lpfc_calc_cmf_latency(struct lpfc_hba *phba) 5941 { 5942 struct timespec64 cmpl_time; 5943 uint32_t msec = 0; 5944 5945 ktime_get_real_ts64(&cmpl_time); 5946 5947 /* This routine works on a ms granularity so sec and nsec are 5948 * converted accordingly.
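 * For example, with a timer start of 10.900000000 s and a completion at 11.250000000 s the nanosecond field wraps, so msec = (11 - 10 - 1) * 1000 + ((1000000000 - 900000000) + 250000000) / 1000000 = 350 ms.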
5949 */ 5950 if (cmpl_time.tv_sec == phba->cmf_latency.tv_sec) { 5951 msec = (cmpl_time.tv_nsec - phba->cmf_latency.tv_nsec) / 5952 NSEC_PER_MSEC; 5953 } else { 5954 if (cmpl_time.tv_nsec >= phba->cmf_latency.tv_nsec) { 5955 msec = (cmpl_time.tv_sec - 5956 phba->cmf_latency.tv_sec) * MSEC_PER_SEC; 5957 msec += ((cmpl_time.tv_nsec - 5958 phba->cmf_latency.tv_nsec) / NSEC_PER_MSEC); 5959 } else { 5960 msec = (cmpl_time.tv_sec - phba->cmf_latency.tv_sec - 5961 1) * MSEC_PER_SEC; 5962 msec += (((NSEC_PER_SEC - phba->cmf_latency.tv_nsec) + 5963 cmpl_time.tv_nsec) / NSEC_PER_MSEC); 5964 } 5965 } 5966 return msec; 5967 } 5968 5969 /** 5970 * lpfc_cmf_timer - This is the timer function for one congestion 5971 * rate interval. 5972 * @timer: Pointer to the high resolution timer that expired 5973 */ 5974 static enum hrtimer_restart 5975 lpfc_cmf_timer(struct hrtimer *timer) 5976 { 5977 struct lpfc_hba *phba = container_of(timer, struct lpfc_hba, 5978 cmf_timer); 5979 struct rx_info_entry entry; 5980 uint32_t io_cnt; 5981 uint32_t busy, max_read; 5982 uint64_t total, rcv, lat, mbpi, extra, cnt; 5983 int timer_interval = LPFC_CMF_INTERVAL; 5984 uint32_t ms; 5985 struct lpfc_cgn_stat *cgs; 5986 int cpu; 5987 5988 /* Only restart the timer if congestion mgmt is on */ 5989 if (phba->cmf_active_mode == LPFC_CFG_OFF || 5990 !phba->cmf_latency.tv_sec) { 5991 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 5992 "6224 CMF timer exit: %d %lld\n", 5993 phba->cmf_active_mode, 5994 (uint64_t)phba->cmf_latency.tv_sec); 5995 return HRTIMER_NORESTART; 5996 } 5997 5998 /* If pport is not ready yet, just exit and wait for 5999 * the next timer cycle to hit. 6000 */ 6001 if (!phba->pport) 6002 goto skip; 6003 6004 /* Do not block SCSI IO while in the timer routine since 6005 * total_bytes will be cleared 6006 */ 6007 atomic_set(&phba->cmf_stop_io, 1); 6008 6009 /* First we need to calculate the actual ms between 6010 * the last timer interrupt and this one. We ask for 6011 * LPFC_CMF_INTERVAL, however the actual time may 6012 * vary depending on system overhead. 6013 */ 6014 ms = lpfc_calc_cmf_latency(phba); 6015 6016 6017 /* Immediately after we calculate the time since the last 6018 * timer interrupt, set the start time for the next 6019 * interrupt 6020 */ 6021 ktime_get_real_ts64(&phba->cmf_latency); 6022 6023 phba->cmf_link_byte_count = 6024 div_u64(phba->cmf_max_line_rate * LPFC_CMF_INTERVAL, 1000); 6025 6026 /* Collect all the stats from the prior timer interval */ 6027 total = 0; 6028 io_cnt = 0; 6029 lat = 0; 6030 rcv = 0; 6031 for_each_present_cpu(cpu) { 6032 cgs = per_cpu_ptr(phba->cmf_stat, cpu); 6033 total += atomic64_xchg(&cgs->total_bytes, 0); 6034 io_cnt += atomic_xchg(&cgs->rx_io_cnt, 0); 6035 lat += atomic64_xchg(&cgs->rx_latency, 0); 6036 rcv += atomic64_xchg(&cgs->rcv_bytes, 0); 6037 } 6038 6039 /* Before we issue another CMF_SYNC_WQE, retrieve the BW 6040 * returned from the last CMF_SYNC_WQE issued, from 6041 * cmf_last_sync_bw. This will be the target BW for 6042 * this next timer interval. 6043 */ 6044 if (phba->cmf_active_mode == LPFC_CFG_MANAGED && 6045 phba->link_state != LPFC_LINK_DOWN && 6046 phba->hba_flag & HBA_SETUP) { 6047 mbpi = phba->cmf_last_sync_bw; 6048 phba->cmf_last_sync_bw = 0; 6049 extra = 0; 6050 6051 /* Calculate any extra bytes needed to account for the 6052 * timer accuracy. If we are less than LPFC_CMF_INTERVAL 6053 * calculate the adjustment needed for total to reflect 6054 * a full LPFC_CMF_INTERVAL. 
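 * For example, if LPFC_CMF_INTERVAL were 1000 ms but only ms = 900 elapsed and total = 9000 bytes were counted, then cnt = (9000 / 900) * 1000 = 10000 and extra = 1000 bytes (capped at mbpi when HBA_SHORT_CMF is set).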
6055 */ 6056 if (ms && ms < LPFC_CMF_INTERVAL) { 6057 cnt = div_u64(total, ms); /* bytes per ms */ 6058 cnt *= LPFC_CMF_INTERVAL; /* what total should be */ 6059 6060 /* If the timeout is scheduled to be shorter, 6061 * this value may skew the data, so cap it at mbpi. 6062 */ 6063 if ((phba->hba_flag & HBA_SHORT_CMF) && cnt > mbpi) 6064 cnt = mbpi; 6065 6066 extra = cnt - total; 6067 } 6068 lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total + extra); 6069 } else { 6070 /* For Monitor mode or link down we want mbpi 6071 * to be the full link speed 6072 */ 6073 mbpi = phba->cmf_link_byte_count; 6074 extra = 0; 6075 } 6076 phba->cmf_timer_cnt++; 6077 6078 if (io_cnt) { 6079 /* Update congestion info buffer latency in us */ 6080 atomic_add(io_cnt, &phba->cgn_latency_evt_cnt); 6081 atomic64_add(lat, &phba->cgn_latency_evt); 6082 } 6083 busy = atomic_xchg(&phba->cmf_busy, 0); 6084 max_read = atomic_xchg(&phba->rx_max_read_cnt, 0); 6085 6086 /* Calculate MBPI for the next timer interval */ 6087 if (mbpi) { 6088 if (mbpi > phba->cmf_link_byte_count || 6089 phba->cmf_active_mode == LPFC_CFG_MONITOR) 6090 mbpi = phba->cmf_link_byte_count; 6091 6092 /* Change max_bytes_per_interval to what the prior 6093 * CMF_SYNC_WQE cmpl indicated. 6094 */ 6095 if (mbpi != phba->cmf_max_bytes_per_interval) 6096 phba->cmf_max_bytes_per_interval = mbpi; 6097 } 6098 6099 /* Save rxmonitor information for debug */ 6100 if (phba->rx_monitor) { 6101 entry.total_bytes = total; 6102 entry.cmf_bytes = total + extra; 6103 entry.rcv_bytes = rcv; 6104 entry.cmf_busy = busy; 6105 entry.cmf_info = phba->cmf_active_info; 6106 if (io_cnt) { 6107 entry.avg_io_latency = div_u64(lat, io_cnt); 6108 entry.avg_io_size = div_u64(rcv, io_cnt); 6109 } else { 6110 entry.avg_io_latency = 0; 6111 entry.avg_io_size = 0; 6112 } 6113 entry.max_read_cnt = max_read; 6114 entry.io_cnt = io_cnt; 6115 entry.max_bytes_per_interval = mbpi; 6116 if (phba->cmf_active_mode == LPFC_CFG_MANAGED) 6117 entry.timer_utilization = phba->cmf_last_ts; 6118 else 6119 entry.timer_utilization = ms; 6120 entry.timer_interval = ms; 6121 phba->cmf_last_ts = 0; 6122 6123 lpfc_rx_monitor_record(phba->rx_monitor, &entry); 6124 } 6125 6126 if (phba->cmf_active_mode == LPFC_CFG_MONITOR) { 6127 /* If Monitor mode, check if we are oversubscribed 6128 * against the full line rate. 6129 */ 6130 if (mbpi && total > mbpi) 6131 atomic_inc(&phba->cgn_driver_evt_cnt); 6132 } 6133 phba->rx_block_cnt += div_u64(rcv, 512); /* save 512 byte block cnt */ 6134 6135 /* Each minute save Fabric and Driver congestion information */ 6136 lpfc_cgn_save_evt_cnt(phba); 6137 6138 phba->hba_flag &= ~HBA_SHORT_CMF; 6139 6140 /* Since we need to call lpfc_cgn_save_evt_cnt every minute, on the 6141 * minute, adjust our next timer interval, if needed, to ensure a 6142 * 1 minute granularity when we get the next timer interrupt. 6143 */ 6144 if (time_after(jiffies + msecs_to_jiffies(LPFC_CMF_INTERVAL), 6145 phba->cgn_evt_timestamp)) { 6146 timer_interval = jiffies_to_msecs(phba->cgn_evt_timestamp - 6147 jiffies); 6148 if (timer_interval <= 0) 6149 timer_interval = LPFC_CMF_INTERVAL; 6150 else 6151 phba->hba_flag |= HBA_SHORT_CMF; 6152 6153 /* If we adjust timer_interval, max_bytes_per_interval 6154 * needs to be adjusted as well. 
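 * The byte budget scales linearly with the shortened interval: cmf_link_byte_count = cmf_max_line_rate * timer_interval / 1000.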
6155 */ 6156 phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate * 6157 timer_interval, 1000); 6158 if (phba->cmf_active_mode == LPFC_CFG_MONITOR) 6159 phba->cmf_max_bytes_per_interval = 6160 phba->cmf_link_byte_count; 6161 } 6162 6163 /* Since total_bytes has already been zeroed, it's okay to unblock 6164 * after max_bytes_per_interval is setup. 6165 */ 6166 if (atomic_xchg(&phba->cmf_bw_wait, 0)) 6167 queue_work(phba->wq, &phba->unblock_request_work); 6168 6169 /* SCSI IO is now unblocked */ 6170 atomic_set(&phba->cmf_stop_io, 0); 6171 6172 skip: 6173 hrtimer_forward_now(timer, 6174 ktime_set(0, timer_interval * NSEC_PER_MSEC)); 6175 return HRTIMER_RESTART; 6176 } 6177 6178 #define trunk_link_status(__idx)\ 6179 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\ 6180 ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\ 6181 "Link up" : "Link down") : "NA" 6182 /* Did port __idx report an error */ 6183 #define trunk_port_fault(__idx)\ 6184 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\ 6185 (port_fault & (1 << __idx) ? "YES" : "NO") : "NA" 6186 6187 static void 6188 lpfc_update_trunk_link_status(struct lpfc_hba *phba, 6189 struct lpfc_acqe_fc_la *acqe_fc) 6190 { 6191 uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc); 6192 uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc); 6193 u8 cnt = 0; 6194 6195 phba->sli4_hba.link_state.speed = 6196 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC, 6197 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 6198 6199 phba->sli4_hba.link_state.logical_speed = 6200 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10; 6201 /* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */ 6202 phba->fc_linkspeed = 6203 lpfc_async_link_speed_to_read_top( 6204 phba, 6205 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 6206 6207 if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) { 6208 phba->trunk_link.link0.state = 6209 bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc) 6210 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 6211 phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0; 6212 cnt++; 6213 } 6214 if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) { 6215 phba->trunk_link.link1.state = 6216 bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc) 6217 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 6218 phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0; 6219 cnt++; 6220 } 6221 if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) { 6222 phba->trunk_link.link2.state = 6223 bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc) 6224 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 6225 phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0; 6226 cnt++; 6227 } 6228 if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) { 6229 phba->trunk_link.link3.state = 6230 bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc) 6231 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 6232 phba->trunk_link.link3.fault = port_fault & 0x8 ?
err : 0; 6233 cnt++; 6234 } 6235 6236 if (cnt) 6237 phba->trunk_link.phy_lnk_speed = 6238 phba->sli4_hba.link_state.logical_speed / (cnt * 1000); 6239 else 6240 phba->trunk_link.phy_lnk_speed = LPFC_LINK_SPEED_UNKNOWN; 6241 6242 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6243 "2910 Async FC Trunking Event - Speed:%d\n" 6244 "\tLogical speed:%d " 6245 "port0: %s port1: %s port2: %s port3: %s\n", 6246 phba->sli4_hba.link_state.speed, 6247 phba->sli4_hba.link_state.logical_speed, 6248 trunk_link_status(0), trunk_link_status(1), 6249 trunk_link_status(2), trunk_link_status(3)); 6250 6251 if (phba->cmf_active_mode != LPFC_CFG_OFF) 6252 lpfc_cmf_signal_init(phba); 6253 6254 if (port_fault) 6255 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6256 "3202 trunk error:0x%x (%s) seen on port0:%s " 6257 /* 6258 * SLI-4: We have only 0xA error codes 6259 * defined as of now. print an appropriate 6260 * message in case driver needs to be updated. 6261 */ 6262 "port1:%s port2:%s port3:%s\n", err, err > 0xA ? 6263 "UNDEFINED. update driver." : trunk_errmsg[err], 6264 trunk_port_fault(0), trunk_port_fault(1), 6265 trunk_port_fault(2), trunk_port_fault(3)); 6266 } 6267 6268 6269 /** 6270 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event 6271 * @phba: pointer to lpfc hba data structure. 6272 * @acqe_fc: pointer to the async fc completion queue entry. 6273 * 6274 * This routine is to handle the SLI4 asynchronous FC event. It will simply log 6275 * that the event was received and then issue a read_topology mailbox command so 6276 * that the rest of the driver will treat it the same as SLI3. 6277 **/ 6278 static void 6279 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) 6280 { 6281 LPFC_MBOXQ_t *pmb; 6282 MAILBOX_t *mb; 6283 struct lpfc_mbx_read_top *la; 6284 int rc; 6285 6286 if (bf_get(lpfc_trailer_type, acqe_fc) != 6287 LPFC_FC_LA_EVENT_TYPE_FC_LINK) { 6288 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6289 "2895 Non FC link Event detected.(%d)\n", 6290 bf_get(lpfc_trailer_type, acqe_fc)); 6291 return; 6292 } 6293 6294 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) == 6295 LPFC_FC_LA_TYPE_TRUNKING_EVENT) { 6296 lpfc_update_trunk_link_status(phba, acqe_fc); 6297 return; 6298 } 6299 6300 /* Keep the link status for extra SLI4 state machine reference */ 6301 phba->sli4_hba.link_state.speed = 6302 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC, 6303 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 6304 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL; 6305 phba->sli4_hba.link_state.topology = 6306 bf_get(lpfc_acqe_fc_la_topology, acqe_fc); 6307 phba->sli4_hba.link_state.status = 6308 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc); 6309 phba->sli4_hba.link_state.type = 6310 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc); 6311 phba->sli4_hba.link_state.number = 6312 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc); 6313 phba->sli4_hba.link_state.fault = 6314 bf_get(lpfc_acqe_link_fault, acqe_fc); 6315 6316 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) == 6317 LPFC_FC_LA_TYPE_LINK_DOWN) 6318 phba->sli4_hba.link_state.logical_speed = 0; 6319 else if (!phba->sli4_hba.conf_trunk) 6320 phba->sli4_hba.link_state.logical_speed = 6321 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10; 6322 6323 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6324 "2896 Async FC event - Speed:%dGBaud Topology:x%x " 6325 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:" 6326 "%dMbps Fault:%d\n", 6327 phba->sli4_hba.link_state.speed, 6328 phba->sli4_hba.link_state.topology, 6329 
phba->sli4_hba.link_state.status, 6330 phba->sli4_hba.link_state.type, 6331 phba->sli4_hba.link_state.number, 6332 phba->sli4_hba.link_state.logical_speed, 6333 phba->sli4_hba.link_state.fault); 6334 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6335 if (!pmb) { 6336 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6337 "2897 The mboxq allocation failed\n"); 6338 return; 6339 } 6340 rc = lpfc_mbox_rsrc_prep(phba, pmb); 6341 if (rc) { 6342 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6343 "2898 The mboxq prep failed\n"); 6344 goto out_free_pmb; 6345 } 6346 6347 /* Cleanup any outstanding ELS commands */ 6348 lpfc_els_flush_all_cmd(phba); 6349 6350 /* Block ELS IOCBs until we have done process link event */ 6351 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT; 6352 6353 /* Update link event statistics */ 6354 phba->sli.slistat.link_event++; 6355 6356 /* Create lpfc_handle_latt mailbox command from link ACQE */ 6357 lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf); 6358 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 6359 pmb->vport = phba->pport; 6360 6361 if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) { 6362 phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK); 6363 6364 switch (phba->sli4_hba.link_state.status) { 6365 case LPFC_FC_LA_TYPE_MDS_LINK_DOWN: 6366 phba->link_flag |= LS_MDS_LINK_DOWN; 6367 break; 6368 case LPFC_FC_LA_TYPE_MDS_LOOPBACK: 6369 phba->link_flag |= LS_MDS_LOOPBACK; 6370 break; 6371 default: 6372 break; 6373 } 6374 6375 /* Initialize completion status */ 6376 mb = &pmb->u.mb; 6377 mb->mbxStatus = MBX_SUCCESS; 6378 6379 /* Parse port fault information field */ 6380 lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc); 6381 6382 /* Parse and translate link attention fields */ 6383 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop; 6384 la->eventTag = acqe_fc->event_tag; 6385 6386 if (phba->sli4_hba.link_state.status == 6387 LPFC_FC_LA_TYPE_UNEXP_WWPN) { 6388 bf_set(lpfc_mbx_read_top_att_type, la, 6389 LPFC_FC_LA_TYPE_UNEXP_WWPN); 6390 } else { 6391 bf_set(lpfc_mbx_read_top_att_type, la, 6392 LPFC_FC_LA_TYPE_LINK_DOWN); 6393 } 6394 /* Invoke the mailbox command callback function */ 6395 lpfc_mbx_cmpl_read_topology(phba, pmb); 6396 6397 return; 6398 } 6399 6400 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 6401 if (rc == MBX_NOT_FINISHED) 6402 goto out_free_pmb; 6403 return; 6404 6405 out_free_pmb: 6406 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); 6407 } 6408 6409 /** 6410 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event 6411 * @phba: pointer to lpfc hba data structure. 6412 * @acqe_sli: pointer to the async SLI completion queue entry. 6413 * 6414 * This routine is to handle the SLI4 asynchronous SLI events. 
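 * Event types handled here include over/normal temperature, misconfigured optics, remote D_Port test initiation, congestion parameter changes, FA-PWWN misconfiguration, EEPROM failure, congestion signals and remote degrade signaling; unrecognized types are only logged.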
6415 **/ 6416 static void 6417 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) 6418 { 6419 char port_name; 6420 char message[128]; 6421 uint8_t status; 6422 uint8_t evt_type; 6423 uint8_t operational = 0; 6424 struct temp_event temp_event_data; 6425 struct lpfc_acqe_misconfigured_event *misconfigured; 6426 struct lpfc_acqe_cgn_signal *cgn_signal; 6427 struct Scsi_Host *shost; 6428 struct lpfc_vport **vports; 6429 int rc, i, cnt; 6430 6431 evt_type = bf_get(lpfc_trailer_type, acqe_sli); 6432 6433 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6434 "2901 Async SLI event - Type:%d, Event Data: x%08x " 6435 "x%08x x%08x x%08x\n", evt_type, 6436 acqe_sli->event_data1, acqe_sli->event_data2, 6437 acqe_sli->event_data3, acqe_sli->trailer); 6438 6439 port_name = phba->Port[0]; 6440 if (port_name == 0x00) 6441 port_name = '?'; /* get port name is empty */ 6442 6443 switch (evt_type) { 6444 case LPFC_SLI_EVENT_TYPE_OVER_TEMP: 6445 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 6446 temp_event_data.event_code = LPFC_THRESHOLD_TEMP; 6447 temp_event_data.data = (uint32_t)acqe_sli->event_data1; 6448 6449 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 6450 "3190 Over Temperature:%d Celsius- Port Name %c\n", 6451 acqe_sli->event_data1, port_name); 6452 6453 phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; 6454 shost = lpfc_shost_from_vport(phba->pport); 6455 fc_host_post_vendor_event(shost, fc_get_event_number(), 6456 sizeof(temp_event_data), 6457 (char *)&temp_event_data, 6458 SCSI_NL_VID_TYPE_PCI 6459 | PCI_VENDOR_ID_EMULEX); 6460 break; 6461 case LPFC_SLI_EVENT_TYPE_NORM_TEMP: 6462 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 6463 temp_event_data.event_code = LPFC_NORMAL_TEMP; 6464 temp_event_data.data = (uint32_t)acqe_sli->event_data1; 6465 6466 lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_LDS_EVENT, 6467 "3191 Normal Temperature:%d Celsius - Port Name %c\n", 6468 acqe_sli->event_data1, port_name); 6469 6470 shost = lpfc_shost_from_vport(phba->pport); 6471 fc_host_post_vendor_event(shost, fc_get_event_number(), 6472 sizeof(temp_event_data), 6473 (char *)&temp_event_data, 6474 SCSI_NL_VID_TYPE_PCI 6475 | PCI_VENDOR_ID_EMULEX); 6476 break; 6477 case LPFC_SLI_EVENT_TYPE_MISCONFIGURED: 6478 misconfigured = (struct lpfc_acqe_misconfigured_event *) 6479 &acqe_sli->event_data1; 6480 6481 /* fetch the status for this port */ 6482 switch (phba->sli4_hba.lnk_info.lnk_no) { 6483 case LPFC_LINK_NUMBER_0: 6484 status = bf_get(lpfc_sli_misconfigured_port0_state, 6485 &misconfigured->theEvent); 6486 operational = bf_get(lpfc_sli_misconfigured_port0_op, 6487 &misconfigured->theEvent); 6488 break; 6489 case LPFC_LINK_NUMBER_1: 6490 status = bf_get(lpfc_sli_misconfigured_port1_state, 6491 &misconfigured->theEvent); 6492 operational = bf_get(lpfc_sli_misconfigured_port1_op, 6493 &misconfigured->theEvent); 6494 break; 6495 case LPFC_LINK_NUMBER_2: 6496 status = bf_get(lpfc_sli_misconfigured_port2_state, 6497 &misconfigured->theEvent); 6498 operational = bf_get(lpfc_sli_misconfigured_port2_op, 6499 &misconfigured->theEvent); 6500 break; 6501 case LPFC_LINK_NUMBER_3: 6502 status = bf_get(lpfc_sli_misconfigured_port3_state, 6503 &misconfigured->theEvent); 6504 operational = bf_get(lpfc_sli_misconfigured_port3_op, 6505 &misconfigured->theEvent); 6506 break; 6507 default: 6508 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6509 "3296 " 6510 "LPFC_SLI_EVENT_TYPE_MISCONFIGURED " 6511 "event: Invalid link %d", 6512 phba->sli4_hba.lnk_info.lnk_no); 6513 return; 6514 } 6515 
6516 /* Skip if optic state unchanged */ 6517 if (phba->sli4_hba.lnk_info.optic_state == status) 6518 return; 6519 6520 switch (status) { 6521 case LPFC_SLI_EVENT_STATUS_VALID: 6522 sprintf(message, "Physical Link is functional"); 6523 break; 6524 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT: 6525 sprintf(message, "Optics faulted/incorrectly " 6526 "installed/not installed - Reseat optics, " 6527 "if issue not resolved, replace."); 6528 break; 6529 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE: 6530 sprintf(message, 6531 "Optics of two types installed - Remove one " 6532 "optic or install matching pair of optics."); 6533 break; 6534 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED: 6535 sprintf(message, "Incompatible optics - Replace with " 6536 "compatible optics for card to function."); 6537 break; 6538 case LPFC_SLI_EVENT_STATUS_UNQUALIFIED: 6539 sprintf(message, "Unqualified optics - Replace with " 6540 "Avago optics for Warranty and Technical " 6541 "Support - Link is%s operational", 6542 (operational) ? " not" : ""); 6543 break; 6544 case LPFC_SLI_EVENT_STATUS_UNCERTIFIED: 6545 sprintf(message, "Uncertified optics - Replace with " 6546 "Avago-certified optics to enable link " 6547 "operation - Link is%s operational", 6548 (operational) ? " not" : ""); 6549 break; 6550 default: 6551 /* firmware is reporting a status we don't know about */ 6552 sprintf(message, "Unknown event status x%02x", status); 6553 break; 6554 } 6555 6556 /* Issue READ_CONFIG mbox command to refresh supported speeds */ 6557 rc = lpfc_sli4_read_config(phba); 6558 if (rc) { 6559 phba->lmt = 0; 6560 lpfc_printf_log(phba, KERN_ERR, 6561 LOG_TRACE_EVENT, 6562 "3194 Unable to retrieve supported " 6563 "speeds, rc = 0x%x\n", rc); 6564 } 6565 rc = lpfc_sli4_refresh_params(phba); 6566 if (rc) { 6567 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6568 "3174 Unable to update pls support, " 6569 "rc x%x\n", rc); 6570 } 6571 vports = lpfc_create_vport_work_array(phba); 6572 if (vports != NULL) { 6573 for (i = 0; i <= phba->max_vports && vports[i] != NULL; 6574 i++) { 6575 shost = lpfc_shost_from_vport(vports[i]); 6576 lpfc_host_supported_speeds_set(shost); 6577 } 6578 } 6579 lpfc_destroy_vport_work_array(phba, vports); 6580 6581 phba->sli4_hba.lnk_info.optic_state = status; 6582 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6583 "3176 Port Name %c %s\n", port_name, message); 6584 break; 6585 case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT: 6586 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6587 "3192 Remote DPort Test Initiated - " 6588 "Event Data1:x%08x Event Data2: x%08x\n", 6589 acqe_sli->event_data1, acqe_sli->event_data2); 6590 break; 6591 case LPFC_SLI_EVENT_TYPE_PORT_PARAMS_CHG: 6592 /* Call FW to obtain active parms */ 6593 lpfc_sli4_cgn_parm_chg_evt(phba); 6594 break; 6595 case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN: 6596 /* Misconfigured WWN. Reports that the SLI Port is configured 6597 * to use FA-WWN, but the attached device doesn’t support it. 6598 * Event Data1 - N.A, Event Data2 - N.A 6599 * This event only happens on the physical port. 6600 */ 6601 lpfc_log_msg(phba, KERN_WARNING, LOG_SLI | LOG_DISCOVERY, 6602 "2699 Misconfigured FA-PWWN - Attached device " 6603 "does not support FA-PWWN\n"); 6604 phba->sli4_hba.fawwpn_flag &= ~LPFC_FAWWPN_FABRIC; 6605 memset(phba->pport->fc_portname.u.wwn, 0, 6606 sizeof(struct lpfc_name)); 6607 break; 6608 case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE: 6609 /* EEPROM failure. 
No driver action is required */ 6610 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 6611 "2518 EEPROM failure - " 6612 "Event Data1: x%08x Event Data2: x%08x\n", 6613 acqe_sli->event_data1, acqe_sli->event_data2); 6614 break; 6615 case LPFC_SLI_EVENT_TYPE_CGN_SIGNAL: 6616 if (phba->cmf_active_mode == LPFC_CFG_OFF) 6617 break; 6618 cgn_signal = (struct lpfc_acqe_cgn_signal *) 6619 &acqe_sli->event_data1; 6620 phba->cgn_acqe_cnt++; 6621 6622 cnt = bf_get(lpfc_warn_acqe, cgn_signal); 6623 atomic64_add(cnt, &phba->cgn_acqe_stat.warn); 6624 atomic64_add(cgn_signal->alarm_cnt, &phba->cgn_acqe_stat.alarm); 6625 6626 /* no threshold for CMF, even 1 signal will trigger an event */ 6627 6628 /* Alarm overrides warning, so check that first */ 6629 if (cgn_signal->alarm_cnt) { 6630 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) { 6631 /* Keep track of alarm cnt for CMF_SYNC_WQE */ 6632 atomic_add(cgn_signal->alarm_cnt, 6633 &phba->cgn_sync_alarm_cnt); 6634 } 6635 } else if (cnt) { 6636 /* signal action needs to be taken */ 6637 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY || 6638 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) { 6639 /* Keep track of warning cnt for CMF_SYNC_WQE */ 6640 atomic_add(cnt, &phba->cgn_sync_warn_cnt); 6641 } 6642 } 6643 break; 6644 case LPFC_SLI_EVENT_TYPE_RD_SIGNAL: 6645 /* May be accompanied by a temperature event */ 6646 lpfc_printf_log(phba, KERN_INFO, 6647 LOG_SLI | LOG_LINK_EVENT | LOG_LDS_EVENT, 6648 "2902 Remote Degrade Signaling: x%08x x%08x " 6649 "x%08x\n", 6650 acqe_sli->event_data1, acqe_sli->event_data2, 6651 acqe_sli->event_data3); 6652 break; 6653 default: 6654 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6655 "3193 Unrecognized SLI event, type: 0x%x", 6656 evt_type); 6657 break; 6658 } 6659 } 6660 6661 /** 6662 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport 6663 * @vport: pointer to vport data structure. 6664 * 6665 * This routine is to perform Clear Virtual Link (CVL) on a vport in 6666 * response to a CVL event. 6667 * 6668 * Return the pointer to the ndlp with the vport if successful, otherwise 6669 * return NULL. 6670 **/ 6671 static struct lpfc_nodelist * 6672 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport) 6673 { 6674 struct lpfc_nodelist *ndlp; 6675 struct Scsi_Host *shost; 6676 struct lpfc_hba *phba; 6677 6678 if (!vport) 6679 return NULL; 6680 phba = vport->phba; 6681 if (!phba) 6682 return NULL; 6683 ndlp = lpfc_findnode_did(vport, Fabric_DID); 6684 if (!ndlp) { 6685 /* Cannot find existing Fabric ndlp, so allocate a new one */ 6686 ndlp = lpfc_nlp_init(vport, Fabric_DID); 6687 if (!ndlp) 6688 return NULL; 6689 /* Set the node type */ 6690 ndlp->nlp_type |= NLP_FABRIC; 6691 /* Put ndlp onto node list */ 6692 lpfc_enqueue_node(vport, ndlp); 6693 } 6694 if ((phba->pport->port_state < LPFC_FLOGI) && 6695 (phba->pport->port_state != LPFC_VPORT_FAILED)) 6696 return NULL; 6697 /* If virtual link is not yet instantiated ignore CVL */ 6698 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC) 6699 && (vport->port_state != LPFC_VPORT_FAILED)) 6700 return NULL; 6701 shost = lpfc_shost_from_vport(vport); 6702 if (!shost) 6703 return NULL; 6704 lpfc_linkdown_port(vport); 6705 lpfc_cleanup_pending_mbox(vport); 6706 spin_lock_irq(shost->host_lock); 6707 vport->fc_flag |= FC_VPORT_CVL_RCVD; 6708 spin_unlock_irq(shost->host_lock); 6709 6710 return ndlp; 6711 } 6712 6713 /** 6714 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports 6715 * @phba: pointer to lpfc hba data structure. 
6716 * 6717 * This routine is to perform Clear Virtual Link (CVL) on all vports in 6718 * response to a FCF dead event. 6719 **/ 6720 static void 6721 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba) 6722 { 6723 struct lpfc_vport **vports; 6724 int i; 6725 6726 vports = lpfc_create_vport_work_array(phba); 6727 if (vports) 6728 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 6729 lpfc_sli4_perform_vport_cvl(vports[i]); 6730 lpfc_destroy_vport_work_array(phba, vports); 6731 } 6732 6733 /** 6734 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event 6735 * @phba: pointer to lpfc hba data structure. 6736 * @acqe_fip: pointer to the async fcoe completion queue entry. 6737 * 6738 * This routine is to handle the SLI4 asynchronous fcoe event. 6739 **/ 6740 static void 6741 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, 6742 struct lpfc_acqe_fip *acqe_fip) 6743 { 6744 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip); 6745 int rc; 6746 struct lpfc_vport *vport; 6747 struct lpfc_nodelist *ndlp; 6748 int active_vlink_present; 6749 struct lpfc_vport **vports; 6750 int i; 6751 6752 phba->fc_eventTag = acqe_fip->event_tag; 6753 phba->fcoe_eventtag = acqe_fip->event_tag; 6754 switch (event_type) { 6755 case LPFC_FIP_EVENT_TYPE_NEW_FCF: 6756 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD: 6757 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF) 6758 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6759 "2546 New FCF event, evt_tag:x%x, " 6760 "index:x%x\n", 6761 acqe_fip->event_tag, 6762 acqe_fip->index); 6763 else 6764 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | 6765 LOG_DISCOVERY, 6766 "2788 FCF param modified event, " 6767 "evt_tag:x%x, index:x%x\n", 6768 acqe_fip->event_tag, 6769 acqe_fip->index); 6770 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 6771 /* 6772 * During period of FCF discovery, read the FCF 6773 * table record indexed by the event to update 6774 * FCF roundrobin failover eligible FCF bmask. 6775 */ 6776 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 6777 LOG_DISCOVERY, 6778 "2779 Read FCF (x%x) for updating " 6779 "roundrobin FCF failover bmask\n", 6780 acqe_fip->index); 6781 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index); 6782 } 6783 6784 /* If the FCF discovery is in progress, do nothing. */ 6785 spin_lock_irq(&phba->hbalock); 6786 if (phba->hba_flag & FCF_TS_INPROG) { 6787 spin_unlock_irq(&phba->hbalock); 6788 break; 6789 } 6790 /* If fast FCF failover rescan event is pending, do nothing */ 6791 if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) { 6792 spin_unlock_irq(&phba->hbalock); 6793 break; 6794 } 6795 6796 /* If the FCF has been in discovered state, do nothing. 
*/ 6797 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) { 6798 spin_unlock_irq(&phba->hbalock); 6799 break; 6800 } 6801 spin_unlock_irq(&phba->hbalock); 6802 6803 /* Otherwise, scan the entire FCF table and re-discover SAN */ 6804 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 6805 "2770 Start FCF table scan per async FCF " 6806 "event, evt_tag:x%x, index:x%x\n", 6807 acqe_fip->event_tag, acqe_fip->index); 6808 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, 6809 LPFC_FCOE_FCF_GET_FIRST); 6810 if (rc) 6811 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6812 "2547 Issue FCF scan read FCF mailbox " 6813 "command failed (x%x)\n", rc); 6814 break; 6815 6816 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL: 6817 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6818 "2548 FCF Table full count 0x%x tag 0x%x\n", 6819 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip), 6820 acqe_fip->event_tag); 6821 break; 6822 6823 case LPFC_FIP_EVENT_TYPE_FCF_DEAD: 6824 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; 6825 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6826 "2549 FCF (x%x) disconnected from network, " 6827 "tag:x%x\n", acqe_fip->index, 6828 acqe_fip->event_tag); 6829 /* 6830 * If we are in the middle of FCF failover process, clear 6831 * the corresponding FCF bit in the roundrobin bitmap. 6832 */ 6833 spin_lock_irq(&phba->hbalock); 6834 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) && 6835 (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) { 6836 spin_unlock_irq(&phba->hbalock); 6837 /* Update FLOGI FCF failover eligible FCF bmask */ 6838 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index); 6839 break; 6840 } 6841 spin_unlock_irq(&phba->hbalock); 6842 6843 /* If the event is not for currently used fcf do nothing */ 6844 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index) 6845 break; 6846 6847 /* 6848 * Otherwise, request the port to rediscover the entire FCF 6849 * table for a fast recovery from the case that the current FCF 6850 * is no longer valid, as we are not already in the middle of 6851 * the FCF failover process. 6852 */ 6853 spin_lock_irq(&phba->hbalock); 6854 /* Mark the fast failover process in progress */ 6855 phba->fcf.fcf_flag |= FCF_DEAD_DISC; 6856 spin_unlock_irq(&phba->hbalock); 6857 6858 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 6859 "2771 Start FCF fast failover process due to " 6860 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x " 6861 "\n", acqe_fip->event_tag, acqe_fip->index); 6862 rc = lpfc_sli4_redisc_fcf_table(phba); 6863 if (rc) { 6864 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 6865 LOG_TRACE_EVENT, 6866 "2772 Issue FCF rediscover mailbox " 6867 "command failed, fail through to FCF " 6868 "dead event\n"); 6869 spin_lock_irq(&phba->hbalock); 6870 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 6871 spin_unlock_irq(&phba->hbalock); 6872 /* 6873 * Last resort will fail over by treating this 6874 * as a link down to FCF registration. 6875 */ 6876 lpfc_sli4_fcf_dead_failthrough(phba); 6877 } else { 6878 /* Reset FCF roundrobin bmask for new discovery */ 6879 lpfc_sli4_clear_fcf_rr_bmask(phba); 6880 /* 6881 * Handling fast FCF failover to a DEAD FCF event is 6882 * considered equivalent to receiving CVL to all vports.
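 * lpfc_sli4_perform_all_vport_cvl() walks the vport work array and applies the same CVL handling to every vport.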
6883 */ 6884 lpfc_sli4_perform_all_vport_cvl(phba); 6885 } 6886 break; 6887 case LPFC_FIP_EVENT_TYPE_CVL: 6888 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; 6889 lpfc_printf_log(phba, KERN_ERR, 6890 LOG_TRACE_EVENT, 6891 "2718 Clear Virtual Link Received for VPI 0x%x" 6892 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag); 6893 6894 vport = lpfc_find_vport_by_vpid(phba, 6895 acqe_fip->index); 6896 ndlp = lpfc_sli4_perform_vport_cvl(vport); 6897 if (!ndlp) 6898 break; 6899 active_vlink_present = 0; 6900 6901 vports = lpfc_create_vport_work_array(phba); 6902 if (vports) { 6903 for (i = 0; i <= phba->max_vports && vports[i] != NULL; 6904 i++) { 6905 if ((!(vports[i]->fc_flag & 6906 FC_VPORT_CVL_RCVD)) && 6907 (vports[i]->port_state > LPFC_FDISC)) { 6908 active_vlink_present = 1; 6909 break; 6910 } 6911 } 6912 lpfc_destroy_vport_work_array(phba, vports); 6913 } 6914 6915 /* 6916 * Don't re-instantiate if vport is marked for deletion. 6917 * If we are here first then vport_delete is going to wait 6918 * for discovery to complete. 6919 */ 6920 if (!(vport->load_flag & FC_UNLOADING) && 6921 active_vlink_present) { 6922 /* 6923 * If there are other active VLinks present, 6924 * re-instantiate the Vlink using FDISC. 6925 */ 6926 mod_timer(&ndlp->nlp_delayfunc, 6927 jiffies + msecs_to_jiffies(1000)); 6928 spin_lock_irq(&ndlp->lock); 6929 ndlp->nlp_flag |= NLP_DELAY_TMO; 6930 spin_unlock_irq(&ndlp->lock); 6931 ndlp->nlp_last_elscmd = ELS_CMD_FDISC; 6932 vport->port_state = LPFC_FDISC; 6933 } else { 6934 /* 6935 * Otherwise, we request port to rediscover 6936 * the entire FCF table for a fast recovery 6937 * from possible case that the current FCF 6938 * is no longer valid if we are not already 6939 * in the FCF failover process. 6940 */ 6941 spin_lock_irq(&phba->hbalock); 6942 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 6943 spin_unlock_irq(&phba->hbalock); 6944 break; 6945 } 6946 /* Mark the fast failover process in progress */ 6947 phba->fcf.fcf_flag |= FCF_ACVL_DISC; 6948 spin_unlock_irq(&phba->hbalock); 6949 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 6950 LOG_DISCOVERY, 6951 "2773 Start FCF failover per CVL, " 6952 "evt_tag:x%x\n", acqe_fip->event_tag); 6953 rc = lpfc_sli4_redisc_fcf_table(phba); 6954 if (rc) { 6955 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 6956 LOG_TRACE_EVENT, 6957 "2774 Issue FCF rediscover " 6958 "mailbox command failed, " 6959 "through to CVL event\n"); 6960 spin_lock_irq(&phba->hbalock); 6961 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; 6962 spin_unlock_irq(&phba->hbalock); 6963 /* 6964 * Last resort will be re-try on the 6965 * the current registered FCF entry. 6966 */ 6967 lpfc_retry_pport_discovery(phba); 6968 } else 6969 /* 6970 * Reset FCF roundrobin bmask for new 6971 * discovery. 6972 */ 6973 lpfc_sli4_clear_fcf_rr_bmask(phba); 6974 } 6975 break; 6976 default: 6977 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6978 "0288 Unknown FCoE event type 0x%x event tag " 6979 "0x%x\n", event_type, acqe_fip->event_tag); 6980 break; 6981 } 6982 } 6983 6984 /** 6985 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event 6986 * @phba: pointer to lpfc hba data structure. 6987 * @acqe_dcbx: pointer to the async dcbx completion queue entry. 6988 * 6989 * This routine is to handle the SLI4 asynchronous dcbx event. 
6990 **/ 6991 static void 6992 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba, 6993 struct lpfc_acqe_dcbx *acqe_dcbx) 6994 { 6995 phba->fc_eventTag = acqe_dcbx->event_tag; 6996 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6997 "0290 The SLI4 DCBX asynchronous event is not " 6998 "handled yet\n"); 6999 } 7000 7001 /** 7002 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event 7003 * @phba: pointer to lpfc hba data structure. 7004 * @acqe_grp5: pointer to the async grp5 completion queue entry. 7005 * 7006 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event 7007 * is an asynchronous notification of a logical link speed change. The Port 7008 * reports the logical link speed in units of 10Mbps. 7009 **/ 7010 static void 7011 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba, 7012 struct lpfc_acqe_grp5 *acqe_grp5) 7013 { 7014 uint16_t prev_ll_spd; 7015 7016 phba->fc_eventTag = acqe_grp5->event_tag; 7017 phba->fcoe_eventtag = acqe_grp5->event_tag; 7018 prev_ll_spd = phba->sli4_hba.link_state.logical_speed; 7019 phba->sli4_hba.link_state.logical_speed = 7020 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10; 7021 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 7022 "2789 GRP5 Async Event: Updating logical link speed " 7023 "from %dMbps to %dMbps\n", prev_ll_spd, 7024 phba->sli4_hba.link_state.logical_speed); 7025 } 7026 7027 /** 7028 * lpfc_sli4_async_cmstat_evt - Process the asynchronous cmstat event 7029 * @phba: pointer to lpfc hba data structure. 7030 * 7031 * This routine is to handle the SLI4 asynchronous cmstat event. A cmstat event 7032 * is an asynchronous notification of a request to reset CM stats. 7033 **/ 7034 static void 7035 lpfc_sli4_async_cmstat_evt(struct lpfc_hba *phba) 7036 { 7037 if (!phba->cgn_i) 7038 return; 7039 lpfc_init_congestion_stat(phba); 7040 } 7041 7042 /** 7043 * lpfc_cgn_params_val - Validate FW congestion parameters. 7044 * @phba: pointer to lpfc hba data structure. 7045 * @p_cfg_param: pointer to FW provided congestion parameters. 7046 * 7047 * This routine validates the congestion parameters passed 7048 * by the FW to the driver via an ACQE event. 7049 **/ 7050 static void 7051 lpfc_cgn_params_val(struct lpfc_hba *phba, struct lpfc_cgn_param *p_cfg_param) 7052 { 7053 spin_lock_irq(&phba->hbalock); 7054 7055 if (!lpfc_rangecheck(p_cfg_param->cgn_param_mode, LPFC_CFG_OFF, 7056 LPFC_CFG_MONITOR)) { 7057 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT, 7058 "6225 CMF mode param out of range: %d\n", 7059 p_cfg_param->cgn_param_mode); 7060 p_cfg_param->cgn_param_mode = LPFC_CFG_OFF; 7061 } 7062 7063 spin_unlock_irq(&phba->hbalock); 7064 } 7065 7066 static const char * const lpfc_cmf_mode_to_str[] = { 7067 "OFF", 7068 "MANAGED", 7069 "MONITOR", 7070 }; 7071 7072 /** 7073 * lpfc_cgn_params_parse - Process a FW cong parm change event 7074 * @phba: pointer to lpfc hba data structure. 7075 * @p_cgn_param: pointer to a data buffer with the FW cong params. 7076 * @len: the size of pdata in bytes. 7077 * 7078 * This routine validates the congestion management buffer signature 7079 * from the FW and checks the contents, correcting any out-of-range 7080 * values. If the signature magic is correct, then after parameter 7081 * validation the contents are copied to the driver's 7082 * @phba structure. If the magic is incorrect, an error message is 7083 * logged.
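 * Mode transitions then drive CMF state: leaving OFF starts CMF (lpfc_cmf_start), entering OFF stops it (lpfc_cmf_stop), MANAGED to MONITOR restores the full-rate byte budget and unblocks waiting IO, and MONITOR to MANAGED re-initializes congestion signaling (lpfc_cmf_signal_init).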
7084 **/ 7085 static void 7086 lpfc_cgn_params_parse(struct lpfc_hba *phba, 7087 struct lpfc_cgn_param *p_cgn_param, uint32_t len) 7088 { 7089 struct lpfc_cgn_info *cp; 7090 uint32_t crc, oldmode; 7091 char acr_string[4] = {0}; 7092 7093 /* Make sure the FW has encoded the correct magic number to 7094 * validate the congestion parameter in FW memory. 7095 */ 7096 if (p_cgn_param->cgn_param_magic == LPFC_CFG_PARAM_MAGIC_NUM) { 7097 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT, 7098 "4668 FW cgn parm buffer data: " 7099 "magic 0x%x version %d mode %d " 7100 "level0 %d level1 %d " 7101 "level2 %d byte13 %d " 7102 "byte14 %d byte15 %d " 7103 "byte11 %d byte12 %d activeMode %d\n", 7104 p_cgn_param->cgn_param_magic, 7105 p_cgn_param->cgn_param_version, 7106 p_cgn_param->cgn_param_mode, 7107 p_cgn_param->cgn_param_level0, 7108 p_cgn_param->cgn_param_level1, 7109 p_cgn_param->cgn_param_level2, 7110 p_cgn_param->byte13, 7111 p_cgn_param->byte14, 7112 p_cgn_param->byte15, 7113 p_cgn_param->byte11, 7114 p_cgn_param->byte12, 7115 phba->cmf_active_mode); 7116 7117 oldmode = phba->cmf_active_mode; 7118 7119 /* Any parameters out of range are corrected to defaults 7120 * by this routine. No need to fail. 7121 */ 7122 lpfc_cgn_params_val(phba, p_cgn_param); 7123 7124 /* Parameters are verified, move them into driver storage */ 7125 spin_lock_irq(&phba->hbalock); 7126 memcpy(&phba->cgn_p, p_cgn_param, 7127 sizeof(struct lpfc_cgn_param)); 7128 7129 /* Update parameters in congestion info buffer now */ 7130 if (phba->cgn_i) { 7131 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; 7132 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode; 7133 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0; 7134 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1; 7135 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2; 7136 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, 7137 LPFC_CGN_CRC32_SEED); 7138 cp->cgn_info_crc = cpu_to_le32(crc); 7139 } 7140 spin_unlock_irq(&phba->hbalock); 7141 7142 phba->cmf_active_mode = phba->cgn_p.cgn_param_mode; 7143 7144 switch (oldmode) { 7145 case LPFC_CFG_OFF: 7146 if (phba->cgn_p.cgn_param_mode != LPFC_CFG_OFF) { 7147 /* Turning CMF on */ 7148 lpfc_cmf_start(phba); 7149 7150 if (phba->link_state >= LPFC_LINK_UP) { 7151 phba->cgn_reg_fpin = 7152 phba->cgn_init_reg_fpin; 7153 phba->cgn_reg_signal = 7154 phba->cgn_init_reg_signal; 7155 lpfc_issue_els_edc(phba->pport, 0); 7156 } 7157 } 7158 break; 7159 case LPFC_CFG_MANAGED: 7160 switch (phba->cgn_p.cgn_param_mode) { 7161 case LPFC_CFG_OFF: 7162 /* Turning CMF off */ 7163 lpfc_cmf_stop(phba); 7164 if (phba->link_state >= LPFC_LINK_UP) 7165 lpfc_issue_els_edc(phba->pport, 0); 7166 break; 7167 case LPFC_CFG_MONITOR: 7168 phba->cmf_max_bytes_per_interval = 7169 phba->cmf_link_byte_count; 7170 7171 /* Resume blocked IO - unblock on workqueue */ 7172 queue_work(phba->wq, 7173 &phba->unblock_request_work); 7174 break; 7175 } 7176 break; 7177 case LPFC_CFG_MONITOR: 7178 switch (phba->cgn_p.cgn_param_mode) { 7179 case LPFC_CFG_OFF: 7180 /* Turning CMF off */ 7181 lpfc_cmf_stop(phba); 7182 if (phba->link_state >= LPFC_LINK_UP) 7183 lpfc_issue_els_edc(phba->pport, 0); 7184 break; 7185 case LPFC_CFG_MANAGED: 7186 lpfc_cmf_signal_init(phba); 7187 break; 7188 } 7189 break; 7190 } 7191 if (oldmode != LPFC_CFG_OFF || 7192 oldmode != phba->cgn_p.cgn_param_mode) { 7193 if (phba->cgn_p.cgn_param_mode == LPFC_CFG_MANAGED) 7194 scnprintf(acr_string, sizeof(acr_string), "%u", 7195 phba->cgn_p.cgn_param_level0); 7196 else 7197 scnprintf(acr_string, 
					  sizeof(acr_string), "NA");

			dev_info(&phba->pcidev->dev, "%d: "
				 "4663 CMF: Mode %s acr %s\n",
				 phba->brd_no,
				 lpfc_cmf_mode_to_str
				 [phba->cgn_p.cgn_param_mode],
				 acr_string);
		}
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
				"4669 FW cgn parm buf wrong magic 0x%x "
				"version %d\n", p_cgn_param->cgn_param_magic,
				p_cgn_param->cgn_param_version);
	}
}

/**
 * lpfc_sli4_cgn_params_read - Read and Validate FW congestion parameters.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a read_object mailbox command to get the congestion
 * management parameters from the FW, parses them, and updates the
 * driver-maintained values.
 *
 * Returns
 *  0 if the object was empty
 *  A negative errno value if an error was encountered
 *  Count if bytes were read from object
 **/
int
lpfc_sli4_cgn_params_read(struct lpfc_hba *phba)
{
	int ret = 0;
	struct lpfc_cgn_param *p_cgn_param = NULL;
	u32 *pdata = NULL;
	u32 len = 0;

	/* Find out if the FW has a new set of congestion parameters. */
	len = sizeof(struct lpfc_cgn_param);
	pdata = kzalloc(len, GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;
	ret = lpfc_read_object(phba, (char *)LPFC_PORT_CFG_NAME,
			       pdata, len);

	/* 0 means no data.  A negative means error.  A positive means
	 * bytes were copied.
	 */
	if (!ret) {
		lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
				"4670 CGN RD OBJ returns no data\n");
		goto rd_obj_err;
	} else if (ret < 0) {
		/* Some error.  Just exit and return it to the caller.*/
		goto rd_obj_err;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
			"6234 READ CGN PARAMS Successful %d\n", len);

	/* Parse data pointer over len and update the phba congestion
	 * parameters with values passed back. The receive rate values
	 * may have been altered in FW, but take no action here.
	 */
	p_cgn_param = (struct lpfc_cgn_param *)pdata;
	lpfc_cgn_params_parse(phba, p_cgn_param, len);

 rd_obj_err:
	kfree(pdata);
	return ret;
}

/**
 * lpfc_sli4_cgn_parm_chg_evt - Process a FW congestion param change event
 * @phba: pointer to lpfc hba data structure.
 *
 * The FW generated Async ACQE SLI event calls this routine when
 * the event type is an SLI Internal Port Event and the Event Code
 * indicates a change to the FW maintained congestion parameters.
 *
 * This routine executes a Read_Object mailbox call to obtain the
 * current congestion parameters maintained in FW and corrects
 * the driver's active congestion parameters.
 *
 * The acqe event is not passed because there is no further data
 * required.
 *
 * Returns nonzero error if event processing encountered an error.
 * Zero otherwise for success.
 **/
static int
lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *phba)
{
	int ret = 0;

	if (!phba->sli4_hba.pc_sli4_params.cmf) {
		lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
				"4664 Cgn Evt when E2E off. Drop event\n");
		return -EACCES;
	}

	/* If the event is claiming an empty object, it's ok. A write
	 * could have cleared it. Only error is a negative return
	 * status.
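	 * The outcomes of lpfc_sli4_cgn_params_read() below are therefore:
	 * ret == 0 logs the empty object, ret < 0 logs the read failure, and
	 * ret > 0 means the parameters were already parsed and applied.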
7300 */ 7301 ret = lpfc_sli4_cgn_params_read(phba); 7302 if (ret < 0) { 7303 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, 7304 "4667 Error reading Cgn Params (%d)\n", 7305 ret); 7306 } else if (!ret) { 7307 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, 7308 "4673 CGN Event empty object.\n"); 7309 } 7310 return ret; 7311 } 7312 7313 /** 7314 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event 7315 * @phba: pointer to lpfc hba data structure. 7316 * 7317 * This routine is invoked by the worker thread to process all the pending 7318 * SLI4 asynchronous events. 7319 **/ 7320 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba) 7321 { 7322 struct lpfc_cq_event *cq_event; 7323 unsigned long iflags; 7324 7325 /* First, declare the async event has been handled */ 7326 spin_lock_irqsave(&phba->hbalock, iflags); 7327 phba->hba_flag &= ~ASYNC_EVENT; 7328 spin_unlock_irqrestore(&phba->hbalock, iflags); 7329 7330 /* Now, handle all the async events */ 7331 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); 7332 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) { 7333 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue, 7334 cq_event, struct lpfc_cq_event, list); 7335 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, 7336 iflags); 7337 7338 /* Process the asynchronous event */ 7339 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) { 7340 case LPFC_TRAILER_CODE_LINK: 7341 lpfc_sli4_async_link_evt(phba, 7342 &cq_event->cqe.acqe_link); 7343 break; 7344 case LPFC_TRAILER_CODE_FCOE: 7345 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip); 7346 break; 7347 case LPFC_TRAILER_CODE_DCBX: 7348 lpfc_sli4_async_dcbx_evt(phba, 7349 &cq_event->cqe.acqe_dcbx); 7350 break; 7351 case LPFC_TRAILER_CODE_GRP5: 7352 lpfc_sli4_async_grp5_evt(phba, 7353 &cq_event->cqe.acqe_grp5); 7354 break; 7355 case LPFC_TRAILER_CODE_FC: 7356 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc); 7357 break; 7358 case LPFC_TRAILER_CODE_SLI: 7359 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli); 7360 break; 7361 case LPFC_TRAILER_CODE_CMSTAT: 7362 lpfc_sli4_async_cmstat_evt(phba); 7363 break; 7364 default: 7365 lpfc_printf_log(phba, KERN_ERR, 7366 LOG_TRACE_EVENT, 7367 "1804 Invalid asynchronous event code: " 7368 "x%x\n", bf_get(lpfc_trailer_code, 7369 &cq_event->cqe.mcqe_cmpl)); 7370 break; 7371 } 7372 7373 /* Free the completion event processed to the free pool */ 7374 lpfc_sli4_cq_event_release(phba, cq_event); 7375 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); 7376 } 7377 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags); 7378 } 7379 7380 /** 7381 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event 7382 * @phba: pointer to lpfc hba data structure. 7383 * 7384 * This routine is invoked by the worker thread to process FCF table 7385 * rediscovery pending completion event. 
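 * It clears the FCF_REDISC_EVT flag, arms FCF_REDISC_FOV for fast failover,
 * and then starts a full FCF table scan from LPFC_FCOE_FCF_GET_FIRST.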
7386 **/ 7387 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba) 7388 { 7389 int rc; 7390 7391 spin_lock_irq(&phba->hbalock); 7392 /* Clear FCF rediscovery timeout event */ 7393 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT; 7394 /* Clear driver fast failover FCF record flag */ 7395 phba->fcf.failover_rec.flag = 0; 7396 /* Set state for FCF fast failover */ 7397 phba->fcf.fcf_flag |= FCF_REDISC_FOV; 7398 spin_unlock_irq(&phba->hbalock); 7399 7400 /* Scan FCF table from the first entry to re-discover SAN */ 7401 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 7402 "2777 Start post-quiescent FCF table scan\n"); 7403 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 7404 if (rc) 7405 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7406 "2747 Issue FCF scan read FCF mailbox " 7407 "command failed 0x%x\n", rc); 7408 } 7409 7410 /** 7411 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table 7412 * @phba: pointer to lpfc hba data structure. 7413 * @dev_grp: The HBA PCI-Device group number. 7414 * 7415 * This routine is invoked to set up the per HBA PCI-Device group function 7416 * API jump table entries. 7417 * 7418 * Return: 0 if success, otherwise -ENODEV 7419 **/ 7420 int 7421 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 7422 { 7423 int rc; 7424 7425 /* Set up lpfc PCI-device group */ 7426 phba->pci_dev_grp = dev_grp; 7427 7428 /* The LPFC_PCI_DEV_OC uses SLI4 */ 7429 if (dev_grp == LPFC_PCI_DEV_OC) 7430 phba->sli_rev = LPFC_SLI_REV4; 7431 7432 /* Set up device INIT API function jump table */ 7433 rc = lpfc_init_api_table_setup(phba, dev_grp); 7434 if (rc) 7435 return -ENODEV; 7436 /* Set up SCSI API function jump table */ 7437 rc = lpfc_scsi_api_table_setup(phba, dev_grp); 7438 if (rc) 7439 return -ENODEV; 7440 /* Set up SLI API function jump table */ 7441 rc = lpfc_sli_api_table_setup(phba, dev_grp); 7442 if (rc) 7443 return -ENODEV; 7444 /* Set up MBOX API function jump table */ 7445 rc = lpfc_mbox_api_table_setup(phba, dev_grp); 7446 if (rc) 7447 return -ENODEV; 7448 7449 return 0; 7450 } 7451 7452 /** 7453 * lpfc_log_intr_mode - Log the active interrupt mode 7454 * @phba: pointer to lpfc hba data structure. 7455 * @intr_mode: active interrupt mode adopted. 7456 * 7457 * This routine it invoked to log the currently used active interrupt mode 7458 * to the device. 7459 **/ 7460 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) 7461 { 7462 switch (intr_mode) { 7463 case 0: 7464 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7465 "0470 Enable INTx interrupt mode.\n"); 7466 break; 7467 case 1: 7468 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7469 "0481 Enabled MSI interrupt mode.\n"); 7470 break; 7471 case 2: 7472 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7473 "0480 Enabled MSI-X interrupt mode.\n"); 7474 break; 7475 default: 7476 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7477 "0482 Illegal interrupt mode.\n"); 7478 break; 7479 } 7480 return; 7481 } 7482 7483 /** 7484 * lpfc_enable_pci_dev - Enable a generic PCI device. 7485 * @phba: pointer to lpfc hba data structure. 7486 * 7487 * This routine is invoked to enable the PCI device that is common to all 7488 * PCI devices. 
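 * It enables the device's memory resources, requests the memory regions,
 * sets bus mastering, and saves the PCI state for EEH recovery; PCIe
 * devices are additionally flagged as needing a fundamental reset.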
7489 * 7490 * Return codes 7491 * 0 - successful 7492 * other values - error 7493 **/ 7494 static int 7495 lpfc_enable_pci_dev(struct lpfc_hba *phba) 7496 { 7497 struct pci_dev *pdev; 7498 7499 /* Obtain PCI device reference */ 7500 if (!phba->pcidev) 7501 goto out_error; 7502 else 7503 pdev = phba->pcidev; 7504 /* Enable PCI device */ 7505 if (pci_enable_device_mem(pdev)) 7506 goto out_error; 7507 /* Request PCI resource for the device */ 7508 if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME)) 7509 goto out_disable_device; 7510 /* Set up device as PCI master and save state for EEH */ 7511 pci_set_master(pdev); 7512 pci_try_set_mwi(pdev); 7513 pci_save_state(pdev); 7514 7515 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */ 7516 if (pci_is_pcie(pdev)) 7517 pdev->needs_freset = 1; 7518 7519 return 0; 7520 7521 out_disable_device: 7522 pci_disable_device(pdev); 7523 out_error: 7524 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7525 "1401 Failed to enable pci device\n"); 7526 return -ENODEV; 7527 } 7528 7529 /** 7530 * lpfc_disable_pci_dev - Disable a generic PCI device. 7531 * @phba: pointer to lpfc hba data structure. 7532 * 7533 * This routine is invoked to disable the PCI device that is common to all 7534 * PCI devices. 7535 **/ 7536 static void 7537 lpfc_disable_pci_dev(struct lpfc_hba *phba) 7538 { 7539 struct pci_dev *pdev; 7540 7541 /* Obtain PCI device reference */ 7542 if (!phba->pcidev) 7543 return; 7544 else 7545 pdev = phba->pcidev; 7546 /* Release PCI resource and disable PCI device */ 7547 pci_release_mem_regions(pdev); 7548 pci_disable_device(pdev); 7549 7550 return; 7551 } 7552 7553 /** 7554 * lpfc_reset_hba - Reset a hba 7555 * @phba: pointer to lpfc hba data structure. 7556 * 7557 * This routine is invoked to reset a hba device. It brings the HBA 7558 * offline, performs a board restart, and then brings the board back 7559 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up 7560 * on outstanding mailbox commands. 7561 **/ 7562 void 7563 lpfc_reset_hba(struct lpfc_hba *phba) 7564 { 7565 /* If resets are disabled then set error state and return. */ 7566 if (!phba->cfg_enable_hba_reset) { 7567 phba->link_state = LPFC_HBA_ERROR; 7568 return; 7569 } 7570 7571 /* If not LPFC_SLI_ACTIVE, force all IO to be flushed */ 7572 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) { 7573 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 7574 } else { 7575 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 7576 lpfc_sli_flush_io_rings(phba); 7577 } 7578 lpfc_offline(phba); 7579 lpfc_sli_brdrestart(phba); 7580 lpfc_online(phba); 7581 lpfc_unblock_mgmt_io(phba); 7582 } 7583 7584 /** 7585 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions 7586 * @phba: pointer to lpfc hba data structure. 7587 * 7588 * This function enables the PCI SR-IOV virtual functions to a physical 7589 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to 7590 * enable the number of virtual functions to the physical function. As 7591 * not all devices support SR-IOV, the return code from the pci_enable_sriov() 7592 * API call does not considered as an error condition for most of the device. 
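 * In practice the routine only reads PCI_SRIOV_TOTAL_VF from the device's
 * SR-IOV extended capability and returns the maximum number of virtual
 * functions supported, or 0 when the capability is absent.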
 **/
uint16_t
lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
{
	struct pci_dev *pdev = phba->pcidev;
	uint16_t nr_virtfn;
	int pos;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos == 0)
		return 0;

	pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
	return nr_virtfn;
}

/**
 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
 * @phba: pointer to lpfc hba data structure.
 * @nr_vfn: number of virtual functions to be enabled.
 *
 * This function enables the PCI SR-IOV virtual functions to a physical
 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
 * enable the number of virtual functions to the physical function. As
 * not all devices support SR-IOV, the return code from the pci_enable_sriov()
 * API call is not considered an error condition for most devices.
 **/
int
lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
{
	struct pci_dev *pdev = phba->pcidev;
	uint16_t max_nr_vfn;
	int rc;

	max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
	if (nr_vfn > max_nr_vfn) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3057 Requested vfs (%d) greater than "
				"supported vfs (%d)", nr_vfn, max_nr_vfn);
		return -EINVAL;
	}

	rc = pci_enable_sriov(pdev, nr_vfn);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2806 Failed to enable sriov on this device "
				"with vfn number nr_vf:%d, rc:%d\n",
				nr_vfn, rc);
	} else {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2807 Successful enable sriov on this device "
				"with vfn number nr_vf:%d\n", nr_vfn);
	}
	return rc;
}

static void
lpfc_unblock_requests_work(struct work_struct *work)
{
	struct lpfc_hba *phba = container_of(work, struct lpfc_hba,
					     unblock_request_work);

	lpfc_unblock_requests(phba);
}

/**
 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources before the
 * device specific resource setup to support the HBA device it attached to.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	/*
	 * Driver resources common to all SLI revisions
	 */
	atomic_set(&phba->fast_event_count, 0);
	atomic_set(&phba->dbg_log_idx, 0);
	atomic_set(&phba->dbg_log_cnt, 0);
	atomic_set(&phba->dbg_log_dmping, 0);
	spin_lock_init(&phba->hbalock);

	/* Initialize port_list spinlock */
	spin_lock_init(&phba->port_list_lock);
	INIT_LIST_HEAD(&phba->port_list);

	INIT_LIST_HEAD(&phba->work_list);

	/* Initialize the wait queue head for the kernel thread */
	init_waitqueue_head(&phba->work_waitq);

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"1403 Protocols supported %s %s %s\n",
			((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
				"SCSI" : " "),
			((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
				"NVME" : " "),
			(phba->nvmet_support ?
"NVMET" : " ")); 7698 7699 /* Initialize the IO buffer list used by driver for SLI3 SCSI */ 7700 spin_lock_init(&phba->scsi_buf_list_get_lock); 7701 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get); 7702 spin_lock_init(&phba->scsi_buf_list_put_lock); 7703 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); 7704 7705 /* Initialize the fabric iocb list */ 7706 INIT_LIST_HEAD(&phba->fabric_iocb_list); 7707 7708 /* Initialize list to save ELS buffers */ 7709 INIT_LIST_HEAD(&phba->elsbuf); 7710 7711 /* Initialize FCF connection rec list */ 7712 INIT_LIST_HEAD(&phba->fcf_conn_rec_list); 7713 7714 /* Initialize OAS configuration list */ 7715 spin_lock_init(&phba->devicelock); 7716 INIT_LIST_HEAD(&phba->luns); 7717 7718 /* MBOX heartbeat timer */ 7719 timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0); 7720 /* Fabric block timer */ 7721 timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0); 7722 /* EA polling mode timer */ 7723 timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0); 7724 /* Heartbeat timer */ 7725 timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0); 7726 7727 INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work); 7728 7729 INIT_DELAYED_WORK(&phba->idle_stat_delay_work, 7730 lpfc_idle_stat_delay_work); 7731 INIT_WORK(&phba->unblock_request_work, lpfc_unblock_requests_work); 7732 return 0; 7733 } 7734 7735 /** 7736 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev 7737 * @phba: pointer to lpfc hba data structure. 7738 * 7739 * This routine is invoked to set up the driver internal resources specific to 7740 * support the SLI-3 HBA device it attached to. 7741 * 7742 * Return codes 7743 * 0 - successful 7744 * other values - error 7745 **/ 7746 static int 7747 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) 7748 { 7749 int rc, entry_sz; 7750 7751 /* 7752 * Initialize timers used by driver 7753 */ 7754 7755 /* FCP polling mode timer */ 7756 timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0); 7757 7758 /* Host attention work mask setup */ 7759 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); 7760 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 7761 7762 /* Get all the module params for configuring this host */ 7763 lpfc_get_cfgparam(phba); 7764 /* Set up phase-1 common device driver resources */ 7765 7766 rc = lpfc_setup_driver_resource_phase1(phba); 7767 if (rc) 7768 return -ENODEV; 7769 7770 if (!phba->sli.sli3_ring) 7771 phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING, 7772 sizeof(struct lpfc_sli_ring), 7773 GFP_KERNEL); 7774 if (!phba->sli.sli3_ring) 7775 return -ENOMEM; 7776 7777 /* 7778 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size 7779 * used to create the sg_dma_buf_pool must be dynamically calculated. 7780 */ 7781 7782 if (phba->sli_rev == LPFC_SLI_REV4) 7783 entry_sz = sizeof(struct sli4_sge); 7784 else 7785 entry_sz = sizeof(struct ulp_bde64); 7786 7787 /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */ 7788 if (phba->cfg_enable_bg) { 7789 /* 7790 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd, 7791 * the FCP rsp, and a BDE for each. Sice we have no control 7792 * over how many protection data segments the SCSI Layer 7793 * will hand us (ie: there could be one for every block 7794 * in the IO), we just allocate enough BDEs to accomidate 7795 * our max amount and we need to limit lpfc_sg_seg_cnt to 7796 * minimize the risk of running out. 
7797 */ 7798 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 7799 sizeof(struct fcp_rsp) + 7800 (LPFC_MAX_SG_SEG_CNT * entry_sz); 7801 7802 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF) 7803 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF; 7804 7805 /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */ 7806 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT; 7807 } else { 7808 /* 7809 * The scsi_buf for a regular I/O will hold the FCP cmnd, 7810 * the FCP rsp, a BDE for each, and a BDE for up to 7811 * cfg_sg_seg_cnt data segments. 7812 */ 7813 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 7814 sizeof(struct fcp_rsp) + 7815 ((phba->cfg_sg_seg_cnt + 2) * entry_sz); 7816 7817 /* Total BDEs in BPL for scsi_sg_list */ 7818 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2; 7819 } 7820 7821 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 7822 "9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n", 7823 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, 7824 phba->cfg_total_seg_cnt); 7825 7826 phba->max_vpi = LPFC_MAX_VPI; 7827 /* This will be set to correct value after config_port mbox */ 7828 phba->max_vports = 0; 7829 7830 /* 7831 * Initialize the SLI Layer to run with lpfc HBAs. 7832 */ 7833 lpfc_sli_setup(phba); 7834 lpfc_sli_queue_init(phba); 7835 7836 /* Allocate device driver memory */ 7837 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ)) 7838 return -ENOMEM; 7839 7840 phba->lpfc_sg_dma_buf_pool = 7841 dma_pool_create("lpfc_sg_dma_buf_pool", 7842 &phba->pcidev->dev, phba->cfg_sg_dma_buf_size, 7843 BPL_ALIGN_SZ, 0); 7844 7845 if (!phba->lpfc_sg_dma_buf_pool) 7846 goto fail_free_mem; 7847 7848 phba->lpfc_cmd_rsp_buf_pool = 7849 dma_pool_create("lpfc_cmd_rsp_buf_pool", 7850 &phba->pcidev->dev, 7851 sizeof(struct fcp_cmnd) + 7852 sizeof(struct fcp_rsp), 7853 BPL_ALIGN_SZ, 0); 7854 7855 if (!phba->lpfc_cmd_rsp_buf_pool) 7856 goto fail_free_dma_buf_pool; 7857 7858 /* 7859 * Enable sr-iov virtual functions if supported and configured 7860 * through the module parameter. 7861 */ 7862 if (phba->cfg_sriov_nr_virtfn > 0) { 7863 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, 7864 phba->cfg_sriov_nr_virtfn); 7865 if (rc) { 7866 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7867 "2808 Requested number of SR-IOV " 7868 "virtual functions (%d) is not " 7869 "supported\n", 7870 phba->cfg_sriov_nr_virtfn); 7871 phba->cfg_sriov_nr_virtfn = 0; 7872 } 7873 } 7874 7875 return 0; 7876 7877 fail_free_dma_buf_pool: 7878 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool); 7879 phba->lpfc_sg_dma_buf_pool = NULL; 7880 fail_free_mem: 7881 lpfc_mem_free(phba); 7882 return -ENOMEM; 7883 } 7884 7885 /** 7886 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev 7887 * @phba: pointer to lpfc hba data structure. 7888 * 7889 * This routine is invoked to unset the driver internal resources set up 7890 * specific for supporting the SLI-3 HBA device it attached to. 7891 **/ 7892 static void 7893 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) 7894 { 7895 /* Free device driver memory allocated */ 7896 lpfc_mem_free_all(phba); 7897 7898 return; 7899 } 7900 7901 /** 7902 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev 7903 * @phba: pointer to lpfc hba data structure. 7904 * 7905 * This routine is invoked to set up the driver internal resources specific to 7906 * support the SLI-4 HBA device it attached to. 
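 * This covers the phase-1 common resources, the lpfc_wq workqueue, the SLI4
 * timers, the bootstrap mailbox, SGL and RPI header initialization, and the
 * per-CPU EQ and congestion statistics.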
7907 * 7908 * Return codes 7909 * 0 - successful 7910 * other values - error 7911 **/ 7912 static int 7913 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) 7914 { 7915 LPFC_MBOXQ_t *mboxq; 7916 MAILBOX_t *mb; 7917 int rc, i, max_buf_size; 7918 int longs; 7919 int extra; 7920 uint64_t wwn; 7921 u32 if_type; 7922 u32 if_fam; 7923 7924 phba->sli4_hba.num_present_cpu = lpfc_present_cpu; 7925 phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1; 7926 phba->sli4_hba.curr_disp_cpu = 0; 7927 7928 /* Get all the module params for configuring this host */ 7929 lpfc_get_cfgparam(phba); 7930 7931 /* Set up phase-1 common device driver resources */ 7932 rc = lpfc_setup_driver_resource_phase1(phba); 7933 if (rc) 7934 return -ENODEV; 7935 7936 /* Before proceed, wait for POST done and device ready */ 7937 rc = lpfc_sli4_post_status_check(phba); 7938 if (rc) 7939 return -ENODEV; 7940 7941 /* Allocate all driver workqueues here */ 7942 7943 /* The lpfc_wq workqueue for deferred irq use */ 7944 phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0); 7945 if (!phba->wq) 7946 return -ENOMEM; 7947 7948 /* 7949 * Initialize timers used by driver 7950 */ 7951 7952 timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0); 7953 7954 /* FCF rediscover timer */ 7955 timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0); 7956 7957 /* CMF congestion timer */ 7958 hrtimer_init(&phba->cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 7959 phba->cmf_timer.function = lpfc_cmf_timer; 7960 7961 /* 7962 * Control structure for handling external multi-buffer mailbox 7963 * command pass-through. 7964 */ 7965 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0, 7966 sizeof(struct lpfc_mbox_ext_buf_ctx)); 7967 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list); 7968 7969 phba->max_vpi = LPFC_MAX_VPI; 7970 7971 /* This will be set to correct value after the read_config mbox */ 7972 phba->max_vports = 0; 7973 7974 /* Program the default value of vlan_id and fc_map */ 7975 phba->valid_vlan = 0; 7976 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 7977 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 7978 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 7979 7980 /* 7981 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands 7982 * we will associate a new ring, for each EQ/CQ/WQ tuple. 7983 * The WQ create will allocate the ring. 7984 */ 7985 7986 /* Initialize buffer queue management fields */ 7987 INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list); 7988 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc; 7989 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free; 7990 7991 /* for VMID idle timeout if VMID is enabled */ 7992 if (lpfc_is_vmid_enabled(phba)) 7993 timer_setup(&phba->inactive_vmid_poll, lpfc_vmid_poll, 0); 7994 7995 /* 7996 * Initialize the SLI Layer to run with lpfc SLI4 HBAs. 
7997 */ 7998 /* Initialize the Abort buffer list used by driver */ 7999 spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock); 8000 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list); 8001 8002 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 8003 /* Initialize the Abort nvme buffer list used by driver */ 8004 spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock); 8005 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 8006 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list); 8007 spin_lock_init(&phba->sli4_hba.t_active_list_lock); 8008 INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list); 8009 } 8010 8011 /* This abort list used by worker thread */ 8012 spin_lock_init(&phba->sli4_hba.sgl_list_lock); 8013 spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock); 8014 spin_lock_init(&phba->sli4_hba.asynce_list_lock); 8015 spin_lock_init(&phba->sli4_hba.els_xri_abrt_list_lock); 8016 8017 /* 8018 * Initialize driver internal slow-path work queues 8019 */ 8020 8021 /* Driver internel slow-path CQ Event pool */ 8022 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool); 8023 /* Response IOCB work queue list */ 8024 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event); 8025 /* Asynchronous event CQ Event work queue list */ 8026 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue); 8027 /* Slow-path XRI aborted CQ Event work queue list */ 8028 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue); 8029 /* Receive queue CQ Event work queue list */ 8030 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue); 8031 8032 /* Initialize extent block lists. */ 8033 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list); 8034 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list); 8035 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list); 8036 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list); 8037 8038 /* Initialize mboxq lists. If the early init routines fail 8039 * these lists need to be correctly initialized. 8040 */ 8041 INIT_LIST_HEAD(&phba->sli.mboxq); 8042 INIT_LIST_HEAD(&phba->sli.mboxq_cmpl); 8043 8044 /* initialize optic_state to 0xFF */ 8045 phba->sli4_hba.lnk_info.optic_state = 0xff; 8046 8047 /* Allocate device driver memory */ 8048 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ); 8049 if (rc) 8050 goto out_destroy_workqueue; 8051 8052 /* IF Type 2 ports get initialized now. */ 8053 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= 8054 LPFC_SLI_INTF_IF_TYPE_2) { 8055 rc = lpfc_pci_function_reset(phba); 8056 if (unlikely(rc)) { 8057 rc = -ENODEV; 8058 goto out_free_mem; 8059 } 8060 phba->temp_sensor_support = 1; 8061 } 8062 8063 /* Create the bootstrap mailbox command */ 8064 rc = lpfc_create_bootstrap_mbox(phba); 8065 if (unlikely(rc)) 8066 goto out_free_mem; 8067 8068 /* Set up the host's endian order with the device. */ 8069 rc = lpfc_setup_endian_order(phba); 8070 if (unlikely(rc)) 8071 goto out_free_bsmbx; 8072 8073 /* Set up the hba's configuration parameters. */ 8074 rc = lpfc_sli4_read_config(phba); 8075 if (unlikely(rc)) 8076 goto out_free_bsmbx; 8077 8078 if (phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG) { 8079 /* Right now the link is down, if FA-PWWN is configured the 8080 * firmware will try FLOGI before the driver gets a link up. 8081 * If it fails, the driver should get a MISCONFIGURED async 8082 * event which will clear this flag. The only notification 8083 * the driver gets is if it fails, if it succeeds there is no 8084 * notification given. Assume success. 
8085 */ 8086 phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC; 8087 } 8088 8089 rc = lpfc_mem_alloc_active_rrq_pool_s4(phba); 8090 if (unlikely(rc)) 8091 goto out_free_bsmbx; 8092 8093 /* IF Type 0 ports get initialized now. */ 8094 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 8095 LPFC_SLI_INTF_IF_TYPE_0) { 8096 rc = lpfc_pci_function_reset(phba); 8097 if (unlikely(rc)) 8098 goto out_free_bsmbx; 8099 } 8100 8101 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 8102 GFP_KERNEL); 8103 if (!mboxq) { 8104 rc = -ENOMEM; 8105 goto out_free_bsmbx; 8106 } 8107 8108 /* Check for NVMET being configured */ 8109 phba->nvmet_support = 0; 8110 if (lpfc_enable_nvmet_cnt) { 8111 8112 /* First get WWN of HBA instance */ 8113 lpfc_read_nv(phba, mboxq); 8114 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 8115 if (rc != MBX_SUCCESS) { 8116 lpfc_printf_log(phba, KERN_ERR, 8117 LOG_TRACE_EVENT, 8118 "6016 Mailbox failed , mbxCmd x%x " 8119 "READ_NV, mbxStatus x%x\n", 8120 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 8121 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 8122 mempool_free(mboxq, phba->mbox_mem_pool); 8123 rc = -EIO; 8124 goto out_free_bsmbx; 8125 } 8126 mb = &mboxq->u.mb; 8127 memcpy(&wwn, (char *)mb->un.varRDnvp.nodename, 8128 sizeof(uint64_t)); 8129 wwn = cpu_to_be64(wwn); 8130 phba->sli4_hba.wwnn.u.name = wwn; 8131 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, 8132 sizeof(uint64_t)); 8133 /* wwn is WWPN of HBA instance */ 8134 wwn = cpu_to_be64(wwn); 8135 phba->sli4_hba.wwpn.u.name = wwn; 8136 8137 /* Check to see if it matches any module parameter */ 8138 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) { 8139 if (wwn == lpfc_enable_nvmet[i]) { 8140 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 8141 if (lpfc_nvmet_mem_alloc(phba)) 8142 break; 8143 8144 phba->nvmet_support = 1; /* a match */ 8145 8146 lpfc_printf_log(phba, KERN_ERR, 8147 LOG_TRACE_EVENT, 8148 "6017 NVME Target %016llx\n", 8149 wwn); 8150 #else 8151 lpfc_printf_log(phba, KERN_ERR, 8152 LOG_TRACE_EVENT, 8153 "6021 Can't enable NVME Target." 8154 " NVME_TARGET_FC infrastructure" 8155 " is not in kernel\n"); 8156 #endif 8157 /* Not supported for NVMET */ 8158 phba->cfg_xri_rebalancing = 0; 8159 if (phba->irq_chann_mode == NHT_MODE) { 8160 phba->cfg_irq_chann = 8161 phba->sli4_hba.num_present_cpu; 8162 phba->cfg_hdw_queue = 8163 phba->sli4_hba.num_present_cpu; 8164 phba->irq_chann_mode = NORMAL_MODE; 8165 } 8166 break; 8167 } 8168 } 8169 } 8170 8171 lpfc_nvme_mod_param_dep(phba); 8172 8173 /* 8174 * Get sli4 parameters that override parameters from Port capabilities. 8175 * If this call fails, it isn't critical unless the SLI4 parameters come 8176 * back in conflict. 
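	 * A failure here is tolerated only on IF_TYPE_0 / FAMILY_BE2 adapters,
	 * and even then not when both extents and RPI headers are in use.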
8177 */ 8178 rc = lpfc_get_sli4_parameters(phba, mboxq); 8179 if (rc) { 8180 if_type = bf_get(lpfc_sli_intf_if_type, 8181 &phba->sli4_hba.sli_intf); 8182 if_fam = bf_get(lpfc_sli_intf_sli_family, 8183 &phba->sli4_hba.sli_intf); 8184 if (phba->sli4_hba.extents_in_use && 8185 phba->sli4_hba.rpi_hdrs_in_use) { 8186 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8187 "2999 Unsupported SLI4 Parameters " 8188 "Extents and RPI headers enabled.\n"); 8189 if (if_type == LPFC_SLI_INTF_IF_TYPE_0 && 8190 if_fam == LPFC_SLI_INTF_FAMILY_BE2) { 8191 mempool_free(mboxq, phba->mbox_mem_pool); 8192 rc = -EIO; 8193 goto out_free_bsmbx; 8194 } 8195 } 8196 if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 && 8197 if_fam == LPFC_SLI_INTF_FAMILY_BE2)) { 8198 mempool_free(mboxq, phba->mbox_mem_pool); 8199 rc = -EIO; 8200 goto out_free_bsmbx; 8201 } 8202 } 8203 8204 /* 8205 * 1 for cmd, 1 for rsp, NVME adds an extra one 8206 * for boundary conditions in its max_sgl_segment template. 8207 */ 8208 extra = 2; 8209 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 8210 extra++; 8211 8212 /* 8213 * It doesn't matter what family our adapter is in, we are 8214 * limited to 2 Pages, 512 SGEs, for our SGL. 8215 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp 8216 */ 8217 max_buf_size = (2 * SLI4_PAGE_SIZE); 8218 8219 /* 8220 * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size 8221 * used to create the sg_dma_buf_pool must be calculated. 8222 */ 8223 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { 8224 /* Both cfg_enable_bg and cfg_external_dif code paths */ 8225 8226 /* 8227 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd, 8228 * the FCP rsp, and a SGE. Sice we have no control 8229 * over how many protection segments the SCSI Layer 8230 * will hand us (ie: there could be one for every block 8231 * in the IO), just allocate enough SGEs to accomidate 8232 * our max amount and we need to limit lpfc_sg_seg_cnt 8233 * to minimize the risk of running out. 8234 */ 8235 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 8236 sizeof(struct fcp_rsp) + max_buf_size; 8237 8238 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */ 8239 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT; 8240 8241 /* 8242 * If supporting DIF, reduce the seg count for scsi to 8243 * allow room for the DIF sges. 8244 */ 8245 if (phba->cfg_enable_bg && 8246 phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF) 8247 phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF; 8248 else 8249 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt; 8250 8251 } else { 8252 /* 8253 * The scsi_buf for a regular I/O holds the FCP cmnd, 8254 * the FCP rsp, a SGE for each, and a SGE for up to 8255 * cfg_sg_seg_cnt data segments. 8256 */ 8257 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 8258 sizeof(struct fcp_rsp) + 8259 ((phba->cfg_sg_seg_cnt + extra) * 8260 sizeof(struct sli4_sge)); 8261 8262 /* Total SGEs for scsi_sg_list */ 8263 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra; 8264 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt; 8265 8266 /* 8267 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only 8268 * need to post 1 page for the SGL. 
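	 * For example, with extra == 2 (no NVME), a cfg_sg_seg_cnt of 254
	 * keeps the SGL at the 256-entry, single-page boundary noted above.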
8269 */ 8270 } 8271 8272 if (phba->cfg_xpsgl && !phba->nvmet_support) 8273 phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE; 8274 else if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ) 8275 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ; 8276 else 8277 phba->cfg_sg_dma_buf_size = 8278 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size); 8279 8280 phba->border_sge_num = phba->cfg_sg_dma_buf_size / 8281 sizeof(struct sli4_sge); 8282 8283 /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */ 8284 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 8285 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) { 8286 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT, 8287 "6300 Reducing NVME sg segment " 8288 "cnt to %d\n", 8289 LPFC_MAX_NVME_SEG_CNT); 8290 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT; 8291 } else 8292 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt; 8293 } 8294 8295 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 8296 "9087 sg_seg_cnt:%d dmabuf_size:%d " 8297 "total:%d scsi:%d nvme:%d\n", 8298 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, 8299 phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt, 8300 phba->cfg_nvme_seg_cnt); 8301 8302 if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE) 8303 i = phba->cfg_sg_dma_buf_size; 8304 else 8305 i = SLI4_PAGE_SIZE; 8306 8307 phba->lpfc_sg_dma_buf_pool = 8308 dma_pool_create("lpfc_sg_dma_buf_pool", 8309 &phba->pcidev->dev, 8310 phba->cfg_sg_dma_buf_size, 8311 i, 0); 8312 if (!phba->lpfc_sg_dma_buf_pool) { 8313 rc = -ENOMEM; 8314 goto out_free_bsmbx; 8315 } 8316 8317 phba->lpfc_cmd_rsp_buf_pool = 8318 dma_pool_create("lpfc_cmd_rsp_buf_pool", 8319 &phba->pcidev->dev, 8320 sizeof(struct fcp_cmnd) + 8321 sizeof(struct fcp_rsp), 8322 i, 0); 8323 if (!phba->lpfc_cmd_rsp_buf_pool) { 8324 rc = -ENOMEM; 8325 goto out_free_sg_dma_buf; 8326 } 8327 8328 mempool_free(mboxq, phba->mbox_mem_pool); 8329 8330 /* Verify OAS is supported */ 8331 lpfc_sli4_oas_verify(phba); 8332 8333 /* Verify RAS support on adapter */ 8334 lpfc_sli4_ras_init(phba); 8335 8336 /* Verify all the SLI4 queues */ 8337 rc = lpfc_sli4_queue_verify(phba); 8338 if (rc) 8339 goto out_free_cmd_rsp_buf; 8340 8341 /* Create driver internal CQE event pool */ 8342 rc = lpfc_sli4_cq_event_pool_create(phba); 8343 if (rc) 8344 goto out_free_cmd_rsp_buf; 8345 8346 /* Initialize sgl lists per host */ 8347 lpfc_init_sgl_list(phba); 8348 8349 /* Allocate and initialize active sgl array */ 8350 rc = lpfc_init_active_sgl_array(phba); 8351 if (rc) { 8352 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8353 "1430 Failed to initialize sgl list.\n"); 8354 goto out_destroy_cq_event_pool; 8355 } 8356 rc = lpfc_sli4_init_rpi_hdrs(phba); 8357 if (rc) { 8358 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8359 "1432 Failed to initialize rpi headers.\n"); 8360 goto out_free_active_sgl; 8361 } 8362 8363 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */ 8364 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; 8365 phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long), 8366 GFP_KERNEL); 8367 if (!phba->fcf.fcf_rr_bmask) { 8368 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8369 "2759 Failed allocate memory for FCF round " 8370 "robin failover bmask\n"); 8371 rc = -ENOMEM; 8372 goto out_remove_rpi_hdrs; 8373 } 8374 8375 phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann, 8376 sizeof(struct lpfc_hba_eq_hdl), 8377 GFP_KERNEL); 8378 if (!phba->sli4_hba.hba_eq_hdl) { 8379 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8380 "2572 Failed allocate memory for " 8381 
"fast-path per-EQ handle array\n"); 8382 rc = -ENOMEM; 8383 goto out_free_fcf_rr_bmask; 8384 } 8385 8386 phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu, 8387 sizeof(struct lpfc_vector_map_info), 8388 GFP_KERNEL); 8389 if (!phba->sli4_hba.cpu_map) { 8390 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8391 "3327 Failed allocate memory for msi-x " 8392 "interrupt vector mapping\n"); 8393 rc = -ENOMEM; 8394 goto out_free_hba_eq_hdl; 8395 } 8396 8397 phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info); 8398 if (!phba->sli4_hba.eq_info) { 8399 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8400 "3321 Failed allocation for per_cpu stats\n"); 8401 rc = -ENOMEM; 8402 goto out_free_hba_cpu_map; 8403 } 8404 8405 phba->sli4_hba.idle_stat = kcalloc(phba->sli4_hba.num_possible_cpu, 8406 sizeof(*phba->sli4_hba.idle_stat), 8407 GFP_KERNEL); 8408 if (!phba->sli4_hba.idle_stat) { 8409 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8410 "3390 Failed allocation for idle_stat\n"); 8411 rc = -ENOMEM; 8412 goto out_free_hba_eq_info; 8413 } 8414 8415 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 8416 phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat); 8417 if (!phba->sli4_hba.c_stat) { 8418 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8419 "3332 Failed allocating per cpu hdwq stats\n"); 8420 rc = -ENOMEM; 8421 goto out_free_hba_idle_stat; 8422 } 8423 #endif 8424 8425 phba->cmf_stat = alloc_percpu(struct lpfc_cgn_stat); 8426 if (!phba->cmf_stat) { 8427 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8428 "3331 Failed allocating per cpu cgn stats\n"); 8429 rc = -ENOMEM; 8430 goto out_free_hba_hdwq_info; 8431 } 8432 8433 /* 8434 * Enable sr-iov virtual functions if supported and configured 8435 * through the module parameter. 8436 */ 8437 if (phba->cfg_sriov_nr_virtfn > 0) { 8438 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, 8439 phba->cfg_sriov_nr_virtfn); 8440 if (rc) { 8441 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 8442 "3020 Requested number of SR-IOV " 8443 "virtual functions (%d) is not " 8444 "supported\n", 8445 phba->cfg_sriov_nr_virtfn); 8446 phba->cfg_sriov_nr_virtfn = 0; 8447 } 8448 } 8449 8450 return 0; 8451 8452 out_free_hba_hdwq_info: 8453 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 8454 free_percpu(phba->sli4_hba.c_stat); 8455 out_free_hba_idle_stat: 8456 #endif 8457 kfree(phba->sli4_hba.idle_stat); 8458 out_free_hba_eq_info: 8459 free_percpu(phba->sli4_hba.eq_info); 8460 out_free_hba_cpu_map: 8461 kfree(phba->sli4_hba.cpu_map); 8462 out_free_hba_eq_hdl: 8463 kfree(phba->sli4_hba.hba_eq_hdl); 8464 out_free_fcf_rr_bmask: 8465 kfree(phba->fcf.fcf_rr_bmask); 8466 out_remove_rpi_hdrs: 8467 lpfc_sli4_remove_rpi_hdrs(phba); 8468 out_free_active_sgl: 8469 lpfc_free_active_sgl(phba); 8470 out_destroy_cq_event_pool: 8471 lpfc_sli4_cq_event_pool_destroy(phba); 8472 out_free_cmd_rsp_buf: 8473 dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool); 8474 phba->lpfc_cmd_rsp_buf_pool = NULL; 8475 out_free_sg_dma_buf: 8476 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool); 8477 phba->lpfc_sg_dma_buf_pool = NULL; 8478 out_free_bsmbx: 8479 lpfc_destroy_bootstrap_mbox(phba); 8480 out_free_mem: 8481 lpfc_mem_free(phba); 8482 out_destroy_workqueue: 8483 destroy_workqueue(phba->wq); 8484 phba->wq = NULL; 8485 return rc; 8486 } 8487 8488 /** 8489 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev 8490 * @phba: pointer to lpfc hba data structure. 
8491 * 8492 * This routine is invoked to unset the driver internal resources set up 8493 * specific for supporting the SLI-4 HBA device it attached to. 8494 **/ 8495 static void 8496 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) 8497 { 8498 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; 8499 8500 free_percpu(phba->sli4_hba.eq_info); 8501 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 8502 free_percpu(phba->sli4_hba.c_stat); 8503 #endif 8504 free_percpu(phba->cmf_stat); 8505 kfree(phba->sli4_hba.idle_stat); 8506 8507 /* Free memory allocated for msi-x interrupt vector to CPU mapping */ 8508 kfree(phba->sli4_hba.cpu_map); 8509 phba->sli4_hba.num_possible_cpu = 0; 8510 phba->sli4_hba.num_present_cpu = 0; 8511 phba->sli4_hba.curr_disp_cpu = 0; 8512 cpumask_clear(&phba->sli4_hba.irq_aff_mask); 8513 8514 /* Free memory allocated for fast-path work queue handles */ 8515 kfree(phba->sli4_hba.hba_eq_hdl); 8516 8517 /* Free the allocated rpi headers. */ 8518 lpfc_sli4_remove_rpi_hdrs(phba); 8519 lpfc_sli4_remove_rpis(phba); 8520 8521 /* Free eligible FCF index bmask */ 8522 kfree(phba->fcf.fcf_rr_bmask); 8523 8524 /* Free the ELS sgl list */ 8525 lpfc_free_active_sgl(phba); 8526 lpfc_free_els_sgl_list(phba); 8527 lpfc_free_nvmet_sgl_list(phba); 8528 8529 /* Free the completion queue EQ event pool */ 8530 lpfc_sli4_cq_event_release_all(phba); 8531 lpfc_sli4_cq_event_pool_destroy(phba); 8532 8533 /* Release resource identifiers. */ 8534 lpfc_sli4_dealloc_resource_identifiers(phba); 8535 8536 /* Free the bsmbx region. */ 8537 lpfc_destroy_bootstrap_mbox(phba); 8538 8539 /* Free the SLI Layer memory with SLI4 HBAs */ 8540 lpfc_mem_free_all(phba); 8541 8542 /* Free the current connect table */ 8543 list_for_each_entry_safe(conn_entry, next_conn_entry, 8544 &phba->fcf_conn_rec_list, list) { 8545 list_del_init(&conn_entry->list); 8546 kfree(conn_entry); 8547 } 8548 8549 return; 8550 } 8551 8552 /** 8553 * lpfc_init_api_table_setup - Set up init api function jump table 8554 * @phba: The hba struct for which this call is being executed. 8555 * @dev_grp: The HBA PCI-Device group number. 8556 * 8557 * This routine sets up the device INIT interface API function jump table 8558 * in @phba struct. 8559 * 8560 * Returns: 0 - success, -ENODEV - failure. 8561 **/ 8562 int 8563 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 8564 { 8565 phba->lpfc_hba_init_link = lpfc_hba_init_link; 8566 phba->lpfc_hba_down_link = lpfc_hba_down_link; 8567 phba->lpfc_selective_reset = lpfc_selective_reset; 8568 switch (dev_grp) { 8569 case LPFC_PCI_DEV_LP: 8570 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; 8571 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3; 8572 phba->lpfc_stop_port = lpfc_stop_port_s3; 8573 break; 8574 case LPFC_PCI_DEV_OC: 8575 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4; 8576 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4; 8577 phba->lpfc_stop_port = lpfc_stop_port_s4; 8578 break; 8579 default: 8580 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8581 "1431 Invalid HBA PCI-device group: 0x%x\n", 8582 dev_grp); 8583 return -ENODEV; 8584 } 8585 return 0; 8586 } 8587 8588 /** 8589 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources. 8590 * @phba: pointer to lpfc hba data structure. 8591 * 8592 * This routine is invoked to set up the driver internal resources after the 8593 * device specific resource setup to support the HBA device it attached to. 
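 * At present this consists of starting the "lpfc_worker_%d" kernel thread
 * that services the adapter's deferred work via lpfc_do_work().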
8594 * 8595 * Return codes 8596 * 0 - successful 8597 * other values - error 8598 **/ 8599 static int 8600 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba) 8601 { 8602 int error; 8603 8604 /* Startup the kernel thread for this host adapter. */ 8605 phba->worker_thread = kthread_run(lpfc_do_work, phba, 8606 "lpfc_worker_%d", phba->brd_no); 8607 if (IS_ERR(phba->worker_thread)) { 8608 error = PTR_ERR(phba->worker_thread); 8609 return error; 8610 } 8611 8612 return 0; 8613 } 8614 8615 /** 8616 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources. 8617 * @phba: pointer to lpfc hba data structure. 8618 * 8619 * This routine is invoked to unset the driver internal resources set up after 8620 * the device specific resource setup for supporting the HBA device it 8621 * attached to. 8622 **/ 8623 static void 8624 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba) 8625 { 8626 if (phba->wq) { 8627 destroy_workqueue(phba->wq); 8628 phba->wq = NULL; 8629 } 8630 8631 /* Stop kernel worker thread */ 8632 if (phba->worker_thread) 8633 kthread_stop(phba->worker_thread); 8634 } 8635 8636 /** 8637 * lpfc_free_iocb_list - Free iocb list. 8638 * @phba: pointer to lpfc hba data structure. 8639 * 8640 * This routine is invoked to free the driver's IOCB list and memory. 8641 **/ 8642 void 8643 lpfc_free_iocb_list(struct lpfc_hba *phba) 8644 { 8645 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; 8646 8647 spin_lock_irq(&phba->hbalock); 8648 list_for_each_entry_safe(iocbq_entry, iocbq_next, 8649 &phba->lpfc_iocb_list, list) { 8650 list_del(&iocbq_entry->list); 8651 kfree(iocbq_entry); 8652 phba->total_iocbq_bufs--; 8653 } 8654 spin_unlock_irq(&phba->hbalock); 8655 8656 return; 8657 } 8658 8659 /** 8660 * lpfc_init_iocb_list - Allocate and initialize iocb list. 8661 * @phba: pointer to lpfc hba data structure. 8662 * @iocb_count: number of requested iocbs 8663 * 8664 * This routine is invoked to allocate and initizlize the driver's IOCB 8665 * list and set up the IOCB tag array accordingly. 8666 * 8667 * Return codes 8668 * 0 - successful 8669 * other values - error 8670 **/ 8671 int 8672 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count) 8673 { 8674 struct lpfc_iocbq *iocbq_entry = NULL; 8675 uint16_t iotag; 8676 int i; 8677 8678 /* Initialize and populate the iocb list per host. */ 8679 INIT_LIST_HEAD(&phba->lpfc_iocb_list); 8680 for (i = 0; i < iocb_count; i++) { 8681 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); 8682 if (iocbq_entry == NULL) { 8683 printk(KERN_ERR "%s: only allocated %d iocbs of " 8684 "expected %d count. Unloading driver.\n", 8685 __func__, i, iocb_count); 8686 goto out_free_iocbq; 8687 } 8688 8689 iotag = lpfc_sli_next_iotag(phba, iocbq_entry); 8690 if (iotag == 0) { 8691 kfree(iocbq_entry); 8692 printk(KERN_ERR "%s: failed to allocate IOTAG. " 8693 "Unloading driver.\n", __func__); 8694 goto out_free_iocbq; 8695 } 8696 iocbq_entry->sli4_lxritag = NO_XRI; 8697 iocbq_entry->sli4_xritag = NO_XRI; 8698 8699 spin_lock_irq(&phba->hbalock); 8700 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); 8701 phba->total_iocbq_bufs++; 8702 spin_unlock_irq(&phba->hbalock); 8703 } 8704 8705 return 0; 8706 8707 out_free_iocbq: 8708 lpfc_free_iocb_list(phba); 8709 8710 return -ENOMEM; 8711 } 8712 8713 /** 8714 * lpfc_free_sgl_list - Free a given sgl list. 8715 * @phba: pointer to lpfc hba data structure. 8716 * @sglq_list: pointer to the head of sgl list. 8717 * 8718 * This routine is invoked to free a give sgl list and memory. 
8719 **/ 8720 void 8721 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list) 8722 { 8723 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 8724 8725 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) { 8726 list_del(&sglq_entry->list); 8727 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys); 8728 kfree(sglq_entry); 8729 } 8730 } 8731 8732 /** 8733 * lpfc_free_els_sgl_list - Free els sgl list. 8734 * @phba: pointer to lpfc hba data structure. 8735 * 8736 * This routine is invoked to free the driver's els sgl list and memory. 8737 **/ 8738 static void 8739 lpfc_free_els_sgl_list(struct lpfc_hba *phba) 8740 { 8741 LIST_HEAD(sglq_list); 8742 8743 /* Retrieve all els sgls from driver list */ 8744 spin_lock_irq(&phba->sli4_hba.sgl_list_lock); 8745 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list); 8746 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock); 8747 8748 /* Now free the sgl list */ 8749 lpfc_free_sgl_list(phba, &sglq_list); 8750 } 8751 8752 /** 8753 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list. 8754 * @phba: pointer to lpfc hba data structure. 8755 * 8756 * This routine is invoked to free the driver's nvmet sgl list and memory. 8757 **/ 8758 static void 8759 lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba) 8760 { 8761 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 8762 LIST_HEAD(sglq_list); 8763 8764 /* Retrieve all nvmet sgls from driver list */ 8765 spin_lock_irq(&phba->hbalock); 8766 spin_lock(&phba->sli4_hba.sgl_list_lock); 8767 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list); 8768 spin_unlock(&phba->sli4_hba.sgl_list_lock); 8769 spin_unlock_irq(&phba->hbalock); 8770 8771 /* Now free the sgl list */ 8772 list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) { 8773 list_del(&sglq_entry->list); 8774 lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys); 8775 kfree(sglq_entry); 8776 } 8777 8778 /* Update the nvmet_xri_cnt to reflect no current sgls. 8779 * The next initialization cycle sets the count and allocates 8780 * the sgls over again. 8781 */ 8782 phba->sli4_hba.nvmet_xri_cnt = 0; 8783 } 8784 8785 /** 8786 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs. 8787 * @phba: pointer to lpfc hba data structure. 8788 * 8789 * This routine is invoked to allocate the driver's active sgl memory. 8790 * This array will hold the sglq_entry's for active IOs. 8791 **/ 8792 static int 8793 lpfc_init_active_sgl_array(struct lpfc_hba *phba) 8794 { 8795 int size; 8796 size = sizeof(struct lpfc_sglq *); 8797 size *= phba->sli4_hba.max_cfg_param.max_xri; 8798 8799 phba->sli4_hba.lpfc_sglq_active_list = 8800 kzalloc(size, GFP_KERNEL); 8801 if (!phba->sli4_hba.lpfc_sglq_active_list) 8802 return -ENOMEM; 8803 return 0; 8804 } 8805 8806 /** 8807 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs. 8808 * @phba: pointer to lpfc hba data structure. 8809 * 8810 * This routine is invoked to walk through the array of active sglq entries 8811 * and free all of the resources. 8812 * This is just a place holder for now. 8813 **/ 8814 static void 8815 lpfc_free_active_sgl(struct lpfc_hba *phba) 8816 { 8817 kfree(phba->sli4_hba.lpfc_sglq_active_list); 8818 } 8819 8820 /** 8821 * lpfc_init_sgl_list - Allocate and initialize sgl list. 8822 * @phba: pointer to lpfc hba data structure. 8823 * 8824 * This routine is invoked to allocate and initizlize the driver's sgl 8825 * list and set up the sgl xritag tag array accordingly. 
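 * No sgl buffers are allocated here; only the list heads are initialized
 * and the els/io xri counters reset so later allocation can populate them.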
8826 * 8827 **/ 8828 static void 8829 lpfc_init_sgl_list(struct lpfc_hba *phba) 8830 { 8831 /* Initialize and populate the sglq list per host/VF. */ 8832 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list); 8833 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list); 8834 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list); 8835 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 8836 8837 /* els xri-sgl book keeping */ 8838 phba->sli4_hba.els_xri_cnt = 0; 8839 8840 /* nvme xri-buffer book keeping */ 8841 phba->sli4_hba.io_xri_cnt = 0; 8842 } 8843 8844 /** 8845 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port 8846 * @phba: pointer to lpfc hba data structure. 8847 * 8848 * This routine is invoked to post rpi header templates to the 8849 * port for those SLI4 ports that do not support extents. This routine 8850 * posts a PAGE_SIZE memory region to the port to hold up to 8851 * PAGE_SIZE modulo 64 rpi context headers. This is an initialization routine 8852 * and should be called only when interrupts are disabled. 8853 * 8854 * Return codes 8855 * 0 - successful 8856 * -ERROR - otherwise. 8857 **/ 8858 int 8859 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) 8860 { 8861 int rc = 0; 8862 struct lpfc_rpi_hdr *rpi_hdr; 8863 8864 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list); 8865 if (!phba->sli4_hba.rpi_hdrs_in_use) 8866 return rc; 8867 if (phba->sli4_hba.extents_in_use) 8868 return -EIO; 8869 8870 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 8871 if (!rpi_hdr) { 8872 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8873 "0391 Error during rpi post operation\n"); 8874 lpfc_sli4_remove_rpis(phba); 8875 rc = -ENODEV; 8876 } 8877 8878 return rc; 8879 } 8880 8881 /** 8882 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region 8883 * @phba: pointer to lpfc hba data structure. 8884 * 8885 * This routine is invoked to allocate a single 4KB memory region to 8886 * support rpis and stores them in the phba. This single region 8887 * provides support for up to 64 rpis. The region is used globally 8888 * by the device. 8889 * 8890 * Returns: 8891 * A valid rpi hdr on success. 8892 * A NULL pointer on any failure. 8893 **/ 8894 struct lpfc_rpi_hdr * 8895 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) 8896 { 8897 uint16_t rpi_limit, curr_rpi_range; 8898 struct lpfc_dmabuf *dmabuf; 8899 struct lpfc_rpi_hdr *rpi_hdr; 8900 8901 /* 8902 * If the SLI4 port supports extents, posting the rpi header isn't 8903 * required. Set the expected maximum count and let the actual value 8904 * get set when extents are fully allocated. 8905 */ 8906 if (!phba->sli4_hba.rpi_hdrs_in_use) 8907 return NULL; 8908 if (phba->sli4_hba.extents_in_use) 8909 return NULL; 8910 8911 /* The limit on the logical index is just the max_rpi count. */ 8912 rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi; 8913 8914 spin_lock_irq(&phba->hbalock); 8915 /* 8916 * Establish the starting RPI in this header block. The starting 8917 * rpi is normalized to a zero base because the physical rpi is 8918 * port based. 8919 */ 8920 curr_rpi_range = phba->sli4_hba.next_rpi; 8921 spin_unlock_irq(&phba->hbalock); 8922 8923 /* Reached full RPI range */ 8924 if (curr_rpi_range == rpi_limit) 8925 return NULL; 8926 8927 /* 8928 * First allocate the protocol header region for the port. The 8929 * port expects a 4KB DMA-mapped memory region that is 4K aligned. 
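	 * The physical address returned by dma_alloc_coherent() is verified
	 * against LPFC_HDR_TEMPLATE_SIZE alignment below before it is kept.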
8930 */ 8931 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 8932 if (!dmabuf) 8933 return NULL; 8934 8935 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 8936 LPFC_HDR_TEMPLATE_SIZE, 8937 &dmabuf->phys, GFP_KERNEL); 8938 if (!dmabuf->virt) { 8939 rpi_hdr = NULL; 8940 goto err_free_dmabuf; 8941 } 8942 8943 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) { 8944 rpi_hdr = NULL; 8945 goto err_free_coherent; 8946 } 8947 8948 /* Save the rpi header data for cleanup later. */ 8949 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL); 8950 if (!rpi_hdr) 8951 goto err_free_coherent; 8952 8953 rpi_hdr->dmabuf = dmabuf; 8954 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE; 8955 rpi_hdr->page_count = 1; 8956 spin_lock_irq(&phba->hbalock); 8957 8958 /* The rpi_hdr stores the logical index only. */ 8959 rpi_hdr->start_rpi = curr_rpi_range; 8960 rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT; 8961 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); 8962 8963 spin_unlock_irq(&phba->hbalock); 8964 return rpi_hdr; 8965 8966 err_free_coherent: 8967 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE, 8968 dmabuf->virt, dmabuf->phys); 8969 err_free_dmabuf: 8970 kfree(dmabuf); 8971 return NULL; 8972 } 8973 8974 /** 8975 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions 8976 * @phba: pointer to lpfc hba data structure. 8977 * 8978 * This routine is invoked to remove all memory resources allocated 8979 * to support rpis for SLI4 ports not supporting extents. This routine 8980 * presumes the caller has released all rpis consumed by fabric or port 8981 * logins and is prepared to have the header pages removed. 8982 **/ 8983 void 8984 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) 8985 { 8986 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr; 8987 8988 if (!phba->sli4_hba.rpi_hdrs_in_use) 8989 goto exit; 8990 8991 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr, 8992 &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 8993 list_del(&rpi_hdr->list); 8994 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len, 8995 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys); 8996 kfree(rpi_hdr->dmabuf); 8997 kfree(rpi_hdr); 8998 } 8999 exit: 9000 /* There are no rpis available to the port now. */ 9001 phba->sli4_hba.next_rpi = 0; 9002 } 9003 9004 /** 9005 * lpfc_hba_alloc - Allocate driver hba data structure for a device. 9006 * @pdev: pointer to pci device data structure. 9007 * 9008 * This routine is invoked to allocate the driver hba data structure for an 9009 * HBA device. If the allocation is successful, the phba reference to the 9010 * PCI device data structure is set. 
9011 * 9012 * Return codes 9013 * pointer to @phba - successful 9014 * NULL - error 9015 **/ 9016 static struct lpfc_hba * 9017 lpfc_hba_alloc(struct pci_dev *pdev) 9018 { 9019 struct lpfc_hba *phba; 9020 9021 /* Allocate memory for HBA structure */ 9022 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL); 9023 if (!phba) { 9024 dev_err(&pdev->dev, "failed to allocate hba struct\n"); 9025 return NULL; 9026 } 9027 9028 /* Set reference to PCI device in HBA structure */ 9029 phba->pcidev = pdev; 9030 9031 /* Assign an unused board number */ 9032 phba->brd_no = lpfc_get_instance(); 9033 if (phba->brd_no < 0) { 9034 kfree(phba); 9035 return NULL; 9036 } 9037 phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL; 9038 9039 spin_lock_init(&phba->ct_ev_lock); 9040 INIT_LIST_HEAD(&phba->ct_ev_waiters); 9041 9042 return phba; 9043 } 9044 9045 /** 9046 * lpfc_hba_free - Free driver hba data structure with a device. 9047 * @phba: pointer to lpfc hba data structure. 9048 * 9049 * This routine is invoked to free the driver hba data structure with an 9050 * HBA device. 9051 **/ 9052 static void 9053 lpfc_hba_free(struct lpfc_hba *phba) 9054 { 9055 if (phba->sli_rev == LPFC_SLI_REV4) 9056 kfree(phba->sli4_hba.hdwq); 9057 9058 /* Release the driver assigned board number */ 9059 idr_remove(&lpfc_hba_index, phba->brd_no); 9060 9061 /* Free memory allocated with sli3 rings */ 9062 kfree(phba->sli.sli3_ring); 9063 phba->sli.sli3_ring = NULL; 9064 9065 kfree(phba); 9066 return; 9067 } 9068 9069 /** 9070 * lpfc_setup_fdmi_mask - Setup initial FDMI mask for HBA and Port attributes 9071 * @vport: pointer to lpfc vport data structure. 9072 * 9073 * This routine will set up initial FDMI attribute masks for 9074 * FDMI2 or SmartSAN depending on module parameters. The driver will attempt 9075 * to get these attributes first before falling back; the attribute 9076 * fallback hierarchy is SmartSAN -> FDMI2 -> FDMI1. 9077 **/ 9078 void 9079 lpfc_setup_fdmi_mask(struct lpfc_vport *vport) 9080 { 9081 struct lpfc_hba *phba = vport->phba; 9082 9083 vport->load_flag |= FC_ALLOW_FDMI; 9084 if (phba->cfg_enable_SmartSAN || 9085 phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT) { 9086 /* Setup appropriate attribute masks */ 9087 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR; 9088 if (phba->cfg_enable_SmartSAN) 9089 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR; 9090 else 9091 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; 9092 } 9093 9094 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 9095 "6077 Setup FDMI mask: hba x%x port x%x\n", 9096 vport->fdmi_hba_mask, vport->fdmi_port_mask); 9097 } 9098 9099 /** 9100 * lpfc_create_shost - Create hba physical port with associated scsi host. 9101 * @phba: pointer to lpfc hba data structure. 9102 * 9103 * This routine is invoked to create HBA physical port and associate a SCSI 9104 * host with it.
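 * When the adapter is configured for NVME target mode, only this physical
 * port (pport) hosts the target and cfg_enable_fc4_type is forced to
 * LPFC_ENABLE_NVME before the debugfs and FDMI setup below.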
9105 * 9106 * Return codes 9107 * 0 - successful 9108 * other values - error 9109 **/ 9110 static int 9111 lpfc_create_shost(struct lpfc_hba *phba) 9112 { 9113 struct lpfc_vport *vport; 9114 struct Scsi_Host *shost; 9115 9116 /* Initialize HBA FC structure */ 9117 phba->fc_edtov = FF_DEF_EDTOV; 9118 phba->fc_ratov = FF_DEF_RATOV; 9119 phba->fc_altov = FF_DEF_ALTOV; 9120 phba->fc_arbtov = FF_DEF_ARBTOV; 9121 9122 atomic_set(&phba->sdev_cnt, 0); 9123 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 9124 if (!vport) 9125 return -ENODEV; 9126 9127 shost = lpfc_shost_from_vport(vport); 9128 phba->pport = vport; 9129 9130 if (phba->nvmet_support) { 9131 /* Only 1 vport (pport) will support NVME target */ 9132 phba->targetport = NULL; 9133 phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME; 9134 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC, 9135 "6076 NVME Target Found\n"); 9136 } 9137 9138 lpfc_debugfs_initialize(vport); 9139 /* Put reference to SCSI host to driver's device private data */ 9140 pci_set_drvdata(phba->pcidev, shost); 9141 9142 lpfc_setup_fdmi_mask(vport); 9143 9144 /* 9145 * At this point we are fully registered with PSA. In addition, 9146 * any initial discovery should be completed. 9147 */ 9148 return 0; 9149 } 9150 9151 /** 9152 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host. 9153 * @phba: pointer to lpfc hba data structure. 9154 * 9155 * This routine is invoked to destroy HBA physical port and the associated 9156 * SCSI host. 9157 **/ 9158 static void 9159 lpfc_destroy_shost(struct lpfc_hba *phba) 9160 { 9161 struct lpfc_vport *vport = phba->pport; 9162 9163 /* Destroy physical port that associated with the SCSI host */ 9164 destroy_port(vport); 9165 9166 return; 9167 } 9168 9169 /** 9170 * lpfc_setup_bg - Setup Block guard structures and debug areas. 9171 * @phba: pointer to lpfc hba data structure. 9172 * @shost: the shost to be used to detect Block guard settings. 9173 * 9174 * This routine sets up the local Block guard protocol settings for @shost. 9175 * This routine also allocates memory for debugging bg buffers. 
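 * Only DIF Type 1 and DIX Type 0/1 protection, together with IP-CSUM or
 * CRC guard types, are passed through to the SCSI midlayer; any other
 * user-configured bits are masked off. If only DIX Type 1 is requested,
 * DIF Type 1 is added as well so the protection is end to end.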
9176 **/ 9177 static void 9178 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) 9179 { 9180 uint32_t old_mask; 9181 uint32_t old_guard; 9182 9183 if (phba->cfg_prot_mask && phba->cfg_prot_guard) { 9184 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9185 "1478 Registering BlockGuard with the " 9186 "SCSI layer\n"); 9187 9188 old_mask = phba->cfg_prot_mask; 9189 old_guard = phba->cfg_prot_guard; 9190 9191 /* Only allow supported values */ 9192 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION | 9193 SHOST_DIX_TYPE0_PROTECTION | 9194 SHOST_DIX_TYPE1_PROTECTION); 9195 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP | 9196 SHOST_DIX_GUARD_CRC); 9197 9198 /* DIF Type 1 protection for profiles AST1/C1 is end to end */ 9199 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION) 9200 phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION; 9201 9202 if (phba->cfg_prot_mask && phba->cfg_prot_guard) { 9203 if ((old_mask != phba->cfg_prot_mask) || 9204 (old_guard != phba->cfg_prot_guard)) 9205 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9206 "1475 Registering BlockGuard with the " 9207 "SCSI layer: mask %d guard %d\n", 9208 phba->cfg_prot_mask, 9209 phba->cfg_prot_guard); 9210 9211 scsi_host_set_prot(shost, phba->cfg_prot_mask); 9212 scsi_host_set_guard(shost, phba->cfg_prot_guard); 9213 } else 9214 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9215 "1479 Not Registering BlockGuard with the SCSI " 9216 "layer, Bad protection parameters: %d %d\n", 9217 old_mask, old_guard); 9218 } 9219 } 9220 9221 /** 9222 * lpfc_post_init_setup - Perform necessary device post initialization setup. 9223 * @phba: pointer to lpfc hba data structure. 9224 * 9225 * This routine is invoked to perform all the necessary post initialization 9226 * setup for the device. 9227 **/ 9228 static void 9229 lpfc_post_init_setup(struct lpfc_hba *phba) 9230 { 9231 struct Scsi_Host *shost; 9232 struct lpfc_adapter_event_header adapter_event; 9233 9234 /* Get the default values for Model Name and Description */ 9235 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 9236 9237 /* 9238 * hba setup may have changed the hba_queue_depth so we need to 9239 * adjust the value of can_queue. 9240 */ 9241 shost = pci_get_drvdata(phba->pcidev); 9242 shost->can_queue = phba->cfg_hba_queue_depth - 10; 9243 9244 lpfc_host_attrib_init(shost); 9245 9246 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 9247 spin_lock_irq(shost->host_lock); 9248 lpfc_poll_start_timer(phba); 9249 spin_unlock_irq(shost->host_lock); 9250 } 9251 9252 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9253 "0428 Perform SCSI scan\n"); 9254 /* Send board arrival event to upper layer */ 9255 adapter_event.event_type = FC_REG_ADAPTER_EVENT; 9256 adapter_event.subcategory = LPFC_EVENT_ARRIVAL; 9257 fc_host_post_vendor_event(shost, fc_get_event_number(), 9258 sizeof(adapter_event), 9259 (char *) &adapter_event, 9260 LPFC_NL_VENDOR_ID); 9261 return; 9262 } 9263 9264 /** 9265 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space. 9266 * @phba: pointer to lpfc hba data structure. 9267 * 9268 * This routine is invoked to set up the PCI device memory space for device 9269 * with SLI-3 interface spec. 
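 * BAR0 is mapped for the HBA SLIM and BAR2 for the control registers; the
 * SLI-2 SLIM and HBQ areas are then carved out of coherent DMA memory.
 * A 64-bit DMA mask is tried first, falling back to 32-bit if that fails.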
9270 * 9271 * Return codes 9272 * 0 - successful 9273 * other values - error 9274 **/ 9275 static int 9276 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) 9277 { 9278 struct pci_dev *pdev = phba->pcidev; 9279 unsigned long bar0map_len, bar2map_len; 9280 int i, hbq_count; 9281 void *ptr; 9282 int error; 9283 9284 if (!pdev) 9285 return -ENODEV; 9286 9287 /* Set the device DMA mask size */ 9288 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 9289 if (error) 9290 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 9291 if (error) 9292 return error; 9293 error = -ENODEV; 9294 9295 /* Get the bus address of Bar0 and Bar2 and the number of bytes 9296 * required by each mapping. 9297 */ 9298 phba->pci_bar0_map = pci_resource_start(pdev, 0); 9299 bar0map_len = pci_resource_len(pdev, 0); 9300 9301 phba->pci_bar2_map = pci_resource_start(pdev, 2); 9302 bar2map_len = pci_resource_len(pdev, 2); 9303 9304 /* Map HBA SLIM to a kernel virtual address. */ 9305 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 9306 if (!phba->slim_memmap_p) { 9307 dev_printk(KERN_ERR, &pdev->dev, 9308 "ioremap failed for SLIM memory.\n"); 9309 goto out; 9310 } 9311 9312 /* Map HBA Control Registers to a kernel virtual address. */ 9313 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); 9314 if (!phba->ctrl_regs_memmap_p) { 9315 dev_printk(KERN_ERR, &pdev->dev, 9316 "ioremap failed for HBA control registers.\n"); 9317 goto out_iounmap_slim; 9318 } 9319 9320 /* Allocate memory for SLI-2 structures */ 9321 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE, 9322 &phba->slim2p.phys, GFP_KERNEL); 9323 if (!phba->slim2p.virt) 9324 goto out_iounmap; 9325 9326 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 9327 phba->mbox_ext = (phba->slim2p.virt + 9328 offsetof(struct lpfc_sli2_slim, mbx_ext_words)); 9329 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); 9330 phba->IOCBs = (phba->slim2p.virt + 9331 offsetof(struct lpfc_sli2_slim, IOCBs)); 9332 9333 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev, 9334 lpfc_sli_hbq_size(), 9335 &phba->hbqslimp.phys, 9336 GFP_KERNEL); 9337 if (!phba->hbqslimp.virt) 9338 goto out_free_slim; 9339 9340 hbq_count = lpfc_sli_hbq_count(); 9341 ptr = phba->hbqslimp.virt; 9342 for (i = 0; i < hbq_count; ++i) { 9343 phba->hbqs[i].hbq_virt = ptr; 9344 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 9345 ptr += (lpfc_hbq_defs[i]->entry_count * 9346 sizeof(struct lpfc_hbq_entry)); 9347 } 9348 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 9349 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 9350 9351 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 9352 9353 phba->MBslimaddr = phba->slim_memmap_p; 9354 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 9355 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 9356 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 9357 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 9358 9359 return 0; 9360 9361 out_free_slim: 9362 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 9363 phba->slim2p.virt, phba->slim2p.phys); 9364 out_iounmap: 9365 iounmap(phba->ctrl_regs_memmap_p); 9366 out_iounmap_slim: 9367 iounmap(phba->slim_memmap_p); 9368 out: 9369 return error; 9370 } 9371 9372 /** 9373 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space. 9374 * @phba: pointer to lpfc hba data structure. 
* 9376 * This routine is invoked to unset the PCI device memory space for device 9377 * with SLI-3 interface spec. 9378 **/ 9379 static void 9380 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba) 9381 { 9382 struct pci_dev *pdev; 9383 9384 /* Obtain PCI device reference */ 9385 if (!phba->pcidev) 9386 return; 9387 else 9388 pdev = phba->pcidev; 9389 9390 /* Free coherent DMA memory allocated */ 9391 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 9392 phba->hbqslimp.virt, phba->hbqslimp.phys); 9393 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 9394 phba->slim2p.virt, phba->slim2p.phys); 9395 9396 /* I/O memory unmap */ 9397 iounmap(phba->ctrl_regs_memmap_p); 9398 iounmap(phba->slim_memmap_p); 9399 9400 return; 9401 } 9402 9403 /** 9404 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status 9405 * @phba: pointer to lpfc hba data structure. 9406 * 9407 * This routine is invoked to wait for SLI4 device Power On Self Test (POST) 9408 * done and check status. 9409 * 9410 * Return 0 if successful, otherwise -ENODEV. 9411 **/ 9412 int 9413 lpfc_sli4_post_status_check(struct lpfc_hba *phba) 9414 { 9415 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg; 9416 struct lpfc_register reg_data; 9417 int i, port_error = 0; 9418 uint32_t if_type; 9419 9420 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg)); 9421 memset(&reg_data, 0, sizeof(reg_data)); 9422 if (!phba->sli4_hba.PSMPHRregaddr) 9423 return -ENODEV; 9424 9425 /* Wait up to 30 seconds for the SLI Port POST done and ready */ 9426 for (i = 0; i < 3000; i++) { 9427 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 9428 &portsmphr_reg.word0) || 9429 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) { 9430 /* Port has a fatal POST error, break out */ 9431 port_error = -ENODEV; 9432 break; 9433 } 9434 if (LPFC_POST_STAGE_PORT_READY == 9435 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg)) 9436 break; 9437 msleep(10); 9438 } 9439 9440 /* 9441 * If there was a port error during POST, then don't proceed with 9442 * other register reads as the data may not be valid. Just exit. 9443 */ 9444 if (port_error) { 9445 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9446 "1408 Port Failed POST - portsmphr=0x%x, " 9447 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, " 9448 "scr2=x%x, hscratch=x%x, pstatus=x%x\n", 9449 portsmphr_reg.word0, 9450 bf_get(lpfc_port_smphr_perr, &portsmphr_reg), 9451 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg), 9452 bf_get(lpfc_port_smphr_nip, &portsmphr_reg), 9453 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg), 9454 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg), 9455 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg), 9456 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg), 9457 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg)); 9458 } else { 9459 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9460 "2534 Device Info: SLIFamily=0x%x, " 9461 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, " 9462 "SLIHint_2=0x%x, FT=0x%x\n", 9463 bf_get(lpfc_sli_intf_sli_family, 9464 &phba->sli4_hba.sli_intf), 9465 bf_get(lpfc_sli_intf_slirev, 9466 &phba->sli4_hba.sli_intf), 9467 bf_get(lpfc_sli_intf_if_type, 9468 &phba->sli4_hba.sli_intf), 9469 bf_get(lpfc_sli_intf_sli_hint1, 9470 &phba->sli4_hba.sli_intf), 9471 bf_get(lpfc_sli_intf_sli_hint2, 9472 &phba->sli4_hba.sli_intf), 9473 bf_get(lpfc_sli_intf_func_type, 9474 &phba->sli4_hba.sli_intf)); 9475 /* 9476 * Check for other Port errors during the initialization 9477 * process. Fail the load if the port did not come up 9478 * correctly.
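 * The POST wait above polls the port semaphore register every 10 ms for
 * up to 3000 iterations (roughly 30 seconds). For if_type 0 ports the
 * unrecoverable-error registers are compared against their masks below;
 * for if_type 2/6 ports the SLI port status register is checked instead.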
*/ 9480 if_type = bf_get(lpfc_sli_intf_if_type, 9481 &phba->sli4_hba.sli_intf); 9482 switch (if_type) { 9483 case LPFC_SLI_INTF_IF_TYPE_0: 9484 phba->sli4_hba.ue_mask_lo = 9485 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr); 9486 phba->sli4_hba.ue_mask_hi = 9487 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr); 9488 uerrlo_reg.word0 = 9489 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr); 9490 uerrhi_reg.word0 = 9491 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr); 9492 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) || 9493 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) { 9494 lpfc_printf_log(phba, KERN_ERR, 9495 LOG_TRACE_EVENT, 9496 "1422 Unrecoverable Error " 9497 "Detected during POST " 9498 "uerr_lo_reg=0x%x, " 9499 "uerr_hi_reg=0x%x, " 9500 "ue_mask_lo_reg=0x%x, " 9501 "ue_mask_hi_reg=0x%x\n", 9502 uerrlo_reg.word0, 9503 uerrhi_reg.word0, 9504 phba->sli4_hba.ue_mask_lo, 9505 phba->sli4_hba.ue_mask_hi); 9506 port_error = -ENODEV; 9507 } 9508 break; 9509 case LPFC_SLI_INTF_IF_TYPE_2: 9510 case LPFC_SLI_INTF_IF_TYPE_6: 9511 /* Final checks. The port status should be clean. */ 9512 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, 9513 &reg_data.word0) || 9514 (bf_get(lpfc_sliport_status_err, &reg_data) && 9515 !bf_get(lpfc_sliport_status_rn, &reg_data))) { 9516 phba->work_status[0] = 9517 readl(phba->sli4_hba.u.if_type2. 9518 ERR1regaddr); 9519 phba->work_status[1] = 9520 readl(phba->sli4_hba.u.if_type2. 9521 ERR2regaddr); 9522 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9523 "2888 Unrecoverable port error " 9524 "following POST: port status reg " 9525 "0x%x, port_smphr reg 0x%x, " 9526 "error 1=0x%x, error 2=0x%x\n", 9527 reg_data.word0, 9528 portsmphr_reg.word0, 9529 phba->work_status[0], 9530 phba->work_status[1]); 9531 port_error = -ENODEV; 9532 break; 9533 } 9534 9535 if (lpfc_pldv_detect && 9536 bf_get(lpfc_sli_intf_sli_family, 9537 &phba->sli4_hba.sli_intf) == 9538 LPFC_SLI_INTF_FAMILY_G6) 9539 pci_write_config_byte(phba->pcidev, 9540 LPFC_SLI_INTF, CFG_PLD); 9541 break; 9542 case LPFC_SLI_INTF_IF_TYPE_1: 9543 default: 9544 break; 9545 } 9546 } 9547 return port_error; 9548 } 9549 9550 /** 9551 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map. 9552 * @phba: pointer to lpfc hba data structure. 9553 * @if_type: The SLI4 interface type getting configured. 9554 * 9555 * This routine is invoked to set up SLI4 BAR0 PCI config space register 9556 * memory map.
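 * For if_type 0, the unrecoverable-error status and mask registers are
 * addressed through BAR0. For if_type 2, the port control/status registers
 * and the ULP0 RQ/WQ/CQ/MQ doorbells are also reached through BAR0, while
 * if_type 6 keeps only the control, semaphore and bootstrap mailbox
 * registers here and moves the doorbells to BAR1.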
9557 **/ 9558 static void 9559 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type) 9560 { 9561 switch (if_type) { 9562 case LPFC_SLI_INTF_IF_TYPE_0: 9563 phba->sli4_hba.u.if_type0.UERRLOregaddr = 9564 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO; 9565 phba->sli4_hba.u.if_type0.UERRHIregaddr = 9566 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI; 9567 phba->sli4_hba.u.if_type0.UEMASKLOregaddr = 9568 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO; 9569 phba->sli4_hba.u.if_type0.UEMASKHIregaddr = 9570 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI; 9571 phba->sli4_hba.SLIINTFregaddr = 9572 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; 9573 break; 9574 case LPFC_SLI_INTF_IF_TYPE_2: 9575 phba->sli4_hba.u.if_type2.EQDregaddr = 9576 phba->sli4_hba.conf_regs_memmap_p + 9577 LPFC_CTL_PORT_EQ_DELAY_OFFSET; 9578 phba->sli4_hba.u.if_type2.ERR1regaddr = 9579 phba->sli4_hba.conf_regs_memmap_p + 9580 LPFC_CTL_PORT_ER1_OFFSET; 9581 phba->sli4_hba.u.if_type2.ERR2regaddr = 9582 phba->sli4_hba.conf_regs_memmap_p + 9583 LPFC_CTL_PORT_ER2_OFFSET; 9584 phba->sli4_hba.u.if_type2.CTRLregaddr = 9585 phba->sli4_hba.conf_regs_memmap_p + 9586 LPFC_CTL_PORT_CTL_OFFSET; 9587 phba->sli4_hba.u.if_type2.STATUSregaddr = 9588 phba->sli4_hba.conf_regs_memmap_p + 9589 LPFC_CTL_PORT_STA_OFFSET; 9590 phba->sli4_hba.SLIINTFregaddr = 9591 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; 9592 phba->sli4_hba.PSMPHRregaddr = 9593 phba->sli4_hba.conf_regs_memmap_p + 9594 LPFC_CTL_PORT_SEM_OFFSET; 9595 phba->sli4_hba.RQDBregaddr = 9596 phba->sli4_hba.conf_regs_memmap_p + 9597 LPFC_ULP0_RQ_DOORBELL; 9598 phba->sli4_hba.WQDBregaddr = 9599 phba->sli4_hba.conf_regs_memmap_p + 9600 LPFC_ULP0_WQ_DOORBELL; 9601 phba->sli4_hba.CQDBregaddr = 9602 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL; 9603 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr; 9604 phba->sli4_hba.MQDBregaddr = 9605 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL; 9606 phba->sli4_hba.BMBXregaddr = 9607 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX; 9608 break; 9609 case LPFC_SLI_INTF_IF_TYPE_6: 9610 phba->sli4_hba.u.if_type2.EQDregaddr = 9611 phba->sli4_hba.conf_regs_memmap_p + 9612 LPFC_CTL_PORT_EQ_DELAY_OFFSET; 9613 phba->sli4_hba.u.if_type2.ERR1regaddr = 9614 phba->sli4_hba.conf_regs_memmap_p + 9615 LPFC_CTL_PORT_ER1_OFFSET; 9616 phba->sli4_hba.u.if_type2.ERR2regaddr = 9617 phba->sli4_hba.conf_regs_memmap_p + 9618 LPFC_CTL_PORT_ER2_OFFSET; 9619 phba->sli4_hba.u.if_type2.CTRLregaddr = 9620 phba->sli4_hba.conf_regs_memmap_p + 9621 LPFC_CTL_PORT_CTL_OFFSET; 9622 phba->sli4_hba.u.if_type2.STATUSregaddr = 9623 phba->sli4_hba.conf_regs_memmap_p + 9624 LPFC_CTL_PORT_STA_OFFSET; 9625 phba->sli4_hba.PSMPHRregaddr = 9626 phba->sli4_hba.conf_regs_memmap_p + 9627 LPFC_CTL_PORT_SEM_OFFSET; 9628 phba->sli4_hba.BMBXregaddr = 9629 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX; 9630 break; 9631 case LPFC_SLI_INTF_IF_TYPE_1: 9632 default: 9633 dev_printk(KERN_ERR, &phba->pcidev->dev, 9634 "FATAL - unsupported SLI4 interface type - %d\n", 9635 if_type); 9636 break; 9637 } 9638 } 9639 9640 /** 9641 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map. 9642 * @phba: pointer to lpfc hba data structure. 9643 * @if_type: sli if type to operate on. 9644 * 9645 * This routine is invoked to set up SLI4 BAR1 register memory map. 
9646 **/ 9647 static void 9648 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type) 9649 { 9650 switch (if_type) { 9651 case LPFC_SLI_INTF_IF_TYPE_0: 9652 phba->sli4_hba.PSMPHRregaddr = 9653 phba->sli4_hba.ctrl_regs_memmap_p + 9654 LPFC_SLIPORT_IF0_SMPHR; 9655 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 9656 LPFC_HST_ISR0; 9657 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 9658 LPFC_HST_IMR0; 9659 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 9660 LPFC_HST_ISCR0; 9661 break; 9662 case LPFC_SLI_INTF_IF_TYPE_6: 9663 phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 9664 LPFC_IF6_RQ_DOORBELL; 9665 phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 9666 LPFC_IF6_WQ_DOORBELL; 9667 phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 9668 LPFC_IF6_CQ_DOORBELL; 9669 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 9670 LPFC_IF6_EQ_DOORBELL; 9671 phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 9672 LPFC_IF6_MQ_DOORBELL; 9673 break; 9674 case LPFC_SLI_INTF_IF_TYPE_2: 9675 case LPFC_SLI_INTF_IF_TYPE_1: 9676 default: 9677 dev_err(&phba->pcidev->dev, 9678 "FATAL - unsupported SLI4 interface type - %d\n", 9679 if_type); 9680 break; 9681 } 9682 } 9683 9684 /** 9685 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map. 9686 * @phba: pointer to lpfc hba data structure. 9687 * @vf: virtual function number 9688 * 9689 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map 9690 * based on the given viftual function number, @vf. 9691 * 9692 * Return 0 if successful, otherwise -ENODEV. 9693 **/ 9694 static int 9695 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf) 9696 { 9697 if (vf > LPFC_VIR_FUNC_MAX) 9698 return -ENODEV; 9699 9700 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 9701 vf * LPFC_VFR_PAGE_SIZE + 9702 LPFC_ULP0_RQ_DOORBELL); 9703 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 9704 vf * LPFC_VFR_PAGE_SIZE + 9705 LPFC_ULP0_WQ_DOORBELL); 9706 phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 9707 vf * LPFC_VFR_PAGE_SIZE + 9708 LPFC_EQCQ_DOORBELL); 9709 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr; 9710 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 9711 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL); 9712 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 9713 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX); 9714 return 0; 9715 } 9716 9717 /** 9718 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox 9719 * @phba: pointer to lpfc hba data structure. 9720 * 9721 * This routine is invoked to create the bootstrap mailbox 9722 * region consistent with the SLI-4 interface spec. This 9723 * routine allocates all memory necessary to communicate 9724 * mailbox commands to the port and sets up all alignment 9725 * needs. No locks are expected to be held when calling 9726 * this routine. 9727 * 9728 * Return codes 9729 * 0 - successful 9730 * -ENOMEM - could not allocated memory. 
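 * The bootstrap mailbox physical address is reported to the port as two
 * 30-bit values: addr_lo carries bits 33:4 of the 16-byte aligned address
 * and addr_hi carries bits 63:34, each shifted left by two and tagged with
 * a marker bit so the port can tell the high and low halves apart.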
9731 **/ 9732 static int 9733 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba) 9734 { 9735 uint32_t bmbx_size; 9736 struct lpfc_dmabuf *dmabuf; 9737 struct dma_address *dma_address; 9738 uint32_t pa_addr; 9739 uint64_t phys_addr; 9740 9741 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 9742 if (!dmabuf) 9743 return -ENOMEM; 9744 9745 /* 9746 * The bootstrap mailbox region is comprised of 2 parts 9747 * plus an alignment restriction of 16 bytes. 9748 */ 9749 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1); 9750 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size, 9751 &dmabuf->phys, GFP_KERNEL); 9752 if (!dmabuf->virt) { 9753 kfree(dmabuf); 9754 return -ENOMEM; 9755 } 9756 9757 /* 9758 * Initialize the bootstrap mailbox pointers now so that the register 9759 * operations are simple later. The mailbox dma address is required 9760 * to be 16-byte aligned. Also align the virtual memory as each 9761 * maibox is copied into the bmbx mailbox region before issuing the 9762 * command to the port. 9763 */ 9764 phba->sli4_hba.bmbx.dmabuf = dmabuf; 9765 phba->sli4_hba.bmbx.bmbx_size = bmbx_size; 9766 9767 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt, 9768 LPFC_ALIGN_16_BYTE); 9769 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys, 9770 LPFC_ALIGN_16_BYTE); 9771 9772 /* 9773 * Set the high and low physical addresses now. The SLI4 alignment 9774 * requirement is 16 bytes and the mailbox is posted to the port 9775 * as two 30-bit addresses. The other data is a bit marking whether 9776 * the 30-bit address is the high or low address. 9777 * Upcast bmbx aphys to 64bits so shift instruction compiles 9778 * clean on 32 bit machines. 9779 */ 9780 dma_address = &phba->sli4_hba.bmbx.dma_address; 9781 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys; 9782 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff); 9783 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) | 9784 LPFC_BMBX_BIT1_ADDR_HI); 9785 9786 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff); 9787 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) | 9788 LPFC_BMBX_BIT1_ADDR_LO); 9789 return 0; 9790 } 9791 9792 /** 9793 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources 9794 * @phba: pointer to lpfc hba data structure. 9795 * 9796 * This routine is invoked to teardown the bootstrap mailbox 9797 * region and release all host resources. This routine requires 9798 * the caller to ensure all mailbox commands recovered, no 9799 * additional mailbox comands are sent, and interrupts are disabled 9800 * before calling this routine. 9801 * 9802 **/ 9803 static void 9804 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba) 9805 { 9806 dma_free_coherent(&phba->pcidev->dev, 9807 phba->sli4_hba.bmbx.bmbx_size, 9808 phba->sli4_hba.bmbx.dmabuf->virt, 9809 phba->sli4_hba.bmbx.dmabuf->phys); 9810 9811 kfree(phba->sli4_hba.bmbx.dmabuf); 9812 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx)); 9813 } 9814 9815 static const char * const lpfc_topo_to_str[] = { 9816 "Loop then P2P", 9817 "Loopback", 9818 "P2P Only", 9819 "Unsupported", 9820 "Loop Only", 9821 "Unsupported", 9822 "P2P then Loop", 9823 }; 9824 9825 #define LINK_FLAGS_DEF 0x0 9826 #define LINK_FLAGS_P2P 0x1 9827 #define LINK_FLAGS_LOOP 0x2 9828 /** 9829 * lpfc_map_topology - Map the topology read from READ_CONFIG 9830 * @phba: pointer to lpfc hba data structure. 
9831 * @rd_config: pointer to read config data 9832 * 9833 * This routine is invoked to map the topology values as read 9834 * from the read config mailbox command. If the persistent 9835 * topology feature is supported, the firmware will provide the 9836 * saved topology information to be used in INIT_LINK 9837 **/ 9838 static void 9839 lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config) 9840 { 9841 u8 ptv, tf, pt; 9842 9843 ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config); 9844 tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config); 9845 pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config); 9846 9847 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 9848 "2027 Read Config Data : ptv:0x%x, tf:0x%x pt:0x%x", 9849 ptv, tf, pt); 9850 if (!ptv) { 9851 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 9852 "2019 FW does not support persistent topology " 9853 "Using driver parameter defined value [%s]", 9854 lpfc_topo_to_str[phba->cfg_topology]); 9855 return; 9856 } 9857 /* FW supports persistent topology - override module parameter value */ 9858 phba->hba_flag |= HBA_PERSISTENT_TOPO; 9859 9860 /* if ASIC_GEN_NUM >= 0xC) */ 9861 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 9862 LPFC_SLI_INTF_IF_TYPE_6) || 9863 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == 9864 LPFC_SLI_INTF_FAMILY_G6)) { 9865 if (!tf) { 9866 phba->cfg_topology = ((pt == LINK_FLAGS_LOOP) 9867 ? FLAGS_TOPOLOGY_MODE_LOOP 9868 : FLAGS_TOPOLOGY_MODE_PT_PT); 9869 } else { 9870 phba->hba_flag &= ~HBA_PERSISTENT_TOPO; 9871 } 9872 } else { /* G5 */ 9873 if (tf) { 9874 /* If topology failover set - pt is '0' or '1' */ 9875 phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP : 9876 FLAGS_TOPOLOGY_MODE_LOOP_PT); 9877 } else { 9878 phba->cfg_topology = ((pt == LINK_FLAGS_P2P) 9879 ? FLAGS_TOPOLOGY_MODE_PT_PT 9880 : FLAGS_TOPOLOGY_MODE_LOOP); 9881 } 9882 } 9883 if (phba->hba_flag & HBA_PERSISTENT_TOPO) { 9884 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 9885 "2020 Using persistent topology value [%s]", 9886 lpfc_topo_to_str[phba->cfg_topology]); 9887 } else { 9888 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 9889 "2021 Invalid topology values from FW " 9890 "Using driver parameter defined value [%s]", 9891 lpfc_topo_to_str[phba->cfg_topology]); 9892 } 9893 } 9894 9895 /** 9896 * lpfc_sli4_read_config - Get the config parameters. 9897 * @phba: pointer to lpfc hba data structure. 9898 * 9899 * This routine is invoked to read the configuration parameters from the HBA. 9900 * The configuration parameters are used to set the base and maximum values 9901 * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource 9902 * allocation for the port. 9903 * 9904 * Return codes 9905 * 0 - successful 9906 * -ENOMEM - No available memory 9907 * -EIO - The mailbox failed to complete successfully. 
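 * Besides the XRI/VPI/RPI/VFI/FCFI bases and counts, READ_CONFIG also
 * reports link descriptor validity, buffer-to-buffer credit support,
 * FA-PWWN configuration, trunking, extents usage, congestion signalling
 * capabilities, persistent topology and, on if_type 2 and later ports,
 * any factory forced link speed.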
9908 **/ 9909 int 9910 lpfc_sli4_read_config(struct lpfc_hba *phba) 9911 { 9912 LPFC_MBOXQ_t *pmb; 9913 struct lpfc_mbx_read_config *rd_config; 9914 union lpfc_sli4_cfg_shdr *shdr; 9915 uint32_t shdr_status, shdr_add_status; 9916 struct lpfc_mbx_get_func_cfg *get_func_cfg; 9917 struct lpfc_rsrc_desc_fcfcoe *desc; 9918 char *pdesc_0; 9919 uint16_t forced_link_speed; 9920 uint32_t if_type, qmin, fawwpn; 9921 int length, i, rc = 0, rc2; 9922 9923 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 9924 if (!pmb) { 9925 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9926 "2011 Unable to allocate memory for issuing " 9927 "SLI_CONFIG_SPECIAL mailbox command\n"); 9928 return -ENOMEM; 9929 } 9930 9931 lpfc_read_config(phba, pmb); 9932 9933 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 9934 if (rc != MBX_SUCCESS) { 9935 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9936 "2012 Mailbox failed , mbxCmd x%x " 9937 "READ_CONFIG, mbxStatus x%x\n", 9938 bf_get(lpfc_mqe_command, &pmb->u.mqe), 9939 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 9940 rc = -EIO; 9941 } else { 9942 rd_config = &pmb->u.mqe.un.rd_config; 9943 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) { 9944 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; 9945 phba->sli4_hba.lnk_info.lnk_tp = 9946 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config); 9947 phba->sli4_hba.lnk_info.lnk_no = 9948 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config); 9949 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 9950 "3081 lnk_type:%d, lnk_numb:%d\n", 9951 phba->sli4_hba.lnk_info.lnk_tp, 9952 phba->sli4_hba.lnk_info.lnk_no); 9953 } else 9954 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 9955 "3082 Mailbox (x%x) returned ldv:x0\n", 9956 bf_get(lpfc_mqe_command, &pmb->u.mqe)); 9957 if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) { 9958 phba->bbcredit_support = 1; 9959 phba->sli4_hba.bbscn_params.word0 = rd_config->word8; 9960 } 9961 9962 fawwpn = bf_get(lpfc_mbx_rd_conf_fawwpn, rd_config); 9963 9964 if (fawwpn) { 9965 lpfc_printf_log(phba, KERN_INFO, 9966 LOG_INIT | LOG_DISCOVERY, 9967 "2702 READ_CONFIG: FA-PWWN is " 9968 "configured on\n"); 9969 phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_CONFIG; 9970 } else { 9971 /* Clear FW configured flag, preserve driver flag */ 9972 phba->sli4_hba.fawwpn_flag &= ~LPFC_FAWWPN_CONFIG; 9973 } 9974 9975 phba->sli4_hba.conf_trunk = 9976 bf_get(lpfc_mbx_rd_conf_trunk, rd_config); 9977 phba->sli4_hba.extents_in_use = 9978 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config); 9979 9980 phba->sli4_hba.max_cfg_param.max_xri = 9981 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); 9982 /* Reduce resource usage in kdump environment */ 9983 if (is_kdump_kernel() && 9984 phba->sli4_hba.max_cfg_param.max_xri > 512) 9985 phba->sli4_hba.max_cfg_param.max_xri = 512; 9986 phba->sli4_hba.max_cfg_param.xri_base = 9987 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config); 9988 phba->sli4_hba.max_cfg_param.max_vpi = 9989 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config); 9990 /* Limit the max we support */ 9991 if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS) 9992 phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS; 9993 phba->sli4_hba.max_cfg_param.vpi_base = 9994 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config); 9995 phba->sli4_hba.max_cfg_param.max_rpi = 9996 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config); 9997 phba->sli4_hba.max_cfg_param.rpi_base = 9998 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config); 9999 phba->sli4_hba.max_cfg_param.max_vfi = 10000 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config); 10001 phba->sli4_hba.max_cfg_param.vfi_base = 10002 
bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config); 10003 phba->sli4_hba.max_cfg_param.max_fcfi = 10004 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config); 10005 phba->sli4_hba.max_cfg_param.max_eq = 10006 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config); 10007 phba->sli4_hba.max_cfg_param.max_rq = 10008 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config); 10009 phba->sli4_hba.max_cfg_param.max_wq = 10010 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config); 10011 phba->sli4_hba.max_cfg_param.max_cq = 10012 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config); 10013 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config); 10014 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base; 10015 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; 10016 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; 10017 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ? 10018 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0; 10019 phba->max_vports = phba->max_vpi; 10020 10021 /* Next decide on FPIN or Signal E2E CGN support 10022 * For congestion alarms and warnings valid combination are: 10023 * 1. FPIN alarms / FPIN warnings 10024 * 2. Signal alarms / Signal warnings 10025 * 3. FPIN alarms / Signal warnings 10026 * 4. Signal alarms / FPIN warnings 10027 * 10028 * Initialize the adapter frequency to 100 mSecs 10029 */ 10030 phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH; 10031 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 10032 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency; 10033 10034 if (lpfc_use_cgn_signal) { 10035 if (bf_get(lpfc_mbx_rd_conf_wcs, rd_config)) { 10036 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY; 10037 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN; 10038 } 10039 if (bf_get(lpfc_mbx_rd_conf_acs, rd_config)) { 10040 /* MUST support both alarm and warning 10041 * because EDC does not support alarm alone. 10042 */ 10043 if (phba->cgn_reg_signal != 10044 EDC_CG_SIG_WARN_ONLY) { 10045 /* Must support both or none */ 10046 phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH; 10047 phba->cgn_reg_signal = 10048 EDC_CG_SIG_NOTSUPPORTED; 10049 } else { 10050 phba->cgn_reg_signal = 10051 EDC_CG_SIG_WARN_ALARM; 10052 phba->cgn_reg_fpin = 10053 LPFC_CGN_FPIN_NONE; 10054 } 10055 } 10056 } 10057 10058 /* Set the congestion initial signal and fpin values. */ 10059 phba->cgn_init_reg_fpin = phba->cgn_reg_fpin; 10060 phba->cgn_init_reg_signal = phba->cgn_reg_signal; 10061 10062 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 10063 "6446 READ_CONFIG reg_sig x%x reg_fpin:x%x\n", 10064 phba->cgn_reg_signal, phba->cgn_reg_fpin); 10065 10066 lpfc_map_topology(phba, rd_config); 10067 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 10068 "2003 cfg params Extents? %d " 10069 "XRI(B:%d M:%d), " 10070 "VPI(B:%d M:%d) " 10071 "VFI(B:%d M:%d) " 10072 "RPI(B:%d M:%d) " 10073 "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d lmt:x%x\n", 10074 phba->sli4_hba.extents_in_use, 10075 phba->sli4_hba.max_cfg_param.xri_base, 10076 phba->sli4_hba.max_cfg_param.max_xri, 10077 phba->sli4_hba.max_cfg_param.vpi_base, 10078 phba->sli4_hba.max_cfg_param.max_vpi, 10079 phba->sli4_hba.max_cfg_param.vfi_base, 10080 phba->sli4_hba.max_cfg_param.max_vfi, 10081 phba->sli4_hba.max_cfg_param.rpi_base, 10082 phba->sli4_hba.max_cfg_param.max_rpi, 10083 phba->sli4_hba.max_cfg_param.max_fcfi, 10084 phba->sli4_hba.max_cfg_param.max_eq, 10085 phba->sli4_hba.max_cfg_param.max_cq, 10086 phba->sli4_hba.max_cfg_param.max_wq, 10087 phba->sli4_hba.max_cfg_param.max_rq, 10088 phba->lmt); 10089 10090 /* 10091 * Calculate queue resources based on how 10092 * many WQ/CQ/EQs are available. 
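 * The usable minimum is min(max_wq, max_cq) less the 4 reserved queues
 * (ELS, NVME LS, MBOX plus one spare), further capped by max_eq. As an
 * illustration only: with max_wq = 128, max_cq = 160 and max_eq = 96 the
 * result is min(128, 160) - 4 = 124, capped to 96; cfg_irq_chann and
 * cfg_hdw_queue are then clamped to that value if they exceed it.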
10093 */ 10094 qmin = phba->sli4_hba.max_cfg_param.max_wq; 10095 if (phba->sli4_hba.max_cfg_param.max_cq < qmin) 10096 qmin = phba->sli4_hba.max_cfg_param.max_cq; 10097 /* 10098 * Reserve 4 (ELS, NVME LS, MBOX, plus one extra) and 10099 * the remainder can be used for NVME / FCP. 10100 */ 10101 qmin -= 4; 10102 if (phba->sli4_hba.max_cfg_param.max_eq < qmin) 10103 qmin = phba->sli4_hba.max_cfg_param.max_eq; 10104 10105 /* Check to see if there is enough for default cfg */ 10106 if ((phba->cfg_irq_chann > qmin) || 10107 (phba->cfg_hdw_queue > qmin)) { 10108 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10109 "2005 Reducing Queues - " 10110 "FW resource limitation: " 10111 "WQ %d CQ %d EQ %d: min %d: " 10112 "IRQ %d HDWQ %d\n", 10113 phba->sli4_hba.max_cfg_param.max_wq, 10114 phba->sli4_hba.max_cfg_param.max_cq, 10115 phba->sli4_hba.max_cfg_param.max_eq, 10116 qmin, phba->cfg_irq_chann, 10117 phba->cfg_hdw_queue); 10118 10119 if (phba->cfg_irq_chann > qmin) 10120 phba->cfg_irq_chann = qmin; 10121 if (phba->cfg_hdw_queue > qmin) 10122 phba->cfg_hdw_queue = qmin; 10123 } 10124 } 10125 10126 if (rc) 10127 goto read_cfg_out; 10128 10129 /* Update link speed if forced link speed is supported */ 10130 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 10131 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 10132 forced_link_speed = 10133 bf_get(lpfc_mbx_rd_conf_link_speed, rd_config); 10134 if (forced_link_speed) { 10135 phba->hba_flag |= HBA_FORCED_LINK_SPEED; 10136 10137 switch (forced_link_speed) { 10138 case LINK_SPEED_1G: 10139 phba->cfg_link_speed = 10140 LPFC_USER_LINK_SPEED_1G; 10141 break; 10142 case LINK_SPEED_2G: 10143 phba->cfg_link_speed = 10144 LPFC_USER_LINK_SPEED_2G; 10145 break; 10146 case LINK_SPEED_4G: 10147 phba->cfg_link_speed = 10148 LPFC_USER_LINK_SPEED_4G; 10149 break; 10150 case LINK_SPEED_8G: 10151 phba->cfg_link_speed = 10152 LPFC_USER_LINK_SPEED_8G; 10153 break; 10154 case LINK_SPEED_10G: 10155 phba->cfg_link_speed = 10156 LPFC_USER_LINK_SPEED_10G; 10157 break; 10158 case LINK_SPEED_16G: 10159 phba->cfg_link_speed = 10160 LPFC_USER_LINK_SPEED_16G; 10161 break; 10162 case LINK_SPEED_32G: 10163 phba->cfg_link_speed = 10164 LPFC_USER_LINK_SPEED_32G; 10165 break; 10166 case LINK_SPEED_64G: 10167 phba->cfg_link_speed = 10168 LPFC_USER_LINK_SPEED_64G; 10169 break; 10170 case 0xffff: 10171 phba->cfg_link_speed = 10172 LPFC_USER_LINK_SPEED_AUTO; 10173 break; 10174 default: 10175 lpfc_printf_log(phba, KERN_ERR, 10176 LOG_TRACE_EVENT, 10177 "0047 Unrecognized link " 10178 "speed : %d\n", 10179 forced_link_speed); 10180 phba->cfg_link_speed = 10181 LPFC_USER_LINK_SPEED_AUTO; 10182 } 10183 } 10184 } 10185 10186 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 10187 length = phba->sli4_hba.max_cfg_param.max_xri - 10188 lpfc_sli4_get_els_iocb_cnt(phba); 10189 if (phba->cfg_hba_queue_depth > length) { 10190 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 10191 "3361 HBA queue depth changed from %d to %d\n", 10192 phba->cfg_hba_queue_depth, length); 10193 phba->cfg_hba_queue_depth = length; 10194 } 10195 10196 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 10197 LPFC_SLI_INTF_IF_TYPE_2) 10198 goto read_cfg_out; 10199 10200 /* get the pf# and vf# for SLI4 if_type 2 port */ 10201 length = (sizeof(struct lpfc_mbx_get_func_cfg) - 10202 sizeof(struct lpfc_sli4_cfg_mhdr)); 10203 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON, 10204 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG, 10205 length, LPFC_SLI4_MBX_EMBED); 10206 10207 rc2 = lpfc_sli_issue_mbox(phba, pmb, 
MBX_POLL); 10208 shdr = (union lpfc_sli4_cfg_shdr *) 10209 &pmb->u.mqe.un.sli4_config.header.cfg_shdr; 10210 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10211 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 10212 if (rc2 || shdr_status || shdr_add_status) { 10213 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10214 "3026 Mailbox failed , mbxCmd x%x " 10215 "GET_FUNCTION_CONFIG, mbxStatus x%x\n", 10216 bf_get(lpfc_mqe_command, &pmb->u.mqe), 10217 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 10218 goto read_cfg_out; 10219 } 10220 10221 /* search for fc_fcoe resrouce descriptor */ 10222 get_func_cfg = &pmb->u.mqe.un.get_func_cfg; 10223 10224 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0]; 10225 desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0; 10226 length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc); 10227 if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD) 10228 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH; 10229 else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH) 10230 goto read_cfg_out; 10231 10232 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) { 10233 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i); 10234 if (LPFC_RSRC_DESC_TYPE_FCFCOE == 10235 bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) { 10236 phba->sli4_hba.iov.pf_number = 10237 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc); 10238 phba->sli4_hba.iov.vf_number = 10239 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc); 10240 break; 10241 } 10242 } 10243 10244 if (i < LPFC_RSRC_DESC_MAX_NUM) 10245 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 10246 "3027 GET_FUNCTION_CONFIG: pf_number:%d, " 10247 "vf_number:%d\n", phba->sli4_hba.iov.pf_number, 10248 phba->sli4_hba.iov.vf_number); 10249 else 10250 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10251 "3028 GET_FUNCTION_CONFIG: failed to find " 10252 "Resource Descriptor:x%x\n", 10253 LPFC_RSRC_DESC_TYPE_FCFCOE); 10254 10255 read_cfg_out: 10256 mempool_free(pmb, phba->mbox_mem_pool); 10257 return rc; 10258 } 10259 10260 /** 10261 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port. 10262 * @phba: pointer to lpfc hba data structure. 10263 * 10264 * This routine is invoked to setup the port-side endian order when 10265 * the port if_type is 0. This routine has no function for other 10266 * if_types. 10267 * 10268 * Return codes 10269 * 0 - successful 10270 * -ENOMEM - No available memory 10271 * -EIO - The mailbox failed to complete successfully. 10272 **/ 10273 static int 10274 lpfc_setup_endian_order(struct lpfc_hba *phba) 10275 { 10276 LPFC_MBOXQ_t *mboxq; 10277 uint32_t if_type, rc = 0; 10278 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0, 10279 HOST_ENDIAN_HIGH_WORD1}; 10280 10281 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 10282 switch (if_type) { 10283 case LPFC_SLI_INTF_IF_TYPE_0: 10284 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 10285 GFP_KERNEL); 10286 if (!mboxq) { 10287 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10288 "0492 Unable to allocate memory for " 10289 "issuing SLI_CONFIG_SPECIAL mailbox " 10290 "command\n"); 10291 return -ENOMEM; 10292 } 10293 10294 /* 10295 * The SLI4_CONFIG_SPECIAL mailbox command requires the first 10296 * two words to contain special data values and no other data. 
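 * The two endian words (HOST_ENDIAN_LOW_WORD0 / HOST_ENDIAN_HIGH_WORD1)
 * are copied over a zeroed MQE and issued by polling; the port uses the
 * byte pattern to detect the host's byte order. Only if_type 0 ports need
 * this, which is why the other interface types simply return success.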
10297 */ 10298 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t)); 10299 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data)); 10300 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 10301 if (rc != MBX_SUCCESS) { 10302 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10303 "0493 SLI_CONFIG_SPECIAL mailbox " 10304 "failed with status x%x\n", 10305 rc); 10306 rc = -EIO; 10307 } 10308 mempool_free(mboxq, phba->mbox_mem_pool); 10309 break; 10310 case LPFC_SLI_INTF_IF_TYPE_6: 10311 case LPFC_SLI_INTF_IF_TYPE_2: 10312 case LPFC_SLI_INTF_IF_TYPE_1: 10313 default: 10314 break; 10315 } 10316 return rc; 10317 } 10318 10319 /** 10320 * lpfc_sli4_queue_verify - Verify and update EQ counts 10321 * @phba: pointer to lpfc hba data structure. 10322 * 10323 * This routine is invoked to check the user settable queue counts for EQs. 10324 * After this routine is called the counts will be set to valid values that 10325 * adhere to the constraints of the system's interrupt vectors and the port's 10326 * queue resources. 10327 * 10328 * Return codes 10329 * 0 - successful 10330 * -ENOMEM - No available memory 10331 **/ 10332 static int 10333 lpfc_sli4_queue_verify(struct lpfc_hba *phba) 10334 { 10335 /* 10336 * Sanity check for configured queue parameters against the run-time 10337 * device parameters 10338 */ 10339 10340 if (phba->nvmet_support) { 10341 if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq) 10342 phba->cfg_nvmet_mrq = phba->cfg_hdw_queue; 10343 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX) 10344 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX; 10345 } 10346 10347 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10348 "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n", 10349 phba->cfg_hdw_queue, phba->cfg_irq_chann, 10350 phba->cfg_nvmet_mrq); 10351 10352 /* Get EQ depth from module parameter, fake the default for now */ 10353 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 10354 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 10355 10356 /* Get CQ depth from module parameter, fake the default for now */ 10357 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 10358 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 10359 return 0; 10360 } 10361 10362 static int 10363 lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx) 10364 { 10365 struct lpfc_queue *qdesc; 10366 u32 wqesize; 10367 int cpu; 10368 10369 cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ); 10370 /* Create Fast Path IO CQs */ 10371 if (phba->enab_exp_wqcq_pages) 10372 /* Increase the CQ size when WQEs contain an embedded cdb */ 10373 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, 10374 phba->sli4_hba.cq_esize, 10375 LPFC_CQE_EXP_COUNT, cpu); 10376 10377 else 10378 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10379 phba->sli4_hba.cq_esize, 10380 phba->sli4_hba.cq_ecount, cpu); 10381 if (!qdesc) { 10382 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10383 "0499 Failed allocate fast-path IO CQ (%d)\n", 10384 idx); 10385 return 1; 10386 } 10387 qdesc->qe_valid = 1; 10388 qdesc->hdwq = idx; 10389 qdesc->chann = cpu; 10390 phba->sli4_hba.hdwq[idx].io_cq = qdesc; 10391 10392 /* Create Fast Path IO WQs */ 10393 if (phba->enab_exp_wqcq_pages) { 10394 /* Increase the WQ size when WQEs contain an embedded cdb */ 10395 wqesize = (phba->fcp_embed_io) ? 
10396 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize; 10397 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, 10398 wqesize, 10399 LPFC_WQE_EXP_COUNT, cpu); 10400 } else 10401 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10402 phba->sli4_hba.wq_esize, 10403 phba->sli4_hba.wq_ecount, cpu); 10404 10405 if (!qdesc) { 10406 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10407 "0503 Failed allocate fast-path IO WQ (%d)\n", 10408 idx); 10409 return 1; 10410 } 10411 qdesc->hdwq = idx; 10412 qdesc->chann = cpu; 10413 phba->sli4_hba.hdwq[idx].io_wq = qdesc; 10414 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 10415 return 0; 10416 } 10417 10418 /** 10419 * lpfc_sli4_queue_create - Create all the SLI4 queues 10420 * @phba: pointer to lpfc hba data structure. 10421 * 10422 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA 10423 * operation. For each SLI4 queue type, the parameters such as queue entry 10424 * count (queue depth) shall be taken from the module parameter. For now, 10425 * we just use some constant number as place holder. 10426 * 10427 * Return codes 10428 * 0 - successful 10429 * -ENOMEM - No availble memory 10430 * -EIO - The mailbox failed to complete successfully. 10431 **/ 10432 int 10433 lpfc_sli4_queue_create(struct lpfc_hba *phba) 10434 { 10435 struct lpfc_queue *qdesc; 10436 int idx, cpu, eqcpu; 10437 struct lpfc_sli4_hdw_queue *qp; 10438 struct lpfc_vector_map_info *cpup; 10439 struct lpfc_vector_map_info *eqcpup; 10440 struct lpfc_eq_intr_info *eqi; 10441 10442 /* 10443 * Create HBA Record arrays. 10444 * Both NVME and FCP will share that same vectors / EQs 10445 */ 10446 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; 10447 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; 10448 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; 10449 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; 10450 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; 10451 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; 10452 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 10453 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 10454 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 10455 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 10456 10457 if (!phba->sli4_hba.hdwq) { 10458 phba->sli4_hba.hdwq = kcalloc( 10459 phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue), 10460 GFP_KERNEL); 10461 if (!phba->sli4_hba.hdwq) { 10462 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10463 "6427 Failed allocate memory for " 10464 "fast-path Hardware Queue array\n"); 10465 goto out_error; 10466 } 10467 /* Prepare hardware queues to take IO buffers */ 10468 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 10469 qp = &phba->sli4_hba.hdwq[idx]; 10470 spin_lock_init(&qp->io_buf_list_get_lock); 10471 spin_lock_init(&qp->io_buf_list_put_lock); 10472 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get); 10473 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); 10474 qp->get_io_bufs = 0; 10475 qp->put_io_bufs = 0; 10476 qp->total_io_bufs = 0; 10477 spin_lock_init(&qp->abts_io_buf_list_lock); 10478 INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list); 10479 qp->abts_scsi_io_bufs = 0; 10480 qp->abts_nvme_io_bufs = 0; 10481 INIT_LIST_HEAD(&qp->sgl_list); 10482 INIT_LIST_HEAD(&qp->cmd_rsp_buf_list); 10483 spin_lock_init(&qp->hdwq_lock); 10484 } 10485 } 10486 10487 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 10488 if (phba->nvmet_support) { 10489 phba->sli4_hba.nvmet_cqset = kcalloc( 10490 phba->cfg_nvmet_mrq, 10491 sizeof(struct lpfc_queue *), 10492 GFP_KERNEL); 10493 if (!phba->sli4_hba.nvmet_cqset) { 10494 
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10495 "3121 Fail allocate memory for " 10496 "fast-path CQ set array\n"); 10497 goto out_error; 10498 } 10499 phba->sli4_hba.nvmet_mrq_hdr = kcalloc( 10500 phba->cfg_nvmet_mrq, 10501 sizeof(struct lpfc_queue *), 10502 GFP_KERNEL); 10503 if (!phba->sli4_hba.nvmet_mrq_hdr) { 10504 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10505 "3122 Fail allocate memory for " 10506 "fast-path RQ set hdr array\n"); 10507 goto out_error; 10508 } 10509 phba->sli4_hba.nvmet_mrq_data = kcalloc( 10510 phba->cfg_nvmet_mrq, 10511 sizeof(struct lpfc_queue *), 10512 GFP_KERNEL); 10513 if (!phba->sli4_hba.nvmet_mrq_data) { 10514 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10515 "3124 Fail allocate memory for " 10516 "fast-path RQ set data array\n"); 10517 goto out_error; 10518 } 10519 } 10520 } 10521 10522 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); 10523 10524 /* Create HBA Event Queues (EQs) */ 10525 for_each_present_cpu(cpu) { 10526 /* We only want to create 1 EQ per vector, even though 10527 * multiple CPUs might be using that vector. so only 10528 * selects the CPUs that are LPFC_CPU_FIRST_IRQ. 10529 */ 10530 cpup = &phba->sli4_hba.cpu_map[cpu]; 10531 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) 10532 continue; 10533 10534 /* Get a ptr to the Hardware Queue associated with this CPU */ 10535 qp = &phba->sli4_hba.hdwq[cpup->hdwq]; 10536 10537 /* Allocate an EQ */ 10538 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10539 phba->sli4_hba.eq_esize, 10540 phba->sli4_hba.eq_ecount, cpu); 10541 if (!qdesc) { 10542 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10543 "0497 Failed allocate EQ (%d)\n", 10544 cpup->hdwq); 10545 goto out_error; 10546 } 10547 qdesc->qe_valid = 1; 10548 qdesc->hdwq = cpup->hdwq; 10549 qdesc->chann = cpu; /* First CPU this EQ is affinitized to */ 10550 qdesc->last_cpu = qdesc->chann; 10551 10552 /* Save the allocated EQ in the Hardware Queue */ 10553 qp->hba_eq = qdesc; 10554 10555 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu); 10556 list_add(&qdesc->cpu_list, &eqi->list); 10557 } 10558 10559 /* Now we need to populate the other Hardware Queues, that share 10560 * an IRQ vector, with the associated EQ ptr. 
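 * Only CPUs flagged LPFC_CPU_FIRST_IRQ allocated an EQ in the loop above
 * (one EQ per interrupt vector). Every remaining CPU's hardware queue is
 * pointed at the EQ owned by the first CPU on its vector, located through
 * lpfc_find_cpu_handle(..., LPFC_FIND_BY_EQ).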
10561 */ 10562 for_each_present_cpu(cpu) { 10563 cpup = &phba->sli4_hba.cpu_map[cpu]; 10564 10565 /* Check for EQ already allocated in previous loop */ 10566 if (cpup->flag & LPFC_CPU_FIRST_IRQ) 10567 continue; 10568 10569 /* Check for multiple CPUs per hdwq */ 10570 qp = &phba->sli4_hba.hdwq[cpup->hdwq]; 10571 if (qp->hba_eq) 10572 continue; 10573 10574 /* We need to share an EQ for this hdwq */ 10575 eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ); 10576 eqcpup = &phba->sli4_hba.cpu_map[eqcpu]; 10577 qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq; 10578 } 10579 10580 /* Allocate IO Path SLI4 CQ/WQs */ 10581 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 10582 if (lpfc_alloc_io_wq_cq(phba, idx)) 10583 goto out_error; 10584 } 10585 10586 if (phba->nvmet_support) { 10587 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { 10588 cpu = lpfc_find_cpu_handle(phba, idx, 10589 LPFC_FIND_BY_HDWQ); 10590 qdesc = lpfc_sli4_queue_alloc(phba, 10591 LPFC_DEFAULT_PAGE_SIZE, 10592 phba->sli4_hba.cq_esize, 10593 phba->sli4_hba.cq_ecount, 10594 cpu); 10595 if (!qdesc) { 10596 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10597 "3142 Failed allocate NVME " 10598 "CQ Set (%d)\n", idx); 10599 goto out_error; 10600 } 10601 qdesc->qe_valid = 1; 10602 qdesc->hdwq = idx; 10603 qdesc->chann = cpu; 10604 phba->sli4_hba.nvmet_cqset[idx] = qdesc; 10605 } 10606 } 10607 10608 /* 10609 * Create Slow Path Completion Queues (CQs) 10610 */ 10611 10612 cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ); 10613 /* Create slow-path Mailbox Command Complete Queue */ 10614 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10615 phba->sli4_hba.cq_esize, 10616 phba->sli4_hba.cq_ecount, cpu); 10617 if (!qdesc) { 10618 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10619 "0500 Failed allocate slow-path mailbox CQ\n"); 10620 goto out_error; 10621 } 10622 qdesc->qe_valid = 1; 10623 phba->sli4_hba.mbx_cq = qdesc; 10624 10625 /* Create slow-path ELS Complete Queue */ 10626 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10627 phba->sli4_hba.cq_esize, 10628 phba->sli4_hba.cq_ecount, cpu); 10629 if (!qdesc) { 10630 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10631 "0501 Failed allocate slow-path ELS CQ\n"); 10632 goto out_error; 10633 } 10634 qdesc->qe_valid = 1; 10635 qdesc->chann = cpu; 10636 phba->sli4_hba.els_cq = qdesc; 10637 10638 10639 /* 10640 * Create Slow Path Work Queues (WQs) 10641 */ 10642 10643 /* Create Mailbox Command Queue */ 10644 10645 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10646 phba->sli4_hba.mq_esize, 10647 phba->sli4_hba.mq_ecount, cpu); 10648 if (!qdesc) { 10649 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10650 "0505 Failed allocate slow-path MQ\n"); 10651 goto out_error; 10652 } 10653 qdesc->chann = cpu; 10654 phba->sli4_hba.mbx_wq = qdesc; 10655 10656 /* 10657 * Create ELS Work Queues 10658 */ 10659 10660 /* Create slow-path ELS Work Queue */ 10661 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10662 phba->sli4_hba.wq_esize, 10663 phba->sli4_hba.wq_ecount, cpu); 10664 if (!qdesc) { 10665 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10666 "0504 Failed allocate slow-path ELS WQ\n"); 10667 goto out_error; 10668 } 10669 qdesc->chann = cpu; 10670 phba->sli4_hba.els_wq = qdesc; 10671 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 10672 10673 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 10674 /* Create NVME LS Complete Queue */ 10675 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10676 
phba->sli4_hba.cq_esize, 10677 phba->sli4_hba.cq_ecount, cpu); 10678 if (!qdesc) { 10679 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10680 "6079 Failed allocate NVME LS CQ\n"); 10681 goto out_error; 10682 } 10683 qdesc->chann = cpu; 10684 qdesc->qe_valid = 1; 10685 phba->sli4_hba.nvmels_cq = qdesc; 10686 10687 /* Create NVME LS Work Queue */ 10688 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10689 phba->sli4_hba.wq_esize, 10690 phba->sli4_hba.wq_ecount, cpu); 10691 if (!qdesc) { 10692 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10693 "6080 Failed allocate NVME LS WQ\n"); 10694 goto out_error; 10695 } 10696 qdesc->chann = cpu; 10697 phba->sli4_hba.nvmels_wq = qdesc; 10698 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 10699 } 10700 10701 /* 10702 * Create Receive Queue (RQ) 10703 */ 10704 10705 /* Create Receive Queue for header */ 10706 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10707 phba->sli4_hba.rq_esize, 10708 phba->sli4_hba.rq_ecount, cpu); 10709 if (!qdesc) { 10710 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10711 "0506 Failed allocate receive HRQ\n"); 10712 goto out_error; 10713 } 10714 phba->sli4_hba.hdr_rq = qdesc; 10715 10716 /* Create Receive Queue for data */ 10717 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10718 phba->sli4_hba.rq_esize, 10719 phba->sli4_hba.rq_ecount, cpu); 10720 if (!qdesc) { 10721 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10722 "0507 Failed allocate receive DRQ\n"); 10723 goto out_error; 10724 } 10725 phba->sli4_hba.dat_rq = qdesc; 10726 10727 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) && 10728 phba->nvmet_support) { 10729 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { 10730 cpu = lpfc_find_cpu_handle(phba, idx, 10731 LPFC_FIND_BY_HDWQ); 10732 /* Create NVMET Receive Queue for header */ 10733 qdesc = lpfc_sli4_queue_alloc(phba, 10734 LPFC_DEFAULT_PAGE_SIZE, 10735 phba->sli4_hba.rq_esize, 10736 LPFC_NVMET_RQE_DEF_COUNT, 10737 cpu); 10738 if (!qdesc) { 10739 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10740 "3146 Failed allocate " 10741 "receive HRQ\n"); 10742 goto out_error; 10743 } 10744 qdesc->hdwq = idx; 10745 phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc; 10746 10747 /* Only needed for header of RQ pair */ 10748 qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp), 10749 GFP_KERNEL, 10750 cpu_to_node(cpu)); 10751 if (qdesc->rqbp == NULL) { 10752 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10753 "6131 Failed allocate " 10754 "Header RQBP\n"); 10755 goto out_error; 10756 } 10757 10758 /* Put list in known state in case driver load fails. 
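 * The cleanup path that runs when driver load fails walks this
 * rqb_buffer_list, so the list head must be valid (and empty) even if a
 * later allocation in this loop never gets far enough to post buffers.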
*/ 10759 INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list); 10760 10761 /* Create NVMET Receive Queue for data */ 10762 qdesc = lpfc_sli4_queue_alloc(phba, 10763 LPFC_DEFAULT_PAGE_SIZE, 10764 phba->sli4_hba.rq_esize, 10765 LPFC_NVMET_RQE_DEF_COUNT, 10766 cpu); 10767 if (!qdesc) { 10768 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10769 "3156 Failed allocate " 10770 "receive DRQ\n"); 10771 goto out_error; 10772 } 10773 qdesc->hdwq = idx; 10774 phba->sli4_hba.nvmet_mrq_data[idx] = qdesc; 10775 } 10776 } 10777 10778 /* Clear NVME stats */ 10779 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 10780 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 10781 memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0, 10782 sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat)); 10783 } 10784 } 10785 10786 /* Clear SCSI stats */ 10787 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 10788 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 10789 memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0, 10790 sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat)); 10791 } 10792 } 10793 10794 return 0; 10795 10796 out_error: 10797 lpfc_sli4_queue_destroy(phba); 10798 return -ENOMEM; 10799 } 10800 10801 static inline void 10802 __lpfc_sli4_release_queue(struct lpfc_queue **qp) 10803 { 10804 if (*qp != NULL) { 10805 lpfc_sli4_queue_free(*qp); 10806 *qp = NULL; 10807 } 10808 } 10809 10810 static inline void 10811 lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max) 10812 { 10813 int idx; 10814 10815 if (*qs == NULL) 10816 return; 10817 10818 for (idx = 0; idx < max; idx++) 10819 __lpfc_sli4_release_queue(&(*qs)[idx]); 10820 10821 kfree(*qs); 10822 *qs = NULL; 10823 } 10824 10825 static inline void 10826 lpfc_sli4_release_hdwq(struct lpfc_hba *phba) 10827 { 10828 struct lpfc_sli4_hdw_queue *hdwq; 10829 struct lpfc_queue *eq; 10830 uint32_t idx; 10831 10832 hdwq = phba->sli4_hba.hdwq; 10833 10834 /* Loop thru all Hardware Queues */ 10835 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 10836 /* Free the CQ/WQ corresponding to the Hardware Queue */ 10837 lpfc_sli4_queue_free(hdwq[idx].io_cq); 10838 lpfc_sli4_queue_free(hdwq[idx].io_wq); 10839 hdwq[idx].hba_eq = NULL; 10840 hdwq[idx].io_cq = NULL; 10841 hdwq[idx].io_wq = NULL; 10842 if (phba->cfg_xpsgl && !phba->nvmet_support) 10843 lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]); 10844 lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]); 10845 } 10846 /* Loop thru all IRQ vectors */ 10847 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 10848 /* Free the EQ corresponding to the IRQ vector */ 10849 eq = phba->sli4_hba.hba_eq_hdl[idx].eq; 10850 lpfc_sli4_queue_free(eq); 10851 phba->sli4_hba.hba_eq_hdl[idx].eq = NULL; 10852 } 10853 } 10854 10855 /** 10856 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues 10857 * @phba: pointer to lpfc hba data structure. 10858 * 10859 * This routine is invoked to release all the SLI4 queues with the FCoE HBA 10860 * operation. 10861 * 10862 * Return codes 10863 * 0 - successful 10864 * -ENOMEM - No available memory 10865 * -EIO - The mailbox failed to complete successfully. 10866 **/ 10867 void 10868 lpfc_sli4_queue_destroy(struct lpfc_hba *phba) 10869 { 10870 /* 10871 * Set FREE_INIT before beginning to free the queues. 10872 * Wait until the users of queues to acknowledge to 10873 * release queues by clearing FREE_WAIT. 
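 * Any path still referencing a queue holds LPFC_QUEUE_FREE_WAIT; this
 * routine drops hbalock and sleeps in 20 ms steps until that flag clears,
 * so no queue is freed out from under an active user.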
	 */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT;
	while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) {
		spin_unlock_irq(&phba->hbalock);
		msleep(20);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

	lpfc_sli4_cleanup_poll_list(phba);

	/* Release HBA eqs */
	if (phba->sli4_hba.hdwq)
		lpfc_sli4_release_hdwq(phba);

	if (phba->nvmet_support) {
		lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
					 phba->cfg_nvmet_mrq);

		lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
					 phba->cfg_nvmet_mrq);
		lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
					 phba->cfg_nvmet_mrq);
	}

	/* Release mailbox command work queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);

	/* Release ELS work queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);

	/* Release NVME LS work queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq);

	/* Release unsolicited receive queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq);
	__lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq);

	/* Release ELS complete queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);

	/* Release NVME LS complete queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq);

	/* Release mailbox command complete queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq);

	/* Everything on this list has been freed */
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);

	/* Done with freeing the queues */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT;
	spin_unlock_irq(&phba->hbalock);
}

int
lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
{
	struct lpfc_rqb *rqbp;
	struct lpfc_dmabuf *h_buf;
	struct rqb_dmabuf *rqb_buffer;

	rqbp = rq->rqbp;
	while (!list_empty(&rqbp->rqb_buffer_list)) {
		list_remove_head(&rqbp->rqb_buffer_list, h_buf,
				 struct lpfc_dmabuf, list);

		rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf);
		(rqbp->rqb_free_buffer)(phba, rqb_buffer);
		rqbp->buffer_count--;
	}
	return 1;
}

static int
lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
		  struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map,
		  int qidx, uint32_t qtype)
{
	struct lpfc_sli_ring *pring;
	int rc;

	if (!eq || !cq || !wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6085 Fast-path %s (%d) not allocated\n",
				((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);
		return -ENOMEM;
	}

	/* Create the CQ first */
	rc = lpfc_cq_create(phba, cq, eq,
			    (qtype == LPFC_MBOX) ?
LPFC_MCQ : LPFC_WCQ, qtype); 10968 if (rc) { 10969 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10970 "6086 Failed setup of CQ (%d), rc = 0x%x\n", 10971 qidx, (uint32_t)rc); 10972 return rc; 10973 } 10974 10975 if (qtype != LPFC_MBOX) { 10976 /* Setup cq_map for fast lookup */ 10977 if (cq_map) 10978 *cq_map = cq->queue_id; 10979 10980 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10981 "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n", 10982 qidx, cq->queue_id, qidx, eq->queue_id); 10983 10984 /* create the wq */ 10985 rc = lpfc_wq_create(phba, wq, cq, qtype); 10986 if (rc) { 10987 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10988 "4618 Fail setup fastpath WQ (%d), rc = 0x%x\n", 10989 qidx, (uint32_t)rc); 10990 /* no need to tear down cq - caller will do so */ 10991 return rc; 10992 } 10993 10994 /* Bind this CQ/WQ to the NVME ring */ 10995 pring = wq->pring; 10996 pring->sli.sli4.wqp = (void *)wq; 10997 cq->pring = pring; 10998 10999 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11000 "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n", 11001 qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id); 11002 } else { 11003 rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX); 11004 if (rc) { 11005 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11006 "0539 Failed setup of slow-path MQ: " 11007 "rc = 0x%x\n", rc); 11008 /* no need to tear down cq - caller will do so */ 11009 return rc; 11010 } 11011 11012 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11013 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", 11014 phba->sli4_hba.mbx_wq->queue_id, 11015 phba->sli4_hba.mbx_cq->queue_id); 11016 } 11017 11018 return 0; 11019 } 11020 11021 /** 11022 * lpfc_setup_cq_lookup - Setup the CQ lookup table 11023 * @phba: pointer to lpfc hba data structure. 11024 * 11025 * This routine will populate the cq_lookup table by all 11026 * available CQ queue_id's. 11027 **/ 11028 static void 11029 lpfc_setup_cq_lookup(struct lpfc_hba *phba) 11030 { 11031 struct lpfc_queue *eq, *childq; 11032 int qidx; 11033 11034 memset(phba->sli4_hba.cq_lookup, 0, 11035 (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1))); 11036 /* Loop thru all IRQ vectors */ 11037 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 11038 /* Get the EQ corresponding to the IRQ vector */ 11039 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; 11040 if (!eq) 11041 continue; 11042 /* Loop through all CQs associated with that EQ */ 11043 list_for_each_entry(childq, &eq->child_list, list) { 11044 if (childq->queue_id > phba->sli4_hba.cq_max) 11045 continue; 11046 if (childq->subtype == LPFC_IO) 11047 phba->sli4_hba.cq_lookup[childq->queue_id] = 11048 childq; 11049 } 11050 } 11051 } 11052 11053 /** 11054 * lpfc_sli4_queue_setup - Set up all the SLI4 queues 11055 * @phba: pointer to lpfc hba data structure. 11056 * 11057 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA 11058 * operation. 11059 * 11060 * Return codes 11061 * 0 - successful 11062 * -ENOMEM - No available memory 11063 * -EIO - The mailbox failed to complete successfully. 
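 *
 * The queues themselves must already have been allocated. Setup order is:
 * EQs (one per IRQ vector), the per-hdwq IO CQ/WQ pairs, the slow-path
 * mailbox, ELS and NVME LS CQ/WQ pairs, the NVMET CQ set and MRQs when in
 * target mode, and the unsolicited header/data receive queues; the EQ
 * delay is then programmed and the CQ lookup table is rebuilt.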
11064 **/ 11065 int 11066 lpfc_sli4_queue_setup(struct lpfc_hba *phba) 11067 { 11068 uint32_t shdr_status, shdr_add_status; 11069 union lpfc_sli4_cfg_shdr *shdr; 11070 struct lpfc_vector_map_info *cpup; 11071 struct lpfc_sli4_hdw_queue *qp; 11072 LPFC_MBOXQ_t *mboxq; 11073 int qidx, cpu; 11074 uint32_t length, usdelay; 11075 int rc = -ENOMEM; 11076 11077 /* Check for dual-ULP support */ 11078 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 11079 if (!mboxq) { 11080 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11081 "3249 Unable to allocate memory for " 11082 "QUERY_FW_CFG mailbox command\n"); 11083 return -ENOMEM; 11084 } 11085 length = (sizeof(struct lpfc_mbx_query_fw_config) - 11086 sizeof(struct lpfc_sli4_cfg_mhdr)); 11087 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 11088 LPFC_MBOX_OPCODE_QUERY_FW_CFG, 11089 length, LPFC_SLI4_MBX_EMBED); 11090 11091 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 11092 11093 shdr = (union lpfc_sli4_cfg_shdr *) 11094 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 11095 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 11096 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 11097 if (shdr_status || shdr_add_status || rc) { 11098 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11099 "3250 QUERY_FW_CFG mailbox failed with status " 11100 "x%x add_status x%x, mbx status x%x\n", 11101 shdr_status, shdr_add_status, rc); 11102 mempool_free(mboxq, phba->mbox_mem_pool); 11103 rc = -ENXIO; 11104 goto out_error; 11105 } 11106 11107 phba->sli4_hba.fw_func_mode = 11108 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode; 11109 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode; 11110 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode; 11111 phba->sli4_hba.physical_port = 11112 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port; 11113 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11114 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, " 11115 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode, 11116 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode); 11117 11118 mempool_free(mboxq, phba->mbox_mem_pool); 11119 11120 /* 11121 * Set up HBA Event Queues (EQs) 11122 */ 11123 qp = phba->sli4_hba.hdwq; 11124 11125 /* Set up HBA event queue */ 11126 if (!qp) { 11127 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11128 "3147 Fast-path EQs not allocated\n"); 11129 rc = -ENOMEM; 11130 goto out_error; 11131 } 11132 11133 /* Loop thru all IRQ vectors */ 11134 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 11135 /* Create HBA Event Queues (EQs) in order */ 11136 for_each_present_cpu(cpu) { 11137 cpup = &phba->sli4_hba.cpu_map[cpu]; 11138 11139 /* Look for the CPU thats using that vector with 11140 * LPFC_CPU_FIRST_IRQ set. 
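 * Only one CPU per vector carries that flag; its hdwq owns the EQ, and
 * the EQ is also saved in hba_eq_hdl[] so it can be looked up later by
 * IRQ vector index.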
11141 */ 11142 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) 11143 continue; 11144 if (qidx != cpup->eq) 11145 continue; 11146 11147 /* Create an EQ for that vector */ 11148 rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq, 11149 phba->cfg_fcp_imax); 11150 if (rc) { 11151 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11152 "0523 Failed setup of fast-path" 11153 " EQ (%d), rc = 0x%x\n", 11154 cpup->eq, (uint32_t)rc); 11155 goto out_destroy; 11156 } 11157 11158 /* Save the EQ for that vector in the hba_eq_hdl */ 11159 phba->sli4_hba.hba_eq_hdl[cpup->eq].eq = 11160 qp[cpup->hdwq].hba_eq; 11161 11162 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11163 "2584 HBA EQ setup: queue[%d]-id=%d\n", 11164 cpup->eq, 11165 qp[cpup->hdwq].hba_eq->queue_id); 11166 } 11167 } 11168 11169 /* Loop thru all Hardware Queues */ 11170 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { 11171 cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ); 11172 cpup = &phba->sli4_hba.cpu_map[cpu]; 11173 11174 /* Create the CQ/WQ corresponding to the Hardware Queue */ 11175 rc = lpfc_create_wq_cq(phba, 11176 phba->sli4_hba.hdwq[cpup->hdwq].hba_eq, 11177 qp[qidx].io_cq, 11178 qp[qidx].io_wq, 11179 &phba->sli4_hba.hdwq[qidx].io_cq_map, 11180 qidx, 11181 LPFC_IO); 11182 if (rc) { 11183 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11184 "0535 Failed to setup fastpath " 11185 "IO WQ/CQ (%d), rc = 0x%x\n", 11186 qidx, (uint32_t)rc); 11187 goto out_destroy; 11188 } 11189 } 11190 11191 /* 11192 * Set up Slow Path Complete Queues (CQs) 11193 */ 11194 11195 /* Set up slow-path MBOX CQ/MQ */ 11196 11197 if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) { 11198 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11199 "0528 %s not allocated\n", 11200 phba->sli4_hba.mbx_cq ? 11201 "Mailbox WQ" : "Mailbox CQ"); 11202 rc = -ENOMEM; 11203 goto out_destroy; 11204 } 11205 11206 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, 11207 phba->sli4_hba.mbx_cq, 11208 phba->sli4_hba.mbx_wq, 11209 NULL, 0, LPFC_MBOX); 11210 if (rc) { 11211 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11212 "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n", 11213 (uint32_t)rc); 11214 goto out_destroy; 11215 } 11216 if (phba->nvmet_support) { 11217 if (!phba->sli4_hba.nvmet_cqset) { 11218 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11219 "3165 Fast-path NVME CQ Set " 11220 "array not allocated\n"); 11221 rc = -ENOMEM; 11222 goto out_destroy; 11223 } 11224 if (phba->cfg_nvmet_mrq > 1) { 11225 rc = lpfc_cq_create_set(phba, 11226 phba->sli4_hba.nvmet_cqset, 11227 qp, 11228 LPFC_WCQ, LPFC_NVMET); 11229 if (rc) { 11230 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11231 "3164 Failed setup of NVME CQ " 11232 "Set, rc = 0x%x\n", 11233 (uint32_t)rc); 11234 goto out_destroy; 11235 } 11236 } else { 11237 /* Set up NVMET Receive Complete Queue */ 11238 rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0], 11239 qp[0].hba_eq, 11240 LPFC_WCQ, LPFC_NVMET); 11241 if (rc) { 11242 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11243 "6089 Failed setup NVMET CQ: " 11244 "rc = 0x%x\n", (uint32_t)rc); 11245 goto out_destroy; 11246 } 11247 phba->sli4_hba.nvmet_cqset[0]->chann = 0; 11248 11249 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11250 "6090 NVMET CQ setup: cq-id=%d, " 11251 "parent eq-id=%d\n", 11252 phba->sli4_hba.nvmet_cqset[0]->queue_id, 11253 qp[0].hba_eq->queue_id); 11254 } 11255 } 11256 11257 /* Set up slow-path ELS WQ/CQ */ 11258 if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) { 11259 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11260 "0530 ELS %s not 
allocated\n", 11261 phba->sli4_hba.els_cq ? "WQ" : "CQ"); 11262 rc = -ENOMEM; 11263 goto out_destroy; 11264 } 11265 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, 11266 phba->sli4_hba.els_cq, 11267 phba->sli4_hba.els_wq, 11268 NULL, 0, LPFC_ELS); 11269 if (rc) { 11270 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11271 "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n", 11272 (uint32_t)rc); 11273 goto out_destroy; 11274 } 11275 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11276 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", 11277 phba->sli4_hba.els_wq->queue_id, 11278 phba->sli4_hba.els_cq->queue_id); 11279 11280 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 11281 /* Set up NVME LS Complete Queue */ 11282 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) { 11283 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11284 "6091 LS %s not allocated\n", 11285 phba->sli4_hba.nvmels_cq ? "WQ" : "CQ"); 11286 rc = -ENOMEM; 11287 goto out_destroy; 11288 } 11289 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, 11290 phba->sli4_hba.nvmels_cq, 11291 phba->sli4_hba.nvmels_wq, 11292 NULL, 0, LPFC_NVME_LS); 11293 if (rc) { 11294 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11295 "0526 Failed setup of NVVME LS WQ/CQ: " 11296 "rc = 0x%x\n", (uint32_t)rc); 11297 goto out_destroy; 11298 } 11299 11300 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11301 "6096 ELS WQ setup: wq-id=%d, " 11302 "parent cq-id=%d\n", 11303 phba->sli4_hba.nvmels_wq->queue_id, 11304 phba->sli4_hba.nvmels_cq->queue_id); 11305 } 11306 11307 /* 11308 * Create NVMET Receive Queue (RQ) 11309 */ 11310 if (phba->nvmet_support) { 11311 if ((!phba->sli4_hba.nvmet_cqset) || 11312 (!phba->sli4_hba.nvmet_mrq_hdr) || 11313 (!phba->sli4_hba.nvmet_mrq_data)) { 11314 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11315 "6130 MRQ CQ Queues not " 11316 "allocated\n"); 11317 rc = -ENOMEM; 11318 goto out_destroy; 11319 } 11320 if (phba->cfg_nvmet_mrq > 1) { 11321 rc = lpfc_mrq_create(phba, 11322 phba->sli4_hba.nvmet_mrq_hdr, 11323 phba->sli4_hba.nvmet_mrq_data, 11324 phba->sli4_hba.nvmet_cqset, 11325 LPFC_NVMET); 11326 if (rc) { 11327 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11328 "6098 Failed setup of NVMET " 11329 "MRQ: rc = 0x%x\n", 11330 (uint32_t)rc); 11331 goto out_destroy; 11332 } 11333 11334 } else { 11335 rc = lpfc_rq_create(phba, 11336 phba->sli4_hba.nvmet_mrq_hdr[0], 11337 phba->sli4_hba.nvmet_mrq_data[0], 11338 phba->sli4_hba.nvmet_cqset[0], 11339 LPFC_NVMET); 11340 if (rc) { 11341 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11342 "6057 Failed setup of NVMET " 11343 "Receive Queue: rc = 0x%x\n", 11344 (uint32_t)rc); 11345 goto out_destroy; 11346 } 11347 11348 lpfc_printf_log( 11349 phba, KERN_INFO, LOG_INIT, 11350 "6099 NVMET RQ setup: hdr-rq-id=%d, " 11351 "dat-rq-id=%d parent cq-id=%d\n", 11352 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id, 11353 phba->sli4_hba.nvmet_mrq_data[0]->queue_id, 11354 phba->sli4_hba.nvmet_cqset[0]->queue_id); 11355 11356 } 11357 } 11358 11359 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { 11360 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11361 "0540 Receive Queue not allocated\n"); 11362 rc = -ENOMEM; 11363 goto out_destroy; 11364 } 11365 11366 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 11367 phba->sli4_hba.els_cq, LPFC_USOL); 11368 if (rc) { 11369 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11370 "0541 Failed setup of Receive Queue: " 11371 "rc = 0x%x\n", (uint32_t)rc); 11372 goto out_destroy; 11373 } 11374 11375 lpfc_printf_log(phba, KERN_INFO, 
LOG_INIT, 11376 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " 11377 "parent cq-id=%d\n", 11378 phba->sli4_hba.hdr_rq->queue_id, 11379 phba->sli4_hba.dat_rq->queue_id, 11380 phba->sli4_hba.els_cq->queue_id); 11381 11382 if (phba->cfg_fcp_imax) 11383 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax; 11384 else 11385 usdelay = 0; 11386 11387 for (qidx = 0; qidx < phba->cfg_irq_chann; 11388 qidx += LPFC_MAX_EQ_DELAY_EQID_CNT) 11389 lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT, 11390 usdelay); 11391 11392 if (phba->sli4_hba.cq_max) { 11393 kfree(phba->sli4_hba.cq_lookup); 11394 phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1), 11395 sizeof(struct lpfc_queue *), GFP_KERNEL); 11396 if (!phba->sli4_hba.cq_lookup) { 11397 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11398 "0549 Failed setup of CQ Lookup table: " 11399 "size 0x%x\n", phba->sli4_hba.cq_max); 11400 rc = -ENOMEM; 11401 goto out_destroy; 11402 } 11403 lpfc_setup_cq_lookup(phba); 11404 } 11405 return 0; 11406 11407 out_destroy: 11408 lpfc_sli4_queue_unset(phba); 11409 out_error: 11410 return rc; 11411 } 11412 11413 /** 11414 * lpfc_sli4_queue_unset - Unset all the SLI4 queues 11415 * @phba: pointer to lpfc hba data structure. 11416 * 11417 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA 11418 * operation. 11419 * 11420 * Return codes 11421 * 0 - successful 11422 * -ENOMEM - No available memory 11423 * -EIO - The mailbox failed to complete successfully. 11424 **/ 11425 void 11426 lpfc_sli4_queue_unset(struct lpfc_hba *phba) 11427 { 11428 struct lpfc_sli4_hdw_queue *qp; 11429 struct lpfc_queue *eq; 11430 int qidx; 11431 11432 /* Unset mailbox command work queue */ 11433 if (phba->sli4_hba.mbx_wq) 11434 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 11435 11436 /* Unset NVME LS work queue */ 11437 if (phba->sli4_hba.nvmels_wq) 11438 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq); 11439 11440 /* Unset ELS work queue */ 11441 if (phba->sli4_hba.els_wq) 11442 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 11443 11444 /* Unset unsolicited receive queue */ 11445 if (phba->sli4_hba.hdr_rq) 11446 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, 11447 phba->sli4_hba.dat_rq); 11448 11449 /* Unset mailbox command complete queue */ 11450 if (phba->sli4_hba.mbx_cq) 11451 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 11452 11453 /* Unset ELS complete queue */ 11454 if (phba->sli4_hba.els_cq) 11455 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 11456 11457 /* Unset NVME LS complete queue */ 11458 if (phba->sli4_hba.nvmels_cq) 11459 lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq); 11460 11461 if (phba->nvmet_support) { 11462 /* Unset NVMET MRQ queue */ 11463 if (phba->sli4_hba.nvmet_mrq_hdr) { 11464 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) 11465 lpfc_rq_destroy( 11466 phba, 11467 phba->sli4_hba.nvmet_mrq_hdr[qidx], 11468 phba->sli4_hba.nvmet_mrq_data[qidx]); 11469 } 11470 11471 /* Unset NVMET CQ Set complete queue */ 11472 if (phba->sli4_hba.nvmet_cqset) { 11473 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) 11474 lpfc_cq_destroy( 11475 phba, phba->sli4_hba.nvmet_cqset[qidx]); 11476 } 11477 } 11478 11479 /* Unset fast-path SLI4 queues */ 11480 if (phba->sli4_hba.hdwq) { 11481 /* Loop thru all Hardware Queues */ 11482 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { 11483 /* Destroy the CQ/WQ corresponding to Hardware Queue */ 11484 qp = &phba->sli4_hba.hdwq[qidx]; 11485 lpfc_wq_destroy(phba, qp->io_wq); 11486 lpfc_cq_destroy(phba, qp->io_cq); 11487 } 11488 /* Loop thru all IRQ 
vectors */ 11489 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 11490 /* Destroy the EQ corresponding to the IRQ vector */ 11491 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; 11492 lpfc_eq_destroy(phba, eq); 11493 } 11494 } 11495 11496 kfree(phba->sli4_hba.cq_lookup); 11497 phba->sli4_hba.cq_lookup = NULL; 11498 phba->sli4_hba.cq_max = 0; 11499 } 11500 11501 /** 11502 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool 11503 * @phba: pointer to lpfc hba data structure. 11504 * 11505 * This routine is invoked to allocate and set up a pool of completion queue 11506 * events. The body of the completion queue event is a completion queue entry 11507 * CQE. For now, this pool is used for the interrupt service routine to queue 11508 * the following HBA completion queue events for the worker thread to process: 11509 * - Mailbox asynchronous events 11510 * - Receive queue completion unsolicited events 11511 * Later, this can be used for all the slow-path events. 11512 * 11513 * Return codes 11514 * 0 - successful 11515 * -ENOMEM - No available memory 11516 **/ 11517 static int 11518 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba) 11519 { 11520 struct lpfc_cq_event *cq_event; 11521 int i; 11522 11523 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) { 11524 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL); 11525 if (!cq_event) 11526 goto out_pool_create_fail; 11527 list_add_tail(&cq_event->list, 11528 &phba->sli4_hba.sp_cqe_event_pool); 11529 } 11530 return 0; 11531 11532 out_pool_create_fail: 11533 lpfc_sli4_cq_event_pool_destroy(phba); 11534 return -ENOMEM; 11535 } 11536 11537 /** 11538 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool 11539 * @phba: pointer to lpfc hba data structure. 11540 * 11541 * This routine is invoked to free the pool of completion queue events at 11542 * driver unload time. Note that, it is the responsibility of the driver 11543 * cleanup routine to free all the outstanding completion-queue events 11544 * allocated from this pool back into the pool before invoking this routine 11545 * to destroy the pool. 11546 **/ 11547 static void 11548 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba) 11549 { 11550 struct lpfc_cq_event *cq_event, *next_cq_event; 11551 11552 list_for_each_entry_safe(cq_event, next_cq_event, 11553 &phba->sli4_hba.sp_cqe_event_pool, list) { 11554 list_del(&cq_event->list); 11555 kfree(cq_event); 11556 } 11557 } 11558 11559 /** 11560 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 11561 * @phba: pointer to lpfc hba data structure. 11562 * 11563 * This routine is the lock free version of the API invoked to allocate a 11564 * completion-queue event from the free pool. 11565 * 11566 * Return: Pointer to the newly allocated completion-queue event if successful 11567 * NULL otherwise. 11568 **/ 11569 struct lpfc_cq_event * 11570 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 11571 { 11572 struct lpfc_cq_event *cq_event = NULL; 11573 11574 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event, 11575 struct lpfc_cq_event, list); 11576 return cq_event; 11577 } 11578 11579 /** 11580 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 11581 * @phba: pointer to lpfc hba data structure. 11582 * 11583 * This routine is the lock version of the API invoked to allocate a 11584 * completion-queue event from the free pool. 
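 * It takes hbalock internally, so it may be called from interrupt context.
 * A rough pairing sketch (illustrative only, not a specific call site):
 *
 *	cq_event = lpfc_sli4_cq_event_alloc(phba);
 *	if (cq_event)
 *		queue it on a slow-path work list for the worker thread,
 *		which later returns it to the pool with
 *		lpfc_sli4_cq_event_release(phba, cq_event);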
11585 * 11586 * Return: Pointer to the newly allocated completion-queue event if successful 11587 * NULL otherwise. 11588 **/ 11589 struct lpfc_cq_event * 11590 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 11591 { 11592 struct lpfc_cq_event *cq_event; 11593 unsigned long iflags; 11594 11595 spin_lock_irqsave(&phba->hbalock, iflags); 11596 cq_event = __lpfc_sli4_cq_event_alloc(phba); 11597 spin_unlock_irqrestore(&phba->hbalock, iflags); 11598 return cq_event; 11599 } 11600 11601 /** 11602 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 11603 * @phba: pointer to lpfc hba data structure. 11604 * @cq_event: pointer to the completion queue event to be freed. 11605 * 11606 * This routine is the lock free version of the API invoked to release a 11607 * completion-queue event back into the free pool. 11608 **/ 11609 void 11610 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 11611 struct lpfc_cq_event *cq_event) 11612 { 11613 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool); 11614 } 11615 11616 /** 11617 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 11618 * @phba: pointer to lpfc hba data structure. 11619 * @cq_event: pointer to the completion queue event to be freed. 11620 * 11621 * This routine is the lock version of the API invoked to release a 11622 * completion-queue event back into the free pool. 11623 **/ 11624 void 11625 lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 11626 struct lpfc_cq_event *cq_event) 11627 { 11628 unsigned long iflags; 11629 spin_lock_irqsave(&phba->hbalock, iflags); 11630 __lpfc_sli4_cq_event_release(phba, cq_event); 11631 spin_unlock_irqrestore(&phba->hbalock, iflags); 11632 } 11633 11634 /** 11635 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool 11636 * @phba: pointer to lpfc hba data structure. 11637 * 11638 * This routine is to free all the pending completion-queue events to the 11639 * back into the free pool for device reset. 11640 **/ 11641 static void 11642 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba) 11643 { 11644 LIST_HEAD(cq_event_list); 11645 struct lpfc_cq_event *cq_event; 11646 unsigned long iflags; 11647 11648 /* Retrieve all the pending WCQEs from pending WCQE lists */ 11649 11650 /* Pending ELS XRI abort events */ 11651 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags); 11652 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue, 11653 &cq_event_list); 11654 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags); 11655 11656 /* Pending asynnc events */ 11657 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); 11658 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue, 11659 &cq_event_list); 11660 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags); 11661 11662 while (!list_empty(&cq_event_list)) { 11663 list_remove_head(&cq_event_list, cq_event, 11664 struct lpfc_cq_event, list); 11665 lpfc_sli4_cq_event_release(phba, cq_event); 11666 } 11667 } 11668 11669 /** 11670 * lpfc_pci_function_reset - Reset pci function. 11671 * @phba: pointer to lpfc hba data structure. 11672 * 11673 * This routine is invoked to request a PCI function reset. It will destroys 11674 * all resources assigned to the PCI function which originates this request. 11675 * 11676 * Return codes 11677 * 0 - successful 11678 * -ENOMEM - No available memory 11679 * -EIO - The mailbox failed to complete successfully. 
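 *
 * For if_type 0 the reset is requested with the SLI_FUNCTION_RESET
 * mailbox command. For if_type 2 and 6 the routine polls the port status
 * register for RDY (up to 30 seconds), writes INIT_PORT to the control
 * register, and then waits for the port to report ready again.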
11680 **/ 11681 int 11682 lpfc_pci_function_reset(struct lpfc_hba *phba) 11683 { 11684 LPFC_MBOXQ_t *mboxq; 11685 uint32_t rc = 0, if_type; 11686 uint32_t shdr_status, shdr_add_status; 11687 uint32_t rdy_chk; 11688 uint32_t port_reset = 0; 11689 union lpfc_sli4_cfg_shdr *shdr; 11690 struct lpfc_register reg_data; 11691 uint16_t devid; 11692 11693 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 11694 switch (if_type) { 11695 case LPFC_SLI_INTF_IF_TYPE_0: 11696 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 11697 GFP_KERNEL); 11698 if (!mboxq) { 11699 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11700 "0494 Unable to allocate memory for " 11701 "issuing SLI_FUNCTION_RESET mailbox " 11702 "command\n"); 11703 return -ENOMEM; 11704 } 11705 11706 /* Setup PCI function reset mailbox-ioctl command */ 11707 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 11708 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0, 11709 LPFC_SLI4_MBX_EMBED); 11710 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 11711 shdr = (union lpfc_sli4_cfg_shdr *) 11712 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 11713 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 11714 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 11715 &shdr->response); 11716 mempool_free(mboxq, phba->mbox_mem_pool); 11717 if (shdr_status || shdr_add_status || rc) { 11718 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11719 "0495 SLI_FUNCTION_RESET mailbox " 11720 "failed with status x%x add_status x%x," 11721 " mbx status x%x\n", 11722 shdr_status, shdr_add_status, rc); 11723 rc = -ENXIO; 11724 } 11725 break; 11726 case LPFC_SLI_INTF_IF_TYPE_2: 11727 case LPFC_SLI_INTF_IF_TYPE_6: 11728 wait: 11729 /* 11730 * Poll the Port Status Register and wait for RDY for 11731 * up to 30 seconds. If the port doesn't respond, treat 11732 * it as an error. 11733 */ 11734 for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) { 11735 if (lpfc_readl(phba->sli4_hba.u.if_type2. 11736 STATUSregaddr, ®_data.word0)) { 11737 rc = -ENODEV; 11738 goto out; 11739 } 11740 if (bf_get(lpfc_sliport_status_rdy, ®_data)) 11741 break; 11742 msleep(20); 11743 } 11744 11745 if (!bf_get(lpfc_sliport_status_rdy, ®_data)) { 11746 phba->work_status[0] = readl( 11747 phba->sli4_hba.u.if_type2.ERR1regaddr); 11748 phba->work_status[1] = readl( 11749 phba->sli4_hba.u.if_type2.ERR2regaddr); 11750 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11751 "2890 Port not ready, port status reg " 11752 "0x%x error 1=0x%x, error 2=0x%x\n", 11753 reg_data.word0, 11754 phba->work_status[0], 11755 phba->work_status[1]); 11756 rc = -ENODEV; 11757 goto out; 11758 } 11759 11760 if (bf_get(lpfc_sliport_status_pldv, ®_data)) 11761 lpfc_pldv_detect = true; 11762 11763 if (!port_reset) { 11764 /* 11765 * Reset the port now 11766 */ 11767 reg_data.word0 = 0; 11768 bf_set(lpfc_sliport_ctrl_end, ®_data, 11769 LPFC_SLIPORT_LITTLE_ENDIAN); 11770 bf_set(lpfc_sliport_ctrl_ip, ®_data, 11771 LPFC_SLIPORT_INIT_PORT); 11772 writel(reg_data.word0, phba->sli4_hba.u.if_type2. 11773 CTRLregaddr); 11774 /* flush */ 11775 pci_read_config_word(phba->pcidev, 11776 PCI_DEVICE_ID, &devid); 11777 11778 port_reset = 1; 11779 msleep(20); 11780 goto wait; 11781 } else if (bf_get(lpfc_sliport_status_rn, ®_data)) { 11782 rc = -ENODEV; 11783 goto out; 11784 } 11785 break; 11786 11787 case LPFC_SLI_INTF_IF_TYPE_1: 11788 default: 11789 break; 11790 } 11791 11792 out: 11793 /* Catch the not-ready port failure after a port reset. 
*/ 11794 if (rc) { 11795 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11796 "3317 HBA not functional: IP Reset Failed " 11797 "try: echo fw_reset > board_mode\n"); 11798 rc = -ENODEV; 11799 } 11800 11801 return rc; 11802 } 11803 11804 /** 11805 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space. 11806 * @phba: pointer to lpfc hba data structure. 11807 * 11808 * This routine is invoked to set up the PCI device memory space for device 11809 * with SLI-4 interface spec. 11810 * 11811 * Return codes 11812 * 0 - successful 11813 * other values - error 11814 **/ 11815 static int 11816 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) 11817 { 11818 struct pci_dev *pdev = phba->pcidev; 11819 unsigned long bar0map_len, bar1map_len, bar2map_len; 11820 int error; 11821 uint32_t if_type; 11822 11823 if (!pdev) 11824 return -ENODEV; 11825 11826 /* Set the device DMA mask size */ 11827 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 11828 if (error) 11829 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 11830 if (error) 11831 return error; 11832 11833 /* 11834 * The BARs and register set definitions and offset locations are 11835 * dependent on the if_type. 11836 */ 11837 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, 11838 &phba->sli4_hba.sli_intf.word0)) { 11839 return -ENODEV; 11840 } 11841 11842 /* There is no SLI3 failback for SLI4 devices. */ 11843 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) != 11844 LPFC_SLI_INTF_VALID) { 11845 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11846 "2894 SLI_INTF reg contents invalid " 11847 "sli_intf reg 0x%x\n", 11848 phba->sli4_hba.sli_intf.word0); 11849 return -ENODEV; 11850 } 11851 11852 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 11853 /* 11854 * Get the bus address of SLI4 device Bar regions and the 11855 * number of bytes required by each mapping. The mapping of the 11856 * particular PCI BARs regions is dependent on the type of 11857 * SLI4 device. 
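 * PCI_64BIT_BAR0 always carries the SLI4 config registers. PCI_64BIT_BAR2
 * maps the control registers on if_type 0 and the doorbell registers on
 * if_type 6. PCI_64BIT_BAR4 maps the doorbell registers on if_type 0 and
 * the DPP region on if_type 6.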
11858 */ 11859 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) { 11860 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0); 11861 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0); 11862 11863 /* 11864 * Map SLI4 PCI Config Space Register base to a kernel virtual 11865 * addr 11866 */ 11867 phba->sli4_hba.conf_regs_memmap_p = 11868 ioremap(phba->pci_bar0_map, bar0map_len); 11869 if (!phba->sli4_hba.conf_regs_memmap_p) { 11870 dev_printk(KERN_ERR, &pdev->dev, 11871 "ioremap failed for SLI4 PCI config " 11872 "registers.\n"); 11873 return -ENODEV; 11874 } 11875 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p; 11876 /* Set up BAR0 PCI config space register memory map */ 11877 lpfc_sli4_bar0_register_memmap(phba, if_type); 11878 } else { 11879 phba->pci_bar0_map = pci_resource_start(pdev, 1); 11880 bar0map_len = pci_resource_len(pdev, 1); 11881 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 11882 dev_printk(KERN_ERR, &pdev->dev, 11883 "FATAL - No BAR0 mapping for SLI4, if_type 2\n"); 11884 return -ENODEV; 11885 } 11886 phba->sli4_hba.conf_regs_memmap_p = 11887 ioremap(phba->pci_bar0_map, bar0map_len); 11888 if (!phba->sli4_hba.conf_regs_memmap_p) { 11889 dev_printk(KERN_ERR, &pdev->dev, 11890 "ioremap failed for SLI4 PCI config " 11891 "registers.\n"); 11892 return -ENODEV; 11893 } 11894 lpfc_sli4_bar0_register_memmap(phba, if_type); 11895 } 11896 11897 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { 11898 if (pci_resource_start(pdev, PCI_64BIT_BAR2)) { 11899 /* 11900 * Map SLI4 if type 0 HBA Control Register base to a 11901 * kernel virtual address and setup the registers. 11902 */ 11903 phba->pci_bar1_map = pci_resource_start(pdev, 11904 PCI_64BIT_BAR2); 11905 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); 11906 phba->sli4_hba.ctrl_regs_memmap_p = 11907 ioremap(phba->pci_bar1_map, 11908 bar1map_len); 11909 if (!phba->sli4_hba.ctrl_regs_memmap_p) { 11910 dev_err(&pdev->dev, 11911 "ioremap failed for SLI4 HBA " 11912 "control registers.\n"); 11913 error = -ENOMEM; 11914 goto out_iounmap_conf; 11915 } 11916 phba->pci_bar2_memmap_p = 11917 phba->sli4_hba.ctrl_regs_memmap_p; 11918 lpfc_sli4_bar1_register_memmap(phba, if_type); 11919 } else { 11920 error = -ENOMEM; 11921 goto out_iounmap_conf; 11922 } 11923 } 11924 11925 if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) && 11926 (pci_resource_start(pdev, PCI_64BIT_BAR2))) { 11927 /* 11928 * Map SLI4 if type 6 HBA Doorbell Register base to a kernel 11929 * virtual address and setup the registers. 11930 */ 11931 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2); 11932 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); 11933 phba->sli4_hba.drbl_regs_memmap_p = 11934 ioremap(phba->pci_bar1_map, bar1map_len); 11935 if (!phba->sli4_hba.drbl_regs_memmap_p) { 11936 dev_err(&pdev->dev, 11937 "ioremap failed for SLI4 HBA doorbell registers.\n"); 11938 error = -ENOMEM; 11939 goto out_iounmap_conf; 11940 } 11941 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p; 11942 lpfc_sli4_bar1_register_memmap(phba, if_type); 11943 } 11944 11945 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { 11946 if (pci_resource_start(pdev, PCI_64BIT_BAR4)) { 11947 /* 11948 * Map SLI4 if type 0 HBA Doorbell Register base to 11949 * a kernel virtual address and setup the registers. 
11950 */ 11951 phba->pci_bar2_map = pci_resource_start(pdev, 11952 PCI_64BIT_BAR4); 11953 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); 11954 phba->sli4_hba.drbl_regs_memmap_p = 11955 ioremap(phba->pci_bar2_map, 11956 bar2map_len); 11957 if (!phba->sli4_hba.drbl_regs_memmap_p) { 11958 dev_err(&pdev->dev, 11959 "ioremap failed for SLI4 HBA" 11960 " doorbell registers.\n"); 11961 error = -ENOMEM; 11962 goto out_iounmap_ctrl; 11963 } 11964 phba->pci_bar4_memmap_p = 11965 phba->sli4_hba.drbl_regs_memmap_p; 11966 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); 11967 if (error) 11968 goto out_iounmap_all; 11969 } else { 11970 error = -ENOMEM; 11971 goto out_iounmap_all; 11972 } 11973 } 11974 11975 if (if_type == LPFC_SLI_INTF_IF_TYPE_6 && 11976 pci_resource_start(pdev, PCI_64BIT_BAR4)) { 11977 /* 11978 * Map SLI4 if type 6 HBA DPP Register base to a kernel 11979 * virtual address and setup the registers. 11980 */ 11981 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4); 11982 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); 11983 phba->sli4_hba.dpp_regs_memmap_p = 11984 ioremap(phba->pci_bar2_map, bar2map_len); 11985 if (!phba->sli4_hba.dpp_regs_memmap_p) { 11986 dev_err(&pdev->dev, 11987 "ioremap failed for SLI4 HBA dpp registers.\n"); 11988 error = -ENOMEM; 11989 goto out_iounmap_ctrl; 11990 } 11991 phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p; 11992 } 11993 11994 /* Set up the EQ/CQ register handeling functions now */ 11995 switch (if_type) { 11996 case LPFC_SLI_INTF_IF_TYPE_0: 11997 case LPFC_SLI_INTF_IF_TYPE_2: 11998 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr; 11999 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db; 12000 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db; 12001 break; 12002 case LPFC_SLI_INTF_IF_TYPE_6: 12003 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr; 12004 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db; 12005 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db; 12006 break; 12007 default: 12008 break; 12009 } 12010 12011 return 0; 12012 12013 out_iounmap_all: 12014 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 12015 out_iounmap_ctrl: 12016 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 12017 out_iounmap_conf: 12018 iounmap(phba->sli4_hba.conf_regs_memmap_p); 12019 12020 return error; 12021 } 12022 12023 /** 12024 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space. 12025 * @phba: pointer to lpfc hba data structure. 12026 * 12027 * This routine is invoked to unset the PCI device memory space for device 12028 * with SLI-4 interface spec. 
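 * Only the regions that lpfc_sli4_pci_mem_setup() mapped for the detected
 * if_type are unmapped here (config, control, doorbell and DPP regions as
 * applicable).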
12029 **/ 12030 static void 12031 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) 12032 { 12033 uint32_t if_type; 12034 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 12035 12036 switch (if_type) { 12037 case LPFC_SLI_INTF_IF_TYPE_0: 12038 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 12039 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 12040 iounmap(phba->sli4_hba.conf_regs_memmap_p); 12041 break; 12042 case LPFC_SLI_INTF_IF_TYPE_2: 12043 iounmap(phba->sli4_hba.conf_regs_memmap_p); 12044 break; 12045 case LPFC_SLI_INTF_IF_TYPE_6: 12046 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 12047 iounmap(phba->sli4_hba.conf_regs_memmap_p); 12048 if (phba->sli4_hba.dpp_regs_memmap_p) 12049 iounmap(phba->sli4_hba.dpp_regs_memmap_p); 12050 break; 12051 case LPFC_SLI_INTF_IF_TYPE_1: 12052 default: 12053 dev_printk(KERN_ERR, &phba->pcidev->dev, 12054 "FATAL - unsupported SLI4 interface type - %d\n", 12055 if_type); 12056 break; 12057 } 12058 } 12059 12060 /** 12061 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device 12062 * @phba: pointer to lpfc hba data structure. 12063 * 12064 * This routine is invoked to enable the MSI-X interrupt vectors to device 12065 * with SLI-3 interface specs. 12066 * 12067 * Return codes 12068 * 0 - successful 12069 * other values - error 12070 **/ 12071 static int 12072 lpfc_sli_enable_msix(struct lpfc_hba *phba) 12073 { 12074 int rc; 12075 LPFC_MBOXQ_t *pmb; 12076 12077 /* Set up MSI-X multi-message vectors */ 12078 rc = pci_alloc_irq_vectors(phba->pcidev, 12079 LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX); 12080 if (rc < 0) { 12081 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12082 "0420 PCI enable MSI-X failed (%d)\n", rc); 12083 goto vec_fail_out; 12084 } 12085 12086 /* 12087 * Assign MSI-X vectors to interrupt handlers 12088 */ 12089 12090 /* vector-0 is associated to slow-path handler */ 12091 rc = request_irq(pci_irq_vector(phba->pcidev, 0), 12092 &lpfc_sli_sp_intr_handler, 0, 12093 LPFC_SP_DRIVER_HANDLER_NAME, phba); 12094 if (rc) { 12095 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 12096 "0421 MSI-X slow-path request_irq failed " 12097 "(%d)\n", rc); 12098 goto msi_fail_out; 12099 } 12100 12101 /* vector-1 is associated to fast-path handler */ 12102 rc = request_irq(pci_irq_vector(phba->pcidev, 1), 12103 &lpfc_sli_fp_intr_handler, 0, 12104 LPFC_FP_DRIVER_HANDLER_NAME, phba); 12105 12106 if (rc) { 12107 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 12108 "0429 MSI-X fast-path request_irq failed " 12109 "(%d)\n", rc); 12110 goto irq_fail_out; 12111 } 12112 12113 /* 12114 * Configure HBA MSI-X attention conditions to messages 12115 */ 12116 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12117 12118 if (!pmb) { 12119 rc = -ENOMEM; 12120 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 12121 "0474 Unable to allocate memory for issuing " 12122 "MBOX_CONFIG_MSI command\n"); 12123 goto mem_fail_out; 12124 } 12125 rc = lpfc_config_msi(phba, pmb); 12126 if (rc) 12127 goto mbx_fail_out; 12128 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 12129 if (rc != MBX_SUCCESS) { 12130 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 12131 "0351 Config MSI mailbox command failed, " 12132 "mbxCmd x%x, mbxStatus x%x\n", 12133 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus); 12134 goto mbx_fail_out; 12135 } 12136 12137 /* Free memory allocated for mailbox command */ 12138 mempool_free(pmb, phba->mbox_mem_pool); 12139 return rc; 12140 12141 mbx_fail_out: 12142 /* Free memory allocated for mailbox command */ 12143 mempool_free(pmb, 
phba->mbox_mem_pool); 12144 12145 mem_fail_out: 12146 /* free the irq already requested */ 12147 free_irq(pci_irq_vector(phba->pcidev, 1), phba); 12148 12149 irq_fail_out: 12150 /* free the irq already requested */ 12151 free_irq(pci_irq_vector(phba->pcidev, 0), phba); 12152 12153 msi_fail_out: 12154 /* Unconfigure MSI-X capability structure */ 12155 pci_free_irq_vectors(phba->pcidev); 12156 12157 vec_fail_out: 12158 return rc; 12159 } 12160 12161 /** 12162 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device. 12163 * @phba: pointer to lpfc hba data structure. 12164 * 12165 * This routine is invoked to enable the MSI interrupt mode to device with 12166 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to 12167 * enable the MSI vector. The device driver is responsible for calling the 12168 * request_irq() to register MSI vector with a interrupt the handler, which 12169 * is done in this function. 12170 * 12171 * Return codes 12172 * 0 - successful 12173 * other values - error 12174 */ 12175 static int 12176 lpfc_sli_enable_msi(struct lpfc_hba *phba) 12177 { 12178 int rc; 12179 12180 rc = pci_enable_msi(phba->pcidev); 12181 if (!rc) 12182 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12183 "0012 PCI enable MSI mode success.\n"); 12184 else { 12185 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12186 "0471 PCI enable MSI mode failed (%d)\n", rc); 12187 return rc; 12188 } 12189 12190 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 12191 0, LPFC_DRIVER_NAME, phba); 12192 if (rc) { 12193 pci_disable_msi(phba->pcidev); 12194 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 12195 "0478 MSI request_irq failed (%d)\n", rc); 12196 } 12197 return rc; 12198 } 12199 12200 /** 12201 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device. 12202 * @phba: pointer to lpfc hba data structure. 12203 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X). 12204 * 12205 * This routine is invoked to enable device interrupt and associate driver's 12206 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface 12207 * spec. Depends on the interrupt mode configured to the driver, the driver 12208 * will try to fallback from the configured interrupt mode to an interrupt 12209 * mode which is supported by the platform, kernel, and device in the order 12210 * of: 12211 * MSI-X -> MSI -> IRQ. 
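 * The returned value encodes the mode actually established: 2 for MSI-X,
 * 1 for MSI, 0 for INTx, or LPFC_INTR_ERROR if no interrupt mode could be
 * configured.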
12212 * 12213 * Return codes 12214 * 0 - successful 12215 * other values - error 12216 **/ 12217 static uint32_t 12218 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 12219 { 12220 uint32_t intr_mode = LPFC_INTR_ERROR; 12221 int retval; 12222 12223 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ 12224 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3); 12225 if (retval) 12226 return intr_mode; 12227 phba->hba_flag &= ~HBA_NEEDS_CFG_PORT; 12228 12229 if (cfg_mode == 2) { 12230 /* Now, try to enable MSI-X interrupt mode */ 12231 retval = lpfc_sli_enable_msix(phba); 12232 if (!retval) { 12233 /* Indicate initialization to MSI-X mode */ 12234 phba->intr_type = MSIX; 12235 intr_mode = 2; 12236 } 12237 } 12238 12239 /* Fallback to MSI if MSI-X initialization failed */ 12240 if (cfg_mode >= 1 && phba->intr_type == NONE) { 12241 retval = lpfc_sli_enable_msi(phba); 12242 if (!retval) { 12243 /* Indicate initialization to MSI mode */ 12244 phba->intr_type = MSI; 12245 intr_mode = 1; 12246 } 12247 } 12248 12249 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 12250 if (phba->intr_type == NONE) { 12251 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 12252 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 12253 if (!retval) { 12254 /* Indicate initialization to INTx mode */ 12255 phba->intr_type = INTx; 12256 intr_mode = 0; 12257 } 12258 } 12259 return intr_mode; 12260 } 12261 12262 /** 12263 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device. 12264 * @phba: pointer to lpfc hba data structure. 12265 * 12266 * This routine is invoked to disable device interrupt and disassociate the 12267 * driver's interrupt handler(s) from interrupt vector(s) to device with 12268 * SLI-3 interface spec. Depending on the interrupt mode, the driver will 12269 * release the interrupt vector(s) for the message signaled interrupt. 12270 **/ 12271 static void 12272 lpfc_sli_disable_intr(struct lpfc_hba *phba) 12273 { 12274 int nr_irqs, i; 12275 12276 if (phba->intr_type == MSIX) 12277 nr_irqs = LPFC_MSIX_VECTORS; 12278 else 12279 nr_irqs = 1; 12280 12281 for (i = 0; i < nr_irqs; i++) 12282 free_irq(pci_irq_vector(phba->pcidev, i), phba); 12283 pci_free_irq_vectors(phba->pcidev); 12284 12285 /* Reset interrupt management states */ 12286 phba->intr_type = NONE; 12287 phba->sli.slistat.sli_intr = 0; 12288 } 12289 12290 /** 12291 * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue 12292 * @phba: pointer to lpfc hba data structure. 12293 * @id: EQ vector index or Hardware Queue index 12294 * @match: LPFC_FIND_BY_EQ = match by EQ 12295 * LPFC_FIND_BY_HDWQ = match by Hardware Queue 12296 * Return the CPU that matches the selection criteria 12297 */ 12298 static uint16_t 12299 lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match) 12300 { 12301 struct lpfc_vector_map_info *cpup; 12302 int cpu; 12303 12304 /* Loop through all CPUs */ 12305 for_each_present_cpu(cpu) { 12306 cpup = &phba->sli4_hba.cpu_map[cpu]; 12307 12308 /* If we are matching by EQ, there may be multiple CPUs using 12309 * using the same vector, so select the one with 12310 * LPFC_CPU_FIRST_IRQ set. 
12311 */ 12312 if ((match == LPFC_FIND_BY_EQ) && 12313 (cpup->flag & LPFC_CPU_FIRST_IRQ) && 12314 (cpup->eq == id)) 12315 return cpu; 12316 12317 /* If matching by HDWQ, select the first CPU that matches */ 12318 if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id)) 12319 return cpu; 12320 } 12321 return 0; 12322 } 12323 12324 #ifdef CONFIG_X86 12325 /** 12326 * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded 12327 * @phba: pointer to lpfc hba data structure. 12328 * @cpu: CPU map index 12329 * @phys_id: CPU package physical id 12330 * @core_id: CPU core id 12331 */ 12332 static int 12333 lpfc_find_hyper(struct lpfc_hba *phba, int cpu, 12334 uint16_t phys_id, uint16_t core_id) 12335 { 12336 struct lpfc_vector_map_info *cpup; 12337 int idx; 12338 12339 for_each_present_cpu(idx) { 12340 cpup = &phba->sli4_hba.cpu_map[idx]; 12341 /* Does the cpup match the one we are looking for */ 12342 if ((cpup->phys_id == phys_id) && 12343 (cpup->core_id == core_id) && 12344 (cpu != idx)) 12345 return 1; 12346 } 12347 return 0; 12348 } 12349 #endif 12350 12351 /* 12352 * lpfc_assign_eq_map_info - Assigns eq for vector_map structure 12353 * @phba: pointer to lpfc hba data structure. 12354 * @eqidx: index for eq and irq vector 12355 * @flag: flags to set for vector_map structure 12356 * @cpu: cpu used to index vector_map structure 12357 * 12358 * The routine assigns eq info into vector_map structure 12359 */ 12360 static inline void 12361 lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag, 12362 unsigned int cpu) 12363 { 12364 struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu]; 12365 struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx); 12366 12367 cpup->eq = eqidx; 12368 cpup->flag |= flag; 12369 12370 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12371 "3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n", 12372 cpu, eqhdl->irq, cpup->eq, cpup->flag); 12373 } 12374 12375 /** 12376 * lpfc_cpu_map_array_init - Initialize cpu_map structure 12377 * @phba: pointer to lpfc hba data structure. 12378 * 12379 * The routine initializes the cpu_map array structure 12380 */ 12381 static void 12382 lpfc_cpu_map_array_init(struct lpfc_hba *phba) 12383 { 12384 struct lpfc_vector_map_info *cpup; 12385 struct lpfc_eq_intr_info *eqi; 12386 int cpu; 12387 12388 for_each_possible_cpu(cpu) { 12389 cpup = &phba->sli4_hba.cpu_map[cpu]; 12390 cpup->phys_id = LPFC_VECTOR_MAP_EMPTY; 12391 cpup->core_id = LPFC_VECTOR_MAP_EMPTY; 12392 cpup->hdwq = LPFC_VECTOR_MAP_EMPTY; 12393 cpup->eq = LPFC_VECTOR_MAP_EMPTY; 12394 cpup->flag = 0; 12395 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu); 12396 INIT_LIST_HEAD(&eqi->list); 12397 eqi->icnt = 0; 12398 } 12399 } 12400 12401 /** 12402 * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure 12403 * @phba: pointer to lpfc hba data structure. 12404 * 12405 * The routine initializes the hba_eq_hdl array structure 12406 */ 12407 static void 12408 lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba) 12409 { 12410 struct lpfc_hba_eq_hdl *eqhdl; 12411 int i; 12412 12413 for (i = 0; i < phba->cfg_irq_chann; i++) { 12414 eqhdl = lpfc_get_eq_hdl(i); 12415 eqhdl->irq = LPFC_IRQ_EMPTY; 12416 eqhdl->phba = phba; 12417 } 12418 } 12419 12420 /** 12421 * lpfc_cpu_affinity_check - Check vector CPU affinity mappings 12422 * @phba: pointer to lpfc hba data structure. 12423 * @vectors: number of msix vectors allocated. 12424 * 12425 * The routine will figure out the CPU affinity assignment for every 12426 * MSI-X vector allocated for the HBA. 
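 * CPUs left unassigned by the kernel's vector affinity first inherit the
 * EQ of a peer CPU on the same phys_id, then of any assigned CPU. CPUs
 * flagged LPFC_CPU_FIRST_IRQ then receive unique hdwq indices, and the
 * remaining CPUs share hdwqs, preferring a peer on the same phys_id and
 * core_id before falling back to round-robin.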
12427 * In addition, the CPU to IO channel mapping will be calculated 12428 * and the phba->sli4_hba.cpu_map array will reflect this. 12429 */ 12430 static void 12431 lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors) 12432 { 12433 int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu; 12434 int max_phys_id, min_phys_id; 12435 int max_core_id, min_core_id; 12436 struct lpfc_vector_map_info *cpup; 12437 struct lpfc_vector_map_info *new_cpup; 12438 #ifdef CONFIG_X86 12439 struct cpuinfo_x86 *cpuinfo; 12440 #endif 12441 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 12442 struct lpfc_hdwq_stat *c_stat; 12443 #endif 12444 12445 max_phys_id = 0; 12446 min_phys_id = LPFC_VECTOR_MAP_EMPTY; 12447 max_core_id = 0; 12448 min_core_id = LPFC_VECTOR_MAP_EMPTY; 12449 12450 /* Update CPU map with physical id and core id of each CPU */ 12451 for_each_present_cpu(cpu) { 12452 cpup = &phba->sli4_hba.cpu_map[cpu]; 12453 #ifdef CONFIG_X86 12454 cpuinfo = &cpu_data(cpu); 12455 cpup->phys_id = cpuinfo->phys_proc_id; 12456 cpup->core_id = cpuinfo->cpu_core_id; 12457 if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id)) 12458 cpup->flag |= LPFC_CPU_MAP_HYPER; 12459 #else 12460 /* No distinction between CPUs for other platforms */ 12461 cpup->phys_id = 0; 12462 cpup->core_id = cpu; 12463 #endif 12464 12465 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12466 "3328 CPU %d physid %d coreid %d flag x%x\n", 12467 cpu, cpup->phys_id, cpup->core_id, cpup->flag); 12468 12469 if (cpup->phys_id > max_phys_id) 12470 max_phys_id = cpup->phys_id; 12471 if (cpup->phys_id < min_phys_id) 12472 min_phys_id = cpup->phys_id; 12473 12474 if (cpup->core_id > max_core_id) 12475 max_core_id = cpup->core_id; 12476 if (cpup->core_id < min_core_id) 12477 min_core_id = cpup->core_id; 12478 } 12479 12480 /* After looking at each irq vector assigned to this pcidev, its 12481 * possible to see that not ALL CPUs have been accounted for. 12482 * Next we will set any unassigned (unaffinitized) cpu map 12483 * entries to a IRQ on the same phys_id. 12484 */ 12485 first_cpu = cpumask_first(cpu_present_mask); 12486 start_cpu = first_cpu; 12487 12488 for_each_present_cpu(cpu) { 12489 cpup = &phba->sli4_hba.cpu_map[cpu]; 12490 12491 /* Is this CPU entry unassigned */ 12492 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) { 12493 /* Mark CPU as IRQ not assigned by the kernel */ 12494 cpup->flag |= LPFC_CPU_MAP_UNASSIGN; 12495 12496 /* If so, find a new_cpup thats on the the SAME 12497 * phys_id as cpup. start_cpu will start where we 12498 * left off so all unassigned entries don't get assgined 12499 * the IRQ of the first entry. 12500 */ 12501 new_cpu = start_cpu; 12502 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 12503 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; 12504 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) && 12505 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) && 12506 (new_cpup->phys_id == cpup->phys_id)) 12507 goto found_same; 12508 new_cpu = cpumask_next( 12509 new_cpu, cpu_present_mask); 12510 if (new_cpu == nr_cpumask_bits) 12511 new_cpu = first_cpu; 12512 } 12513 /* At this point, we leave the CPU as unassigned */ 12514 continue; 12515 found_same: 12516 /* We found a matching phys_id, so copy the IRQ info */ 12517 cpup->eq = new_cpup->eq; 12518 12519 /* Bump start_cpu to the next slot to minmize the 12520 * chance of having multiple unassigned CPU entries 12521 * selecting the same IRQ. 
12522 */ 12523 start_cpu = cpumask_next(new_cpu, cpu_present_mask); 12524 if (start_cpu == nr_cpumask_bits) 12525 start_cpu = first_cpu; 12526 12527 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12528 "3337 Set Affinity: CPU %d " 12529 "eq %d from peer cpu %d same " 12530 "phys_id (%d)\n", 12531 cpu, cpup->eq, new_cpu, 12532 cpup->phys_id); 12533 } 12534 } 12535 12536 /* Set any unassigned cpu map entries to a IRQ on any phys_id */ 12537 start_cpu = first_cpu; 12538 12539 for_each_present_cpu(cpu) { 12540 cpup = &phba->sli4_hba.cpu_map[cpu]; 12541 12542 /* Is this entry unassigned */ 12543 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) { 12544 /* Mark it as IRQ not assigned by the kernel */ 12545 cpup->flag |= LPFC_CPU_MAP_UNASSIGN; 12546 12547 /* If so, find a new_cpup thats on ANY phys_id 12548 * as the cpup. start_cpu will start where we 12549 * left off so all unassigned entries don't get 12550 * assigned the IRQ of the first entry. 12551 */ 12552 new_cpu = start_cpu; 12553 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 12554 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; 12555 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) && 12556 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY)) 12557 goto found_any; 12558 new_cpu = cpumask_next( 12559 new_cpu, cpu_present_mask); 12560 if (new_cpu == nr_cpumask_bits) 12561 new_cpu = first_cpu; 12562 } 12563 /* We should never leave an entry unassigned */ 12564 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12565 "3339 Set Affinity: CPU %d " 12566 "eq %d UNASSIGNED\n", 12567 cpup->hdwq, cpup->eq); 12568 continue; 12569 found_any: 12570 /* We found an available entry, copy the IRQ info */ 12571 cpup->eq = new_cpup->eq; 12572 12573 /* Bump start_cpu to the next slot to minmize the 12574 * chance of having multiple unassigned CPU entries 12575 * selecting the same IRQ. 12576 */ 12577 start_cpu = cpumask_next(new_cpu, cpu_present_mask); 12578 if (start_cpu == nr_cpumask_bits) 12579 start_cpu = first_cpu; 12580 12581 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12582 "3338 Set Affinity: CPU %d " 12583 "eq %d from peer cpu %d (%d/%d)\n", 12584 cpu, cpup->eq, new_cpu, 12585 new_cpup->phys_id, new_cpup->core_id); 12586 } 12587 } 12588 12589 /* Assign hdwq indices that are unique across all cpus in the map 12590 * that are also FIRST_CPUs. 12591 */ 12592 idx = 0; 12593 for_each_present_cpu(cpu) { 12594 cpup = &phba->sli4_hba.cpu_map[cpu]; 12595 12596 /* Only FIRST IRQs get a hdwq index assignment. */ 12597 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) 12598 continue; 12599 12600 /* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */ 12601 cpup->hdwq = idx; 12602 idx++; 12603 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12604 "3333 Set Affinity: CPU %d (phys %d core %d): " 12605 "hdwq %d eq %d flg x%x\n", 12606 cpu, cpup->phys_id, cpup->core_id, 12607 cpup->hdwq, cpup->eq, cpup->flag); 12608 } 12609 /* Associate a hdwq with each cpu_map entry 12610 * This will be 1 to 1 - hdwq to cpu, unless there are less 12611 * hardware queues then CPUs. For that case we will just round-robin 12612 * the available hardware queues as they get assigned to CPUs. 12613 * The next_idx is the idx from the FIRST_CPU loop above to account 12614 * for irq_chann < hdwq. The idx is used for round-robin assignments 12615 * and needs to start at 0. 12616 */ 12617 next_idx = idx; 12618 start_cpu = 0; 12619 idx = 0; 12620 for_each_present_cpu(cpu) { 12621 cpup = &phba->sli4_hba.cpu_map[cpu]; 12622 12623 /* FIRST cpus are already mapped. 
*/ 12624 if (cpup->flag & LPFC_CPU_FIRST_IRQ) 12625 continue; 12626 12627 /* If the cfg_irq_chann < cfg_hdw_queue, set the hdwq 12628 * of the unassigned cpus to the next idx so that all 12629 * hdw queues are fully utilized. 12630 */ 12631 if (next_idx < phba->cfg_hdw_queue) { 12632 cpup->hdwq = next_idx; 12633 next_idx++; 12634 continue; 12635 } 12636 12637 /* Not a First CPU and all hdw_queues are used. Reuse a 12638 * Hardware Queue for another CPU, so be smart about it 12639 * and pick one that has its IRQ/EQ mapped to the same phys_id 12640 * (CPU package) and core_id. 12641 */ 12642 new_cpu = start_cpu; 12643 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 12644 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; 12645 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY && 12646 new_cpup->phys_id == cpup->phys_id && 12647 new_cpup->core_id == cpup->core_id) { 12648 goto found_hdwq; 12649 } 12650 new_cpu = cpumask_next(new_cpu, cpu_present_mask); 12651 if (new_cpu == nr_cpumask_bits) 12652 new_cpu = first_cpu; 12653 } 12654 12655 /* If we can't match both phys_id and core_id, 12656 * settle for just a phys_id match. 12657 */ 12658 new_cpu = start_cpu; 12659 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 12660 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; 12661 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY && 12662 new_cpup->phys_id == cpup->phys_id) 12663 goto found_hdwq; 12664 12665 new_cpu = cpumask_next(new_cpu, cpu_present_mask); 12666 if (new_cpu == nr_cpumask_bits) 12667 new_cpu = first_cpu; 12668 } 12669 12670 /* Otherwise just round robin on cfg_hdw_queue */ 12671 cpup->hdwq = idx % phba->cfg_hdw_queue; 12672 idx++; 12673 goto logit; 12674 found_hdwq: 12675 /* We found an available entry, copy the IRQ info */ 12676 start_cpu = cpumask_next(new_cpu, cpu_present_mask); 12677 if (start_cpu == nr_cpumask_bits) 12678 start_cpu = first_cpu; 12679 cpup->hdwq = new_cpup->hdwq; 12680 logit: 12681 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12682 "3335 Set Affinity: CPU %d (phys %d core %d): " 12683 "hdwq %d eq %d flg x%x\n", 12684 cpu, cpup->phys_id, cpup->core_id, 12685 cpup->hdwq, cpup->eq, cpup->flag); 12686 } 12687 12688 /* 12689 * Initialize the cpu_map slots for not-present cpus in case 12690 * a cpu is hot-added. Perform a simple hdwq round robin assignment. 12691 */ 12692 idx = 0; 12693 for_each_possible_cpu(cpu) { 12694 cpup = &phba->sli4_hba.cpu_map[cpu]; 12695 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 12696 c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu); 12697 c_stat->hdwq_no = cpup->hdwq; 12698 #endif 12699 if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY) 12700 continue; 12701 12702 cpup->hdwq = idx++ % phba->cfg_hdw_queue; 12703 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 12704 c_stat->hdwq_no = cpup->hdwq; 12705 #endif 12706 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12707 "3340 Set Affinity: not present " 12708 "CPU %d hdwq %d\n", 12709 cpu, cpup->hdwq); 12710 } 12711 12712 /* The cpu_map array will be used later during initialization 12713 * when EQ / CQ / WQs are allocated and configured. 12714 */ 12715 return; 12716 } 12717 12718 /** 12719 * lpfc_cpuhp_get_eq 12720 * 12721 * @phba: pointer to lpfc hba data structure. 
12722  * @cpu: cpu going offline
12723  * @eqlist: eq list to append to
12724  */
12725 static int
12726 lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
12727 		  struct list_head *eqlist)
12728 {
12729 	const struct cpumask *maskp;
12730 	struct lpfc_queue *eq;
12731 	struct cpumask *tmp;
12732 	u16 idx;
12733 
12734 	tmp = kzalloc(cpumask_size(), GFP_KERNEL);
12735 	if (!tmp)
12736 		return -ENOMEM;
12737 
12738 	for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
12739 		maskp = pci_irq_get_affinity(phba->pcidev, idx);
12740 		if (!maskp)
12741 			continue;
12742 		/*
12743 		 * If the irq is not affinitized to the cpu going
12744 		 * offline, then we don't need to poll the eq attached
12745 		 * to it.
12746 		 */
12747 		if (!cpumask_and(tmp, maskp, cpumask_of(cpu)))
12748 			continue;
12749 		/* Get the cpus that are online and are affinitized
12750 		 * to this irq vector. If the count is more than 1,
12751 		 * then cpuhp is not going to shut down this vector.
12752 		 * Since this cpu has not gone offline yet, we need
12753 		 * a count > 1.
12754 		 */
12755 		cpumask_and(tmp, maskp, cpu_online_mask);
12756 		if (cpumask_weight(tmp) > 1)
12757 			continue;
12758 
12759 		/* Now that we have an irq to shut down, get the eq
12760 		 * mapped to this irq. Note: multiple hdwq's in
12761 		 * the software can share an eq, but eventually
12762 		 * only one eq will be mapped to this vector.
12763 		 */
12764 		eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
12765 		list_add(&eq->_poll_list, eqlist);
12766 	}
12767 	kfree(tmp);
12768 	return 0;
12769 }
12770 
12771 static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
12772 {
12773 	if (phba->sli_rev != LPFC_SLI_REV4)
12774 		return;
12775 
12776 	cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state,
12777 					    &phba->cpuhp);
12778 	/*
12779 	 * Unregistering the instance doesn't stop the polling
12780 	 * timer. Wait for the poll timer to retire.
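	 * lpfc_cpuhp_add() arms that timer under rcu_read_lock(), so the
	 * synchronize_rcu() below lets any in-flight arming finish before
	 * del_timer_sync() is called.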
12781 */ 12782 synchronize_rcu(); 12783 del_timer_sync(&phba->cpuhp_poll_timer); 12784 } 12785 12786 static void lpfc_cpuhp_remove(struct lpfc_hba *phba) 12787 { 12788 if (phba->pport && (phba->pport->fc_flag & FC_OFFLINE_MODE)) 12789 return; 12790 12791 __lpfc_cpuhp_remove(phba); 12792 } 12793 12794 static void lpfc_cpuhp_add(struct lpfc_hba *phba) 12795 { 12796 if (phba->sli_rev != LPFC_SLI_REV4) 12797 return; 12798 12799 rcu_read_lock(); 12800 12801 if (!list_empty(&phba->poll_list)) 12802 mod_timer(&phba->cpuhp_poll_timer, 12803 jiffies + msecs_to_jiffies(LPFC_POLL_HB)); 12804 12805 rcu_read_unlock(); 12806 12807 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, 12808 &phba->cpuhp); 12809 } 12810 12811 static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval) 12812 { 12813 if (phba->pport->load_flag & FC_UNLOADING) { 12814 *retval = -EAGAIN; 12815 return true; 12816 } 12817 12818 if (phba->sli_rev != LPFC_SLI_REV4) { 12819 *retval = 0; 12820 return true; 12821 } 12822 12823 /* proceed with the hotplug */ 12824 return false; 12825 } 12826 12827 /** 12828 * lpfc_irq_set_aff - set IRQ affinity 12829 * @eqhdl: EQ handle 12830 * @cpu: cpu to set affinity 12831 * 12832 **/ 12833 static inline void 12834 lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu) 12835 { 12836 cpumask_clear(&eqhdl->aff_mask); 12837 cpumask_set_cpu(cpu, &eqhdl->aff_mask); 12838 irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING); 12839 irq_set_affinity(eqhdl->irq, &eqhdl->aff_mask); 12840 } 12841 12842 /** 12843 * lpfc_irq_clear_aff - clear IRQ affinity 12844 * @eqhdl: EQ handle 12845 * 12846 **/ 12847 static inline void 12848 lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl) 12849 { 12850 cpumask_clear(&eqhdl->aff_mask); 12851 irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING); 12852 } 12853 12854 /** 12855 * lpfc_irq_rebalance - rebalances IRQ affinity according to cpuhp event 12856 * @phba: pointer to HBA context object. 12857 * @cpu: cpu going offline/online 12858 * @offline: true, cpu is going offline. false, cpu is coming online. 12859 * 12860 * If cpu is going offline, we'll try our best effort to find the next 12861 * online cpu on the phba's original_mask and migrate all offlining IRQ 12862 * affinities. 12863 * 12864 * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu. 12865 * 12866 * Note: Call only if NUMA or NHT mode is enabled, otherwise rely on 12867 * PCI_IRQ_AFFINITY to auto-manage IRQ affinity. 
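 * For example, if the CPU that owns an EQ's FIRST_IRQ goes offline, every
 * eqhdl whose aff_mask contains that CPU is re-pointed at the next online
 * CPU in the original mask; if no online CPU remains there, the affinity
 * hint is simply cleared and irqbalance takes over.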
12868 * 12869 **/ 12870 static void 12871 lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline) 12872 { 12873 struct lpfc_vector_map_info *cpup; 12874 struct cpumask *aff_mask; 12875 unsigned int cpu_select, cpu_next, idx; 12876 const struct cpumask *orig_mask; 12877 12878 if (phba->irq_chann_mode == NORMAL_MODE) 12879 return; 12880 12881 orig_mask = &phba->sli4_hba.irq_aff_mask; 12882 12883 if (!cpumask_test_cpu(cpu, orig_mask)) 12884 return; 12885 12886 cpup = &phba->sli4_hba.cpu_map[cpu]; 12887 12888 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) 12889 return; 12890 12891 if (offline) { 12892 /* Find next online CPU on original mask */ 12893 cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true); 12894 cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next); 12895 12896 /* Found a valid CPU */ 12897 if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) { 12898 /* Go through each eqhdl and ensure offlining 12899 * cpu aff_mask is migrated 12900 */ 12901 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 12902 aff_mask = lpfc_get_aff_mask(idx); 12903 12904 /* Migrate affinity */ 12905 if (cpumask_test_cpu(cpu, aff_mask)) 12906 lpfc_irq_set_aff(lpfc_get_eq_hdl(idx), 12907 cpu_select); 12908 } 12909 } else { 12910 /* Rely on irqbalance if no online CPUs left on NUMA */ 12911 for (idx = 0; idx < phba->cfg_irq_chann; idx++) 12912 lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx)); 12913 } 12914 } else { 12915 /* Migrate affinity back to this CPU */ 12916 lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu); 12917 } 12918 } 12919 12920 static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node) 12921 { 12922 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp); 12923 struct lpfc_queue *eq, *next; 12924 LIST_HEAD(eqlist); 12925 int retval; 12926 12927 if (!phba) { 12928 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id()); 12929 return 0; 12930 } 12931 12932 if (__lpfc_cpuhp_checks(phba, &retval)) 12933 return retval; 12934 12935 lpfc_irq_rebalance(phba, cpu, true); 12936 12937 retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist); 12938 if (retval) 12939 return retval; 12940 12941 /* start polling on these eq's */ 12942 list_for_each_entry_safe(eq, next, &eqlist, _poll_list) { 12943 list_del_init(&eq->_poll_list); 12944 lpfc_sli4_start_polling(eq); 12945 } 12946 12947 return 0; 12948 } 12949 12950 static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node) 12951 { 12952 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp); 12953 struct lpfc_queue *eq, *next; 12954 unsigned int n; 12955 int retval; 12956 12957 if (!phba) { 12958 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id()); 12959 return 0; 12960 } 12961 12962 if (__lpfc_cpuhp_checks(phba, &retval)) 12963 return retval; 12964 12965 lpfc_irq_rebalance(phba, cpu, false); 12966 12967 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) { 12968 n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ); 12969 if (n == cpu) 12970 lpfc_sli4_stop_polling(eq); 12971 } 12972 12973 return 0; 12974 } 12975 12976 /** 12977 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device 12978 * @phba: pointer to lpfc hba data structure. 12979 * 12980 * This routine is invoked to enable the MSI-X interrupt vectors to device 12981 * with SLI-4 interface spec. It also allocates MSI-X vectors and maps them 12982 * to cpus on the system. 
12983 * 12984 * When cfg_irq_numa is enabled, the adapter will only allocate vectors for 12985 * the number of cpus on the same numa node as this adapter. The vectors are 12986 * allocated without requesting OS affinity mapping. A vector will be 12987 * allocated and assigned to each online and offline cpu. If the cpu is 12988 * online, then affinity will be set to that cpu. If the cpu is offline, then 12989 * affinity will be set to the nearest peer cpu within the numa node that is 12990 * online. If there are no online cpus within the numa node, affinity is not 12991 * assigned and the OS may do as it pleases. Note: cpu vector affinity mapping 12992 * is consistent with the way cpu online/offline is handled when cfg_irq_numa is 12993 * configured. 12994 * 12995 * If numa mode is not enabled and there is more than 1 vector allocated, then 12996 * the driver relies on the managed irq interface where the OS assigns vector to 12997 * cpu affinity. The driver will then use that affinity mapping to setup its 12998 * cpu mapping table. 12999 * 13000 * Return codes 13001 * 0 - successful 13002 * other values - error 13003 **/ 13004 static int 13005 lpfc_sli4_enable_msix(struct lpfc_hba *phba) 13006 { 13007 int vectors, rc, index; 13008 char *name; 13009 const struct cpumask *aff_mask = NULL; 13010 unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids; 13011 struct lpfc_vector_map_info *cpup; 13012 struct lpfc_hba_eq_hdl *eqhdl; 13013 const struct cpumask *maskp; 13014 unsigned int flags = PCI_IRQ_MSIX; 13015 13016 /* Set up MSI-X multi-message vectors */ 13017 vectors = phba->cfg_irq_chann; 13018 13019 if (phba->irq_chann_mode != NORMAL_MODE) 13020 aff_mask = &phba->sli4_hba.irq_aff_mask; 13021 13022 if (aff_mask) { 13023 cpu_cnt = cpumask_weight(aff_mask); 13024 vectors = min(phba->cfg_irq_chann, cpu_cnt); 13025 13026 /* cpu: iterates over aff_mask including offline or online 13027 * cpu_select: iterates over online aff_mask to set affinity 13028 */ 13029 cpu = cpumask_first(aff_mask); 13030 cpu_select = lpfc_next_online_cpu(aff_mask, cpu); 13031 } else { 13032 flags |= PCI_IRQ_AFFINITY; 13033 } 13034 13035 rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags); 13036 if (rc < 0) { 13037 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 13038 "0484 PCI enable MSI-X failed (%d)\n", rc); 13039 goto vec_fail_out; 13040 } 13041 vectors = rc; 13042 13043 /* Assign MSI-X vectors to interrupt handlers */ 13044 for (index = 0; index < vectors; index++) { 13045 eqhdl = lpfc_get_eq_hdl(index); 13046 name = eqhdl->handler_name; 13047 memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ); 13048 snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ, 13049 LPFC_DRIVER_HANDLER_NAME"%d", index); 13050 13051 eqhdl->idx = index; 13052 rc = pci_irq_vector(phba->pcidev, index); 13053 if (rc < 0) { 13054 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 13055 "0489 MSI-X fast-path (%d) " 13056 "pci_irq_vec failed (%d)\n", index, rc); 13057 goto cfg_fail_out; 13058 } 13059 eqhdl->irq = rc; 13060 13061 rc = request_irq(eqhdl->irq, &lpfc_sli4_hba_intr_handler, 0, 13062 name, eqhdl); 13063 if (rc) { 13064 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 13065 "0486 MSI-X fast-path (%d) " 13066 "request_irq failed (%d)\n", index, rc); 13067 goto cfg_fail_out; 13068 } 13069 13070 if (aff_mask) { 13071 /* If found a neighboring online cpu, set affinity */ 13072 if (cpu_select < nr_cpu_ids) 13073 lpfc_irq_set_aff(eqhdl, cpu_select); 13074 13075 /* Assign EQ to cpu_map */ 13076 lpfc_assign_eq_map_info(phba, index, 13077 LPFC_CPU_FIRST_IRQ, 13078 cpu); 
13079 13080 /* Iterate to next offline or online cpu in aff_mask */ 13081 cpu = cpumask_next(cpu, aff_mask); 13082 13083 /* Find next online cpu in aff_mask to set affinity */ 13084 cpu_select = lpfc_next_online_cpu(aff_mask, cpu); 13085 } else if (vectors == 1) { 13086 cpu = cpumask_first(cpu_present_mask); 13087 lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ, 13088 cpu); 13089 } else { 13090 maskp = pci_irq_get_affinity(phba->pcidev, index); 13091 13092 /* Loop through all CPUs associated with vector index */ 13093 for_each_cpu_and(cpu, maskp, cpu_present_mask) { 13094 cpup = &phba->sli4_hba.cpu_map[cpu]; 13095 13096 /* If this is the first CPU thats assigned to 13097 * this vector, set LPFC_CPU_FIRST_IRQ. 13098 * 13099 * With certain platforms its possible that irq 13100 * vectors are affinitized to all the cpu's. 13101 * This can result in each cpu_map.eq to be set 13102 * to the last vector, resulting in overwrite 13103 * of all the previous cpu_map.eq. Ensure that 13104 * each vector receives a place in cpu_map. 13105 * Later call to lpfc_cpu_affinity_check will 13106 * ensure we are nicely balanced out. 13107 */ 13108 if (cpup->eq != LPFC_VECTOR_MAP_EMPTY) 13109 continue; 13110 lpfc_assign_eq_map_info(phba, index, 13111 LPFC_CPU_FIRST_IRQ, 13112 cpu); 13113 break; 13114 } 13115 } 13116 } 13117 13118 if (vectors != phba->cfg_irq_chann) { 13119 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13120 "3238 Reducing IO channels to match number of " 13121 "MSI-X vectors, requested %d got %d\n", 13122 phba->cfg_irq_chann, vectors); 13123 if (phba->cfg_irq_chann > vectors) 13124 phba->cfg_irq_chann = vectors; 13125 } 13126 13127 return rc; 13128 13129 cfg_fail_out: 13130 /* free the irq already requested */ 13131 for (--index; index >= 0; index--) { 13132 eqhdl = lpfc_get_eq_hdl(index); 13133 lpfc_irq_clear_aff(eqhdl); 13134 free_irq(eqhdl->irq, eqhdl); 13135 } 13136 13137 /* Unconfigure MSI-X capability structure */ 13138 pci_free_irq_vectors(phba->pcidev); 13139 13140 vec_fail_out: 13141 return rc; 13142 } 13143 13144 /** 13145 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device 13146 * @phba: pointer to lpfc hba data structure. 13147 * 13148 * This routine is invoked to enable the MSI interrupt mode to device with 13149 * SLI-4 interface spec. The kernel function pci_alloc_irq_vectors() is 13150 * called to enable the MSI vector. The device driver is responsible for 13151 * calling the request_irq() to register MSI vector with a interrupt the 13152 * handler, which is done in this function. 13153 * 13154 * Return codes 13155 * 0 - successful 13156 * other values - error 13157 **/ 13158 static int 13159 lpfc_sli4_enable_msi(struct lpfc_hba *phba) 13160 { 13161 int rc, index; 13162 unsigned int cpu; 13163 struct lpfc_hba_eq_hdl *eqhdl; 13164 13165 rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1, 13166 PCI_IRQ_MSI | PCI_IRQ_AFFINITY); 13167 if (rc > 0) 13168 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 13169 "0487 PCI enable MSI mode success.\n"); 13170 else { 13171 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 13172 "0488 PCI enable MSI mode failed (%d)\n", rc); 13173 return rc ? 
rc : -1; 13174 } 13175 13176 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 13177 0, LPFC_DRIVER_NAME, phba); 13178 if (rc) { 13179 pci_free_irq_vectors(phba->pcidev); 13180 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 13181 "0490 MSI request_irq failed (%d)\n", rc); 13182 return rc; 13183 } 13184 13185 eqhdl = lpfc_get_eq_hdl(0); 13186 rc = pci_irq_vector(phba->pcidev, 0); 13187 if (rc < 0) { 13188 pci_free_irq_vectors(phba->pcidev); 13189 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 13190 "0496 MSI pci_irq_vec failed (%d)\n", rc); 13191 return rc; 13192 } 13193 eqhdl->irq = rc; 13194 13195 cpu = cpumask_first(cpu_present_mask); 13196 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu); 13197 13198 for (index = 0; index < phba->cfg_irq_chann; index++) { 13199 eqhdl = lpfc_get_eq_hdl(index); 13200 eqhdl->idx = index; 13201 } 13202 13203 return 0; 13204 } 13205 13206 /** 13207 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device 13208 * @phba: pointer to lpfc hba data structure. 13209 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X). 13210 * 13211 * This routine is invoked to enable device interrupt and associate driver's 13212 * interrupt handler(s) to interrupt vector(s) to device with SLI-4 13213 * interface spec. Depends on the interrupt mode configured to the driver, 13214 * the driver will try to fallback from the configured interrupt mode to an 13215 * interrupt mode which is supported by the platform, kernel, and device in 13216 * the order of: 13217 * MSI-X -> MSI -> IRQ. 13218 * 13219 * Return codes 13220 * Interrupt mode (2, 1, 0) - successful 13221 * LPFC_INTR_ERROR - error 13222 **/ 13223 static uint32_t 13224 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 13225 { 13226 uint32_t intr_mode = LPFC_INTR_ERROR; 13227 int retval, idx; 13228 13229 if (cfg_mode == 2) { 13230 /* Preparation before conf_msi mbox cmd */ 13231 retval = 0; 13232 if (!retval) { 13233 /* Now, try to enable MSI-X interrupt mode */ 13234 retval = lpfc_sli4_enable_msix(phba); 13235 if (!retval) { 13236 /* Indicate initialization to MSI-X mode */ 13237 phba->intr_type = MSIX; 13238 intr_mode = 2; 13239 } 13240 } 13241 } 13242 13243 /* Fallback to MSI if MSI-X initialization failed */ 13244 if (cfg_mode >= 1 && phba->intr_type == NONE) { 13245 retval = lpfc_sli4_enable_msi(phba); 13246 if (!retval) { 13247 /* Indicate initialization to MSI mode */ 13248 phba->intr_type = MSI; 13249 intr_mode = 1; 13250 } 13251 } 13252 13253 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 13254 if (phba->intr_type == NONE) { 13255 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 13256 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 13257 if (!retval) { 13258 struct lpfc_hba_eq_hdl *eqhdl; 13259 unsigned int cpu; 13260 13261 /* Indicate initialization to INTx mode */ 13262 phba->intr_type = INTx; 13263 intr_mode = 0; 13264 13265 eqhdl = lpfc_get_eq_hdl(0); 13266 retval = pci_irq_vector(phba->pcidev, 0); 13267 if (retval < 0) { 13268 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 13269 "0502 INTR pci_irq_vec failed (%d)\n", 13270 retval); 13271 return LPFC_INTR_ERROR; 13272 } 13273 eqhdl->irq = retval; 13274 13275 cpu = cpumask_first(cpu_present_mask); 13276 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, 13277 cpu); 13278 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 13279 eqhdl = lpfc_get_eq_hdl(idx); 13280 eqhdl->idx = idx; 13281 } 13282 } 13283 } 13284 return intr_mode; 13285 } 13286 13287 /** 13288 * lpfc_sli4_disable_intr - Disable 
device interrupt to SLI-4 device 13289 * @phba: pointer to lpfc hba data structure. 13290 * 13291 * This routine is invoked to disable device interrupt and disassociate 13292 * the driver's interrupt handler(s) from interrupt vector(s) to device 13293 * with SLI-4 interface spec. Depending on the interrupt mode, the driver 13294 * will release the interrupt vector(s) for the message signaled interrupt. 13295 **/ 13296 static void 13297 lpfc_sli4_disable_intr(struct lpfc_hba *phba) 13298 { 13299 /* Disable the currently initialized interrupt mode */ 13300 if (phba->intr_type == MSIX) { 13301 int index; 13302 struct lpfc_hba_eq_hdl *eqhdl; 13303 13304 /* Free up MSI-X multi-message vectors */ 13305 for (index = 0; index < phba->cfg_irq_chann; index++) { 13306 eqhdl = lpfc_get_eq_hdl(index); 13307 lpfc_irq_clear_aff(eqhdl); 13308 free_irq(eqhdl->irq, eqhdl); 13309 } 13310 } else { 13311 free_irq(phba->pcidev->irq, phba); 13312 } 13313 13314 pci_free_irq_vectors(phba->pcidev); 13315 13316 /* Reset interrupt management states */ 13317 phba->intr_type = NONE; 13318 phba->sli.slistat.sli_intr = 0; 13319 } 13320 13321 /** 13322 * lpfc_unset_hba - Unset SLI3 hba device initialization 13323 * @phba: pointer to lpfc hba data structure. 13324 * 13325 * This routine is invoked to unset the HBA device initialization steps to 13326 * a device with SLI-3 interface spec. 13327 **/ 13328 static void 13329 lpfc_unset_hba(struct lpfc_hba *phba) 13330 { 13331 struct lpfc_vport *vport = phba->pport; 13332 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 13333 13334 spin_lock_irq(shost->host_lock); 13335 vport->load_flag |= FC_UNLOADING; 13336 spin_unlock_irq(shost->host_lock); 13337 13338 kfree(phba->vpi_bmask); 13339 kfree(phba->vpi_ids); 13340 13341 lpfc_stop_hba_timers(phba); 13342 13343 phba->pport->work_port_events = 0; 13344 13345 lpfc_sli_hba_down(phba); 13346 13347 lpfc_sli_brdrestart(phba); 13348 13349 lpfc_sli_disable_intr(phba); 13350 13351 return; 13352 } 13353 13354 /** 13355 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy 13356 * @phba: Pointer to HBA context object. 13357 * 13358 * This function is called in the SLI4 code path to wait for completion 13359 * of device's XRIs exchange busy. It will check the XRI exchange busy 13360 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after 13361 * that, it will check the XRI exchange busy on outstanding FCP and ELS 13362 * I/Os every 30 seconds, log error message, and wait forever. Only when 13363 * all XRI exchange busy complete, the driver unload shall proceed with 13364 * invoking the function reset ioctl mailbox command to the CNA and the 13365 * the rest of the driver unload resource release. 13366 **/ 13367 static void 13368 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) 13369 { 13370 struct lpfc_sli4_hdw_queue *qp; 13371 int idx, ccnt; 13372 int wait_time = 0; 13373 int io_xri_cmpl = 1; 13374 int nvmet_xri_cmpl = 1; 13375 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 13376 13377 /* Driver just aborted IOs during the hba_unset process. Pause 13378 * here to give the HBA time to complete the IO and get entries 13379 * into the abts lists. 13380 */ 13381 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5); 13382 13383 /* Wait for NVME pending IO to flush back to transport. 
*/ 13384 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 13385 lpfc_nvme_wait_for_io_drain(phba); 13386 13387 ccnt = 0; 13388 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 13389 qp = &phba->sli4_hba.hdwq[idx]; 13390 io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list); 13391 if (!io_xri_cmpl) /* if list is NOT empty */ 13392 ccnt++; 13393 } 13394 if (ccnt) 13395 io_xri_cmpl = 0; 13396 13397 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 13398 nvmet_xri_cmpl = 13399 list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 13400 } 13401 13402 while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) { 13403 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) { 13404 if (!nvmet_xri_cmpl) 13405 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13406 "6424 NVMET XRI exchange busy " 13407 "wait time: %d seconds.\n", 13408 wait_time/1000); 13409 if (!io_xri_cmpl) 13410 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13411 "6100 IO XRI exchange busy " 13412 "wait time: %d seconds.\n", 13413 wait_time/1000); 13414 if (!els_xri_cmpl) 13415 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13416 "2878 ELS XRI exchange busy " 13417 "wait time: %d seconds.\n", 13418 wait_time/1000); 13419 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2); 13420 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2; 13421 } else { 13422 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1); 13423 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1; 13424 } 13425 13426 ccnt = 0; 13427 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 13428 qp = &phba->sli4_hba.hdwq[idx]; 13429 io_xri_cmpl = list_empty( 13430 &qp->lpfc_abts_io_buf_list); 13431 if (!io_xri_cmpl) /* if list is NOT empty */ 13432 ccnt++; 13433 } 13434 if (ccnt) 13435 io_xri_cmpl = 0; 13436 13437 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 13438 nvmet_xri_cmpl = list_empty( 13439 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 13440 } 13441 els_xri_cmpl = 13442 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 13443 13444 } 13445 } 13446 13447 /** 13448 * lpfc_sli4_hba_unset - Unset the fcoe hba 13449 * @phba: Pointer to HBA context object. 13450 * 13451 * This function is called in the SLI4 code path to reset the HBA's FCoE 13452 * function. The caller is not required to hold any lock. This routine 13453 * issues PCI function reset mailbox command to reset the FCoE function. 13454 * At the end of the function, it calls lpfc_hba_down_post function to 13455 * free any pending commands. 13456 **/ 13457 static void 13458 lpfc_sli4_hba_unset(struct lpfc_hba *phba) 13459 { 13460 int wait_cnt = 0; 13461 LPFC_MBOXQ_t *mboxq; 13462 struct pci_dev *pdev = phba->pcidev; 13463 13464 lpfc_stop_hba_timers(phba); 13465 hrtimer_cancel(&phba->cmf_timer); 13466 13467 if (phba->pport) 13468 phba->sli4_hba.intr_enable = 0; 13469 13470 /* 13471 * Gracefully wait out the potential current outstanding asynchronous 13472 * mailbox command. 
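 * The sequence below first blocks new asynchronous mailbox posting, then
 * polls (10ms at a time, up to LPFC_ACTIVE_MBOX_WAIT_CNT iterations) for
 * the active command to finish, and finally force-completes it with
 * MBX_NOT_FINISHED if it never retires.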
13473 */ 13474 13475 /* First, block any pending async mailbox command from posted */ 13476 spin_lock_irq(&phba->hbalock); 13477 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 13478 spin_unlock_irq(&phba->hbalock); 13479 /* Now, trying to wait it out if we can */ 13480 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 13481 msleep(10); 13482 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT) 13483 break; 13484 } 13485 /* Forcefully release the outstanding mailbox command if timed out */ 13486 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 13487 spin_lock_irq(&phba->hbalock); 13488 mboxq = phba->sli.mbox_active; 13489 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 13490 __lpfc_mbox_cmpl_put(phba, mboxq); 13491 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 13492 phba->sli.mbox_active = NULL; 13493 spin_unlock_irq(&phba->hbalock); 13494 } 13495 13496 /* Abort all iocbs associated with the hba */ 13497 lpfc_sli_hba_iocb_abort(phba); 13498 13499 if (!pci_channel_offline(phba->pcidev)) 13500 /* Wait for completion of device XRI exchange busy */ 13501 lpfc_sli4_xri_exchange_busy_wait(phba); 13502 13503 /* per-phba callback de-registration for hotplug event */ 13504 if (phba->pport) 13505 lpfc_cpuhp_remove(phba); 13506 13507 /* Disable PCI subsystem interrupt */ 13508 lpfc_sli4_disable_intr(phba); 13509 13510 /* Disable SR-IOV if enabled */ 13511 if (phba->cfg_sriov_nr_virtfn) 13512 pci_disable_sriov(pdev); 13513 13514 /* Stop kthread signal shall trigger work_done one more time */ 13515 kthread_stop(phba->worker_thread); 13516 13517 /* Disable FW logging to host memory */ 13518 lpfc_ras_stop_fwlog(phba); 13519 13520 /* Reset SLI4 HBA FCoE function */ 13521 lpfc_pci_function_reset(phba); 13522 13523 /* release all queue allocated resources. */ 13524 lpfc_sli4_queue_destroy(phba); 13525 13526 /* Free RAS DMA memory */ 13527 if (phba->ras_fwlog.ras_enabled) 13528 lpfc_sli4_ras_dma_free(phba); 13529 13530 /* Stop the SLI4 device port */ 13531 if (phba->pport) 13532 phba->pport->work_port_events = 0; 13533 } 13534 13535 static uint32_t 13536 lpfc_cgn_crc32(uint32_t crc, u8 byte) 13537 { 13538 uint32_t msb = 0; 13539 uint32_t bit; 13540 13541 for (bit = 0; bit < 8; bit++) { 13542 msb = (crc >> 31) & 1; 13543 crc <<= 1; 13544 13545 if (msb ^ (byte & 1)) { 13546 crc ^= LPFC_CGN_CRC32_MAGIC_NUMBER; 13547 crc |= 1; 13548 } 13549 byte >>= 1; 13550 } 13551 return crc; 13552 } 13553 13554 static uint32_t 13555 lpfc_cgn_reverse_bits(uint32_t wd) 13556 { 13557 uint32_t result = 0; 13558 uint32_t i; 13559 13560 for (i = 0; i < 32; i++) { 13561 result <<= 1; 13562 result |= (1 & (wd >> i)); 13563 } 13564 return result; 13565 } 13566 13567 /* 13568 * The routine corresponds with the algorithm the HBA firmware 13569 * uses to validate the data integrity. 
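 *
 * lpfc_cgn_crc32() folds each byte into the running CRC one bit at a time,
 * XOR-ing in LPFC_CGN_CRC32_MAGIC_NUMBER, and lpfc_cgn_calc_crc32() returns
 * the bit-reversed, inverted result. Typical use, as in
 * lpfc_init_congestion_buf() below:
 *
 *   crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
 *   cp->cgn_info_crc = cpu_to_le32(crc);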
13570 */ 13571 uint32_t 13572 lpfc_cgn_calc_crc32(void *ptr, uint32_t byteLen, uint32_t crc) 13573 { 13574 uint32_t i; 13575 uint32_t result; 13576 uint8_t *data = (uint8_t *)ptr; 13577 13578 for (i = 0; i < byteLen; ++i) 13579 crc = lpfc_cgn_crc32(crc, data[i]); 13580 13581 result = ~lpfc_cgn_reverse_bits(crc); 13582 return result; 13583 } 13584 13585 void 13586 lpfc_init_congestion_buf(struct lpfc_hba *phba) 13587 { 13588 struct lpfc_cgn_info *cp; 13589 struct timespec64 cmpl_time; 13590 struct tm broken; 13591 uint16_t size; 13592 uint32_t crc; 13593 13594 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 13595 "6235 INIT Congestion Buffer %p\n", phba->cgn_i); 13596 13597 if (!phba->cgn_i) 13598 return; 13599 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; 13600 13601 atomic_set(&phba->cgn_fabric_warn_cnt, 0); 13602 atomic_set(&phba->cgn_fabric_alarm_cnt, 0); 13603 atomic_set(&phba->cgn_sync_alarm_cnt, 0); 13604 atomic_set(&phba->cgn_sync_warn_cnt, 0); 13605 13606 atomic_set(&phba->cgn_driver_evt_cnt, 0); 13607 atomic_set(&phba->cgn_latency_evt_cnt, 0); 13608 atomic64_set(&phba->cgn_latency_evt, 0); 13609 phba->cgn_evt_minute = 0; 13610 phba->hba_flag &= ~HBA_CGN_DAY_WRAP; 13611 13612 memset(cp, 0xff, offsetof(struct lpfc_cgn_info, cgn_stat)); 13613 cp->cgn_info_size = cpu_to_le16(LPFC_CGN_INFO_SZ); 13614 cp->cgn_info_version = LPFC_CGN_INFO_V3; 13615 13616 /* cgn parameters */ 13617 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode; 13618 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0; 13619 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1; 13620 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2; 13621 13622 ktime_get_real_ts64(&cmpl_time); 13623 time64_to_tm(cmpl_time.tv_sec, 0, &broken); 13624 13625 cp->cgn_info_month = broken.tm_mon + 1; 13626 cp->cgn_info_day = broken.tm_mday; 13627 cp->cgn_info_year = broken.tm_year - 100; /* relative to 2000 */ 13628 cp->cgn_info_hour = broken.tm_hour; 13629 cp->cgn_info_minute = broken.tm_min; 13630 cp->cgn_info_second = broken.tm_sec; 13631 13632 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT, 13633 "2643 CGNInfo Init: Start Time " 13634 "%d/%d/%d %d:%d:%d\n", 13635 cp->cgn_info_day, cp->cgn_info_month, 13636 cp->cgn_info_year, cp->cgn_info_hour, 13637 cp->cgn_info_minute, cp->cgn_info_second); 13638 13639 /* Fill in default LUN qdepth */ 13640 if (phba->pport) { 13641 size = (uint16_t)(phba->pport->cfg_lun_queue_depth); 13642 cp->cgn_lunq = cpu_to_le16(size); 13643 } 13644 13645 /* last used Index initialized to 0xff already */ 13646 13647 cp->cgn_warn_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ); 13648 cp->cgn_alarm_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ); 13649 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED); 13650 cp->cgn_info_crc = cpu_to_le32(crc); 13651 13652 phba->cgn_evt_timestamp = jiffies + 13653 msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN); 13654 } 13655 13656 void 13657 lpfc_init_congestion_stat(struct lpfc_hba *phba) 13658 { 13659 struct lpfc_cgn_info *cp; 13660 struct timespec64 cmpl_time; 13661 struct tm broken; 13662 uint32_t crc; 13663 13664 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 13665 "6236 INIT Congestion Stat %p\n", phba->cgn_i); 13666 13667 if (!phba->cgn_i) 13668 return; 13669 13670 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; 13671 memset(&cp->cgn_stat, 0, sizeof(cp->cgn_stat)); 13672 13673 ktime_get_real_ts64(&cmpl_time); 13674 time64_to_tm(cmpl_time.tv_sec, 0, &broken); 13675 13676 cp->cgn_stat_month = broken.tm_mon + 1; 13677 cp->cgn_stat_day = broken.tm_mday; 13678 cp->cgn_stat_year = 
broken.tm_year - 100; /* relative to 2000 */ 13679 cp->cgn_stat_hour = broken.tm_hour; 13680 cp->cgn_stat_minute = broken.tm_min; 13681 13682 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT, 13683 "2647 CGNstat Init: Start Time " 13684 "%d/%d/%d %d:%d\n", 13685 cp->cgn_stat_day, cp->cgn_stat_month, 13686 cp->cgn_stat_year, cp->cgn_stat_hour, 13687 cp->cgn_stat_minute); 13688 13689 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED); 13690 cp->cgn_info_crc = cpu_to_le32(crc); 13691 } 13692 13693 /** 13694 * __lpfc_reg_congestion_buf - register congestion info buffer with HBA 13695 * @phba: Pointer to hba context object. 13696 * @reg: flag to determine register or unregister. 13697 */ 13698 static int 13699 __lpfc_reg_congestion_buf(struct lpfc_hba *phba, int reg) 13700 { 13701 struct lpfc_mbx_reg_congestion_buf *reg_congestion_buf; 13702 union lpfc_sli4_cfg_shdr *shdr; 13703 uint32_t shdr_status, shdr_add_status; 13704 LPFC_MBOXQ_t *mboxq; 13705 int length, rc; 13706 13707 if (!phba->cgn_i) 13708 return -ENXIO; 13709 13710 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 13711 if (!mboxq) { 13712 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 13713 "2641 REG_CONGESTION_BUF mbox allocation fail: " 13714 "HBA state x%x reg %d\n", 13715 phba->pport->port_state, reg); 13716 return -ENOMEM; 13717 } 13718 13719 length = (sizeof(struct lpfc_mbx_reg_congestion_buf) - 13720 sizeof(struct lpfc_sli4_cfg_mhdr)); 13721 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 13722 LPFC_MBOX_OPCODE_REG_CONGESTION_BUF, length, 13723 LPFC_SLI4_MBX_EMBED); 13724 reg_congestion_buf = &mboxq->u.mqe.un.reg_congestion_buf; 13725 bf_set(lpfc_mbx_reg_cgn_buf_type, reg_congestion_buf, 1); 13726 if (reg > 0) 13727 bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 1); 13728 else 13729 bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 0); 13730 reg_congestion_buf->length = sizeof(struct lpfc_cgn_info); 13731 reg_congestion_buf->addr_lo = 13732 putPaddrLow(phba->cgn_i->phys); 13733 reg_congestion_buf->addr_hi = 13734 putPaddrHigh(phba->cgn_i->phys); 13735 13736 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 13737 shdr = (union lpfc_sli4_cfg_shdr *) 13738 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 13739 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13740 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 13741 &shdr->response); 13742 mempool_free(mboxq, phba->mbox_mem_pool); 13743 if (shdr_status || shdr_add_status || rc) { 13744 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13745 "2642 REG_CONGESTION_BUF mailbox " 13746 "failed with status x%x add_status x%x," 13747 " mbx status x%x reg %d\n", 13748 shdr_status, shdr_add_status, rc, reg); 13749 return -ENXIO; 13750 } 13751 return 0; 13752 } 13753 13754 int 13755 lpfc_unreg_congestion_buf(struct lpfc_hba *phba) 13756 { 13757 lpfc_cmf_stop(phba); 13758 return __lpfc_reg_congestion_buf(phba, 0); 13759 } 13760 13761 int 13762 lpfc_reg_congestion_buf(struct lpfc_hba *phba) 13763 { 13764 return __lpfc_reg_congestion_buf(phba, 1); 13765 } 13766 13767 /** 13768 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS. 13769 * @phba: Pointer to HBA context object. 13770 * @mboxq: Pointer to the mailboxq memory for the mailbox command response. 13771 * 13772 * This function is called in the SLI4 code path to read the port's 13773 * sli4 capabilities. 13774 * 13775 * This function may be be called from any context that can block-wait 13776 * for the completion. 
The expectation is that this routine is called 13777 * typically from probe_one or from the online routine. 13778 **/ 13779 int 13780 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 13781 { 13782 int rc; 13783 struct lpfc_mqe *mqe = &mboxq->u.mqe; 13784 struct lpfc_pc_sli4_params *sli4_params; 13785 uint32_t mbox_tmo; 13786 int length; 13787 bool exp_wqcq_pages = true; 13788 struct lpfc_sli4_parameters *mbx_sli4_parameters; 13789 13790 /* 13791 * By default, the driver assumes the SLI4 port requires RPI 13792 * header postings. The SLI4_PARAM response will correct this 13793 * assumption. 13794 */ 13795 phba->sli4_hba.rpi_hdrs_in_use = 1; 13796 13797 /* Read the port's SLI4 Config Parameters */ 13798 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) - 13799 sizeof(struct lpfc_sli4_cfg_mhdr)); 13800 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 13801 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS, 13802 length, LPFC_SLI4_MBX_EMBED); 13803 if (!phba->sli4_hba.intr_enable) 13804 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 13805 else { 13806 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 13807 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 13808 } 13809 if (unlikely(rc)) 13810 return rc; 13811 sli4_params = &phba->sli4_hba.pc_sli4_params; 13812 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters; 13813 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters); 13814 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters); 13815 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters); 13816 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1, 13817 mbx_sli4_parameters); 13818 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2, 13819 mbx_sli4_parameters); 13820 if (bf_get(cfg_phwq, mbx_sli4_parameters)) 13821 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED; 13822 else 13823 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED; 13824 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len; 13825 sli4_params->loopbk_scope = bf_get(cfg_loopbk_scope, 13826 mbx_sli4_parameters); 13827 sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters); 13828 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters); 13829 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters); 13830 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters); 13831 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters); 13832 sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters); 13833 sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters); 13834 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters); 13835 sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters); 13836 sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters); 13837 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt, 13838 mbx_sli4_parameters); 13839 sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters); 13840 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align, 13841 mbx_sli4_parameters); 13842 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters); 13843 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters); 13844 sli4_params->mi_cap = bf_get(cfg_mi_ver, mbx_sli4_parameters); 13845 13846 /* Check for Extended Pre-Registered SGL support */ 13847 phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters); 13848 13849 /* Check for firmware nvme support */ 13850 rc = (bf_get(cfg_nvme, mbx_sli4_parameters) && 13851 bf_get(cfg_xib, mbx_sli4_parameters)); 13852 13853 if (rc) { 13854 /* Save this to indicate the Firmware supports NVME 
*/ 13855 sli4_params->nvme = 1; 13856 13857 /* Firmware NVME support, check driver FC4 NVME support */ 13858 if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) { 13859 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME, 13860 "6133 Disabling NVME support: " 13861 "FC4 type not supported: x%x\n", 13862 phba->cfg_enable_fc4_type); 13863 goto fcponly; 13864 } 13865 } else { 13866 /* No firmware NVME support, check driver FC4 NVME support */ 13867 sli4_params->nvme = 0; 13868 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 13869 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME, 13870 "6101 Disabling NVME support: Not " 13871 "supported by firmware (%d %d) x%x\n", 13872 bf_get(cfg_nvme, mbx_sli4_parameters), 13873 bf_get(cfg_xib, mbx_sli4_parameters), 13874 phba->cfg_enable_fc4_type); 13875 fcponly: 13876 phba->nvmet_support = 0; 13877 phba->cfg_nvmet_mrq = 0; 13878 phba->cfg_nvme_seg_cnt = 0; 13879 13880 /* If no FC4 type support, move to just SCSI support */ 13881 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 13882 return -ENODEV; 13883 phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP; 13884 } 13885 } 13886 13887 /* If the NVME FC4 type is enabled, scale the sg_seg_cnt to 13888 * accommodate 512K and 1M IOs in a single nvme buf. 13889 */ 13890 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 13891 phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT; 13892 13893 /* Enable embedded Payload BDE if support is indicated */ 13894 if (bf_get(cfg_pbde, mbx_sli4_parameters)) 13895 phba->cfg_enable_pbde = 1; 13896 else 13897 phba->cfg_enable_pbde = 0; 13898 13899 /* 13900 * To support Suppress Response feature we must satisfy 3 conditions. 13901 * lpfc_suppress_rsp module parameter must be set (default). 13902 * In SLI4-Parameters Descriptor: 13903 * Extended Inline Buffers (XIB) must be supported. 13904 * Suppress Response IU Not Supported (SRIUNS) must NOT be supported 13905 * (double negative). 13906 */ 13907 if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) && 13908 !(bf_get(cfg_nosr, mbx_sli4_parameters))) 13909 phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP; 13910 else 13911 phba->cfg_suppress_rsp = 0; 13912 13913 if (bf_get(cfg_eqdr, mbx_sli4_parameters)) 13914 phba->sli.sli_flag |= LPFC_SLI_USE_EQDR; 13915 13916 /* Make sure that sge_supp_len can be handled by the driver */ 13917 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) 13918 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE; 13919 13920 /* 13921 * Check whether the adapter supports an embedded copy of the 13922 * FCP CMD IU within the WQE for FCP_Ixxx commands. In order 13923 * to use this option, 128-byte WQEs must be used. 
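 * Whether the port reports this capability is read from the
 * cfg_ext_embed_cb bit of the SLI4-Parameters response just below, and
 * phba->fcp_embed_io is set accordingly.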
13924 */ 13925 if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters)) 13926 phba->fcp_embed_io = 1; 13927 else 13928 phba->fcp_embed_io = 0; 13929 13930 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME, 13931 "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n", 13932 bf_get(cfg_xib, mbx_sli4_parameters), 13933 phba->cfg_enable_pbde, 13934 phba->fcp_embed_io, sli4_params->nvme, 13935 phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp); 13936 13937 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 13938 LPFC_SLI_INTF_IF_TYPE_2) && 13939 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == 13940 LPFC_SLI_INTF_FAMILY_LNCR_A0)) 13941 exp_wqcq_pages = false; 13942 13943 if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) && 13944 (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) && 13945 exp_wqcq_pages && 13946 (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT)) 13947 phba->enab_exp_wqcq_pages = 1; 13948 else 13949 phba->enab_exp_wqcq_pages = 0; 13950 /* 13951 * Check if the SLI port supports MDS Diagnostics 13952 */ 13953 if (bf_get(cfg_mds_diags, mbx_sli4_parameters)) 13954 phba->mds_diags_support = 1; 13955 else 13956 phba->mds_diags_support = 0; 13957 13958 /* 13959 * Check if the SLI port supports NSLER 13960 */ 13961 if (bf_get(cfg_nsler, mbx_sli4_parameters)) 13962 phba->nsler = 1; 13963 else 13964 phba->nsler = 0; 13965 13966 return 0; 13967 } 13968 13969 /** 13970 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem. 13971 * @pdev: pointer to PCI device 13972 * @pid: pointer to PCI device identifier 13973 * 13974 * This routine is to be called to attach a device with SLI-3 interface spec 13975 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 13976 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 13977 * information of the device and driver to see if the driver state that it can 13978 * support this kind of device. If the match is successful, the driver core 13979 * invokes this routine. If this routine determines it can claim the HBA, it 13980 * does all the initialization that it needs to do to handle the HBA properly. 
13981 * 13982 * Return code 13983 * 0 - driver can claim the device 13984 * negative value - driver can not claim the device 13985 **/ 13986 static int 13987 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) 13988 { 13989 struct lpfc_hba *phba; 13990 struct lpfc_vport *vport = NULL; 13991 struct Scsi_Host *shost = NULL; 13992 int error; 13993 uint32_t cfg_mode, intr_mode; 13994 13995 /* Allocate memory for HBA structure */ 13996 phba = lpfc_hba_alloc(pdev); 13997 if (!phba) 13998 return -ENOMEM; 13999 14000 /* Perform generic PCI device enabling operation */ 14001 error = lpfc_enable_pci_dev(phba); 14002 if (error) 14003 goto out_free_phba; 14004 14005 /* Set up SLI API function jump table for PCI-device group-0 HBAs */ 14006 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP); 14007 if (error) 14008 goto out_disable_pci_dev; 14009 14010 /* Set up SLI-3 specific device PCI memory space */ 14011 error = lpfc_sli_pci_mem_setup(phba); 14012 if (error) { 14013 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14014 "1402 Failed to set up pci memory space.\n"); 14015 goto out_disable_pci_dev; 14016 } 14017 14018 /* Set up SLI-3 specific device driver resources */ 14019 error = lpfc_sli_driver_resource_setup(phba); 14020 if (error) { 14021 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14022 "1404 Failed to set up driver resource.\n"); 14023 goto out_unset_pci_mem_s3; 14024 } 14025 14026 /* Initialize and populate the iocb list per host */ 14027 14028 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT); 14029 if (error) { 14030 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14031 "1405 Failed to initialize iocb list.\n"); 14032 goto out_unset_driver_resource_s3; 14033 } 14034 14035 /* Set up common device driver resources */ 14036 error = lpfc_setup_driver_resource_phase2(phba); 14037 if (error) { 14038 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14039 "1406 Failed to set up driver resource.\n"); 14040 goto out_free_iocb_list; 14041 } 14042 14043 /* Get the default values for Model Name and Description */ 14044 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 14045 14046 /* Create SCSI host to the physical port */ 14047 error = lpfc_create_shost(phba); 14048 if (error) { 14049 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14050 "1407 Failed to create scsi host.\n"); 14051 goto out_unset_driver_resource; 14052 } 14053 14054 /* Configure sysfs attributes */ 14055 vport = phba->pport; 14056 error = lpfc_alloc_sysfs_attr(vport); 14057 if (error) { 14058 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14059 "1476 Failed to allocate sysfs attr\n"); 14060 goto out_destroy_shost; 14061 } 14062 14063 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 14064 /* Now, trying to enable interrupt and bring up the device */ 14065 cfg_mode = phba->cfg_use_msi; 14066 while (true) { 14067 /* Put device to a known state before enabling interrupt */ 14068 lpfc_stop_port(phba); 14069 /* Configure and enable interrupt */ 14070 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode); 14071 if (intr_mode == LPFC_INTR_ERROR) { 14072 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14073 "0431 Failed to enable interrupt.\n"); 14074 error = -ENODEV; 14075 goto out_free_sysfs_attr; 14076 } 14077 /* SLI-3 HBA setup */ 14078 if (lpfc_sli_hba_setup(phba)) { 14079 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14080 "1477 Failed to set up hba\n"); 14081 error = -ENODEV; 14082 goto out_remove_device; 14083 } 14084 14085 /* Wait 50ms for the interrupts of previous mailbox commands */ 14086 
msleep(50); 14087 /* Check active interrupts on message signaled interrupts */ 14088 if (intr_mode == 0 || 14089 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) { 14090 /* Log the current active interrupt mode */ 14091 phba->intr_mode = intr_mode; 14092 lpfc_log_intr_mode(phba, intr_mode); 14093 break; 14094 } else { 14095 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 14096 "0447 Configure interrupt mode (%d) " 14097 "failed active interrupt test.\n", 14098 intr_mode); 14099 /* Disable the current interrupt mode */ 14100 lpfc_sli_disable_intr(phba); 14101 /* Try next level of interrupt mode */ 14102 cfg_mode = --intr_mode; 14103 } 14104 } 14105 14106 /* Perform post initialization setup */ 14107 lpfc_post_init_setup(phba); 14108 14109 /* Check if there are static vports to be created. */ 14110 lpfc_create_static_vport(phba); 14111 14112 return 0; 14113 14114 out_remove_device: 14115 lpfc_unset_hba(phba); 14116 out_free_sysfs_attr: 14117 lpfc_free_sysfs_attr(vport); 14118 out_destroy_shost: 14119 lpfc_destroy_shost(phba); 14120 out_unset_driver_resource: 14121 lpfc_unset_driver_resource_phase2(phba); 14122 out_free_iocb_list: 14123 lpfc_free_iocb_list(phba); 14124 out_unset_driver_resource_s3: 14125 lpfc_sli_driver_resource_unset(phba); 14126 out_unset_pci_mem_s3: 14127 lpfc_sli_pci_mem_unset(phba); 14128 out_disable_pci_dev: 14129 lpfc_disable_pci_dev(phba); 14130 if (shost) 14131 scsi_host_put(shost); 14132 out_free_phba: 14133 lpfc_hba_free(phba); 14134 return error; 14135 } 14136 14137 /** 14138 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem. 14139 * @pdev: pointer to PCI device 14140 * 14141 * This routine is to be called to disattach a device with SLI-3 interface 14142 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 14143 * removed from PCI bus, it performs all the necessary cleanup for the HBA 14144 * device to be removed from the PCI subsystem properly. 14145 **/ 14146 static void 14147 lpfc_pci_remove_one_s3(struct pci_dev *pdev) 14148 { 14149 struct Scsi_Host *shost = pci_get_drvdata(pdev); 14150 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 14151 struct lpfc_vport **vports; 14152 struct lpfc_hba *phba = vport->phba; 14153 int i; 14154 14155 spin_lock_irq(&phba->hbalock); 14156 vport->load_flag |= FC_UNLOADING; 14157 spin_unlock_irq(&phba->hbalock); 14158 14159 lpfc_free_sysfs_attr(vport); 14160 14161 /* Release all the vports against this physical port */ 14162 vports = lpfc_create_vport_work_array(phba); 14163 if (vports != NULL) 14164 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 14165 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) 14166 continue; 14167 fc_vport_terminate(vports[i]->fc_vport); 14168 } 14169 lpfc_destroy_vport_work_array(phba, vports); 14170 14171 /* Remove FC host with the physical port */ 14172 fc_remove_host(shost); 14173 scsi_remove_host(shost); 14174 14175 /* Clean up all nodes, mailboxes and IOs. */ 14176 lpfc_cleanup(vport); 14177 14178 /* 14179 * Bring down the SLI Layer. This step disable all interrupts, 14180 * clears the rings, discards all mailbox commands, and resets 14181 * the HBA. 
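 * The order below is: lpfc_sli_hba_down() (HBA interrupts are disabled
 * after this call), kthread_stop() on the worker thread, and finally
 * lpfc_sli_brdrestart() for the last txcmplq cleanup and HBA reset.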
14182 */ 14183 14184 /* HBA interrupt will be disabled after this call */ 14185 lpfc_sli_hba_down(phba); 14186 /* Stop kthread signal shall trigger work_done one more time */ 14187 kthread_stop(phba->worker_thread); 14188 /* Final cleanup of txcmplq and reset the HBA */ 14189 lpfc_sli_brdrestart(phba); 14190 14191 kfree(phba->vpi_bmask); 14192 kfree(phba->vpi_ids); 14193 14194 lpfc_stop_hba_timers(phba); 14195 spin_lock_irq(&phba->port_list_lock); 14196 list_del_init(&vport->listentry); 14197 spin_unlock_irq(&phba->port_list_lock); 14198 14199 lpfc_debugfs_terminate(vport); 14200 14201 /* Disable SR-IOV if enabled */ 14202 if (phba->cfg_sriov_nr_virtfn) 14203 pci_disable_sriov(pdev); 14204 14205 /* Disable interrupt */ 14206 lpfc_sli_disable_intr(phba); 14207 14208 scsi_host_put(shost); 14209 14210 /* 14211 * Call scsi_free before mem_free since scsi bufs are released to their 14212 * corresponding pools here. 14213 */ 14214 lpfc_scsi_free(phba); 14215 lpfc_free_iocb_list(phba); 14216 14217 lpfc_mem_free_all(phba); 14218 14219 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 14220 phba->hbqslimp.virt, phba->hbqslimp.phys); 14221 14222 /* Free resources associated with SLI2 interface */ 14223 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 14224 phba->slim2p.virt, phba->slim2p.phys); 14225 14226 /* unmap adapter SLIM and Control Registers */ 14227 iounmap(phba->ctrl_regs_memmap_p); 14228 iounmap(phba->slim_memmap_p); 14229 14230 lpfc_hba_free(phba); 14231 14232 pci_release_mem_regions(pdev); 14233 pci_disable_device(pdev); 14234 } 14235 14236 /** 14237 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt 14238 * @dev_d: pointer to device 14239 * 14240 * This routine is to be called from the kernel's PCI subsystem to support 14241 * system Power Management (PM) to device with SLI-3 interface spec. When 14242 * PM invokes this method, it quiesces the device by stopping the driver's 14243 * worker thread for the device, turning off device's interrupt and DMA, 14244 * and bring the device offline. Note that as the driver implements the 14245 * minimum PM requirements to a power-aware driver's PM support for the 14246 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) 14247 * to the suspend() method call will be treated as SUSPEND and the driver will 14248 * fully reinitialize its device during resume() method call, the driver will 14249 * set device to PCI_D3hot state in PCI config space instead of setting it 14250 * according to the @msg provided by the PM. 14251 * 14252 * Return code 14253 * 0 - driver suspended the device 14254 * Error otherwise 14255 **/ 14256 static int __maybe_unused 14257 lpfc_pci_suspend_one_s3(struct device *dev_d) 14258 { 14259 struct Scsi_Host *shost = dev_get_drvdata(dev_d); 14260 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 14261 14262 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 14263 "0473 PCI device Power Management suspend.\n"); 14264 14265 /* Bring down the device */ 14266 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 14267 lpfc_offline(phba); 14268 kthread_stop(phba->worker_thread); 14269 14270 /* Disable interrupt from device */ 14271 lpfc_sli_disable_intr(phba); 14272 14273 return 0; 14274 } 14275 14276 /** 14277 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt 14278 * @dev_d: pointer to device 14279 * 14280 * This routine is to be called from the kernel's PCI subsystem to support 14281 * system Power Management (PM) to device with SLI-3 interface spec. 
When PM 14282 * invokes this method, it restores the device's PCI config space state and 14283 * fully reinitializes the device and brings it online. Note that as the 14284 * driver implements the minimum PM requirements to a power-aware driver's 14285 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, 14286 * FREEZE) to the suspend() method call will be treated as SUSPEND and the 14287 * driver will fully reinitialize its device during resume() method call, 14288 * the device will be set to PCI_D0 directly in PCI config space before 14289 * restoring the state. 14290 * 14291 * Return code 14292 * 0 - driver suspended the device 14293 * Error otherwise 14294 **/ 14295 static int __maybe_unused 14296 lpfc_pci_resume_one_s3(struct device *dev_d) 14297 { 14298 struct Scsi_Host *shost = dev_get_drvdata(dev_d); 14299 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 14300 uint32_t intr_mode; 14301 int error; 14302 14303 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 14304 "0452 PCI device Power Management resume.\n"); 14305 14306 /* Startup the kernel thread for this host adapter. */ 14307 phba->worker_thread = kthread_run(lpfc_do_work, phba, 14308 "lpfc_worker_%d", phba->brd_no); 14309 if (IS_ERR(phba->worker_thread)) { 14310 error = PTR_ERR(phba->worker_thread); 14311 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14312 "0434 PM resume failed to start worker " 14313 "thread: error=x%x.\n", error); 14314 return error; 14315 } 14316 14317 /* Init cpu_map array */ 14318 lpfc_cpu_map_array_init(phba); 14319 /* Init hba_eq_hdl array */ 14320 lpfc_hba_eq_hdl_array_init(phba); 14321 /* Configure and enable interrupt */ 14322 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 14323 if (intr_mode == LPFC_INTR_ERROR) { 14324 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14325 "0430 PM resume Failed to enable interrupt\n"); 14326 return -EIO; 14327 } else 14328 phba->intr_mode = intr_mode; 14329 14330 /* Restart HBA and bring it online */ 14331 lpfc_sli_brdrestart(phba); 14332 lpfc_online(phba); 14333 14334 /* Log the current active interrupt mode */ 14335 lpfc_log_intr_mode(phba, phba->intr_mode); 14336 14337 return 0; 14338 } 14339 14340 /** 14341 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover 14342 * @phba: pointer to lpfc hba data structure. 14343 * 14344 * This routine is called to prepare the SLI3 device for PCI slot recover. It 14345 * aborts all the outstanding SCSI I/Os to the pci device. 14346 **/ 14347 static void 14348 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba) 14349 { 14350 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14351 "2723 PCI channel I/O abort preparing for recovery\n"); 14352 14353 /* 14354 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 14355 * and let the SCSI mid-layer to retry them to recover. 14356 */ 14357 lpfc_sli_abort_fcp_rings(phba); 14358 } 14359 14360 /** 14361 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset 14362 * @phba: pointer to lpfc hba data structure. 14363 * 14364 * This routine is called to prepare the SLI3 device for PCI slot reset. It 14365 * disables the device interrupt and pci device, and aborts the internal FCP 14366 * pending I/Os. 
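 *
 * It is invoked from lpfc_io_error_detected_s3() once the PCI channel has
 * been reported frozen (or its state is unknown), before the PCI core
 * attempts a slot reset.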
14367 **/ 14368 static void 14369 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) 14370 { 14371 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14372 "2710 PCI channel disable preparing for reset\n"); 14373 14374 /* Block any management I/Os to the device */ 14375 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); 14376 14377 /* Block all SCSI devices' I/Os on the host */ 14378 lpfc_scsi_dev_block(phba); 14379 14380 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 14381 lpfc_sli_flush_io_rings(phba); 14382 14383 /* stop all timers */ 14384 lpfc_stop_hba_timers(phba); 14385 14386 /* Disable interrupt and pci device */ 14387 lpfc_sli_disable_intr(phba); 14388 pci_disable_device(phba->pcidev); 14389 } 14390 14391 /** 14392 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable 14393 * @phba: pointer to lpfc hba data structure. 14394 * 14395 * This routine is called to prepare the SLI3 device for PCI slot permanently 14396 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 14397 * pending I/Os. 14398 **/ 14399 static void 14400 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba) 14401 { 14402 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14403 "2711 PCI channel permanent disable for failure\n"); 14404 /* Block all SCSI devices' I/Os on the host */ 14405 lpfc_scsi_dev_block(phba); 14406 lpfc_sli4_prep_dev_for_reset(phba); 14407 14408 /* stop all timers */ 14409 lpfc_stop_hba_timers(phba); 14410 14411 /* Clean up all driver's outstanding SCSI I/Os */ 14412 lpfc_sli_flush_io_rings(phba); 14413 } 14414 14415 /** 14416 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error 14417 * @pdev: pointer to PCI device. 14418 * @state: the current PCI connection state. 14419 * 14420 * This routine is called from the PCI subsystem for I/O error handling to 14421 * device with SLI-3 interface spec. This function is called by the PCI 14422 * subsystem after a PCI bus error affecting this device has been detected. 14423 * When this function is invoked, it will need to stop all the I/Os and 14424 * interrupt(s) to the device. Once that is done, it will return 14425 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery 14426 * as desired. 
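 *
 * This SLI-3 handler is reached through the generic entry points registered
 * near the end of this file:
 *
 *	static const struct pci_error_handlers lpfc_err_handler = {
 *		.error_detected = lpfc_io_error_detected,
 *		.slot_reset = lpfc_io_slot_reset,
 *		.resume = lpfc_io_resume,
 *	};
 *
 * so the usual AER recovery sequence for this device is roughly
 * error_detected() -> slot_reset() -> resume().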
14427 * 14428 * Return codes 14429 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link 14430 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 14431 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 14432 **/ 14433 static pci_ers_result_t 14434 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) 14435 { 14436 struct Scsi_Host *shost = pci_get_drvdata(pdev); 14437 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 14438 14439 switch (state) { 14440 case pci_channel_io_normal: 14441 /* Non-fatal error, prepare for recovery */ 14442 lpfc_sli_prep_dev_for_recover(phba); 14443 return PCI_ERS_RESULT_CAN_RECOVER; 14444 case pci_channel_io_frozen: 14445 /* Fatal error, prepare for slot reset */ 14446 lpfc_sli_prep_dev_for_reset(phba); 14447 return PCI_ERS_RESULT_NEED_RESET; 14448 case pci_channel_io_perm_failure: 14449 /* Permanent failure, prepare for device down */ 14450 lpfc_sli_prep_dev_for_perm_failure(phba); 14451 return PCI_ERS_RESULT_DISCONNECT; 14452 default: 14453 /* Unknown state, prepare and request slot reset */ 14454 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14455 "0472 Unknown PCI error state: x%x\n", state); 14456 lpfc_sli_prep_dev_for_reset(phba); 14457 return PCI_ERS_RESULT_NEED_RESET; 14458 } 14459 } 14460 14461 /** 14462 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch. 14463 * @pdev: pointer to PCI device. 14464 * 14465 * This routine is called from the PCI subsystem for error handling to 14466 * device with SLI-3 interface spec. This is called after PCI bus has been 14467 * reset to restart the PCI card from scratch, as if from a cold-boot. 14468 * During the PCI subsystem error recovery, after driver returns 14469 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 14470 * recovery and then call this routine before calling the .resume method 14471 * to recover the device. This function will initialize the HBA device, 14472 * enable the interrupt, but it will just put the HBA to offline state 14473 * without passing any I/O traffic. 14474 * 14475 * Return codes 14476 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 14477 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 14478 */ 14479 static pci_ers_result_t 14480 lpfc_io_slot_reset_s3(struct pci_dev *pdev) 14481 { 14482 struct Scsi_Host *shost = pci_get_drvdata(pdev); 14483 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 14484 struct lpfc_sli *psli = &phba->sli; 14485 uint32_t intr_mode; 14486 14487 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 14488 if (pci_enable_device_mem(pdev)) { 14489 printk(KERN_ERR "lpfc: Cannot re-enable " 14490 "PCI device after reset.\n"); 14491 return PCI_ERS_RESULT_DISCONNECT; 14492 } 14493 14494 pci_restore_state(pdev); 14495 14496 /* 14497 * As the new kernel behavior of pci_restore_state() API call clears 14498 * device saved_state flag, need to save the restored state again. 
14499 */ 14500 pci_save_state(pdev); 14501 14502 if (pdev->is_busmaster) 14503 pci_set_master(pdev); 14504 14505 spin_lock_irq(&phba->hbalock); 14506 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 14507 spin_unlock_irq(&phba->hbalock); 14508 14509 /* Configure and enable interrupt */ 14510 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 14511 if (intr_mode == LPFC_INTR_ERROR) { 14512 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14513 "0427 Cannot re-enable interrupt after " 14514 "slot reset.\n"); 14515 return PCI_ERS_RESULT_DISCONNECT; 14516 } else 14517 phba->intr_mode = intr_mode; 14518 14519 /* Take device offline, it will perform cleanup */ 14520 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 14521 lpfc_offline(phba); 14522 lpfc_sli_brdrestart(phba); 14523 14524 /* Log the current active interrupt mode */ 14525 lpfc_log_intr_mode(phba, phba->intr_mode); 14526 14527 return PCI_ERS_RESULT_RECOVERED; 14528 } 14529 14530 /** 14531 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device. 14532 * @pdev: pointer to PCI device 14533 * 14534 * This routine is called from the PCI subsystem for error handling to device 14535 * with SLI-3 interface spec. It is called when kernel error recovery tells 14536 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 14537 * error recovery. After this call, traffic can start to flow from this device 14538 * again. 14539 */ 14540 static void 14541 lpfc_io_resume_s3(struct pci_dev *pdev) 14542 { 14543 struct Scsi_Host *shost = pci_get_drvdata(pdev); 14544 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 14545 14546 /* Bring device online, it will be no-op for non-fatal error resume */ 14547 lpfc_online(phba); 14548 } 14549 14550 /** 14551 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve 14552 * @phba: pointer to lpfc hba data structure. 14553 * 14554 * returns the number of ELS/CT IOCBs to reserve 14555 **/ 14556 int 14557 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) 14558 { 14559 int max_xri = phba->sli4_hba.max_cfg_param.max_xri; 14560 14561 if (phba->sli_rev == LPFC_SLI_REV4) { 14562 if (max_xri <= 100) 14563 return 10; 14564 else if (max_xri <= 256) 14565 return 25; 14566 else if (max_xri <= 512) 14567 return 50; 14568 else if (max_xri <= 1024) 14569 return 100; 14570 else if (max_xri <= 1536) 14571 return 150; 14572 else if (max_xri <= 2048) 14573 return 200; 14574 else 14575 return 250; 14576 } else 14577 return 0; 14578 } 14579 14580 /** 14581 * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve 14582 * @phba: pointer to lpfc hba data structure. 14583 * 14584 * returns the number of ELS/CT + NVMET IOCBs to reserve 14585 **/ 14586 int 14587 lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba) 14588 { 14589 int max_xri = lpfc_sli4_get_els_iocb_cnt(phba); 14590 14591 if (phba->nvmet_support) 14592 max_xri += LPFC_NVMET_BUF_POST; 14593 return max_xri; 14594 } 14595 14596 14597 static int 14598 lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset, 14599 uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize, 14600 const struct firmware *fw) 14601 { 14602 int rc; 14603 u8 sli_family; 14604 14605 sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf); 14606 /* Three cases: (1) FW was not supported on the detected adapter. 14607 * (2) FW update has been locked out administratively. 14608 * (3) Some other error during FW update. 
14609 * In each case, an unmaskable message is written to the console 14610 * for admin diagnosis. 14611 */ 14612 if (offset == ADD_STATUS_FW_NOT_SUPPORTED || 14613 (sli_family == LPFC_SLI_INTF_FAMILY_G6 && 14614 magic_number != MAGIC_NUMBER_G6) || 14615 (sli_family == LPFC_SLI_INTF_FAMILY_G7 && 14616 magic_number != MAGIC_NUMBER_G7) || 14617 (sli_family == LPFC_SLI_INTF_FAMILY_G7P && 14618 magic_number != MAGIC_NUMBER_G7P)) { 14619 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14620 "3030 This firmware version is not supported on" 14621 " this HBA model. Device:%x Magic:%x Type:%x " 14622 "ID:%x Size %d %zd\n", 14623 phba->pcidev->device, magic_number, ftype, fid, 14624 fsize, fw->size); 14625 rc = -EINVAL; 14626 } else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) { 14627 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14628 "3021 Firmware downloads have been prohibited " 14629 "by a system configuration setting on " 14630 "Device:%x Magic:%x Type:%x ID:%x Size %d " 14631 "%zd\n", 14632 phba->pcidev->device, magic_number, ftype, fid, 14633 fsize, fw->size); 14634 rc = -EACCES; 14635 } else { 14636 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14637 "3022 FW Download failed. Add Status x%x " 14638 "Device:%x Magic:%x Type:%x ID:%x Size %d " 14639 "%zd\n", 14640 offset, phba->pcidev->device, magic_number, 14641 ftype, fid, fsize, fw->size); 14642 rc = -EIO; 14643 } 14644 return rc; 14645 } 14646 14647 /** 14648 * lpfc_write_firmware - attempt to write a firmware image to the port 14649 * @fw: pointer to firmware image returned from request_firmware. 14650 * @context: pointer to firmware image returned from request_firmware. 14651 * 14652 **/ 14653 static void 14654 lpfc_write_firmware(const struct firmware *fw, void *context) 14655 { 14656 struct lpfc_hba *phba = (struct lpfc_hba *)context; 14657 char fwrev[FW_REV_STR_SIZE]; 14658 struct lpfc_grp_hdr *image; 14659 struct list_head dma_buffer_list; 14660 int i, rc = 0; 14661 struct lpfc_dmabuf *dmabuf, *next; 14662 uint32_t offset = 0, temp_offset = 0; 14663 uint32_t magic_number, ftype, fid, fsize; 14664 14665 /* It can be null in no-wait mode, sanity check */ 14666 if (!fw) { 14667 rc = -ENXIO; 14668 goto out; 14669 } 14670 image = (struct lpfc_grp_hdr *)fw->data; 14671 14672 magic_number = be32_to_cpu(image->magic_number); 14673 ftype = bf_get_be32(lpfc_grp_hdr_file_type, image); 14674 fid = bf_get_be32(lpfc_grp_hdr_id, image); 14675 fsize = be32_to_cpu(image->size); 14676 14677 INIT_LIST_HEAD(&dma_buffer_list); 14678 lpfc_decode_firmware_rev(phba, fwrev, 1); 14679 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) { 14680 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14681 "3023 Updating Firmware, Current Version:%s " 14682 "New Version:%s\n", 14683 fwrev, image->revision); 14684 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) { 14685 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), 14686 GFP_KERNEL); 14687 if (!dmabuf) { 14688 rc = -ENOMEM; 14689 goto release_out; 14690 } 14691 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 14692 SLI4_PAGE_SIZE, 14693 &dmabuf->phys, 14694 GFP_KERNEL); 14695 if (!dmabuf->virt) { 14696 kfree(dmabuf); 14697 rc = -ENOMEM; 14698 goto release_out; 14699 } 14700 list_add_tail(&dmabuf->list, &dma_buffer_list); 14701 } 14702 while (offset < fw->size) { 14703 temp_offset = offset; 14704 list_for_each_entry(dmabuf, &dma_buffer_list, list) { 14705 if (temp_offset + SLI4_PAGE_SIZE > fw->size) { 14706 memcpy(dmabuf->virt, 14707 fw->data + temp_offset, 14708 fw->size - temp_offset); 
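				/* Tail of the image: only fw->size - temp_offset
				 * bytes were left, so this DMA buffer is the
				 * last one filled for this pass.
				 */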
14709 temp_offset = fw->size; 14710 break; 14711 } 14712 memcpy(dmabuf->virt, fw->data + temp_offset, 14713 SLI4_PAGE_SIZE); 14714 temp_offset += SLI4_PAGE_SIZE; 14715 } 14716 rc = lpfc_wr_object(phba, &dma_buffer_list, 14717 (fw->size - offset), &offset); 14718 if (rc) { 14719 rc = lpfc_log_write_firmware_error(phba, offset, 14720 magic_number, 14721 ftype, 14722 fid, 14723 fsize, 14724 fw); 14725 goto release_out; 14726 } 14727 } 14728 rc = offset; 14729 } else 14730 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14731 "3029 Skipped Firmware update, Current " 14732 "Version:%s New Version:%s\n", 14733 fwrev, image->revision); 14734 14735 release_out: 14736 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) { 14737 list_del(&dmabuf->list); 14738 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE, 14739 dmabuf->virt, dmabuf->phys); 14740 kfree(dmabuf); 14741 } 14742 release_firmware(fw); 14743 out: 14744 if (rc < 0) 14745 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14746 "3062 Firmware update error, status %d.\n", rc); 14747 else 14748 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14749 "3024 Firmware update success: size %d.\n", rc); 14750 } 14751 14752 /** 14753 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade 14754 * @phba: pointer to lpfc hba data structure. 14755 * @fw_upgrade: which firmware to update. 14756 * 14757 * This routine is called to perform Linux generic firmware upgrade on device 14758 * that supports such feature. 14759 **/ 14760 int 14761 lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade) 14762 { 14763 uint8_t file_name[ELX_MODEL_NAME_SIZE]; 14764 int ret; 14765 const struct firmware *fw; 14766 14767 /* Only supported on SLI4 interface type 2 for now */ 14768 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 14769 LPFC_SLI_INTF_IF_TYPE_2) 14770 return -EPERM; 14771 14772 snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName); 14773 14774 if (fw_upgrade == INT_FW_UPGRADE) { 14775 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT, 14776 file_name, &phba->pcidev->dev, 14777 GFP_KERNEL, (void *)phba, 14778 lpfc_write_firmware); 14779 } else if (fw_upgrade == RUN_FW_UPGRADE) { 14780 ret = request_firmware(&fw, file_name, &phba->pcidev->dev); 14781 if (!ret) 14782 lpfc_write_firmware(fw, (void *)phba); 14783 } else { 14784 ret = -EINVAL; 14785 } 14786 14787 return ret; 14788 } 14789 14790 /** 14791 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys 14792 * @pdev: pointer to PCI device 14793 * @pid: pointer to PCI device identifier 14794 * 14795 * This routine is called from the kernel's PCI subsystem to device with 14796 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 14797 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 14798 * information of the device and driver to see if the driver state that it 14799 * can support this kind of device. If the match is successful, the driver 14800 * core invokes this routine. If this routine determines it can claim the HBA, 14801 * it does all the initialization that it needs to do to handle the HBA 14802 * properly. 
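 *
 * Whether this SLI-4 probe or the SLI-3 probe is used for a given adapter
 * is decided in lpfc_pci_probe_one(), which reads the SLI interface
 * register from PCI config space before dispatching, roughly:
 *
 *	pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0);
 *	if (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4)
 *		rc = lpfc_pci_probe_one_s4(pdev, pid);
 *	else
 *		rc = lpfc_pci_probe_one_s3(pdev, pid);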
14803 * 14804 * Return code 14805 * 0 - driver can claim the device 14806 * negative value - driver can not claim the device 14807 **/ 14808 static int 14809 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) 14810 { 14811 struct lpfc_hba *phba; 14812 struct lpfc_vport *vport = NULL; 14813 struct Scsi_Host *shost = NULL; 14814 int error; 14815 uint32_t cfg_mode, intr_mode; 14816 14817 /* Allocate memory for HBA structure */ 14818 phba = lpfc_hba_alloc(pdev); 14819 if (!phba) 14820 return -ENOMEM; 14821 14822 INIT_LIST_HEAD(&phba->poll_list); 14823 14824 /* Perform generic PCI device enabling operation */ 14825 error = lpfc_enable_pci_dev(phba); 14826 if (error) 14827 goto out_free_phba; 14828 14829 /* Set up SLI API function jump table for PCI-device group-1 HBAs */ 14830 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC); 14831 if (error) 14832 goto out_disable_pci_dev; 14833 14834 /* Set up SLI-4 specific device PCI memory space */ 14835 error = lpfc_sli4_pci_mem_setup(phba); 14836 if (error) { 14837 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14838 "1410 Failed to set up pci memory space.\n"); 14839 goto out_disable_pci_dev; 14840 } 14841 14842 /* Set up SLI-4 Specific device driver resources */ 14843 error = lpfc_sli4_driver_resource_setup(phba); 14844 if (error) { 14845 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14846 "1412 Failed to set up driver resource.\n"); 14847 goto out_unset_pci_mem_s4; 14848 } 14849 14850 INIT_LIST_HEAD(&phba->active_rrq_list); 14851 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list); 14852 14853 /* Set up common device driver resources */ 14854 error = lpfc_setup_driver_resource_phase2(phba); 14855 if (error) { 14856 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14857 "1414 Failed to set up driver resource.\n"); 14858 goto out_unset_driver_resource_s4; 14859 } 14860 14861 /* Get the default values for Model Name and Description */ 14862 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 14863 14864 /* Now, trying to enable interrupt and bring up the device */ 14865 cfg_mode = phba->cfg_use_msi; 14866 14867 /* Put device to a known state before enabling interrupt */ 14868 phba->pport = NULL; 14869 lpfc_stop_port(phba); 14870 14871 /* Init cpu_map array */ 14872 lpfc_cpu_map_array_init(phba); 14873 14874 /* Init hba_eq_hdl array */ 14875 lpfc_hba_eq_hdl_array_init(phba); 14876 14877 /* Configure and enable interrupt */ 14878 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); 14879 if (intr_mode == LPFC_INTR_ERROR) { 14880 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14881 "0426 Failed to enable interrupt.\n"); 14882 error = -ENODEV; 14883 goto out_unset_driver_resource; 14884 } 14885 /* Default to single EQ for non-MSI-X */ 14886 if (phba->intr_type != MSIX) { 14887 phba->cfg_irq_chann = 1; 14888 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 14889 if (phba->nvmet_support) 14890 phba->cfg_nvmet_mrq = 1; 14891 } 14892 } 14893 lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann); 14894 14895 /* Create SCSI host to the physical port */ 14896 error = lpfc_create_shost(phba); 14897 if (error) { 14898 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14899 "1415 Failed to create scsi host.\n"); 14900 goto out_disable_intr; 14901 } 14902 vport = phba->pport; 14903 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 14904 14905 /* Configure sysfs attributes */ 14906 error = lpfc_alloc_sysfs_attr(vport); 14907 if (error) { 14908 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14909 "1416 Failed to allocate sysfs attr\n"); 14910 goto 
out_destroy_shost; 14911 } 14912 14913 /* Set up SLI-4 HBA */ 14914 if (lpfc_sli4_hba_setup(phba)) { 14915 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14916 "1421 Failed to set up hba\n"); 14917 error = -ENODEV; 14918 goto out_free_sysfs_attr; 14919 } 14920 14921 /* Log the current active interrupt mode */ 14922 phba->intr_mode = intr_mode; 14923 lpfc_log_intr_mode(phba, intr_mode); 14924 14925 /* Perform post initialization setup */ 14926 lpfc_post_init_setup(phba); 14927 14928 /* NVME support in FW earlier in the driver load corrects the 14929 * FC4 type making a check for nvme_support unnecessary. 14930 */ 14931 if (phba->nvmet_support == 0) { 14932 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 14933 /* Create NVME binding with nvme_fc_transport. This 14934 * ensures the vport is initialized. If the localport 14935 * create fails, it should not unload the driver to 14936 * support field issues. 14937 */ 14938 error = lpfc_nvme_create_localport(vport); 14939 if (error) { 14940 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14941 "6004 NVME registration " 14942 "failed, error x%x\n", 14943 error); 14944 } 14945 } 14946 } 14947 14948 /* check for firmware upgrade or downgrade */ 14949 if (phba->cfg_request_firmware_upgrade) 14950 lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE); 14951 14952 /* Check if there are static vports to be created. */ 14953 lpfc_create_static_vport(phba); 14954 14955 timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0); 14956 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp); 14957 14958 return 0; 14959 14960 out_free_sysfs_attr: 14961 lpfc_free_sysfs_attr(vport); 14962 out_destroy_shost: 14963 lpfc_destroy_shost(phba); 14964 out_disable_intr: 14965 lpfc_sli4_disable_intr(phba); 14966 out_unset_driver_resource: 14967 lpfc_unset_driver_resource_phase2(phba); 14968 out_unset_driver_resource_s4: 14969 lpfc_sli4_driver_resource_unset(phba); 14970 out_unset_pci_mem_s4: 14971 lpfc_sli4_pci_mem_unset(phba); 14972 out_disable_pci_dev: 14973 lpfc_disable_pci_dev(phba); 14974 if (shost) 14975 scsi_host_put(shost); 14976 out_free_phba: 14977 lpfc_hba_free(phba); 14978 return error; 14979 } 14980 14981 /** 14982 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem 14983 * @pdev: pointer to PCI device 14984 * 14985 * This routine is called from the kernel's PCI subsystem to device with 14986 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 14987 * removed from PCI bus, it performs all the necessary cleanup for the HBA 14988 * device to be removed from the PCI subsystem properly. 
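 *
 * It is reached through the lpfc_pci_remove_one() dispatcher, which is
 * registered as both the .remove and the .shutdown handler of lpfc_driver,
 * so the same teardown path runs on driver unload and on system shutdown.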
14989 **/ 14990 static void 14991 lpfc_pci_remove_one_s4(struct pci_dev *pdev) 14992 { 14993 struct Scsi_Host *shost = pci_get_drvdata(pdev); 14994 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 14995 struct lpfc_vport **vports; 14996 struct lpfc_hba *phba = vport->phba; 14997 int i; 14998 14999 /* Mark the device unloading flag */ 15000 spin_lock_irq(&phba->hbalock); 15001 vport->load_flag |= FC_UNLOADING; 15002 spin_unlock_irq(&phba->hbalock); 15003 if (phba->cgn_i) 15004 lpfc_unreg_congestion_buf(phba); 15005 15006 lpfc_free_sysfs_attr(vport); 15007 15008 /* Release all the vports against this physical port */ 15009 vports = lpfc_create_vport_work_array(phba); 15010 if (vports != NULL) 15011 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 15012 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) 15013 continue; 15014 fc_vport_terminate(vports[i]->fc_vport); 15015 } 15016 lpfc_destroy_vport_work_array(phba, vports); 15017 15018 /* Remove FC host with the physical port */ 15019 fc_remove_host(shost); 15020 scsi_remove_host(shost); 15021 15022 /* Perform ndlp cleanup on the physical port. The nvme and nvmet 15023 * localports are destroyed after to cleanup all transport memory. 15024 */ 15025 lpfc_cleanup(vport); 15026 lpfc_nvmet_destroy_targetport(phba); 15027 lpfc_nvme_destroy_localport(vport); 15028 15029 /* De-allocate multi-XRI pools */ 15030 if (phba->cfg_xri_rebalancing) 15031 lpfc_destroy_multixri_pools(phba); 15032 15033 /* 15034 * Bring down the SLI Layer. This step disables all interrupts, 15035 * clears the rings, discards all mailbox commands, and resets 15036 * the HBA FCoE function. 15037 */ 15038 lpfc_debugfs_terminate(vport); 15039 15040 lpfc_stop_hba_timers(phba); 15041 spin_lock_irq(&phba->port_list_lock); 15042 list_del_init(&vport->listentry); 15043 spin_unlock_irq(&phba->port_list_lock); 15044 15045 /* Perform scsi free before driver resource_unset since scsi 15046 * buffers are released to their corresponding pools here. 15047 */ 15048 lpfc_io_free(phba); 15049 lpfc_free_iocb_list(phba); 15050 lpfc_sli4_hba_unset(phba); 15051 15052 lpfc_unset_driver_resource_phase2(phba); 15053 lpfc_sli4_driver_resource_unset(phba); 15054 15055 /* Unmap adapter Control and Doorbell registers */ 15056 lpfc_sli4_pci_mem_unset(phba); 15057 15058 /* Release PCI resources and disable device's PCI function */ 15059 scsi_host_put(shost); 15060 lpfc_disable_pci_dev(phba); 15061 15062 /* Finally, free the driver's device data structure */ 15063 lpfc_hba_free(phba); 15064 15065 return; 15066 } 15067 15068 /** 15069 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt 15070 * @dev_d: pointer to device 15071 * 15072 * This routine is called from the kernel's PCI subsystem to support system 15073 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes 15074 * this method, it quiesces the device by stopping the driver's worker 15075 * thread for the device, turning off device's interrupt and DMA, and bring 15076 * the device offline. Note that as the driver implements the minimum PM 15077 * requirements to a power-aware driver's PM support for suspend/resume -- all 15078 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() 15079 * method call will be treated as SUSPEND and the driver will fully 15080 * reinitialize its device during resume() method call, the driver will set 15081 * device to PCI_D3hot state in PCI config space instead of setting it 15082 * according to the @msg provided by the PM. 
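 *
 * This callback is reached through the generic lpfc_pci_suspend_one()
 * dispatcher, which is wired into the PCI driver near the end of this file:
 *
 *	static SIMPLE_DEV_PM_OPS(lpfc_pci_pm_ops_one,
 *				 lpfc_pci_suspend_one,
 *				 lpfc_pci_resume_one);
 *	...
 *	.driver.pm = &lpfc_pci_pm_ops_one,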
 *
 * Return code
 * 0 - driver suspended the device
 * Error otherwise
 **/
static int __maybe_unused
lpfc_pci_suspend_one_s4(struct device *dev_d)
{
	struct Scsi_Host *shost = dev_get_drvdata(dev_d);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2843 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);

	return 0;
}

/**
 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
 * @dev_d: pointer to device
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) on a device with the SLI-4 interface spec. When PM
 * invokes this method, it restores the device's PCI config space state and
 * fully reinitializes the device and brings it online. Note that the driver
 * implements only the minimum PM requirements of a power-aware driver for
 * suspend/resume: all possible PM messages (SUSPEND, HIBERNATE, FREEZE) to
 * the suspend() method call are treated as SUSPEND and the driver fully
 * reinitializes its device during the resume() method call; the device is
 * set to PCI_D0 directly in PCI config space before restoring the state.
 *
 * Return code
 * 0 - driver resumed the device
 * Error otherwise
 **/
static int __maybe_unused
lpfc_pci_resume_one_s4(struct device *dev_d)
{
	struct Scsi_Host *shost = dev_get_drvdata(dev_d);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0292 PCI device Power Management resume.\n");

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					"lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0293 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0294 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}

/**
 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot recover. It
 * aborts all the outstanding SCSI I/Os to the pci device.
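 *
 * It is invoked from lpfc_io_error_detected_s4() for the
 * pci_channel_io_normal (non-fatal) case, after which
 * PCI_ERS_RESULT_CAN_RECOVER is returned to the PCI core.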
15175 **/ 15176 static void 15177 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba) 15178 { 15179 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15180 "2828 PCI channel I/O abort preparing for recovery\n"); 15181 /* 15182 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 15183 * and let the SCSI mid-layer to retry them to recover. 15184 */ 15185 lpfc_sli_abort_fcp_rings(phba); 15186 } 15187 15188 /** 15189 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset 15190 * @phba: pointer to lpfc hba data structure. 15191 * 15192 * This routine is called to prepare the SLI4 device for PCI slot reset. It 15193 * disables the device interrupt and pci device, and aborts the internal FCP 15194 * pending I/Os. 15195 **/ 15196 static void 15197 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) 15198 { 15199 int offline = pci_channel_offline(phba->pcidev); 15200 15201 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15202 "2826 PCI channel disable preparing for reset offline" 15203 " %d\n", offline); 15204 15205 /* Block any management I/Os to the device */ 15206 lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT); 15207 15208 15209 /* HBA_PCI_ERR was set in io_error_detect */ 15210 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 15211 /* Flush all driver's outstanding I/Os as we are to reset */ 15212 lpfc_sli_flush_io_rings(phba); 15213 lpfc_offline(phba); 15214 15215 /* stop all timers */ 15216 lpfc_stop_hba_timers(phba); 15217 15218 lpfc_sli4_queue_destroy(phba); 15219 /* Disable interrupt and pci device */ 15220 lpfc_sli4_disable_intr(phba); 15221 pci_disable_device(phba->pcidev); 15222 } 15223 15224 /** 15225 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable 15226 * @phba: pointer to lpfc hba data structure. 15227 * 15228 * This routine is called to prepare the SLI4 device for PCI slot permanently 15229 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 15230 * pending I/Os. 15231 **/ 15232 static void 15233 lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba) 15234 { 15235 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15236 "2827 PCI channel permanent disable for failure\n"); 15237 15238 /* Block all SCSI devices' I/Os on the host */ 15239 lpfc_scsi_dev_block(phba); 15240 15241 /* stop all timers */ 15242 lpfc_stop_hba_timers(phba); 15243 15244 /* Clean up all driver's outstanding I/Os */ 15245 lpfc_sli_flush_io_rings(phba); 15246 } 15247 15248 /** 15249 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device 15250 * @pdev: pointer to PCI device. 15251 * @state: the current PCI connection state. 15252 * 15253 * This routine is called from the PCI subsystem for error handling to device 15254 * with SLI-4 interface spec. This function is called by the PCI subsystem 15255 * after a PCI bus error affecting this device has been detected. When this 15256 * function is invoked, it will need to stop all the I/Os and interrupt(s) 15257 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET 15258 * for the PCI subsystem to perform proper recovery as desired. 
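 *
 * The HBA_PCI_ERR bit in phba->bit_flags is used here to avoid preparing
 * the device for reset more than once when several error events arrive
 * before the slot reset completes; lpfc_io_slot_reset_s4() clears the bit
 * again during recovery.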
15259 * 15260 * Return codes 15261 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 15262 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 15263 **/ 15264 static pci_ers_result_t 15265 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state) 15266 { 15267 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15268 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15269 bool hba_pci_err; 15270 15271 switch (state) { 15272 case pci_channel_io_normal: 15273 /* Non-fatal error, prepare for recovery */ 15274 lpfc_sli4_prep_dev_for_recover(phba); 15275 return PCI_ERS_RESULT_CAN_RECOVER; 15276 case pci_channel_io_frozen: 15277 hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags); 15278 /* Fatal error, prepare for slot reset */ 15279 if (!hba_pci_err) 15280 lpfc_sli4_prep_dev_for_reset(phba); 15281 else 15282 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15283 "2832 Already handling PCI error " 15284 "state: x%x\n", state); 15285 return PCI_ERS_RESULT_NEED_RESET; 15286 case pci_channel_io_perm_failure: 15287 set_bit(HBA_PCI_ERR, &phba->bit_flags); 15288 /* Permanent failure, prepare for device down */ 15289 lpfc_sli4_prep_dev_for_perm_failure(phba); 15290 return PCI_ERS_RESULT_DISCONNECT; 15291 default: 15292 hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags); 15293 if (!hba_pci_err) 15294 lpfc_sli4_prep_dev_for_reset(phba); 15295 /* Unknown state, prepare and request slot reset */ 15296 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15297 "2825 Unknown PCI error state: x%x\n", state); 15298 lpfc_sli4_prep_dev_for_reset(phba); 15299 return PCI_ERS_RESULT_NEED_RESET; 15300 } 15301 } 15302 15303 /** 15304 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch 15305 * @pdev: pointer to PCI device. 15306 * 15307 * This routine is called from the PCI subsystem for error handling to device 15308 * with SLI-4 interface spec. It is called after PCI bus has been reset to 15309 * restart the PCI card from scratch, as if from a cold-boot. During the 15310 * PCI subsystem error recovery, after the driver returns 15311 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 15312 * recovery and then call this routine before calling the .resume method to 15313 * recover the device. This function will initialize the HBA device, enable 15314 * the interrupt, but it will just put the HBA to offline state without 15315 * passing any I/O traffic. 
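 *
 * The SLI function reset itself is deferred to lpfc_io_resume_s4(), since
 * it is performed through a mailbox command that requires DMA to be
 * re-enabled first.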
15316 * 15317 * Return codes 15318 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 15319 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 15320 */ 15321 static pci_ers_result_t 15322 lpfc_io_slot_reset_s4(struct pci_dev *pdev) 15323 { 15324 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15325 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15326 struct lpfc_sli *psli = &phba->sli; 15327 uint32_t intr_mode; 15328 bool hba_pci_err; 15329 15330 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 15331 if (pci_enable_device_mem(pdev)) { 15332 printk(KERN_ERR "lpfc: Cannot re-enable " 15333 "PCI device after reset.\n"); 15334 return PCI_ERS_RESULT_DISCONNECT; 15335 } 15336 15337 pci_restore_state(pdev); 15338 15339 hba_pci_err = test_and_clear_bit(HBA_PCI_ERR, &phba->bit_flags); 15340 if (!hba_pci_err) 15341 dev_info(&pdev->dev, 15342 "hba_pci_err was not set, recovering slot reset.\n"); 15343 /* 15344 * As the new kernel behavior of pci_restore_state() API call clears 15345 * device saved_state flag, need to save the restored state again. 15346 */ 15347 pci_save_state(pdev); 15348 15349 if (pdev->is_busmaster) 15350 pci_set_master(pdev); 15351 15352 spin_lock_irq(&phba->hbalock); 15353 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 15354 spin_unlock_irq(&phba->hbalock); 15355 15356 /* Init cpu_map array */ 15357 lpfc_cpu_map_array_init(phba); 15358 /* Configure and enable interrupt */ 15359 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 15360 if (intr_mode == LPFC_INTR_ERROR) { 15361 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15362 "2824 Cannot re-enable interrupt after " 15363 "slot reset.\n"); 15364 return PCI_ERS_RESULT_DISCONNECT; 15365 } else 15366 phba->intr_mode = intr_mode; 15367 lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann); 15368 15369 /* Log the current active interrupt mode */ 15370 lpfc_log_intr_mode(phba, phba->intr_mode); 15371 15372 return PCI_ERS_RESULT_RECOVERED; 15373 } 15374 15375 /** 15376 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device 15377 * @pdev: pointer to PCI device 15378 * 15379 * This routine is called from the PCI subsystem for error handling to device 15380 * with SLI-4 interface spec. It is called when kernel error recovery tells 15381 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 15382 * error recovery. After this call, traffic can start to flow from this device 15383 * again. 15384 **/ 15385 static void 15386 lpfc_io_resume_s4(struct pci_dev *pdev) 15387 { 15388 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15389 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15390 15391 /* 15392 * In case of slot reset, as function reset is performed through 15393 * mailbox command which needs DMA to be enabled, this operation 15394 * has to be moved to the io resume phase. Taking device offline 15395 * will perform the necessary cleanup. 15396 */ 15397 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) { 15398 /* Perform device reset */ 15399 lpfc_sli_brdrestart(phba); 15400 /* Bring the device back online */ 15401 lpfc_online(phba); 15402 } 15403 } 15404 15405 /** 15406 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem 15407 * @pdev: pointer to PCI device 15408 * @pid: pointer to PCI device identifier 15409 * 15410 * This routine is to be registered to the kernel's PCI subsystem. 
When an 15411 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks 15412 * at PCI device-specific information of the device and driver to see if the 15413 * driver state that it can support this kind of device. If the match is 15414 * successful, the driver core invokes this routine. This routine dispatches 15415 * the action to the proper SLI-3 or SLI-4 device probing routine, which will 15416 * do all the initialization that it needs to do to handle the HBA device 15417 * properly. 15418 * 15419 * Return code 15420 * 0 - driver can claim the device 15421 * negative value - driver can not claim the device 15422 **/ 15423 static int 15424 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 15425 { 15426 int rc; 15427 struct lpfc_sli_intf intf; 15428 15429 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0)) 15430 return -ENODEV; 15431 15432 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) && 15433 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4)) 15434 rc = lpfc_pci_probe_one_s4(pdev, pid); 15435 else 15436 rc = lpfc_pci_probe_one_s3(pdev, pid); 15437 15438 return rc; 15439 } 15440 15441 /** 15442 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem 15443 * @pdev: pointer to PCI device 15444 * 15445 * This routine is to be registered to the kernel's PCI subsystem. When an 15446 * Emulex HBA is removed from PCI bus, the driver core invokes this routine. 15447 * This routine dispatches the action to the proper SLI-3 or SLI-4 device 15448 * remove routine, which will perform all the necessary cleanup for the 15449 * device to be removed from the PCI subsystem properly. 15450 **/ 15451 static void 15452 lpfc_pci_remove_one(struct pci_dev *pdev) 15453 { 15454 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15455 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15456 15457 switch (phba->pci_dev_grp) { 15458 case LPFC_PCI_DEV_LP: 15459 lpfc_pci_remove_one_s3(pdev); 15460 break; 15461 case LPFC_PCI_DEV_OC: 15462 lpfc_pci_remove_one_s4(pdev); 15463 break; 15464 default: 15465 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15466 "1424 Invalid PCI device group: 0x%x\n", 15467 phba->pci_dev_grp); 15468 break; 15469 } 15470 return; 15471 } 15472 15473 /** 15474 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management 15475 * @dev: pointer to device 15476 * 15477 * This routine is to be registered to the kernel's PCI subsystem to support 15478 * system Power Management (PM). When PM invokes this method, it dispatches 15479 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will 15480 * suspend the device. 
 *
 * Return code
 * 0 - driver suspended the device
 * Error otherwise
 **/
static int __maybe_unused
lpfc_pci_suspend_one(struct device *dev)
{
	struct Scsi_Host *shost = dev_get_drvdata(dev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_suspend_one_s3(dev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_suspend_one_s4(dev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1425 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
 * @dev: pointer to device
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
 * resume the device.
 *
 * Return code
 * 0 - driver resumed the device
 * Error otherwise
 **/
static int __maybe_unused
lpfc_pci_resume_one(struct device *dev)
{
	struct Scsi_Host *shost = dev_get_drvdata(dev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_resume_one_s3(dev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_resume_one_s4(dev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1426 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called by the PCI subsystem after a PCI bus error affecting
 * this device has been detected. When this routine is invoked, it dispatches
 * the action to the proper SLI-3 or SLI-4 device error detected handling
 * routine, which will perform the proper error detected operation.
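 *
 * If the HBA is already in the error state with its I/O queues flushed
 * (HBA_IOQ_FLUSH), the routine short-circuits and simply requests a slot
 * reset without re-running the per-SLI preparation.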
15555 * 15556 * Return codes 15557 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 15558 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 15559 **/ 15560 static pci_ers_result_t 15561 lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 15562 { 15563 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15564 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15565 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 15566 15567 if (phba->link_state == LPFC_HBA_ERROR && 15568 phba->hba_flag & HBA_IOQ_FLUSH) 15569 return PCI_ERS_RESULT_NEED_RESET; 15570 15571 switch (phba->pci_dev_grp) { 15572 case LPFC_PCI_DEV_LP: 15573 rc = lpfc_io_error_detected_s3(pdev, state); 15574 break; 15575 case LPFC_PCI_DEV_OC: 15576 rc = lpfc_io_error_detected_s4(pdev, state); 15577 break; 15578 default: 15579 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15580 "1427 Invalid PCI device group: 0x%x\n", 15581 phba->pci_dev_grp); 15582 break; 15583 } 15584 return rc; 15585 } 15586 15587 /** 15588 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch 15589 * @pdev: pointer to PCI device. 15590 * 15591 * This routine is registered to the PCI subsystem for error handling. This 15592 * function is called after PCI bus has been reset to restart the PCI card 15593 * from scratch, as if from a cold-boot. When this routine is invoked, it 15594 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling 15595 * routine, which will perform the proper device reset. 15596 * 15597 * Return codes 15598 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 15599 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 15600 **/ 15601 static pci_ers_result_t 15602 lpfc_io_slot_reset(struct pci_dev *pdev) 15603 { 15604 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15605 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15606 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 15607 15608 switch (phba->pci_dev_grp) { 15609 case LPFC_PCI_DEV_LP: 15610 rc = lpfc_io_slot_reset_s3(pdev); 15611 break; 15612 case LPFC_PCI_DEV_OC: 15613 rc = lpfc_io_slot_reset_s4(pdev); 15614 break; 15615 default: 15616 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15617 "1428 Invalid PCI device group: 0x%x\n", 15618 phba->pci_dev_grp); 15619 break; 15620 } 15621 return rc; 15622 } 15623 15624 /** 15625 * lpfc_io_resume - lpfc method for resuming PCI I/O operation 15626 * @pdev: pointer to PCI device 15627 * 15628 * This routine is registered to the PCI subsystem for error handling. It 15629 * is called when kernel error recovery tells the lpfc driver that it is 15630 * OK to resume normal PCI operation after PCI bus error recovery. When 15631 * this routine is invoked, it dispatches the action to the proper SLI-3 15632 * or SLI-4 device io_resume routine, which will resume the device operation. 
15633 **/ 15634 static void 15635 lpfc_io_resume(struct pci_dev *pdev) 15636 { 15637 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15638 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15639 15640 switch (phba->pci_dev_grp) { 15641 case LPFC_PCI_DEV_LP: 15642 lpfc_io_resume_s3(pdev); 15643 break; 15644 case LPFC_PCI_DEV_OC: 15645 lpfc_io_resume_s4(pdev); 15646 break; 15647 default: 15648 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15649 "1429 Invalid PCI device group: 0x%x\n", 15650 phba->pci_dev_grp); 15651 break; 15652 } 15653 return; 15654 } 15655 15656 /** 15657 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter 15658 * @phba: pointer to lpfc hba data structure. 15659 * 15660 * This routine checks to see if OAS is supported for this adapter. If 15661 * supported, the configure Flash Optimized Fabric flag is set. Otherwise, 15662 * the enable oas flag is cleared and the pool created for OAS device data 15663 * is destroyed. 15664 * 15665 **/ 15666 static void 15667 lpfc_sli4_oas_verify(struct lpfc_hba *phba) 15668 { 15669 15670 if (!phba->cfg_EnableXLane) 15671 return; 15672 15673 if (phba->sli4_hba.pc_sli4_params.oas_supported) { 15674 phba->cfg_fof = 1; 15675 } else { 15676 phba->cfg_fof = 0; 15677 mempool_destroy(phba->device_data_mem_pool); 15678 phba->device_data_mem_pool = NULL; 15679 } 15680 15681 return; 15682 } 15683 15684 /** 15685 * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter 15686 * @phba: pointer to lpfc hba data structure. 15687 * 15688 * This routine checks to see if RAS is supported by the adapter. Check the 15689 * function through which RAS support enablement is to be done. 15690 **/ 15691 void 15692 lpfc_sli4_ras_init(struct lpfc_hba *phba) 15693 { 15694 /* if ASIC_GEN_NUM >= 0xC) */ 15695 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 15696 LPFC_SLI_INTF_IF_TYPE_6) || 15697 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == 15698 LPFC_SLI_INTF_FAMILY_G6)) { 15699 phba->ras_fwlog.ras_hwsupport = true; 15700 if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) && 15701 phba->cfg_ras_fwlog_buffsize) 15702 phba->ras_fwlog.ras_enabled = true; 15703 else 15704 phba->ras_fwlog.ras_enabled = false; 15705 } else { 15706 phba->ras_fwlog.ras_hwsupport = false; 15707 } 15708 } 15709 15710 15711 MODULE_DEVICE_TABLE(pci, lpfc_id_table); 15712 15713 static const struct pci_error_handlers lpfc_err_handler = { 15714 .error_detected = lpfc_io_error_detected, 15715 .slot_reset = lpfc_io_slot_reset, 15716 .resume = lpfc_io_resume, 15717 }; 15718 15719 static SIMPLE_DEV_PM_OPS(lpfc_pci_pm_ops_one, 15720 lpfc_pci_suspend_one, 15721 lpfc_pci_resume_one); 15722 15723 static struct pci_driver lpfc_driver = { 15724 .name = LPFC_DRIVER_NAME, 15725 .id_table = lpfc_id_table, 15726 .probe = lpfc_pci_probe_one, 15727 .remove = lpfc_pci_remove_one, 15728 .shutdown = lpfc_pci_remove_one, 15729 .driver.pm = &lpfc_pci_pm_ops_one, 15730 .err_handler = &lpfc_err_handler, 15731 }; 15732 15733 static const struct file_operations lpfc_mgmt_fop = { 15734 .owner = THIS_MODULE, 15735 }; 15736 15737 static struct miscdevice lpfc_mgmt_dev = { 15738 .minor = MISC_DYNAMIC_MINOR, 15739 .name = "lpfcmgmt", 15740 .fops = &lpfc_mgmt_fop, 15741 }; 15742 15743 /** 15744 * lpfc_init - lpfc module initialization routine 15745 * 15746 * This routine is to be invoked when the lpfc module is loaded into the 15747 * kernel. 
The special kernel macro module_init() is used to indicate the 15748 * role of this routine to the kernel as lpfc module entry point. 15749 * 15750 * Return codes 15751 * 0 - successful 15752 * -ENOMEM - FC attach transport failed 15753 * all others - failed 15754 */ 15755 static int __init 15756 lpfc_init(void) 15757 { 15758 int error = 0; 15759 15760 pr_info(LPFC_MODULE_DESC "\n"); 15761 pr_info(LPFC_COPYRIGHT "\n"); 15762 15763 error = misc_register(&lpfc_mgmt_dev); 15764 if (error) 15765 printk(KERN_ERR "Could not register lpfcmgmt device, " 15766 "misc_register returned with status %d", error); 15767 15768 error = -ENOMEM; 15769 lpfc_transport_functions.vport_create = lpfc_vport_create; 15770 lpfc_transport_functions.vport_delete = lpfc_vport_delete; 15771 lpfc_transport_template = 15772 fc_attach_transport(&lpfc_transport_functions); 15773 if (lpfc_transport_template == NULL) 15774 goto unregister; 15775 lpfc_vport_transport_template = 15776 fc_attach_transport(&lpfc_vport_transport_functions); 15777 if (lpfc_vport_transport_template == NULL) { 15778 fc_release_transport(lpfc_transport_template); 15779 goto unregister; 15780 } 15781 lpfc_wqe_cmd_template(); 15782 lpfc_nvmet_cmd_template(); 15783 15784 /* Initialize in case vector mapping is needed */ 15785 lpfc_present_cpu = num_present_cpus(); 15786 15787 lpfc_pldv_detect = false; 15788 15789 error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, 15790 "lpfc/sli4:online", 15791 lpfc_cpu_online, lpfc_cpu_offline); 15792 if (error < 0) 15793 goto cpuhp_failure; 15794 lpfc_cpuhp_state = error; 15795 15796 error = pci_register_driver(&lpfc_driver); 15797 if (error) 15798 goto unwind; 15799 15800 return error; 15801 15802 unwind: 15803 cpuhp_remove_multi_state(lpfc_cpuhp_state); 15804 cpuhp_failure: 15805 fc_release_transport(lpfc_transport_template); 15806 fc_release_transport(lpfc_vport_transport_template); 15807 unregister: 15808 misc_deregister(&lpfc_mgmt_dev); 15809 15810 return error; 15811 } 15812 15813 void lpfc_dmp_dbg(struct lpfc_hba *phba) 15814 { 15815 unsigned int start_idx; 15816 unsigned int dbg_cnt; 15817 unsigned int temp_idx; 15818 int i; 15819 int j = 0; 15820 unsigned long rem_nsec; 15821 15822 if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0) 15823 return; 15824 15825 start_idx = (unsigned int)atomic_read(&phba->dbg_log_idx) % DBG_LOG_SZ; 15826 dbg_cnt = (unsigned int)atomic_read(&phba->dbg_log_cnt); 15827 if (!dbg_cnt) 15828 goto out; 15829 temp_idx = start_idx; 15830 if (dbg_cnt >= DBG_LOG_SZ) { 15831 dbg_cnt = DBG_LOG_SZ; 15832 temp_idx -= 1; 15833 } else { 15834 if ((start_idx + dbg_cnt) > (DBG_LOG_SZ - 1)) { 15835 temp_idx = (start_idx + dbg_cnt) % DBG_LOG_SZ; 15836 } else { 15837 if (start_idx < dbg_cnt) 15838 start_idx = DBG_LOG_SZ - (dbg_cnt - start_idx); 15839 else 15840 start_idx -= dbg_cnt; 15841 } 15842 } 15843 dev_info(&phba->pcidev->dev, "start %d end %d cnt %d\n", 15844 start_idx, temp_idx, dbg_cnt); 15845 15846 for (i = 0; i < dbg_cnt; i++) { 15847 if ((start_idx + i) < DBG_LOG_SZ) 15848 temp_idx = (start_idx + i) % DBG_LOG_SZ; 15849 else 15850 temp_idx = j++; 15851 rem_nsec = do_div(phba->dbg_log[temp_idx].t_ns, NSEC_PER_SEC); 15852 dev_info(&phba->pcidev->dev, "%d: [%5lu.%06lu] %s", 15853 temp_idx, 15854 (unsigned long)phba->dbg_log[temp_idx].t_ns, 15855 rem_nsec / 1000, 15856 phba->dbg_log[temp_idx].log); 15857 } 15858 out: 15859 atomic_set(&phba->dbg_log_cnt, 0); 15860 atomic_set(&phba->dbg_log_dmping, 0); 15861 } 15862 15863 __printf(2, 3) 15864 void lpfc_dbg_print(struct lpfc_hba *phba, const 
char *fmt, ...) 15865 { 15866 unsigned int idx; 15867 va_list args; 15868 int dbg_dmping = atomic_read(&phba->dbg_log_dmping); 15869 struct va_format vaf; 15870 15871 15872 va_start(args, fmt); 15873 if (unlikely(dbg_dmping)) { 15874 vaf.fmt = fmt; 15875 vaf.va = &args; 15876 dev_info(&phba->pcidev->dev, "%pV", &vaf); 15877 va_end(args); 15878 return; 15879 } 15880 idx = (unsigned int)atomic_fetch_add(1, &phba->dbg_log_idx) % 15881 DBG_LOG_SZ; 15882 15883 atomic_inc(&phba->dbg_log_cnt); 15884 15885 vscnprintf(phba->dbg_log[idx].log, 15886 sizeof(phba->dbg_log[idx].log), fmt, args); 15887 va_end(args); 15888 15889 phba->dbg_log[idx].t_ns = local_clock(); 15890 } 15891 15892 /** 15893 * lpfc_exit - lpfc module removal routine 15894 * 15895 * This routine is invoked when the lpfc module is removed from the kernel. 15896 * The special kernel macro module_exit() is used to indicate the role of 15897 * this routine to the kernel as lpfc module exit point. 15898 */ 15899 static void __exit 15900 lpfc_exit(void) 15901 { 15902 misc_deregister(&lpfc_mgmt_dev); 15903 pci_unregister_driver(&lpfc_driver); 15904 cpuhp_remove_multi_state(lpfc_cpuhp_state); 15905 fc_release_transport(lpfc_transport_template); 15906 fc_release_transport(lpfc_vport_transport_template); 15907 idr_destroy(&lpfc_hba_index); 15908 } 15909 15910 module_init(lpfc_init); 15911 module_exit(lpfc_exit); 15912 MODULE_LICENSE("GPL"); 15913 MODULE_DESCRIPTION(LPFC_MODULE_DESC); 15914 MODULE_AUTHOR("Broadcom"); 15915 MODULE_VERSION("0:" LPFC_DRIVER_VERSION); 15916