1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term * 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 * EMULEX and SLI are trademarks of Emulex. * 8 * www.broadcom.com * 9 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 10 * * 11 * This program is free software; you can redistribute it and/or * 12 * modify it under the terms of version 2 of the GNU General * 13 * Public License as published by the Free Software Foundation. * 14 * This program is distributed in the hope that it will be useful. * 15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 19 * TO BE LEGALLY INVALID. See the GNU General Public License for * 20 * more details, a copy of which can be found in the file COPYING * 21 * included with this package. * 22 *******************************************************************/ 23 24 #include <linux/blkdev.h> 25 #include <linux/delay.h> 26 #include <linux/dma-mapping.h> 27 #include <linux/idr.h> 28 #include <linux/interrupt.h> 29 #include <linux/module.h> 30 #include <linux/kthread.h> 31 #include <linux/pci.h> 32 #include <linux/spinlock.h> 33 #include <linux/ctype.h> 34 #include <linux/aer.h> 35 #include <linux/slab.h> 36 #include <linux/firmware.h> 37 #include <linux/miscdevice.h> 38 #include <linux/percpu.h> 39 #include <linux/msi.h> 40 #include <linux/irq.h> 41 #include <linux/bitops.h> 42 #include <linux/crash_dump.h> 43 #include <linux/cpu.h> 44 #include <linux/cpuhotplug.h> 45 46 #include <scsi/scsi.h> 47 #include <scsi/scsi_device.h> 48 #include <scsi/scsi_host.h> 49 #include <scsi/scsi_transport_fc.h> 50 #include <scsi/scsi_tcq.h> 51 #include <scsi/fc/fc_fs.h> 52 53 #include "lpfc_hw4.h" 54 #include "lpfc_hw.h" 55 #include "lpfc_sli.h" 56 #include "lpfc_sli4.h" 57 #include "lpfc_nl.h" 58 #include "lpfc_disc.h" 59 #include "lpfc.h" 60 #include "lpfc_scsi.h" 61 #include "lpfc_nvme.h" 62 #include "lpfc_logmsg.h" 63 #include "lpfc_crtn.h" 64 #include "lpfc_vport.h" 65 #include "lpfc_version.h" 66 #include "lpfc_ids.h" 67 68 static enum cpuhp_state lpfc_cpuhp_state; 69 /* Used when mapping IRQ vectors in a driver centric manner */ 70 static uint32_t lpfc_present_cpu; 71 static bool lpfc_pldv_detect; 72 73 static void __lpfc_cpuhp_remove(struct lpfc_hba *phba); 74 static void lpfc_cpuhp_remove(struct lpfc_hba *phba); 75 static void lpfc_cpuhp_add(struct lpfc_hba *phba); 76 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); 77 static int lpfc_post_rcv_buf(struct lpfc_hba *); 78 static int lpfc_sli4_queue_verify(struct lpfc_hba *); 79 static int lpfc_create_bootstrap_mbox(struct lpfc_hba *); 80 static int lpfc_setup_endian_order(struct lpfc_hba *); 81 static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *); 82 static void lpfc_free_els_sgl_list(struct lpfc_hba *); 83 static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *); 84 static void lpfc_init_sgl_list(struct lpfc_hba *); 85 static int lpfc_init_active_sgl_array(struct lpfc_hba *); 86 static void lpfc_free_active_sgl(struct lpfc_hba *); 87 static int lpfc_hba_down_post_s3(struct lpfc_hba *phba); 88 static int 
lpfc_hba_down_post_s4(struct lpfc_hba *phba); 89 static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *); 90 static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *); 91 static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *); 92 static void lpfc_sli4_disable_intr(struct lpfc_hba *); 93 static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t); 94 static void lpfc_sli4_oas_verify(struct lpfc_hba *phba); 95 static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int); 96 static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *); 97 static int lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *); 98 static void lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba); 99 100 static struct scsi_transport_template *lpfc_transport_template = NULL; 101 static struct scsi_transport_template *lpfc_vport_transport_template = NULL; 102 static DEFINE_IDR(lpfc_hba_index); 103 #define LPFC_NVMET_BUF_POST 254 104 static int lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport); 105 106 /** 107 * lpfc_config_port_prep - Perform lpfc initialization prior to config port 108 * @phba: pointer to lpfc hba data structure. 109 * 110 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT 111 * mailbox command. It retrieves the revision information from the HBA and 112 * collects the Vital Product Data (VPD) about the HBA for preparing the 113 * configuration of the HBA. 114 * 115 * Return codes: 116 * 0 - success. 117 * -ERESTART - requests the SLI layer to reset the HBA and try again. 118 * Any other value - indicates an error. 119 **/ 120 int 121 lpfc_config_port_prep(struct lpfc_hba *phba) 122 { 123 lpfc_vpd_t *vp = &phba->vpd; 124 int i = 0, rc; 125 LPFC_MBOXQ_t *pmb; 126 MAILBOX_t *mb; 127 char *lpfc_vpd_data = NULL; 128 uint16_t offset = 0; 129 static char licensed[56] = 130 "key unlock for use with gnu public licensed code only\0"; 131 static int init_key = 1; 132 133 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 134 if (!pmb) { 135 phba->link_state = LPFC_HBA_ERROR; 136 return -ENOMEM; 137 } 138 139 mb = &pmb->u.mb; 140 phba->link_state = LPFC_INIT_MBX_CMDS; 141 142 if (lpfc_is_LC_HBA(phba->pcidev->device)) { 143 if (init_key) { 144 uint32_t *ptext = (uint32_t *) licensed; 145 146 for (i = 0; i < 56; i += sizeof (uint32_t), ptext++) 147 *ptext = cpu_to_be32(*ptext); 148 init_key = 0; 149 } 150 151 lpfc_read_nv(phba, pmb); 152 memset((char*)mb->un.varRDnvp.rsvd3, 0, 153 sizeof (mb->un.varRDnvp.rsvd3)); 154 memcpy((char*)mb->un.varRDnvp.rsvd3, licensed, 155 sizeof (licensed)); 156 157 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 158 159 if (rc != MBX_SUCCESS) { 160 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 161 "0324 Config Port initialization " 162 "error, mbxCmd x%x READ_NVPARM, " 163 "mbxStatus x%x\n", 164 mb->mbxCommand, mb->mbxStatus); 165 mempool_free(pmb, phba->mbox_mem_pool); 166 return -ERESTART; 167 } 168 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename, 169 sizeof(phba->wwnn)); 170 memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname, 171 sizeof(phba->wwpn)); 172 } 173 174 /* 175 * Clear all option bits except LPFC_SLI3_BG_ENABLED, 176 * which was already set in lpfc_get_cfgparam() 177 */ 178 phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED; 179 180 /* Setup and issue mailbox READ REV command */ 181 lpfc_read_rev(phba, pmb); 182 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 183 if (rc != MBX_SUCCESS) { 184 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 185 "0439 Adapter failed to init, mbxCmd x%x " 186 
"READ_REV, mbxStatus x%x\n", 187 mb->mbxCommand, mb->mbxStatus); 188 mempool_free( pmb, phba->mbox_mem_pool); 189 return -ERESTART; 190 } 191 192 193 /* 194 * The value of rr must be 1 since the driver set the cv field to 1. 195 * This setting requires the FW to set all revision fields. 196 */ 197 if (mb->un.varRdRev.rr == 0) { 198 vp->rev.rBit = 0; 199 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 200 "0440 Adapter failed to init, READ_REV has " 201 "missing revision information.\n"); 202 mempool_free(pmb, phba->mbox_mem_pool); 203 return -ERESTART; 204 } 205 206 if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) { 207 mempool_free(pmb, phba->mbox_mem_pool); 208 return -EINVAL; 209 } 210 211 /* Save information as VPD data */ 212 vp->rev.rBit = 1; 213 memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t)); 214 vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev; 215 memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16); 216 vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev; 217 memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16); 218 vp->rev.biuRev = mb->un.varRdRev.biuRev; 219 vp->rev.smRev = mb->un.varRdRev.smRev; 220 vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev; 221 vp->rev.endecRev = mb->un.varRdRev.endecRev; 222 vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh; 223 vp->rev.fcphLow = mb->un.varRdRev.fcphLow; 224 vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh; 225 vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow; 226 vp->rev.postKernRev = mb->un.varRdRev.postKernRev; 227 vp->rev.opFwRev = mb->un.varRdRev.opFwRev; 228 229 /* If the sli feature level is less then 9, we must 230 * tear down all RPIs and VPIs on link down if NPIV 231 * is enabled. 232 */ 233 if (vp->rev.feaLevelHigh < 9) 234 phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN; 235 236 if (lpfc_is_LC_HBA(phba->pcidev->device)) 237 memcpy(phba->RandomData, (char *)&mb->un.varWords[24], 238 sizeof (phba->RandomData)); 239 240 /* Get adapter VPD information */ 241 lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL); 242 if (!lpfc_vpd_data) 243 goto out_free_mbox; 244 do { 245 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD); 246 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 247 248 if (rc != MBX_SUCCESS) { 249 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 250 "0441 VPD not present on adapter, " 251 "mbxCmd x%x DUMP VPD, mbxStatus x%x\n", 252 mb->mbxCommand, mb->mbxStatus); 253 mb->un.varDmp.word_cnt = 0; 254 } 255 /* dump mem may return a zero when finished or we got a 256 * mailbox error, either way we are done. 257 */ 258 if (mb->un.varDmp.word_cnt == 0) 259 break; 260 261 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset) 262 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset; 263 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 264 lpfc_vpd_data + offset, 265 mb->un.varDmp.word_cnt); 266 offset += mb->un.varDmp.word_cnt; 267 } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE); 268 269 lpfc_parse_vpd(phba, lpfc_vpd_data, offset); 270 271 kfree(lpfc_vpd_data); 272 out_free_mbox: 273 mempool_free(pmb, phba->mbox_mem_pool); 274 return 0; 275 } 276 277 /** 278 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd 279 * @phba: pointer to lpfc hba data structure. 280 * @pmboxq: pointer to the driver internal queue element for mailbox command. 281 * 282 * This is the completion handler for driver's configuring asynchronous event 283 * mailbox command to the device. 
 * If the mailbox command returns successfully, it will set the internal
 * async event support flag to 1; otherwise, it will set the internal
 * async event support flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command for getting
 * wake up parameters. When this command completes, the response contains the
 * Option ROM version of the HBA. This function translates the version number
 * into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
			 prg->ver, prg->rev, prg->lev);
	else
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
			 prg->ver, prg->rev, prg->lev,
			 dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename and fc_portname
 * @vport: pointer to lpfc vport data structure.
 *
 * Return codes
 *   None.
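 *
 * If the vport's node and port names are not yet set they are taken from
 * the service parameters; otherwise the driver's names are copied back
 * into the service parameters. A port name that differs from the one in
 * the service parameters also sets FAWWPN_PARAM_CHG so the prior login
 * can be unregistered (see the copy logic below).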
349 **/ 350 void 351 lpfc_update_vport_wwn(struct lpfc_vport *vport) 352 { 353 struct lpfc_hba *phba = vport->phba; 354 355 /* 356 * If the name is empty or there exists a soft name 357 * then copy the service params name, otherwise use the fc name 358 */ 359 if (vport->fc_nodename.u.wwn[0] == 0) 360 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, 361 sizeof(struct lpfc_name)); 362 else 363 memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename, 364 sizeof(struct lpfc_name)); 365 366 /* 367 * If the port name has changed, then set the Param changes flag 368 * to unreg the login 369 */ 370 if (vport->fc_portname.u.wwn[0] != 0 && 371 memcmp(&vport->fc_portname, &vport->fc_sparam.portName, 372 sizeof(struct lpfc_name))) { 373 vport->vport_flag |= FAWWPN_PARAM_CHG; 374 375 if (phba->sli_rev == LPFC_SLI_REV4 && 376 vport->port_type == LPFC_PHYSICAL_PORT && 377 phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_FABRIC) { 378 lpfc_printf_log(phba, KERN_INFO, 379 LOG_SLI | LOG_DISCOVERY | LOG_ELS, 380 "2701 FA-PWWN change WWPN from %llx to " 381 "%llx: vflag x%x fawwpn_flag x%x\n", 382 wwn_to_u64(vport->fc_portname.u.wwn), 383 wwn_to_u64 384 (vport->fc_sparam.portName.u.wwn), 385 vport->vport_flag, 386 phba->sli4_hba.fawwpn_flag); 387 memcpy(&vport->fc_portname, &vport->fc_sparam.portName, 388 sizeof(struct lpfc_name)); 389 } 390 } 391 392 if (vport->fc_portname.u.wwn[0] == 0) 393 memcpy(&vport->fc_portname, &vport->fc_sparam.portName, 394 sizeof(struct lpfc_name)); 395 else 396 memcpy(&vport->fc_sparam.portName, &vport->fc_portname, 397 sizeof(struct lpfc_name)); 398 } 399 400 /** 401 * lpfc_config_port_post - Perform lpfc initialization after config port 402 * @phba: pointer to lpfc hba data structure. 403 * 404 * This routine will do LPFC initialization after the CONFIG_PORT mailbox 405 * command call. It performs all internal resource and state setups on the 406 * port: post IOCB buffers, enable appropriate host interrupt attentions, 407 * ELS ring timers, etc. 408 * 409 * Return codes 410 * 0 - success. 411 * Any other value - error. 412 **/ 413 int 414 lpfc_config_port_post(struct lpfc_hba *phba) 415 { 416 struct lpfc_vport *vport = phba->pport; 417 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 418 LPFC_MBOXQ_t *pmb; 419 MAILBOX_t *mb; 420 struct lpfc_dmabuf *mp; 421 struct lpfc_sli *psli = &phba->sli; 422 uint32_t status, timeout; 423 int i, j; 424 int rc; 425 426 spin_lock_irq(&phba->hbalock); 427 /* 428 * If the Config port completed correctly the HBA is not 429 * over heated any more. 430 */ 431 if (phba->over_temp_state == HBA_OVER_TEMP) 432 phba->over_temp_state = HBA_NORMAL_TEMP; 433 spin_unlock_irq(&phba->hbalock); 434 435 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 436 if (!pmb) { 437 phba->link_state = LPFC_HBA_ERROR; 438 return -ENOMEM; 439 } 440 mb = &pmb->u.mb; 441 442 /* Get login parameters for NID. */ 443 rc = lpfc_read_sparam(phba, pmb, 0); 444 if (rc) { 445 mempool_free(pmb, phba->mbox_mem_pool); 446 return -ENOMEM; 447 } 448 449 pmb->vport = vport; 450 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 451 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 452 "0448 Adapter failed init, mbxCmd x%x " 453 "READ_SPARM mbxStatus x%x\n", 454 mb->mbxCommand, mb->mbxStatus); 455 phba->link_state = LPFC_HBA_ERROR; 456 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); 457 return -EIO; 458 } 459 460 mp = (struct lpfc_dmabuf *)pmb->ctx_buf; 461 462 /* This dmabuf was allocated by lpfc_read_sparam. The dmabuf is no 463 * longer needed. 
Prevent unintended ctx_buf access as the mbox is 464 * reused. 465 */ 466 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm)); 467 lpfc_mbuf_free(phba, mp->virt, mp->phys); 468 kfree(mp); 469 pmb->ctx_buf = NULL; 470 lpfc_update_vport_wwn(vport); 471 472 /* Update the fc_host data structures with new wwn. */ 473 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 474 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 475 fc_host_max_npiv_vports(shost) = phba->max_vpi; 476 477 /* If no serial number in VPD data, use low 6 bytes of WWNN */ 478 /* This should be consolidated into parse_vpd ? - mr */ 479 if (phba->SerialNumber[0] == 0) { 480 uint8_t *outptr; 481 482 outptr = &vport->fc_nodename.u.s.IEEE[0]; 483 for (i = 0; i < 12; i++) { 484 status = *outptr++; 485 j = ((status & 0xf0) >> 4); 486 if (j <= 9) 487 phba->SerialNumber[i] = 488 (char)((uint8_t) 0x30 + (uint8_t) j); 489 else 490 phba->SerialNumber[i] = 491 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 492 i++; 493 j = (status & 0xf); 494 if (j <= 9) 495 phba->SerialNumber[i] = 496 (char)((uint8_t) 0x30 + (uint8_t) j); 497 else 498 phba->SerialNumber[i] = 499 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 500 } 501 } 502 503 lpfc_read_config(phba, pmb); 504 pmb->vport = vport; 505 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 506 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 507 "0453 Adapter failed to init, mbxCmd x%x " 508 "READ_CONFIG, mbxStatus x%x\n", 509 mb->mbxCommand, mb->mbxStatus); 510 phba->link_state = LPFC_HBA_ERROR; 511 mempool_free( pmb, phba->mbox_mem_pool); 512 return -EIO; 513 } 514 515 /* Check if the port is disabled */ 516 lpfc_sli_read_link_ste(phba); 517 518 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 519 if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) { 520 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 521 "3359 HBA queue depth changed from %d to %d\n", 522 phba->cfg_hba_queue_depth, 523 mb->un.varRdConfig.max_xri); 524 phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri; 525 } 526 527 phba->lmt = mb->un.varRdConfig.lmt; 528 529 /* Get the default values for Model Name and Description */ 530 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 531 532 phba->link_state = LPFC_LINK_DOWN; 533 534 /* Only process IOCBs on ELS ring till hba_state is READY */ 535 if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr) 536 psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT; 537 if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr) 538 psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT; 539 540 /* Post receive buffers for desired rings */ 541 if (phba->sli_rev != 3) 542 lpfc_post_rcv_buf(phba); 543 544 /* 545 * Configure HBA MSI-X attention conditions to messages if MSI-X mode 546 */ 547 if (phba->intr_type == MSIX) { 548 rc = lpfc_config_msi(phba, pmb); 549 if (rc) { 550 mempool_free(pmb, phba->mbox_mem_pool); 551 return -EIO; 552 } 553 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 554 if (rc != MBX_SUCCESS) { 555 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 556 "0352 Config MSI mailbox command " 557 "failed, mbxCmd x%x, mbxStatus x%x\n", 558 pmb->u.mb.mbxCommand, 559 pmb->u.mb.mbxStatus); 560 mempool_free(pmb, phba->mbox_mem_pool); 561 return -EIO; 562 } 563 } 564 565 spin_lock_irq(&phba->hbalock); 566 /* Initialize ERATT handling flag */ 567 phba->hba_flag &= ~HBA_ERATT_HANDLED; 568 569 /* Enable appropriate host interrupts */ 570 if (lpfc_readl(phba->HCregaddr, &status)) { 571 
spin_unlock_irq(&phba->hbalock); 572 return -EIO; 573 } 574 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; 575 if (psli->num_rings > 0) 576 status |= HC_R0INT_ENA; 577 if (psli->num_rings > 1) 578 status |= HC_R1INT_ENA; 579 if (psli->num_rings > 2) 580 status |= HC_R2INT_ENA; 581 if (psli->num_rings > 3) 582 status |= HC_R3INT_ENA; 583 584 if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) && 585 (phba->cfg_poll & DISABLE_FCP_RING_INT)) 586 status &= ~(HC_R0INT_ENA); 587 588 writel(status, phba->HCregaddr); 589 readl(phba->HCregaddr); /* flush */ 590 spin_unlock_irq(&phba->hbalock); 591 592 /* Set up ring-0 (ELS) timer */ 593 timeout = phba->fc_ratov * 2; 594 mod_timer(&vport->els_tmofunc, 595 jiffies + msecs_to_jiffies(1000 * timeout)); 596 /* Set up heart beat (HB) timer */ 597 mod_timer(&phba->hb_tmofunc, 598 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 599 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO); 600 phba->last_completion_time = jiffies; 601 /* Set up error attention (ERATT) polling timer */ 602 mod_timer(&phba->eratt_poll, 603 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); 604 605 if (phba->hba_flag & LINK_DISABLED) { 606 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 607 "2598 Adapter Link is disabled.\n"); 608 lpfc_down_link(phba, pmb); 609 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 610 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 611 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 612 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 613 "2599 Adapter failed to issue DOWN_LINK" 614 " mbox command rc 0x%x\n", rc); 615 616 mempool_free(pmb, phba->mbox_mem_pool); 617 return -EIO; 618 } 619 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { 620 mempool_free(pmb, phba->mbox_mem_pool); 621 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); 622 if (rc) 623 return rc; 624 } 625 /* MBOX buffer will be freed in mbox compl */ 626 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 627 if (!pmb) { 628 phba->link_state = LPFC_HBA_ERROR; 629 return -ENOMEM; 630 } 631 632 lpfc_config_async(phba, pmb, LPFC_ELS_RING); 633 pmb->mbox_cmpl = lpfc_config_async_cmpl; 634 pmb->vport = phba->pport; 635 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 636 637 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 638 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 639 "0456 Adapter failed to issue " 640 "ASYNCEVT_ENABLE mbox status x%x\n", 641 rc); 642 mempool_free(pmb, phba->mbox_mem_pool); 643 } 644 645 /* Get Option rom version */ 646 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 647 if (!pmb) { 648 phba->link_state = LPFC_HBA_ERROR; 649 return -ENOMEM; 650 } 651 652 lpfc_dump_wakeup_param(phba, pmb); 653 pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl; 654 pmb->vport = phba->pport; 655 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 656 657 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 658 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 659 "0435 Adapter failed " 660 "to get Option ROM version status x%x\n", rc); 661 mempool_free(pmb, phba->mbox_mem_pool); 662 } 663 664 return 0; 665 } 666 667 /** 668 * lpfc_sli4_refresh_params - update driver copy of params. 669 * @phba: Pointer to HBA context object. 670 * 671 * This is called to refresh driver copy of dynamic fields from the 672 * common_get_sli4_parameters descriptor. 
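 *
 * Return codes
 *   0 - success
 *   -ENOMEM - could not allocate a mailbox command
 *   Any other value - error issuing the mailbox command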
673 **/ 674 int 675 lpfc_sli4_refresh_params(struct lpfc_hba *phba) 676 { 677 LPFC_MBOXQ_t *mboxq; 678 struct lpfc_mqe *mqe; 679 struct lpfc_sli4_parameters *mbx_sli4_parameters; 680 int length, rc; 681 682 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 683 if (!mboxq) 684 return -ENOMEM; 685 686 mqe = &mboxq->u.mqe; 687 /* Read the port's SLI4 Config Parameters */ 688 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) - 689 sizeof(struct lpfc_sli4_cfg_mhdr)); 690 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 691 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS, 692 length, LPFC_SLI4_MBX_EMBED); 693 694 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 695 if (unlikely(rc)) { 696 mempool_free(mboxq, phba->mbox_mem_pool); 697 return rc; 698 } 699 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters; 700 701 /* Are we forcing MI off via module parameter? */ 702 if (phba->cfg_enable_mi) 703 phba->sli4_hba.pc_sli4_params.mi_ver = 704 bf_get(cfg_mi_ver, mbx_sli4_parameters); 705 else 706 phba->sli4_hba.pc_sli4_params.mi_ver = 0; 707 708 phba->sli4_hba.pc_sli4_params.cmf = 709 bf_get(cfg_cmf, mbx_sli4_parameters); 710 phba->sli4_hba.pc_sli4_params.pls = 711 bf_get(cfg_pvl, mbx_sli4_parameters); 712 713 mempool_free(mboxq, phba->mbox_mem_pool); 714 return rc; 715 } 716 717 /** 718 * lpfc_hba_init_link - Initialize the FC link 719 * @phba: pointer to lpfc hba data structure. 720 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 721 * 722 * This routine will issue the INIT_LINK mailbox command call. 723 * It is available to other drivers through the lpfc_hba data 724 * structure for use as a delayed link up mechanism with the 725 * module parameter lpfc_suppress_link_up. 726 * 727 * Return code 728 * 0 - success 729 * Any other value - error 730 **/ 731 static int 732 lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag) 733 { 734 return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag); 735 } 736 737 /** 738 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology 739 * @phba: pointer to lpfc hba data structure. 740 * @fc_topology: desired fc topology. 741 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 742 * 743 * This routine will issue the INIT_LINK mailbox command call. 744 * It is available to other drivers through the lpfc_hba data 745 * structure for use as a delayed link up mechanism with the 746 * module parameter lpfc_suppress_link_up. 
747 * 748 * Return code 749 * 0 - success 750 * Any other value - error 751 **/ 752 int 753 lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology, 754 uint32_t flag) 755 { 756 struct lpfc_vport *vport = phba->pport; 757 LPFC_MBOXQ_t *pmb; 758 MAILBOX_t *mb; 759 int rc; 760 761 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 762 if (!pmb) { 763 phba->link_state = LPFC_HBA_ERROR; 764 return -ENOMEM; 765 } 766 mb = &pmb->u.mb; 767 pmb->vport = vport; 768 769 if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) || 770 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) && 771 !(phba->lmt & LMT_1Gb)) || 772 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) && 773 !(phba->lmt & LMT_2Gb)) || 774 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) && 775 !(phba->lmt & LMT_4Gb)) || 776 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) && 777 !(phba->lmt & LMT_8Gb)) || 778 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) && 779 !(phba->lmt & LMT_10Gb)) || 780 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) && 781 !(phba->lmt & LMT_16Gb)) || 782 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) && 783 !(phba->lmt & LMT_32Gb)) || 784 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) && 785 !(phba->lmt & LMT_64Gb))) { 786 /* Reset link speed to auto */ 787 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 788 "1302 Invalid speed for this board:%d " 789 "Reset link speed to auto.\n", 790 phba->cfg_link_speed); 791 phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO; 792 } 793 lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed); 794 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 795 if (phba->sli_rev < LPFC_SLI_REV4) 796 lpfc_set_loopback_flag(phba); 797 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 798 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 799 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 800 "0498 Adapter failed to init, mbxCmd x%x " 801 "INIT_LINK, mbxStatus x%x\n", 802 mb->mbxCommand, mb->mbxStatus); 803 if (phba->sli_rev <= LPFC_SLI_REV3) { 804 /* Clear all interrupt enable conditions */ 805 writel(0, phba->HCregaddr); 806 readl(phba->HCregaddr); /* flush */ 807 /* Clear all pending interrupts */ 808 writel(0xffffffff, phba->HAregaddr); 809 readl(phba->HAregaddr); /* flush */ 810 } 811 phba->link_state = LPFC_HBA_ERROR; 812 if (rc != MBX_BUSY || flag == MBX_POLL) 813 mempool_free(pmb, phba->mbox_mem_pool); 814 return -EIO; 815 } 816 phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK; 817 if (flag == MBX_POLL) 818 mempool_free(pmb, phba->mbox_mem_pool); 819 820 return 0; 821 } 822 823 /** 824 * lpfc_hba_down_link - this routine downs the FC link 825 * @phba: pointer to lpfc hba data structure. 826 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 827 * 828 * This routine will issue the DOWN_LINK mailbox command call. 829 * It is available to other drivers through the lpfc_hba data 830 * structure for use to stop the link. 
831 * 832 * Return code 833 * 0 - success 834 * Any other value - error 835 **/ 836 static int 837 lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag) 838 { 839 LPFC_MBOXQ_t *pmb; 840 int rc; 841 842 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 843 if (!pmb) { 844 phba->link_state = LPFC_HBA_ERROR; 845 return -ENOMEM; 846 } 847 848 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 849 "0491 Adapter Link is disabled.\n"); 850 lpfc_down_link(phba, pmb); 851 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 852 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 853 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 854 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 855 "2522 Adapter failed to issue DOWN_LINK" 856 " mbox command rc 0x%x\n", rc); 857 858 mempool_free(pmb, phba->mbox_mem_pool); 859 return -EIO; 860 } 861 if (flag == MBX_POLL) 862 mempool_free(pmb, phba->mbox_mem_pool); 863 864 return 0; 865 } 866 867 /** 868 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset 869 * @phba: pointer to lpfc HBA data structure. 870 * 871 * This routine will do LPFC uninitialization before the HBA is reset when 872 * bringing down the SLI Layer. 873 * 874 * Return codes 875 * 0 - success. 876 * Any other value - error. 877 **/ 878 int 879 lpfc_hba_down_prep(struct lpfc_hba *phba) 880 { 881 struct lpfc_vport **vports; 882 int i; 883 884 if (phba->sli_rev <= LPFC_SLI_REV3) { 885 /* Disable interrupts */ 886 writel(0, phba->HCregaddr); 887 readl(phba->HCregaddr); /* flush */ 888 } 889 890 if (phba->pport->load_flag & FC_UNLOADING) 891 lpfc_cleanup_discovery_resources(phba->pport); 892 else { 893 vports = lpfc_create_vport_work_array(phba); 894 if (vports != NULL) 895 for (i = 0; i <= phba->max_vports && 896 vports[i] != NULL; i++) 897 lpfc_cleanup_discovery_resources(vports[i]); 898 lpfc_destroy_vport_work_array(phba, vports); 899 } 900 return 0; 901 } 902 903 /** 904 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free 905 * rspiocb which got deferred 906 * 907 * @phba: pointer to lpfc HBA data structure. 908 * 909 * This routine will cleanup completed slow path events after HBA is reset 910 * when bringing down the SLI Layer. 911 * 912 * 913 * Return codes 914 * void. 915 **/ 916 static void 917 lpfc_sli4_free_sp_events(struct lpfc_hba *phba) 918 { 919 struct lpfc_iocbq *rspiocbq; 920 struct hbq_dmabuf *dmabuf; 921 struct lpfc_cq_event *cq_event; 922 923 spin_lock_irq(&phba->hbalock); 924 phba->hba_flag &= ~HBA_SP_QUEUE_EVT; 925 spin_unlock_irq(&phba->hbalock); 926 927 while (!list_empty(&phba->sli4_hba.sp_queue_event)) { 928 /* Get the response iocb from the head of work queue */ 929 spin_lock_irq(&phba->hbalock); 930 list_remove_head(&phba->sli4_hba.sp_queue_event, 931 cq_event, struct lpfc_cq_event, list); 932 spin_unlock_irq(&phba->hbalock); 933 934 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { 935 case CQE_CODE_COMPL_WQE: 936 rspiocbq = container_of(cq_event, struct lpfc_iocbq, 937 cq_event); 938 lpfc_sli_release_iocbq(phba, rspiocbq); 939 break; 940 case CQE_CODE_RECEIVE: 941 case CQE_CODE_RECEIVE_V1: 942 dmabuf = container_of(cq_event, struct hbq_dmabuf, 943 cq_event); 944 lpfc_in_buf_free(phba, &dmabuf->dbuf); 945 } 946 } 947 } 948 949 /** 950 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset 951 * @phba: pointer to lpfc HBA data structure. 952 * 953 * This routine will cleanup posted ELS buffers after the HBA is reset 954 * when bringing down the SLI Layer. 955 * 956 * 957 * Return codes 958 * void. 
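 *
 * (With HBQs enabled this frees all HBQ buffers; otherwise it walks the
 * ELS ring postbufq and frees each preposted buffer, as shown below.)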
 **/
static void
lpfc_hba_free_post_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(buflist);
	int count;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->sli3_ring[LPFC_ELS_RING];
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&pring->postbufq, &buflist);
		spin_unlock_irq(&phba->hbalock);

		count = 0;
		list_for_each_entry_safe(mp, next_mp, &buflist, list) {
			list_del(&mp->list);
			count++;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}

		spin_lock_irq(&phba->hbalock);
		pring->postbufq_cnt -= count;
		spin_unlock_irq(&phba->hbalock);
	}
}

/**
 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup the txcmplq after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   void
 **/
static void
lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	LIST_HEAD(completions);
	int i;
	struct lpfc_iocbq *piocb, *next_iocb;

	if (phba->sli_rev != LPFC_SLI_REV4) {
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			spin_lock_irq(&phba->hbalock);
			/* At this point in time the HBA is either reset or
			 * DOA. Nothing should be on txcmplq as it will
			 * NEVER complete.
			 */
			list_splice_init(&pring->txcmplq, &completions);
			pring->txcmplq_cnt = 0;
			spin_unlock_irq(&phba->hbalock);

			lpfc_sli_abort_iocb_ring(phba, pring);
		}
		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions,
				      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
		return;
	}
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring)
			continue;
		spin_lock_irq(&pring->ring_lock);
		list_for_each_entry_safe(piocb, next_iocb,
					 &pring->txcmplq, list)
			piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&pring->ring_lock);
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions,
			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);
	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
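 *
 * (In practice the SLI4 implementation below returns the number of aborted
 * I/O buffers that were moved back to the free list; see the cnt
 * accumulation in the function body.)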
1078 **/ 1079 static int 1080 lpfc_hba_down_post_s4(struct lpfc_hba *phba) 1081 { 1082 struct lpfc_io_buf *psb, *psb_next; 1083 struct lpfc_async_xchg_ctx *ctxp, *ctxp_next; 1084 struct lpfc_sli4_hdw_queue *qp; 1085 LIST_HEAD(aborts); 1086 LIST_HEAD(nvme_aborts); 1087 LIST_HEAD(nvmet_aborts); 1088 struct lpfc_sglq *sglq_entry = NULL; 1089 int cnt, idx; 1090 1091 1092 lpfc_sli_hbqbuf_free_all(phba); 1093 lpfc_hba_clean_txcmplq(phba); 1094 1095 /* At this point in time the HBA is either reset or DOA. Either 1096 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be 1097 * on the lpfc_els_sgl_list so that it can either be freed if the 1098 * driver is unloading or reposted if the driver is restarting 1099 * the port. 1100 */ 1101 1102 /* sgl_list_lock required because worker thread uses this 1103 * list. 1104 */ 1105 spin_lock_irq(&phba->sli4_hba.sgl_list_lock); 1106 list_for_each_entry(sglq_entry, 1107 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) 1108 sglq_entry->state = SGL_FREED; 1109 1110 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list, 1111 &phba->sli4_hba.lpfc_els_sgl_list); 1112 1113 1114 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock); 1115 1116 /* abts_xxxx_buf_list_lock required because worker thread uses this 1117 * list. 1118 */ 1119 spin_lock_irq(&phba->hbalock); 1120 cnt = 0; 1121 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 1122 qp = &phba->sli4_hba.hdwq[idx]; 1123 1124 spin_lock(&qp->abts_io_buf_list_lock); 1125 list_splice_init(&qp->lpfc_abts_io_buf_list, 1126 &aborts); 1127 1128 list_for_each_entry_safe(psb, psb_next, &aborts, list) { 1129 psb->pCmd = NULL; 1130 psb->status = IOSTAT_SUCCESS; 1131 cnt++; 1132 } 1133 spin_lock(&qp->io_buf_list_put_lock); 1134 list_splice_init(&aborts, &qp->lpfc_io_buf_list_put); 1135 qp->put_io_bufs += qp->abts_scsi_io_bufs; 1136 qp->put_io_bufs += qp->abts_nvme_io_bufs; 1137 qp->abts_scsi_io_bufs = 0; 1138 qp->abts_nvme_io_bufs = 0; 1139 spin_unlock(&qp->io_buf_list_put_lock); 1140 spin_unlock(&qp->abts_io_buf_list_lock); 1141 } 1142 spin_unlock_irq(&phba->hbalock); 1143 1144 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 1145 spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock); 1146 list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list, 1147 &nvmet_aborts); 1148 spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock); 1149 list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) { 1150 ctxp->flag &= ~(LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP); 1151 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); 1152 } 1153 } 1154 1155 lpfc_sli4_free_sp_events(phba); 1156 return cnt; 1157 } 1158 1159 /** 1160 * lpfc_hba_down_post - Wrapper func for hba down post routine 1161 * @phba: pointer to lpfc HBA data structure. 1162 * 1163 * This routine wraps the actual SLI3 or SLI4 routine for performing 1164 * uninitialization after the HBA is reset when bring down the SLI Layer. 1165 * 1166 * Return codes 1167 * 0 - success. 1168 * Any other value - error. 1169 **/ 1170 int 1171 lpfc_hba_down_post(struct lpfc_hba *phba) 1172 { 1173 return (*phba->lpfc_hba_down_post)(phba); 1174 } 1175 1176 /** 1177 * lpfc_hb_timeout - The HBA-timer timeout handler 1178 * @t: timer context used to obtain the pointer to lpfc hba data structure. 1179 * 1180 * This is the HBA-timer timeout handler registered to the lpfc driver. When 1181 * this timer fires, a HBA timeout event shall be posted to the lpfc driver 1182 * work-port-events bitmap and the worker thread is notified. 
This timeout 1183 * event will be used by the worker thread to invoke the actual timeout 1184 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will 1185 * be performed in the timeout handler and the HBA timeout event bit shall 1186 * be cleared by the worker thread after it has taken the event bitmap out. 1187 **/ 1188 static void 1189 lpfc_hb_timeout(struct timer_list *t) 1190 { 1191 struct lpfc_hba *phba; 1192 uint32_t tmo_posted; 1193 unsigned long iflag; 1194 1195 phba = from_timer(phba, t, hb_tmofunc); 1196 1197 /* Check for heart beat timeout conditions */ 1198 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 1199 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO; 1200 if (!tmo_posted) 1201 phba->pport->work_port_events |= WORKER_HB_TMO; 1202 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 1203 1204 /* Tell the worker thread there is work to do */ 1205 if (!tmo_posted) 1206 lpfc_worker_wake_up(phba); 1207 return; 1208 } 1209 1210 /** 1211 * lpfc_rrq_timeout - The RRQ-timer timeout handler 1212 * @t: timer context used to obtain the pointer to lpfc hba data structure. 1213 * 1214 * This is the RRQ-timer timeout handler registered to the lpfc driver. When 1215 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver 1216 * work-port-events bitmap and the worker thread is notified. This timeout 1217 * event will be used by the worker thread to invoke the actual timeout 1218 * handler routine, lpfc_rrq_handler. Any periodical operations will 1219 * be performed in the timeout handler and the RRQ timeout event bit shall 1220 * be cleared by the worker thread after it has taken the event bitmap out. 1221 **/ 1222 static void 1223 lpfc_rrq_timeout(struct timer_list *t) 1224 { 1225 struct lpfc_hba *phba; 1226 unsigned long iflag; 1227 1228 phba = from_timer(phba, t, rrq_tmr); 1229 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 1230 if (!(phba->pport->load_flag & FC_UNLOADING)) 1231 phba->hba_flag |= HBA_RRQ_ACTIVE; 1232 else 1233 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 1234 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 1235 1236 if (!(phba->pport->load_flag & FC_UNLOADING)) 1237 lpfc_worker_wake_up(phba); 1238 } 1239 1240 /** 1241 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function 1242 * @phba: pointer to lpfc hba data structure. 1243 * @pmboxq: pointer to the driver internal queue element for mailbox command. 1244 * 1245 * This is the callback function to the lpfc heart-beat mailbox command. 1246 * If configured, the lpfc driver issues the heart-beat mailbox command to 1247 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the 1248 * heart-beat mailbox command is issued, the driver shall set up heart-beat 1249 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks 1250 * heart-beat outstanding state. Once the mailbox command comes back and 1251 * no error conditions detected, the heart-beat mailbox command timer is 1252 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding 1253 * state is cleared for the next heart-beat. If the timer expired with the 1254 * heart-beat outstanding state set, the driver will put the HBA offline. 
1255 **/ 1256 static void 1257 lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 1258 { 1259 unsigned long drvr_flag; 1260 1261 spin_lock_irqsave(&phba->hbalock, drvr_flag); 1262 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO); 1263 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 1264 1265 /* Check and reset heart-beat timer if necessary */ 1266 mempool_free(pmboxq, phba->mbox_mem_pool); 1267 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) && 1268 !(phba->link_state == LPFC_HBA_ERROR) && 1269 !(phba->pport->load_flag & FC_UNLOADING)) 1270 mod_timer(&phba->hb_tmofunc, 1271 jiffies + 1272 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 1273 return; 1274 } 1275 1276 /* 1277 * lpfc_idle_stat_delay_work - idle_stat tracking 1278 * 1279 * This routine tracks per-cq idle_stat and determines polling decisions. 1280 * 1281 * Return codes: 1282 * None 1283 **/ 1284 static void 1285 lpfc_idle_stat_delay_work(struct work_struct *work) 1286 { 1287 struct lpfc_hba *phba = container_of(to_delayed_work(work), 1288 struct lpfc_hba, 1289 idle_stat_delay_work); 1290 struct lpfc_queue *cq; 1291 struct lpfc_sli4_hdw_queue *hdwq; 1292 struct lpfc_idle_stat *idle_stat; 1293 u32 i, idle_percent; 1294 u64 wall, wall_idle, diff_wall, diff_idle, busy_time; 1295 1296 if (phba->pport->load_flag & FC_UNLOADING) 1297 return; 1298 1299 if (phba->link_state == LPFC_HBA_ERROR || 1300 phba->pport->fc_flag & FC_OFFLINE_MODE || 1301 phba->cmf_active_mode != LPFC_CFG_OFF) 1302 goto requeue; 1303 1304 for_each_present_cpu(i) { 1305 hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq]; 1306 cq = hdwq->io_cq; 1307 1308 /* Skip if we've already handled this cq's primary CPU */ 1309 if (cq->chann != i) 1310 continue; 1311 1312 idle_stat = &phba->sli4_hba.idle_stat[i]; 1313 1314 /* get_cpu_idle_time returns values as running counters. Thus, 1315 * to know the amount for this period, the prior counter values 1316 * need to be subtracted from the current counter values. 1317 * From there, the idle time stat can be calculated as a 1318 * percentage of 100 - the sum of the other consumption times. 
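		 *
		 * For example, with diff_wall = 1000 and diff_idle = 900 the
		 * busy time is 100, so idle_percent = 100 - (100 * 100 / 1000)
		 * = 90 and the CQ uses LPFC_IRQ_POLL; an idle_percent below
		 * 15 selects LPFC_QUEUE_WORK instead.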
1319 */ 1320 wall_idle = get_cpu_idle_time(i, &wall, 1); 1321 diff_idle = wall_idle - idle_stat->prev_idle; 1322 diff_wall = wall - idle_stat->prev_wall; 1323 1324 if (diff_wall <= diff_idle) 1325 busy_time = 0; 1326 else 1327 busy_time = diff_wall - diff_idle; 1328 1329 idle_percent = div64_u64(100 * busy_time, diff_wall); 1330 idle_percent = 100 - idle_percent; 1331 1332 if (idle_percent < 15) 1333 cq->poll_mode = LPFC_QUEUE_WORK; 1334 else 1335 cq->poll_mode = LPFC_IRQ_POLL; 1336 1337 idle_stat->prev_idle = wall_idle; 1338 idle_stat->prev_wall = wall; 1339 } 1340 1341 requeue: 1342 schedule_delayed_work(&phba->idle_stat_delay_work, 1343 msecs_to_jiffies(LPFC_IDLE_STAT_DELAY)); 1344 } 1345 1346 static void 1347 lpfc_hb_eq_delay_work(struct work_struct *work) 1348 { 1349 struct lpfc_hba *phba = container_of(to_delayed_work(work), 1350 struct lpfc_hba, eq_delay_work); 1351 struct lpfc_eq_intr_info *eqi, *eqi_new; 1352 struct lpfc_queue *eq, *eq_next; 1353 unsigned char *ena_delay = NULL; 1354 uint32_t usdelay; 1355 int i; 1356 1357 if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING) 1358 return; 1359 1360 if (phba->link_state == LPFC_HBA_ERROR || 1361 phba->pport->fc_flag & FC_OFFLINE_MODE) 1362 goto requeue; 1363 1364 ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay), 1365 GFP_KERNEL); 1366 if (!ena_delay) 1367 goto requeue; 1368 1369 for (i = 0; i < phba->cfg_irq_chann; i++) { 1370 /* Get the EQ corresponding to the IRQ vector */ 1371 eq = phba->sli4_hba.hba_eq_hdl[i].eq; 1372 if (!eq) 1373 continue; 1374 if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) { 1375 eq->q_flag &= ~HBA_EQ_DELAY_CHK; 1376 ena_delay[eq->last_cpu] = 1; 1377 } 1378 } 1379 1380 for_each_present_cpu(i) { 1381 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i); 1382 if (ena_delay[i]) { 1383 usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP; 1384 if (usdelay > LPFC_MAX_AUTO_EQ_DELAY) 1385 usdelay = LPFC_MAX_AUTO_EQ_DELAY; 1386 } else { 1387 usdelay = 0; 1388 } 1389 1390 eqi->icnt = 0; 1391 1392 list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) { 1393 if (unlikely(eq->last_cpu != i)) { 1394 eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info, 1395 eq->last_cpu); 1396 list_move_tail(&eq->cpu_list, &eqi_new->list); 1397 continue; 1398 } 1399 if (usdelay != eq->q_mode) 1400 lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1, 1401 usdelay); 1402 } 1403 } 1404 1405 kfree(ena_delay); 1406 1407 requeue: 1408 queue_delayed_work(phba->wq, &phba->eq_delay_work, 1409 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS)); 1410 } 1411 1412 /** 1413 * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution 1414 * @phba: pointer to lpfc hba data structure. 1415 * 1416 * For each heartbeat, this routine does some heuristic methods to adjust 1417 * XRI distribution. The goal is to fully utilize free XRIs. 1418 **/ 1419 static void lpfc_hb_mxp_handler(struct lpfc_hba *phba) 1420 { 1421 u32 i; 1422 u32 hwq_count; 1423 1424 hwq_count = phba->cfg_hdw_queue; 1425 for (i = 0; i < hwq_count; i++) { 1426 /* Adjust XRIs in private pool */ 1427 lpfc_adjust_pvt_pool_count(phba, i); 1428 1429 /* Adjust high watermark */ 1430 lpfc_adjust_high_watermark(phba, i); 1431 1432 #ifdef LPFC_MXP_STAT 1433 /* Snapshot pbl, pvt and busy count */ 1434 lpfc_snapshot_mxp(phba, i); 1435 #endif 1436 } 1437 } 1438 1439 /** 1440 * lpfc_issue_hb_mbox - Issues heart-beat mailbox command 1441 * @phba: pointer to lpfc hba data structure. 
 *
 * If an HB mbox is not already in progress, this routine will allocate
 * a LPFC_MBOXQ_t, populate it with a MBX_HEARTBEAT (0x31) command,
 * and issue it. The HBA_HBEAT_INP flag means the command is in progress.
 **/
int
lpfc_issue_hb_mbox(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmboxq;
	int retval;

	/* Is a Heartbeat mbox already in progress */
	if (phba->hba_flag & HBA_HBEAT_INP)
		return 0;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return -ENOMEM;

	lpfc_heart_beat(phba, pmboxq);
	pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
	pmboxq->vport = phba->pport;
	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);

	if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return -ENXIO;
	}
	phba->hba_flag |= HBA_HBEAT_INP;

	return 0;
}

/**
 * lpfc_issue_hb_tmo - Signals heartbeat timer to issue mbox command
 * @phba: pointer to lpfc hba data structure.
 *
 * The heartbeat timer (every 5 sec) will fire. If the HBA_HBEAT_TMO
 * flag is set, it will force a MBX_HEARTBEAT mbox command, regardless
 * of the value of lpfc_enable_hba_heartbeat.
 * If lpfc_enable_hba_heartbeat is set, the timeout routine will always
 * try to issue a MBX_HEARTBEAT mbox command.
 **/
void
lpfc_issue_hb_tmo(struct lpfc_hba *phba)
{
	if (phba->cfg_enable_hba_heartbeat)
		return;
	phba->hba_flag |= HBA_HBEAT_TMO;
}

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fires and an HBA-timeout event has been
 * posted. This handler performs any periodic operations needed for the
 * device. If such a periodic event has already been attended to either in
 * the interrupt handler or by processing slow-ring or fast-ring events
 * within the HBA-timer timeout window (LPFC_HB_MBOX_INTERVAL), this handler
 * simply resets the timer for the next timeout period. If the lpfc
 * heart-beat mailbox command is configured and there is no heart-beat
 * mailbox command outstanding, a heart-beat mailbox is issued and the timer
 * set properly. Otherwise, if there has been a heart-beat mailbox command
 * outstanding, the HBA shall be taken offline.
1508 **/ 1509 void 1510 lpfc_hb_timeout_handler(struct lpfc_hba *phba) 1511 { 1512 struct lpfc_vport **vports; 1513 struct lpfc_dmabuf *buf_ptr; 1514 int retval = 0; 1515 int i, tmo; 1516 struct lpfc_sli *psli = &phba->sli; 1517 LIST_HEAD(completions); 1518 1519 if (phba->cfg_xri_rebalancing) { 1520 /* Multi-XRI pools handler */ 1521 lpfc_hb_mxp_handler(phba); 1522 } 1523 1524 vports = lpfc_create_vport_work_array(phba); 1525 if (vports != NULL) 1526 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 1527 lpfc_rcv_seq_check_edtov(vports[i]); 1528 lpfc_fdmi_change_check(vports[i]); 1529 } 1530 lpfc_destroy_vport_work_array(phba, vports); 1531 1532 if ((phba->link_state == LPFC_HBA_ERROR) || 1533 (phba->pport->load_flag & FC_UNLOADING) || 1534 (phba->pport->fc_flag & FC_OFFLINE_MODE)) 1535 return; 1536 1537 if (phba->elsbuf_cnt && 1538 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) { 1539 spin_lock_irq(&phba->hbalock); 1540 list_splice_init(&phba->elsbuf, &completions); 1541 phba->elsbuf_cnt = 0; 1542 phba->elsbuf_prev_cnt = 0; 1543 spin_unlock_irq(&phba->hbalock); 1544 1545 while (!list_empty(&completions)) { 1546 list_remove_head(&completions, buf_ptr, 1547 struct lpfc_dmabuf, list); 1548 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 1549 kfree(buf_ptr); 1550 } 1551 } 1552 phba->elsbuf_prev_cnt = phba->elsbuf_cnt; 1553 1554 /* If there is no heart beat outstanding, issue a heartbeat command */ 1555 if (phba->cfg_enable_hba_heartbeat) { 1556 /* If IOs are completing, no need to issue a MBX_HEARTBEAT */ 1557 spin_lock_irq(&phba->pport->work_port_lock); 1558 if (time_after(phba->last_completion_time + 1559 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL), 1560 jiffies)) { 1561 spin_unlock_irq(&phba->pport->work_port_lock); 1562 if (phba->hba_flag & HBA_HBEAT_INP) 1563 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT); 1564 else 1565 tmo = (1000 * LPFC_HB_MBOX_INTERVAL); 1566 goto out; 1567 } 1568 spin_unlock_irq(&phba->pport->work_port_lock); 1569 1570 /* Check if a MBX_HEARTBEAT is already in progress */ 1571 if (phba->hba_flag & HBA_HBEAT_INP) { 1572 /* 1573 * If heart beat timeout called with HBA_HBEAT_INP set 1574 * we need to give the hb mailbox cmd a chance to 1575 * complete or TMO. 
1576 */ 1577 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 1578 "0459 Adapter heartbeat still outstanding: " 1579 "last compl time was %d ms.\n", 1580 jiffies_to_msecs(jiffies 1581 - phba->last_completion_time)); 1582 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT); 1583 } else { 1584 if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) && 1585 (list_empty(&psli->mboxq))) { 1586 1587 retval = lpfc_issue_hb_mbox(phba); 1588 if (retval) { 1589 tmo = (1000 * LPFC_HB_MBOX_INTERVAL); 1590 goto out; 1591 } 1592 phba->skipped_hb = 0; 1593 } else if (time_before_eq(phba->last_completion_time, 1594 phba->skipped_hb)) { 1595 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 1596 "2857 Last completion time not " 1597 " updated in %d ms\n", 1598 jiffies_to_msecs(jiffies 1599 - phba->last_completion_time)); 1600 } else 1601 phba->skipped_hb = jiffies; 1602 1603 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT); 1604 goto out; 1605 } 1606 } else { 1607 /* Check to see if we want to force a MBX_HEARTBEAT */ 1608 if (phba->hba_flag & HBA_HBEAT_TMO) { 1609 retval = lpfc_issue_hb_mbox(phba); 1610 if (retval) 1611 tmo = (1000 * LPFC_HB_MBOX_INTERVAL); 1612 else 1613 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT); 1614 goto out; 1615 } 1616 tmo = (1000 * LPFC_HB_MBOX_INTERVAL); 1617 } 1618 out: 1619 mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(tmo)); 1620 } 1621 1622 /** 1623 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention 1624 * @phba: pointer to lpfc hba data structure. 1625 * 1626 * This routine is called to bring the HBA offline when HBA hardware error 1627 * other than Port Error 6 has been detected. 1628 **/ 1629 static void 1630 lpfc_offline_eratt(struct lpfc_hba *phba) 1631 { 1632 struct lpfc_sli *psli = &phba->sli; 1633 1634 spin_lock_irq(&phba->hbalock); 1635 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1636 spin_unlock_irq(&phba->hbalock); 1637 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1638 1639 lpfc_offline(phba); 1640 lpfc_reset_barrier(phba); 1641 spin_lock_irq(&phba->hbalock); 1642 lpfc_sli_brdreset(phba); 1643 spin_unlock_irq(&phba->hbalock); 1644 lpfc_hba_down_post(phba); 1645 lpfc_sli_brdready(phba, HS_MBRDY); 1646 lpfc_unblock_mgmt_io(phba); 1647 phba->link_state = LPFC_HBA_ERROR; 1648 return; 1649 } 1650 1651 /** 1652 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention 1653 * @phba: pointer to lpfc hba data structure. 1654 * 1655 * This routine is called to bring a SLI4 HBA offline when HBA hardware error 1656 * other than Port Error 6 has been detected. 1657 **/ 1658 void 1659 lpfc_sli4_offline_eratt(struct lpfc_hba *phba) 1660 { 1661 spin_lock_irq(&phba->hbalock); 1662 if (phba->link_state == LPFC_HBA_ERROR && 1663 test_bit(HBA_PCI_ERR, &phba->bit_flags)) { 1664 spin_unlock_irq(&phba->hbalock); 1665 return; 1666 } 1667 phba->link_state = LPFC_HBA_ERROR; 1668 spin_unlock_irq(&phba->hbalock); 1669 1670 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1671 lpfc_sli_flush_io_rings(phba); 1672 lpfc_offline(phba); 1673 lpfc_hba_down_post(phba); 1674 lpfc_unblock_mgmt_io(phba); 1675 } 1676 1677 /** 1678 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler 1679 * @phba: pointer to lpfc hba data structure. 1680 * 1681 * This routine is invoked to handle the deferred HBA hardware error 1682 * conditions. This type of error is indicated by HBA by setting ER1 1683 * and another ER bit in the host status register. The driver will 1684 * wait until the ER1 bit clears before handling the error condition. 
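 *
 * Concretely, the routine below aborts the FCP rings, takes the port
 * offline, and then polls the Host Status register every 100 ms until
 * HS_FFER1 clears (or the read fails or the driver is unloading) before
 * clearing the DEFER_ERATT flag.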
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0479 Deferred Adapter Hardware Error "
			"Data: x%x x%x x%x\n",
			phba->work_hs, phba->work_status[0],
			phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Firmware stops when it triggers erratt. That could cause I/Os to
	 * be dropped by the firmware. Error out the iocbs (I/O) on the
	 * txcmplq and let the SCSI layer retry them after re-establishing
	 * the link.
	 */
	lpfc_sli_abort_fcp_rings(phba);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear. */
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which the first
	 * write to the host attention register clears the host status
	 * register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
1793 */
1794 if (pci_channel_offline(phba->pcidev)) {
1795 spin_lock_irq(&phba->hbalock);
1796 phba->hba_flag &= ~DEFER_ERATT;
1797 spin_unlock_irq(&phba->hbalock);
1798 return;
1799 }
1800
1801 /* If resets are disabled then leave the HBA alone and return */
1802 if (!phba->cfg_enable_hba_reset)
1803 return;
1804
1805 /* Send an internal error event to mgmt application */
1806 lpfc_board_errevt_to_mgmt(phba);
1807
1808 if (phba->hba_flag & DEFER_ERATT)
1809 lpfc_handle_deferred_eratt(phba);
1810
1811 if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
1812 if (phba->work_hs & HS_FFER6)
1813 /* Re-establishing Link */
1814 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1815 "1301 Re-establishing Link "
1816 "Data: x%x x%x x%x\n",
1817 phba->work_hs, phba->work_status[0],
1818 phba->work_status[1]);
1819 if (phba->work_hs & HS_FFER8)
1820 /* Device Zeroization */
1821 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1822 "2861 Host Authentication device "
1823 "zeroization Data:x%x x%x x%x\n",
1824 phba->work_hs, phba->work_status[0],
1825 phba->work_status[1]);
1826
1827 spin_lock_irq(&phba->hbalock);
1828 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1829 spin_unlock_irq(&phba->hbalock);
1830
1831 /*
1832 * Firmware stops when it triggered the error attention with HS_FFER6.
1833 * That could cause I/Os to be dropped by the firmware.
1834 * Error out the I/Os on the txcmplq and let the SCSI layer
1835 * retry them after re-establishing the link.
1836 */
1837 lpfc_sli_abort_fcp_rings(phba);
1838
1839 /*
1840 * There was a firmware error. Take the hba offline and then
1841 * attempt to restart it.
1842 */
1843 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1844 lpfc_offline(phba);
1845 lpfc_sli_brdrestart(phba);
1846 if (lpfc_online(phba) == 0) { /* Initialize the HBA */
1847 lpfc_unblock_mgmt_io(phba);
1848 return;
1849 }
1850 lpfc_unblock_mgmt_io(phba);
1851 } else if (phba->work_hs & HS_CRIT_TEMP) {
1852 temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
1853 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
1854 temp_event_data.event_code = LPFC_CRIT_TEMP;
1855 temp_event_data.data = (uint32_t)temperature;
1856
1857 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1858 "0406 Adapter maximum temperature exceeded "
1859 "(%ld), taking this port offline "
1860 "Data: x%x x%x x%x\n",
1861 temperature, phba->work_hs,
1862 phba->work_status[0], phba->work_status[1]);
1863
1864 shost = lpfc_shost_from_vport(phba->pport);
1865 fc_host_post_vendor_event(shost, fc_get_event_number(),
1866 sizeof(temp_event_data),
1867 (char *) &temp_event_data,
1868 SCSI_NL_VID_TYPE_PCI
1869 | PCI_VENDOR_ID_EMULEX);
1870
1871 spin_lock_irq(&phba->hbalock);
1872 phba->over_temp_state = HBA_OVER_TEMP;
1873 spin_unlock_irq(&phba->hbalock);
1874 lpfc_offline_eratt(phba);
1875
1876 } else {
1877 /* The if clause above forces this code path when the status
1878 * failure is a value other than FFER6. Do not call the offline
1879 * twice. This is the adapter hardware error path.
1880 */
1881 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1882 "0457 Adapter Hardware Error "
1883 "Data: x%x x%x x%x\n",
1884 phba->work_hs,
1885 phba->work_status[0], phba->work_status[1]);
1886
1887 event_data = FC_REG_DUMP_EVENT;
1888 shost = lpfc_shost_from_vport(vport);
1889 fc_host_post_vendor_event(shost, fc_get_event_number(),
1890 sizeof(event_data), (char *) &event_data,
1891 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1892
1893 lpfc_offline_eratt(phba);
1894 }
1895 return;
1896 }
1897
1898 /**
1899 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
1900 * @phba: pointer to lpfc hba data structure.
1901 * @mbx_action: flag for mailbox shutdown action.
1902 * @en_rn_msg: send reset/port recovery message.
1903 * This routine is invoked to perform an SLI4 port PCI function reset in
1904 * response to port status register polling attention. It waits for port
1905 * status register (ERR, RDY, RN) bits before proceeding with function reset.
1906 * During this process, interrupt vectors are freed and later requested
1907 * for handling possible port resource change.
1908 **/
1909 static int
1910 lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
1911 bool en_rn_msg)
1912 {
1913 int rc;
1914 uint32_t intr_mode;
1915 LPFC_MBOXQ_t *mboxq;
1916
1917 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
1918 LPFC_SLI_INTF_IF_TYPE_2) {
1919 /*
1920 * On an error status condition, the driver needs to wait for
1921 * the port to be ready before performing the reset.
1922 */
1923 rc = lpfc_sli4_pdev_status_reg_wait(phba);
1924 if (rc)
1925 return rc;
1926 }
1927
1928 /* need reset: attempt for port recovery */
1929 if (en_rn_msg)
1930 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1931 "2887 Reset Needed: Attempting Port "
1932 "Recovery...\n");
1933
1934 /* If we are in no-wait mode, the HBA has been reset and is not
1935 * functional, thus we should clear the
1936 * (LPFC_SLI_ACTIVE | LPFC_SLI_MBOX_ACTIVE) flags.
1937 */
1938 if (mbx_action == LPFC_MBX_NO_WAIT) {
1939 spin_lock_irq(&phba->hbalock);
1940 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
1941 if (phba->sli.mbox_active) {
1942 mboxq = phba->sli.mbox_active;
1943 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
1944 __lpfc_mbox_cmpl_put(phba, mboxq);
1945 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
1946 phba->sli.mbox_active = NULL;
1947 }
1948 spin_unlock_irq(&phba->hbalock);
1949 }
1950
1951 lpfc_offline_prep(phba, mbx_action);
1952 lpfc_sli_flush_io_rings(phba);
1953 lpfc_offline(phba);
1954 /* release interrupt for possible resource change */
1955 lpfc_sli4_disable_intr(phba);
1956 rc = lpfc_sli_brdrestart(phba);
1957 if (rc) {
1958 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1959 "6309 Failed to restart board\n");
1960 return rc;
1961 }
1962 /* request and enable interrupt */
1963 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
1964 if (intr_mode == LPFC_INTR_ERROR) {
1965 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1966 "3175 Failed to enable interrupt\n");
1967 return -EIO;
1968 }
1969 phba->intr_mode = intr_mode;
1970 rc = lpfc_online(phba);
1971 if (rc == 0)
1972 lpfc_unblock_mgmt_io(phba);
1973
1974 return rc;
1975 }
1976
1977 /**
1978 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
1979 * @phba: pointer to lpfc hba data structure.
1980 *
1981 * This routine is invoked to handle the SLI4 HBA hardware error attention
1982 * conditions.
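 *
 * For if_type 0 ports the routine checks whether the unrecoverable error
 * is flagged recoverable, polls the port semaphore register for the
 * UE-recoverable state and, once the port reports ready, attempts a PCI
 * function reset. For if_type 2 and 6 ports the ERR1/ERR2 registers are
 * decoded (over-temperature, firmware update, forced dump, provisioning)
 * before the function reset is attempted. Unrecoverable cases leave the
 * link state set to LPFC_HBA_ERROR and a dump event is posted to the
 * management application.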
1983 **/ 1984 static void 1985 lpfc_handle_eratt_s4(struct lpfc_hba *phba) 1986 { 1987 struct lpfc_vport *vport = phba->pport; 1988 uint32_t event_data; 1989 struct Scsi_Host *shost; 1990 uint32_t if_type; 1991 struct lpfc_register portstat_reg = {0}; 1992 uint32_t reg_err1, reg_err2; 1993 uint32_t uerrlo_reg, uemasklo_reg; 1994 uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2; 1995 bool en_rn_msg = true; 1996 struct temp_event temp_event_data; 1997 struct lpfc_register portsmphr_reg; 1998 int rc, i; 1999 2000 /* If the pci channel is offline, ignore possible errors, since 2001 * we cannot communicate with the pci card anyway. 2002 */ 2003 if (pci_channel_offline(phba->pcidev)) { 2004 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2005 "3166 pci channel is offline\n"); 2006 lpfc_sli_flush_io_rings(phba); 2007 return; 2008 } 2009 2010 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg)); 2011 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 2012 switch (if_type) { 2013 case LPFC_SLI_INTF_IF_TYPE_0: 2014 pci_rd_rc1 = lpfc_readl( 2015 phba->sli4_hba.u.if_type0.UERRLOregaddr, 2016 &uerrlo_reg); 2017 pci_rd_rc2 = lpfc_readl( 2018 phba->sli4_hba.u.if_type0.UEMASKLOregaddr, 2019 &uemasklo_reg); 2020 /* consider PCI bus read error as pci_channel_offline */ 2021 if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO) 2022 return; 2023 if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) { 2024 lpfc_sli4_offline_eratt(phba); 2025 return; 2026 } 2027 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2028 "7623 Checking UE recoverable"); 2029 2030 for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) { 2031 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 2032 &portsmphr_reg.word0)) 2033 continue; 2034 2035 smphr_port_status = bf_get(lpfc_port_smphr_port_status, 2036 &portsmphr_reg); 2037 if ((smphr_port_status & LPFC_PORT_SEM_MASK) == 2038 LPFC_PORT_SEM_UE_RECOVERABLE) 2039 break; 2040 /*Sleep for 1Sec, before checking SEMAPHORE */ 2041 msleep(1000); 2042 } 2043 2044 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2045 "4827 smphr_port_status x%x : Waited %dSec", 2046 smphr_port_status, i); 2047 2048 /* Recoverable UE, reset the HBA device */ 2049 if ((smphr_port_status & LPFC_PORT_SEM_MASK) == 2050 LPFC_PORT_SEM_UE_RECOVERABLE) { 2051 for (i = 0; i < 20; i++) { 2052 msleep(1000); 2053 if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 2054 &portsmphr_reg.word0) && 2055 (LPFC_POST_STAGE_PORT_READY == 2056 bf_get(lpfc_port_smphr_port_status, 2057 &portsmphr_reg))) { 2058 rc = lpfc_sli4_port_sta_fn_reset(phba, 2059 LPFC_MBX_NO_WAIT, en_rn_msg); 2060 if (rc == 0) 2061 return; 2062 lpfc_printf_log(phba, KERN_ERR, 2063 LOG_TRACE_EVENT, 2064 "4215 Failed to recover UE"); 2065 break; 2066 } 2067 } 2068 } 2069 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2070 "7624 Firmware not ready: Failing UE recovery," 2071 " waited %dSec", i); 2072 phba->link_state = LPFC_HBA_ERROR; 2073 break; 2074 2075 case LPFC_SLI_INTF_IF_TYPE_2: 2076 case LPFC_SLI_INTF_IF_TYPE_6: 2077 pci_rd_rc1 = lpfc_readl( 2078 phba->sli4_hba.u.if_type2.STATUSregaddr, 2079 &portstat_reg.word0); 2080 /* consider PCI bus read error as pci_channel_offline */ 2081 if (pci_rd_rc1 == -EIO) { 2082 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2083 "3151 PCI bus read access failure: x%x\n", 2084 readl(phba->sli4_hba.u.if_type2.STATUSregaddr)); 2085 lpfc_sli4_offline_eratt(phba); 2086 return; 2087 } 2088 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr); 2089 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr); 2090 if 
(bf_get(lpfc_sliport_status_oti, &portstat_reg)) { 2091 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2092 "2889 Port Overtemperature event, " 2093 "taking port offline Data: x%x x%x\n", 2094 reg_err1, reg_err2); 2095 2096 phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; 2097 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 2098 temp_event_data.event_code = LPFC_CRIT_TEMP; 2099 temp_event_data.data = 0xFFFFFFFF; 2100 2101 shost = lpfc_shost_from_vport(phba->pport); 2102 fc_host_post_vendor_event(shost, fc_get_event_number(), 2103 sizeof(temp_event_data), 2104 (char *)&temp_event_data, 2105 SCSI_NL_VID_TYPE_PCI 2106 | PCI_VENDOR_ID_EMULEX); 2107 2108 spin_lock_irq(&phba->hbalock); 2109 phba->over_temp_state = HBA_OVER_TEMP; 2110 spin_unlock_irq(&phba->hbalock); 2111 lpfc_sli4_offline_eratt(phba); 2112 return; 2113 } 2114 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 2115 reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) { 2116 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2117 "3143 Port Down: Firmware Update " 2118 "Detected\n"); 2119 en_rn_msg = false; 2120 } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 2121 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) 2122 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2123 "3144 Port Down: Debug Dump\n"); 2124 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 2125 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON) 2126 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2127 "3145 Port Down: Provisioning\n"); 2128 2129 /* If resets are disabled then leave the HBA alone and return */ 2130 if (!phba->cfg_enable_hba_reset) 2131 return; 2132 2133 /* Check port status register for function reset */ 2134 rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT, 2135 en_rn_msg); 2136 if (rc == 0) { 2137 /* don't report event on forced debug dump */ 2138 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 2139 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) 2140 return; 2141 else 2142 break; 2143 } 2144 /* fall through for not able to recover */ 2145 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2146 "3152 Unrecoverable error\n"); 2147 phba->link_state = LPFC_HBA_ERROR; 2148 break; 2149 case LPFC_SLI_INTF_IF_TYPE_1: 2150 default: 2151 break; 2152 } 2153 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2154 "3123 Report dump event to upper layer\n"); 2155 /* Send an internal error event to mgmt application */ 2156 lpfc_board_errevt_to_mgmt(phba); 2157 2158 event_data = FC_REG_DUMP_EVENT; 2159 shost = lpfc_shost_from_vport(vport); 2160 fc_host_post_vendor_event(shost, fc_get_event_number(), 2161 sizeof(event_data), (char *) &event_data, 2162 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 2163 } 2164 2165 /** 2166 * lpfc_handle_eratt - Wrapper func for handling hba error attention 2167 * @phba: pointer to lpfc HBA data structure. 2168 * 2169 * This routine wraps the actual SLI3 or SLI4 hba error attention handling 2170 * routine from the API jump table function pointer from the lpfc_hba struct. 2171 * 2172 * Return codes 2173 * 0 - success. 2174 * Any other value - error. 2175 **/ 2176 void 2177 lpfc_handle_eratt(struct lpfc_hba *phba) 2178 { 2179 (*phba->lpfc_handle_eratt)(phba); 2180 } 2181 2182 /** 2183 * lpfc_handle_latt - The HBA link event handler 2184 * @phba: pointer to lpfc hba data structure. 2185 * 2186 * This routine is invoked from the worker thread to handle a HBA host 2187 * attention link event. SLI3 only. 
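 *
 * On success a READ_TOPOLOGY mailbox command is issued and the link
 * attention condition is cleared in the host attention register. On any
 * failure path the routine re-enables link attention interrupts, marks
 * the link down and sets the link state to LPFC_HBA_ERROR.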
2188 **/ 2189 void 2190 lpfc_handle_latt(struct lpfc_hba *phba) 2191 { 2192 struct lpfc_vport *vport = phba->pport; 2193 struct lpfc_sli *psli = &phba->sli; 2194 LPFC_MBOXQ_t *pmb; 2195 volatile uint32_t control; 2196 int rc = 0; 2197 2198 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2199 if (!pmb) { 2200 rc = 1; 2201 goto lpfc_handle_latt_err_exit; 2202 } 2203 2204 rc = lpfc_mbox_rsrc_prep(phba, pmb); 2205 if (rc) { 2206 rc = 2; 2207 mempool_free(pmb, phba->mbox_mem_pool); 2208 goto lpfc_handle_latt_err_exit; 2209 } 2210 2211 /* Cleanup any outstanding ELS commands */ 2212 lpfc_els_flush_all_cmd(phba); 2213 psli->slistat.link_event++; 2214 lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf); 2215 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 2216 pmb->vport = vport; 2217 /* Block ELS IOCBs until we have processed this mbox command */ 2218 phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 2219 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT); 2220 if (rc == MBX_NOT_FINISHED) { 2221 rc = 4; 2222 goto lpfc_handle_latt_free_mbuf; 2223 } 2224 2225 /* Clear Link Attention in HA REG */ 2226 spin_lock_irq(&phba->hbalock); 2227 writel(HA_LATT, phba->HAregaddr); 2228 readl(phba->HAregaddr); /* flush */ 2229 spin_unlock_irq(&phba->hbalock); 2230 2231 return; 2232 2233 lpfc_handle_latt_free_mbuf: 2234 phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT; 2235 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); 2236 lpfc_handle_latt_err_exit: 2237 /* Enable Link attention interrupts */ 2238 spin_lock_irq(&phba->hbalock); 2239 psli->sli_flag |= LPFC_PROCESS_LA; 2240 control = readl(phba->HCregaddr); 2241 control |= HC_LAINT_ENA; 2242 writel(control, phba->HCregaddr); 2243 readl(phba->HCregaddr); /* flush */ 2244 2245 /* Clear Link Attention in HA REG */ 2246 writel(HA_LATT, phba->HAregaddr); 2247 readl(phba->HAregaddr); /* flush */ 2248 spin_unlock_irq(&phba->hbalock); 2249 lpfc_linkdown(phba); 2250 phba->link_state = LPFC_HBA_ERROR; 2251 2252 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2253 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc); 2254 2255 return; 2256 } 2257 2258 /** 2259 * lpfc_parse_vpd - Parse VPD (Vital Product Data) 2260 * @phba: pointer to lpfc hba data structure. 2261 * @vpd: pointer to the vital product data. 2262 * @len: length of the vital product data in bytes. 2263 * 2264 * This routine parses the Vital Product Data (VPD). The VPD is treated as 2265 * an array of characters. In this routine, the ModelName, ProgramType, and 2266 * ModelDesc, etc. fields of the phba data structure will be populated. 
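 *
 * The VPD is walked as a sequence of tagged descriptors: tags 0x82 and
 * 0x91 are skipped using their two-byte length, tag 0x90 (the read-only
 * area) is scanned for the SN, V1 (model description), V2 (model name),
 * V3 (program type) and V4 (port) keywords, and tag 0x78 ends the parse.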
2267 * 2268 * Return codes 2269 * 0 - pointer to the VPD passed in is NULL 2270 * 1 - success 2271 **/ 2272 int 2273 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) 2274 { 2275 uint8_t lenlo, lenhi; 2276 int Length; 2277 int i, j; 2278 int finished = 0; 2279 int index = 0; 2280 2281 if (!vpd) 2282 return 0; 2283 2284 /* Vital Product */ 2285 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2286 "0455 Vital Product Data: x%x x%x x%x x%x\n", 2287 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2], 2288 (uint32_t) vpd[3]); 2289 while (!finished && (index < (len - 4))) { 2290 switch (vpd[index]) { 2291 case 0x82: 2292 case 0x91: 2293 index += 1; 2294 lenlo = vpd[index]; 2295 index += 1; 2296 lenhi = vpd[index]; 2297 index += 1; 2298 i = ((((unsigned short)lenhi) << 8) + lenlo); 2299 index += i; 2300 break; 2301 case 0x90: 2302 index += 1; 2303 lenlo = vpd[index]; 2304 index += 1; 2305 lenhi = vpd[index]; 2306 index += 1; 2307 Length = ((((unsigned short)lenhi) << 8) + lenlo); 2308 if (Length > len - index) 2309 Length = len - index; 2310 while (Length > 0) { 2311 /* Look for Serial Number */ 2312 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) { 2313 index += 2; 2314 i = vpd[index]; 2315 index += 1; 2316 j = 0; 2317 Length -= (3+i); 2318 while(i--) { 2319 phba->SerialNumber[j++] = vpd[index++]; 2320 if (j == 31) 2321 break; 2322 } 2323 phba->SerialNumber[j] = 0; 2324 continue; 2325 } 2326 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) { 2327 phba->vpd_flag |= VPD_MODEL_DESC; 2328 index += 2; 2329 i = vpd[index]; 2330 index += 1; 2331 j = 0; 2332 Length -= (3+i); 2333 while(i--) { 2334 phba->ModelDesc[j++] = vpd[index++]; 2335 if (j == 255) 2336 break; 2337 } 2338 phba->ModelDesc[j] = 0; 2339 continue; 2340 } 2341 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) { 2342 phba->vpd_flag |= VPD_MODEL_NAME; 2343 index += 2; 2344 i = vpd[index]; 2345 index += 1; 2346 j = 0; 2347 Length -= (3+i); 2348 while(i--) { 2349 phba->ModelName[j++] = vpd[index++]; 2350 if (j == 79) 2351 break; 2352 } 2353 phba->ModelName[j] = 0; 2354 continue; 2355 } 2356 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) { 2357 phba->vpd_flag |= VPD_PROGRAM_TYPE; 2358 index += 2; 2359 i = vpd[index]; 2360 index += 1; 2361 j = 0; 2362 Length -= (3+i); 2363 while(i--) { 2364 phba->ProgramType[j++] = vpd[index++]; 2365 if (j == 255) 2366 break; 2367 } 2368 phba->ProgramType[j] = 0; 2369 continue; 2370 } 2371 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) { 2372 phba->vpd_flag |= VPD_PORT; 2373 index += 2; 2374 i = vpd[index]; 2375 index += 1; 2376 j = 0; 2377 Length -= (3+i); 2378 while(i--) { 2379 if ((phba->sli_rev == LPFC_SLI_REV4) && 2380 (phba->sli4_hba.pport_name_sta == 2381 LPFC_SLI4_PPNAME_GET)) { 2382 j++; 2383 index++; 2384 } else 2385 phba->Port[j++] = vpd[index++]; 2386 if (j == 19) 2387 break; 2388 } 2389 if ((phba->sli_rev != LPFC_SLI_REV4) || 2390 (phba->sli4_hba.pport_name_sta == 2391 LPFC_SLI4_PPNAME_NON)) 2392 phba->Port[j] = 0; 2393 continue; 2394 } 2395 else { 2396 index += 2; 2397 i = vpd[index]; 2398 index += 1; 2399 index += i; 2400 Length -= (3 + i); 2401 } 2402 } 2403 finished = 0; 2404 break; 2405 case 0x78: 2406 finished = 1; 2407 break; 2408 default: 2409 index ++; 2410 break; 2411 } 2412 } 2413 2414 return(1); 2415 } 2416 2417 /** 2418 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description 2419 * @phba: pointer to lpfc hba data structure. 2420 * @mdp: pointer to the data structure to hold the derived model name. 
2421 * @descp: pointer to the data structure to hold the derived description. 2422 * 2423 * This routine retrieves HBA's description based on its registered PCI device 2424 * ID. The @descp passed into this function points to an array of 256 chars. It 2425 * shall be returned with the model name, maximum speed, and the host bus type. 2426 * The @mdp passed into this function points to an array of 80 chars. When the 2427 * function returns, the @mdp will be filled with the model name. 2428 **/ 2429 static void 2430 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) 2431 { 2432 lpfc_vpd_t *vp; 2433 uint16_t dev_id = phba->pcidev->device; 2434 int max_speed; 2435 int GE = 0; 2436 int oneConnect = 0; /* default is not a oneConnect */ 2437 struct { 2438 char *name; 2439 char *bus; 2440 char *function; 2441 } m = {"<Unknown>", "", ""}; 2442 2443 if (mdp && mdp[0] != '\0' 2444 && descp && descp[0] != '\0') 2445 return; 2446 2447 if (phba->lmt & LMT_64Gb) 2448 max_speed = 64; 2449 else if (phba->lmt & LMT_32Gb) 2450 max_speed = 32; 2451 else if (phba->lmt & LMT_16Gb) 2452 max_speed = 16; 2453 else if (phba->lmt & LMT_10Gb) 2454 max_speed = 10; 2455 else if (phba->lmt & LMT_8Gb) 2456 max_speed = 8; 2457 else if (phba->lmt & LMT_4Gb) 2458 max_speed = 4; 2459 else if (phba->lmt & LMT_2Gb) 2460 max_speed = 2; 2461 else if (phba->lmt & LMT_1Gb) 2462 max_speed = 1; 2463 else 2464 max_speed = 0; 2465 2466 vp = &phba->vpd; 2467 2468 switch (dev_id) { 2469 case PCI_DEVICE_ID_FIREFLY: 2470 m = (typeof(m)){"LP6000", "PCI", 2471 "Obsolete, Unsupported Fibre Channel Adapter"}; 2472 break; 2473 case PCI_DEVICE_ID_SUPERFLY: 2474 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) 2475 m = (typeof(m)){"LP7000", "PCI", ""}; 2476 else 2477 m = (typeof(m)){"LP7000E", "PCI", ""}; 2478 m.function = "Obsolete, Unsupported Fibre Channel Adapter"; 2479 break; 2480 case PCI_DEVICE_ID_DRAGONFLY: 2481 m = (typeof(m)){"LP8000", "PCI", 2482 "Obsolete, Unsupported Fibre Channel Adapter"}; 2483 break; 2484 case PCI_DEVICE_ID_CENTAUR: 2485 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) 2486 m = (typeof(m)){"LP9002", "PCI", ""}; 2487 else 2488 m = (typeof(m)){"LP9000", "PCI", ""}; 2489 m.function = "Obsolete, Unsupported Fibre Channel Adapter"; 2490 break; 2491 case PCI_DEVICE_ID_RFLY: 2492 m = (typeof(m)){"LP952", "PCI", 2493 "Obsolete, Unsupported Fibre Channel Adapter"}; 2494 break; 2495 case PCI_DEVICE_ID_PEGASUS: 2496 m = (typeof(m)){"LP9802", "PCI-X", 2497 "Obsolete, Unsupported Fibre Channel Adapter"}; 2498 break; 2499 case PCI_DEVICE_ID_THOR: 2500 m = (typeof(m)){"LP10000", "PCI-X", 2501 "Obsolete, Unsupported Fibre Channel Adapter"}; 2502 break; 2503 case PCI_DEVICE_ID_VIPER: 2504 m = (typeof(m)){"LPX1000", "PCI-X", 2505 "Obsolete, Unsupported Fibre Channel Adapter"}; 2506 break; 2507 case PCI_DEVICE_ID_PFLY: 2508 m = (typeof(m)){"LP982", "PCI-X", 2509 "Obsolete, Unsupported Fibre Channel Adapter"}; 2510 break; 2511 case PCI_DEVICE_ID_TFLY: 2512 m = (typeof(m)){"LP1050", "PCI-X", 2513 "Obsolete, Unsupported Fibre Channel Adapter"}; 2514 break; 2515 case PCI_DEVICE_ID_HELIOS: 2516 m = (typeof(m)){"LP11000", "PCI-X2", 2517 "Obsolete, Unsupported Fibre Channel Adapter"}; 2518 break; 2519 case PCI_DEVICE_ID_HELIOS_SCSP: 2520 m = (typeof(m)){"LP11000-SP", "PCI-X2", 2521 "Obsolete, Unsupported Fibre Channel Adapter"}; 2522 break; 2523 case PCI_DEVICE_ID_HELIOS_DCSP: 2524 m = (typeof(m)){"LP11002-SP", "PCI-X2", 2525 "Obsolete, Unsupported Fibre Channel Adapter"}; 2526 break; 2527 case 
PCI_DEVICE_ID_NEPTUNE: 2528 m = (typeof(m)){"LPe1000", "PCIe", 2529 "Obsolete, Unsupported Fibre Channel Adapter"}; 2530 break; 2531 case PCI_DEVICE_ID_NEPTUNE_SCSP: 2532 m = (typeof(m)){"LPe1000-SP", "PCIe", 2533 "Obsolete, Unsupported Fibre Channel Adapter"}; 2534 break; 2535 case PCI_DEVICE_ID_NEPTUNE_DCSP: 2536 m = (typeof(m)){"LPe1002-SP", "PCIe", 2537 "Obsolete, Unsupported Fibre Channel Adapter"}; 2538 break; 2539 case PCI_DEVICE_ID_BMID: 2540 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"}; 2541 break; 2542 case PCI_DEVICE_ID_BSMB: 2543 m = (typeof(m)){"LP111", "PCI-X2", 2544 "Obsolete, Unsupported Fibre Channel Adapter"}; 2545 break; 2546 case PCI_DEVICE_ID_ZEPHYR: 2547 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 2548 break; 2549 case PCI_DEVICE_ID_ZEPHYR_SCSP: 2550 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 2551 break; 2552 case PCI_DEVICE_ID_ZEPHYR_DCSP: 2553 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"}; 2554 GE = 1; 2555 break; 2556 case PCI_DEVICE_ID_ZMID: 2557 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"}; 2558 break; 2559 case PCI_DEVICE_ID_ZSMB: 2560 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"}; 2561 break; 2562 case PCI_DEVICE_ID_LP101: 2563 m = (typeof(m)){"LP101", "PCI-X", 2564 "Obsolete, Unsupported Fibre Channel Adapter"}; 2565 break; 2566 case PCI_DEVICE_ID_LP10000S: 2567 m = (typeof(m)){"LP10000-S", "PCI", 2568 "Obsolete, Unsupported Fibre Channel Adapter"}; 2569 break; 2570 case PCI_DEVICE_ID_LP11000S: 2571 m = (typeof(m)){"LP11000-S", "PCI-X2", 2572 "Obsolete, Unsupported Fibre Channel Adapter"}; 2573 break; 2574 case PCI_DEVICE_ID_LPE11000S: 2575 m = (typeof(m)){"LPe11000-S", "PCIe", 2576 "Obsolete, Unsupported Fibre Channel Adapter"}; 2577 break; 2578 case PCI_DEVICE_ID_SAT: 2579 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; 2580 break; 2581 case PCI_DEVICE_ID_SAT_MID: 2582 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"}; 2583 break; 2584 case PCI_DEVICE_ID_SAT_SMB: 2585 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"}; 2586 break; 2587 case PCI_DEVICE_ID_SAT_DCSP: 2588 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"}; 2589 break; 2590 case PCI_DEVICE_ID_SAT_SCSP: 2591 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"}; 2592 break; 2593 case PCI_DEVICE_ID_SAT_S: 2594 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"}; 2595 break; 2596 case PCI_DEVICE_ID_HORNET: 2597 m = (typeof(m)){"LP21000", "PCIe", 2598 "Obsolete, Unsupported FCoE Adapter"}; 2599 GE = 1; 2600 break; 2601 case PCI_DEVICE_ID_PROTEUS_VF: 2602 m = (typeof(m)){"LPev12000", "PCIe IOV", 2603 "Obsolete, Unsupported Fibre Channel Adapter"}; 2604 break; 2605 case PCI_DEVICE_ID_PROTEUS_PF: 2606 m = (typeof(m)){"LPev12000", "PCIe IOV", 2607 "Obsolete, Unsupported Fibre Channel Adapter"}; 2608 break; 2609 case PCI_DEVICE_ID_PROTEUS_S: 2610 m = (typeof(m)){"LPemv12002-S", "PCIe IOV", 2611 "Obsolete, Unsupported Fibre Channel Adapter"}; 2612 break; 2613 case PCI_DEVICE_ID_TIGERSHARK: 2614 oneConnect = 1; 2615 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"}; 2616 break; 2617 case PCI_DEVICE_ID_TOMCAT: 2618 oneConnect = 1; 2619 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"}; 2620 break; 2621 case PCI_DEVICE_ID_FALCON: 2622 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe", 2623 "EmulexSecure Fibre"}; 2624 break; 2625 case PCI_DEVICE_ID_BALIUS: 2626 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O", 2627 "Obsolete, Unsupported Fibre Channel Adapter"}; 2628 break; 2629 
case PCI_DEVICE_ID_LANCER_FC: 2630 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"}; 2631 break; 2632 case PCI_DEVICE_ID_LANCER_FC_VF: 2633 m = (typeof(m)){"LPe16000", "PCIe", 2634 "Obsolete, Unsupported Fibre Channel Adapter"}; 2635 break; 2636 case PCI_DEVICE_ID_LANCER_FCOE: 2637 oneConnect = 1; 2638 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"}; 2639 break; 2640 case PCI_DEVICE_ID_LANCER_FCOE_VF: 2641 oneConnect = 1; 2642 m = (typeof(m)){"OCe15100", "PCIe", 2643 "Obsolete, Unsupported FCoE"}; 2644 break; 2645 case PCI_DEVICE_ID_LANCER_G6_FC: 2646 m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"}; 2647 break; 2648 case PCI_DEVICE_ID_LANCER_G7_FC: 2649 m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"}; 2650 break; 2651 case PCI_DEVICE_ID_LANCER_G7P_FC: 2652 m = (typeof(m)){"LPe38000", "PCIe", "Fibre Channel Adapter"}; 2653 break; 2654 case PCI_DEVICE_ID_SKYHAWK: 2655 case PCI_DEVICE_ID_SKYHAWK_VF: 2656 oneConnect = 1; 2657 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"}; 2658 break; 2659 default: 2660 m = (typeof(m)){"Unknown", "", ""}; 2661 break; 2662 } 2663 2664 if (mdp && mdp[0] == '\0') 2665 snprintf(mdp, 79,"%s", m.name); 2666 /* 2667 * oneConnect hba requires special processing, they are all initiators 2668 * and we put the port number on the end 2669 */ 2670 if (descp && descp[0] == '\0') { 2671 if (oneConnect) 2672 snprintf(descp, 255, 2673 "Emulex OneConnect %s, %s Initiator %s", 2674 m.name, m.function, 2675 phba->Port); 2676 else if (max_speed == 0) 2677 snprintf(descp, 255, 2678 "Emulex %s %s %s", 2679 m.name, m.bus, m.function); 2680 else 2681 snprintf(descp, 255, 2682 "Emulex %s %d%s %s %s", 2683 m.name, max_speed, (GE) ? "GE" : "Gb", 2684 m.bus, m.function); 2685 } 2686 } 2687 2688 /** 2689 * lpfc_sli3_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring 2690 * @phba: pointer to lpfc hba data structure. 2691 * @pring: pointer to a IOCB ring. 2692 * @cnt: the number of IOCBs to be posted to the IOCB ring. 2693 * 2694 * This routine posts a given number of IOCBs with the associated DMA buffer 2695 * descriptors specified by the cnt argument to the given IOCB ring. 2696 * 2697 * Return codes 2698 * The number of IOCBs NOT able to be posted to the IOCB ring. 
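 *
 * Each CMD_QUE_RING_BUF64_CN IOCB built below carries one or two DMA
 * buffers of FCELSSIZE bytes. Buffers that cannot be posted, because of
 * an allocation or ring failure, are accounted in pring->missbufcnt and
 * are retried on the next call.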
2699 **/ 2700 int 2701 lpfc_sli3_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) 2702 { 2703 IOCB_t *icmd; 2704 struct lpfc_iocbq *iocb; 2705 struct lpfc_dmabuf *mp1, *mp2; 2706 2707 cnt += pring->missbufcnt; 2708 2709 /* While there are buffers to post */ 2710 while (cnt > 0) { 2711 /* Allocate buffer for command iocb */ 2712 iocb = lpfc_sli_get_iocbq(phba); 2713 if (iocb == NULL) { 2714 pring->missbufcnt = cnt; 2715 return cnt; 2716 } 2717 icmd = &iocb->iocb; 2718 2719 /* 2 buffers can be posted per command */ 2720 /* Allocate buffer to post */ 2721 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2722 if (mp1) 2723 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys); 2724 if (!mp1 || !mp1->virt) { 2725 kfree(mp1); 2726 lpfc_sli_release_iocbq(phba, iocb); 2727 pring->missbufcnt = cnt; 2728 return cnt; 2729 } 2730 2731 INIT_LIST_HEAD(&mp1->list); 2732 /* Allocate buffer to post */ 2733 if (cnt > 1) { 2734 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2735 if (mp2) 2736 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 2737 &mp2->phys); 2738 if (!mp2 || !mp2->virt) { 2739 kfree(mp2); 2740 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2741 kfree(mp1); 2742 lpfc_sli_release_iocbq(phba, iocb); 2743 pring->missbufcnt = cnt; 2744 return cnt; 2745 } 2746 2747 INIT_LIST_HEAD(&mp2->list); 2748 } else { 2749 mp2 = NULL; 2750 } 2751 2752 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys); 2753 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys); 2754 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE; 2755 icmd->ulpBdeCount = 1; 2756 cnt--; 2757 if (mp2) { 2758 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys); 2759 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys); 2760 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE; 2761 cnt--; 2762 icmd->ulpBdeCount = 2; 2763 } 2764 2765 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 2766 icmd->ulpLe = 1; 2767 2768 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) == 2769 IOCB_ERROR) { 2770 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2771 kfree(mp1); 2772 cnt++; 2773 if (mp2) { 2774 lpfc_mbuf_free(phba, mp2->virt, mp2->phys); 2775 kfree(mp2); 2776 cnt++; 2777 } 2778 lpfc_sli_release_iocbq(phba, iocb); 2779 pring->missbufcnt = cnt; 2780 return cnt; 2781 } 2782 lpfc_sli_ringpostbuf_put(phba, pring, mp1); 2783 if (mp2) 2784 lpfc_sli_ringpostbuf_put(phba, pring, mp2); 2785 } 2786 pring->missbufcnt = 0; 2787 return 0; 2788 } 2789 2790 /** 2791 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring 2792 * @phba: pointer to lpfc hba data structure. 2793 * 2794 * This routine posts initial receive IOCB buffers to the ELS ring. The 2795 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is 2796 * set to 64 IOCBs. SLI3 only. 2797 * 2798 * Return codes 2799 * 0 - success (currently always success) 2800 **/ 2801 static int 2802 lpfc_post_rcv_buf(struct lpfc_hba *phba) 2803 { 2804 struct lpfc_sli *psli = &phba->sli; 2805 2806 /* Ring 0, ELS / CT buffers */ 2807 lpfc_sli3_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0); 2808 /* Ring 2 - FCP no buffers needed */ 2809 2810 return 0; 2811 } 2812 2813 #define S(N,V) (((V)<<(N))|((V)>>(32-(N)))) 2814 2815 /** 2816 * lpfc_sha_init - Set up initial array of hash table entries 2817 * @HashResultPointer: pointer to an array as hash table. 2818 * 2819 * This routine sets up the initial values to the array of hash table entries 2820 * for the LC HBAs. 
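 *
 * The five values written below are the standard SHA-1 initialization
 * constants (H0 through H4); lpfc_sha_iterate() then runs the SHA-1
 * compression rounds over the working array.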
2821 **/
2822 static void
2823 lpfc_sha_init(uint32_t * HashResultPointer)
2824 {
2825 HashResultPointer[0] = 0x67452301;
2826 HashResultPointer[1] = 0xEFCDAB89;
2827 HashResultPointer[2] = 0x98BADCFE;
2828 HashResultPointer[3] = 0x10325476;
2829 HashResultPointer[4] = 0xC3D2E1F0;
2830 }
2831
2832 /**
2833 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
2834 * @HashResultPointer: pointer to an initial/result hash table.
2835 * @HashWorkingPointer: pointer to a working hash table.
2836 *
2837 * This routine iterates an initial hash table pointed to by @HashResultPointer
2838 * with the values from the working hash table pointed to by @HashWorkingPointer.
2839 * The results are put back into the initial hash table, returned through
2840 * the @HashResultPointer as the result hash table.
2841 **/
2842 static void
2843 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2844 {
2845 int t;
2846 uint32_t TEMP;
2847 uint32_t A, B, C, D, E;
2848 t = 16;
2849 do {
2850 HashWorkingPointer[t] =
2851 S(1,
2852 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
2853 8] ^
2854 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2855 } while (++t <= 79);
2856 t = 0;
2857 A = HashResultPointer[0];
2858 B = HashResultPointer[1];
2859 C = HashResultPointer[2];
2860 D = HashResultPointer[3];
2861 E = HashResultPointer[4];
2862
2863 do {
2864 if (t < 20) {
2865 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2866 } else if (t < 40) {
2867 TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2868 } else if (t < 60) {
2869 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2870 } else {
2871 TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2872 }
2873 TEMP += S(5, A) + E + HashWorkingPointer[t];
2874 E = D;
2875 D = C;
2876 C = S(30, B);
2877 B = A;
2878 A = TEMP;
2879 } while (++t <= 79);
2880
2881 HashResultPointer[0] += A;
2882 HashResultPointer[1] += B;
2883 HashResultPointer[2] += C;
2884 HashResultPointer[3] += D;
2885 HashResultPointer[4] += E;
2886
2887 }
2888
2889 /**
2890 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2891 * @RandomChallenge: pointer to the entry of host challenge random number array.
2892 * @HashWorking: pointer to the entry of the working hash array.
2893 *
2894 * This routine calculates the working hash array referred to by @HashWorking
2895 * from the challenge random numbers associated with the host, referred to by
2896 * @RandomChallenge. The result is put into the entry of the working hash
2897 * array and returned by reference through @HashWorking.
2898 **/
2899 static void
2900 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2901 {
2902 *HashWorking = (*RandomChallenge ^ *HashWorking);
2903 }
2904
2905 /**
2906 * lpfc_hba_init - Perform special handling for LC HBA initialization
2907 * @phba: pointer to lpfc hba data structure.
2908 * @hbainit: pointer to an array of unsigned 32-bit integers.
2909 *
2910 * This routine performs the special handling for LC HBA initialization.
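 *
 * The routine seeds an 80-word working array from the WWNN and the
 * adapter challenge data (RandomData), then runs the SHA-1 style rounds
 * in lpfc_sha_iterate() to fill in @hbainit.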
2911 **/ 2912 void 2913 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) 2914 { 2915 int t; 2916 uint32_t *HashWorking; 2917 uint32_t *pwwnn = (uint32_t *) phba->wwnn; 2918 2919 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL); 2920 if (!HashWorking) 2921 return; 2922 2923 HashWorking[0] = HashWorking[78] = *pwwnn++; 2924 HashWorking[1] = HashWorking[79] = *pwwnn; 2925 2926 for (t = 0; t < 7; t++) 2927 lpfc_challenge_key(phba->RandomData + t, HashWorking + t); 2928 2929 lpfc_sha_init(hbainit); 2930 lpfc_sha_iterate(hbainit, HashWorking); 2931 kfree(HashWorking); 2932 } 2933 2934 /** 2935 * lpfc_cleanup - Performs vport cleanups before deleting a vport 2936 * @vport: pointer to a virtual N_Port data structure. 2937 * 2938 * This routine performs the necessary cleanups before deleting the @vport. 2939 * It invokes the discovery state machine to perform necessary state 2940 * transitions and to release the ndlps associated with the @vport. Note, 2941 * the physical port is treated as @vport 0. 2942 **/ 2943 void 2944 lpfc_cleanup(struct lpfc_vport *vport) 2945 { 2946 struct lpfc_hba *phba = vport->phba; 2947 struct lpfc_nodelist *ndlp, *next_ndlp; 2948 int i = 0; 2949 2950 if (phba->link_state > LPFC_LINK_DOWN) 2951 lpfc_port_link_failure(vport); 2952 2953 /* Clean up VMID resources */ 2954 if (lpfc_is_vmid_enabled(phba)) 2955 lpfc_vmid_vport_cleanup(vport); 2956 2957 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 2958 if (vport->port_type != LPFC_PHYSICAL_PORT && 2959 ndlp->nlp_DID == Fabric_DID) { 2960 /* Just free up ndlp with Fabric_DID for vports */ 2961 lpfc_nlp_put(ndlp); 2962 continue; 2963 } 2964 2965 if (ndlp->nlp_DID == Fabric_Cntl_DID && 2966 ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 2967 lpfc_nlp_put(ndlp); 2968 continue; 2969 } 2970 2971 /* Fabric Ports not in UNMAPPED state are cleaned up in the 2972 * DEVICE_RM event. 2973 */ 2974 if (ndlp->nlp_type & NLP_FABRIC && 2975 ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) 2976 lpfc_disc_state_machine(vport, ndlp, NULL, 2977 NLP_EVT_DEVICE_RECOVERY); 2978 2979 if (!(ndlp->fc4_xpt_flags & (NVME_XPT_REGD|SCSI_XPT_REGD))) 2980 lpfc_disc_state_machine(vport, ndlp, NULL, 2981 NLP_EVT_DEVICE_RM); 2982 } 2983 2984 /* This is a special case flush to return all 2985 * IOs before entering this loop. There are 2986 * two points in the code where a flush is 2987 * avoided if the FC_UNLOADING flag is set. 2988 * one is in the multipool destroy, 2989 * (this prevents a crash) and the other is 2990 * in the nvme abort handler, ( also prevents 2991 * a crash). Both of these exceptions are 2992 * cases where the slot is still accessible. 2993 * The flush here is only when the pci slot 2994 * is offline. 2995 */ 2996 if (vport->load_flag & FC_UNLOADING && 2997 pci_channel_offline(phba->pcidev)) 2998 lpfc_sli_flush_io_rings(vport->phba); 2999 3000 /* At this point, ALL ndlp's should be gone 3001 * because of the previous NLP_EVT_DEVICE_RM. 3002 * Lets wait for this to happen, if needed. 
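 * The loop below polls every 10 ms and, after roughly 30 seconds
 * (3000 iterations), logs any nodes still on the list before giving
 * up the wait.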
3003 */
3004 while (!list_empty(&vport->fc_nodes)) {
3005 if (i++ > 3000) {
3006 lpfc_printf_vlog(vport, KERN_ERR,
3007 LOG_TRACE_EVENT,
3008 "0233 Nodelist not empty\n");
3009 list_for_each_entry_safe(ndlp, next_ndlp,
3010 &vport->fc_nodes, nlp_listp) {
3011 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
3012 LOG_DISCOVERY,
3013 "0282 did:x%x ndlp:x%px "
3014 "refcnt:%d xflags x%x nflag x%x\n",
3015 ndlp->nlp_DID, (void *)ndlp,
3016 kref_read(&ndlp->kref),
3017 ndlp->fc4_xpt_flags,
3018 ndlp->nlp_flag);
3019 }
3020 break;
3021 }
3022
3023 /* Wait for any activity on ndlps to settle */
3024 msleep(10);
3025 }
3026 lpfc_cleanup_vports_rrqs(vport, NULL);
3027 }
3028
3029 /**
3030 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
3031 * @vport: pointer to a virtual N_Port data structure.
3032 *
3033 * This routine stops all the timers associated with a @vport. This function
3034 * is invoked before disabling or deleting a @vport. Note that the physical
3035 * port is treated as @vport 0.
3036 **/
3037 void
3038 lpfc_stop_vport_timers(struct lpfc_vport *vport)
3039 {
3040 del_timer_sync(&vport->els_tmofunc);
3041 del_timer_sync(&vport->delayed_disc_tmo);
3042 lpfc_can_disctmo(vport);
3043 return;
3044 }
3045
3046 /**
3047 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
3048 * @phba: pointer to lpfc hba data structure.
3049 *
3050 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
3051 * caller of this routine should already hold the host lock.
3052 **/
3053 void
3054 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
3055 {
3056 /* Clear pending FCF rediscovery wait flag */
3057 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
3058
3059 /* Now, try to stop the timer */
3060 del_timer(&phba->fcf.redisc_wait);
3061 }
3062
3063 /**
3064 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
3065 * @phba: pointer to lpfc hba data structure.
3066 *
3067 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
3068 * checks whether the FCF rediscovery wait timer is pending with the host
3069 * lock held before proceeding with disabling the timer and clearing the
3070 * wait timer pending flag.
3071 **/
3072 void
3073 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
3074 {
3075 spin_lock_irq(&phba->hbalock);
3076 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
3077 /* FCF rediscovery timer already fired or stopped */
3078 spin_unlock_irq(&phba->hbalock);
3079 return;
3080 }
3081 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
3082 /* Clear failover in progress flags */
3083 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
3084 spin_unlock_irq(&phba->hbalock);
3085 }
3086
3087 /**
3088 * lpfc_cmf_stop - Stop CMF processing
3089 * @phba: pointer to lpfc hba data structure.
3090 *
3091 * This is called when the link goes down or if CMF mode is turned OFF.
3092 * It is also called when going offline or unloaded just before the
3093 * congestion info buffer is unregistered.
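 *
 * The routine cancels the CMF hrtimer, zeroes the per-CPU congestion
 * statistics and queues unblock_request_work so that any I/O held back
 * for bandwidth management is resumed.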
3094 **/ 3095 void 3096 lpfc_cmf_stop(struct lpfc_hba *phba) 3097 { 3098 int cpu; 3099 struct lpfc_cgn_stat *cgs; 3100 3101 /* We only do something if CMF is enabled */ 3102 if (!phba->sli4_hba.pc_sli4_params.cmf) 3103 return; 3104 3105 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 3106 "6221 Stop CMF / Cancel Timer\n"); 3107 3108 /* Cancel the CMF timer */ 3109 hrtimer_cancel(&phba->cmf_timer); 3110 3111 /* Zero CMF counters */ 3112 atomic_set(&phba->cmf_busy, 0); 3113 for_each_present_cpu(cpu) { 3114 cgs = per_cpu_ptr(phba->cmf_stat, cpu); 3115 atomic64_set(&cgs->total_bytes, 0); 3116 atomic64_set(&cgs->rcv_bytes, 0); 3117 atomic_set(&cgs->rx_io_cnt, 0); 3118 atomic64_set(&cgs->rx_latency, 0); 3119 } 3120 atomic_set(&phba->cmf_bw_wait, 0); 3121 3122 /* Resume any blocked IO - Queue unblock on workqueue */ 3123 queue_work(phba->wq, &phba->unblock_request_work); 3124 } 3125 3126 static inline uint64_t 3127 lpfc_get_max_line_rate(struct lpfc_hba *phba) 3128 { 3129 uint64_t rate = lpfc_sli_port_speed_get(phba); 3130 3131 return ((((unsigned long)rate) * 1024 * 1024) / 10); 3132 } 3133 3134 void 3135 lpfc_cmf_signal_init(struct lpfc_hba *phba) 3136 { 3137 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 3138 "6223 Signal CMF init\n"); 3139 3140 /* Use the new fc_linkspeed to recalculate */ 3141 phba->cmf_interval_rate = LPFC_CMF_INTERVAL; 3142 phba->cmf_max_line_rate = lpfc_get_max_line_rate(phba); 3143 phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate * 3144 phba->cmf_interval_rate, 1000); 3145 phba->cmf_max_bytes_per_interval = phba->cmf_link_byte_count; 3146 3147 /* This is a signal to firmware to sync up CMF BW with link speed */ 3148 lpfc_issue_cmf_sync_wqe(phba, 0, 0); 3149 } 3150 3151 /** 3152 * lpfc_cmf_start - Start CMF processing 3153 * @phba: pointer to lpfc hba data structure. 3154 * 3155 * This is called when the link comes up or if CMF mode is turned OFF 3156 * to Monitor or Managed. 3157 **/ 3158 void 3159 lpfc_cmf_start(struct lpfc_hba *phba) 3160 { 3161 struct lpfc_cgn_stat *cgs; 3162 int cpu; 3163 3164 /* We only do something if CMF is enabled */ 3165 if (!phba->sli4_hba.pc_sli4_params.cmf || 3166 phba->cmf_active_mode == LPFC_CFG_OFF) 3167 return; 3168 3169 /* Reinitialize congestion buffer info */ 3170 lpfc_init_congestion_buf(phba); 3171 3172 atomic_set(&phba->cgn_fabric_warn_cnt, 0); 3173 atomic_set(&phba->cgn_fabric_alarm_cnt, 0); 3174 atomic_set(&phba->cgn_sync_alarm_cnt, 0); 3175 atomic_set(&phba->cgn_sync_warn_cnt, 0); 3176 3177 atomic_set(&phba->cmf_busy, 0); 3178 for_each_present_cpu(cpu) { 3179 cgs = per_cpu_ptr(phba->cmf_stat, cpu); 3180 atomic64_set(&cgs->total_bytes, 0); 3181 atomic64_set(&cgs->rcv_bytes, 0); 3182 atomic_set(&cgs->rx_io_cnt, 0); 3183 atomic64_set(&cgs->rx_latency, 0); 3184 } 3185 phba->cmf_latency.tv_sec = 0; 3186 phba->cmf_latency.tv_nsec = 0; 3187 3188 lpfc_cmf_signal_init(phba); 3189 3190 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 3191 "6222 Start CMF / Timer\n"); 3192 3193 phba->cmf_timer_cnt = 0; 3194 hrtimer_start(&phba->cmf_timer, 3195 ktime_set(0, LPFC_CMF_INTERVAL * 1000000), 3196 HRTIMER_MODE_REL); 3197 /* Setup for latency check in IO cmpl routines */ 3198 ktime_get_real_ts64(&phba->cmf_latency); 3199 3200 atomic_set(&phba->cmf_bw_wait, 0); 3201 atomic_set(&phba->cmf_stop_io, 0); 3202 } 3203 3204 /** 3205 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA 3206 * @phba: pointer to lpfc hba data structure. 3207 * 3208 * This routine stops all the timers associated with a HBA. 
This function is
3209 * invoked before either putting a HBA offline or unloading the driver.
3210 **/
3211 void
3212 lpfc_stop_hba_timers(struct lpfc_hba *phba)
3213 {
3214 if (phba->pport)
3215 lpfc_stop_vport_timers(phba->pport);
3216 cancel_delayed_work_sync(&phba->eq_delay_work);
3217 cancel_delayed_work_sync(&phba->idle_stat_delay_work);
3218 del_timer_sync(&phba->sli.mbox_tmo);
3219 del_timer_sync(&phba->fabric_block_timer);
3220 del_timer_sync(&phba->eratt_poll);
3221 del_timer_sync(&phba->hb_tmofunc);
3222 if (phba->sli_rev == LPFC_SLI_REV4) {
3223 del_timer_sync(&phba->rrq_tmr);
3224 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
3225 }
3226 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
3227
3228 switch (phba->pci_dev_grp) {
3229 case LPFC_PCI_DEV_LP:
3230 /* Stop any LightPulse device specific driver timers */
3231 del_timer_sync(&phba->fcp_poll_timer);
3232 break;
3233 case LPFC_PCI_DEV_OC:
3234 /* Stop any OneConnect device specific driver timers */
3235 lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
3236 break;
3237 default:
3238 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3239 "0297 Invalid device group (x%x)\n",
3240 phba->pci_dev_grp);
3241 break;
3242 }
3243 return;
3244 }
3245
3246 /**
3247 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
3248 * @phba: pointer to lpfc hba data structure.
3249 * @mbx_action: flag for mailbox no wait action.
3250 *
3251 * This routine marks a HBA's management interface as blocked. Once the HBA's
3252 * management interface is marked as blocked, all user space access to the
3253 * HBA, whether from the sysfs interface or the libdfc interface, will be
3254 * blocked. The HBA is set to block the management interface when the
3255 * driver prepares the HBA interface for online or offline.
3256 **/
3257 static void
3258 lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
3259 {
3260 unsigned long iflag;
3261 uint8_t actcmd = MBX_HEARTBEAT;
3262 unsigned long timeout;
3263
3264 spin_lock_irqsave(&phba->hbalock, iflag);
3265 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
3266 spin_unlock_irqrestore(&phba->hbalock, iflag);
3267 if (mbx_action == LPFC_MBX_NO_WAIT)
3268 return;
3269 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
3270 spin_lock_irqsave(&phba->hbalock, iflag);
3271 if (phba->sli.mbox_active) {
3272 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
3273 /* Determine how long we might wait for the active mailbox
3274 * command to be gracefully completed by firmware.
3275 */
3276 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
3277 phba->sli.mbox_active) * 1000) + jiffies;
3278 }
3279 spin_unlock_irqrestore(&phba->hbalock, iflag);
3280
3281 /* Wait for the outstanding mailbox command to complete */
3282 while (phba->sli.mbox_active) {
3283 /* Check active mailbox complete status every 2ms */
3284 msleep(2);
3285 if (time_after(jiffies, timeout)) {
3286 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3287 "2813 Mgmt IO is Blocked %x "
3288 "- mbox cmd %x still active\n",
3289 phba->sli.sli_flag, actcmd);
3290 break;
3291 }
3292 }
3293 }
3294
3295 /**
3296 * lpfc_sli4_node_prep - Assign RPIs for active nodes.
3297 * @phba: pointer to lpfc hba data structure.
3298 *
3299 * Allocate RPIs for all active remote nodes. This is needed whenever
3300 * an SLI4 adapter is reset and the driver is not unloading. Its purpose
3301 * is to fix up the temporary rpi assignments.
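 *
 * Vports flagged FC_UNLOADING are skipped, and a node whose RPI
 * allocation fails is simply left without an RPI.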
3302 **/
3303 void
3304 lpfc_sli4_node_prep(struct lpfc_hba *phba)
3305 {
3306 struct lpfc_nodelist *ndlp, *next_ndlp;
3307 struct lpfc_vport **vports;
3308 int i, rpi;
3309
3310 if (phba->sli_rev != LPFC_SLI_REV4)
3311 return;
3312
3313 vports = lpfc_create_vport_work_array(phba);
3314 if (vports == NULL)
3315 return;
3316
3317 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3318 if (vports[i]->load_flag & FC_UNLOADING)
3319 continue;
3320
3321 list_for_each_entry_safe(ndlp, next_ndlp,
3322 &vports[i]->fc_nodes,
3323 nlp_listp) {
3324 rpi = lpfc_sli4_alloc_rpi(phba);
3325 if (rpi == LPFC_RPI_ALLOC_ERROR) {
3326 /* TODO print log? */
3327 continue;
3328 }
3329 ndlp->nlp_rpi = rpi;
3330 lpfc_printf_vlog(ndlp->vport, KERN_INFO,
3331 LOG_NODE | LOG_DISCOVERY,
3332 "0009 Assign RPI x%x to ndlp x%px "
3333 "DID:x%06x flg:x%x\n",
3334 ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
3335 ndlp->nlp_flag);
3336 }
3337 }
3338 lpfc_destroy_vport_work_array(phba, vports);
3339 }
3340
3341 /**
3342 * lpfc_create_expedite_pool - create expedite pool
3343 * @phba: pointer to lpfc hba data structure.
3344 *
3345 * This routine moves a batch of XRIs from lpfc_io_buf_list_put of HWQ 0
3346 * to the expedite pool. Mark them as expedite.
3347 **/
3348 static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
3349 {
3350 struct lpfc_sli4_hdw_queue *qp;
3351 struct lpfc_io_buf *lpfc_ncmd;
3352 struct lpfc_io_buf *lpfc_ncmd_next;
3353 struct lpfc_epd_pool *epd_pool;
3354 unsigned long iflag;
3355
3356 epd_pool = &phba->epd_pool;
3357 qp = &phba->sli4_hba.hdwq[0];
3358
3359 spin_lock_init(&epd_pool->lock);
3360 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3361 spin_lock(&epd_pool->lock);
3362 INIT_LIST_HEAD(&epd_pool->list);
3363 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3364 &qp->lpfc_io_buf_list_put, list) {
3365 list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
3366 lpfc_ncmd->expedite = true;
3367 qp->put_io_bufs--;
3368 epd_pool->count++;
3369 if (epd_pool->count >= XRI_BATCH)
3370 break;
3371 }
3372 spin_unlock(&epd_pool->lock);
3373 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3374 }
3375
3376 /**
3377 * lpfc_destroy_expedite_pool - destroy expedite pool
3378 * @phba: pointer to lpfc hba data structure.
3379 *
3380 * This routine returns XRIs from the expedite pool to lpfc_io_buf_list_put
3381 * of HWQ 0. Clear the expedite mark.
3382 **/
3383 static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
3384 {
3385 struct lpfc_sli4_hdw_queue *qp;
3386 struct lpfc_io_buf *lpfc_ncmd;
3387 struct lpfc_io_buf *lpfc_ncmd_next;
3388 struct lpfc_epd_pool *epd_pool;
3389 unsigned long iflag;
3390
3391 epd_pool = &phba->epd_pool;
3392 qp = &phba->sli4_hba.hdwq[0];
3393
3394 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3395 spin_lock(&epd_pool->lock);
3396 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3397 &epd_pool->list, list) {
3398 list_move_tail(&lpfc_ncmd->list,
3399 &qp->lpfc_io_buf_list_put);
3400 lpfc_ncmd->expedite = false;
3401 qp->put_io_bufs++;
3402 epd_pool->count--;
3403 }
3404 spin_unlock(&epd_pool->lock);
3405 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3406 }
3407
3408 /**
3409 * lpfc_create_multixri_pools - create multi-XRI pools
3410 * @phba: pointer to lpfc hba data structure.
3411 *
3412 * This routine initializes the public and private pools per HWQ. Then it
3413 * moves XRIs from lpfc_io_buf_list_put to the public pool. High and low
3414 * watermarks are also initialized.
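 *
 * XRIs are split evenly across hardware queues (io_xri_cnt / cfg_hdw_queue
 * per public pool). Each private pool starts empty with its high watermark
 * set to half of the per-queue XRI limit and its low watermark set to
 * XRI_BATCH. On allocation failure the pools already created are freed
 * and cfg_xri_rebalancing is turned off.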
3415 **/ 3416 void lpfc_create_multixri_pools(struct lpfc_hba *phba) 3417 { 3418 u32 i, j; 3419 u32 hwq_count; 3420 u32 count_per_hwq; 3421 struct lpfc_io_buf *lpfc_ncmd; 3422 struct lpfc_io_buf *lpfc_ncmd_next; 3423 unsigned long iflag; 3424 struct lpfc_sli4_hdw_queue *qp; 3425 struct lpfc_multixri_pool *multixri_pool; 3426 struct lpfc_pbl_pool *pbl_pool; 3427 struct lpfc_pvt_pool *pvt_pool; 3428 3429 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3430 "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n", 3431 phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu, 3432 phba->sli4_hba.io_xri_cnt); 3433 3434 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3435 lpfc_create_expedite_pool(phba); 3436 3437 hwq_count = phba->cfg_hdw_queue; 3438 count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count; 3439 3440 for (i = 0; i < hwq_count; i++) { 3441 multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL); 3442 3443 if (!multixri_pool) { 3444 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3445 "1238 Failed to allocate memory for " 3446 "multixri_pool\n"); 3447 3448 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3449 lpfc_destroy_expedite_pool(phba); 3450 3451 j = 0; 3452 while (j < i) { 3453 qp = &phba->sli4_hba.hdwq[j]; 3454 kfree(qp->p_multixri_pool); 3455 j++; 3456 } 3457 phba->cfg_xri_rebalancing = 0; 3458 return; 3459 } 3460 3461 qp = &phba->sli4_hba.hdwq[i]; 3462 qp->p_multixri_pool = multixri_pool; 3463 3464 multixri_pool->xri_limit = count_per_hwq; 3465 multixri_pool->rrb_next_hwqid = i; 3466 3467 /* Deal with public free xri pool */ 3468 pbl_pool = &multixri_pool->pbl_pool; 3469 spin_lock_init(&pbl_pool->lock); 3470 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); 3471 spin_lock(&pbl_pool->lock); 3472 INIT_LIST_HEAD(&pbl_pool->list); 3473 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3474 &qp->lpfc_io_buf_list_put, list) { 3475 list_move_tail(&lpfc_ncmd->list, &pbl_pool->list); 3476 qp->put_io_bufs--; 3477 pbl_pool->count++; 3478 } 3479 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3480 "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n", 3481 pbl_pool->count, i); 3482 spin_unlock(&pbl_pool->lock); 3483 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); 3484 3485 /* Deal with private free xri pool */ 3486 pvt_pool = &multixri_pool->pvt_pool; 3487 pvt_pool->high_watermark = multixri_pool->xri_limit / 2; 3488 pvt_pool->low_watermark = XRI_BATCH; 3489 spin_lock_init(&pvt_pool->lock); 3490 spin_lock_irqsave(&pvt_pool->lock, iflag); 3491 INIT_LIST_HEAD(&pvt_pool->list); 3492 pvt_pool->count = 0; 3493 spin_unlock_irqrestore(&pvt_pool->lock, iflag); 3494 } 3495 } 3496 3497 /** 3498 * lpfc_destroy_multixri_pools - destroy multi-XRI pools 3499 * @phba: pointer to lpfc hba data structure. 3500 * 3501 * This routine returns XRIs from public/private to lpfc_io_buf_list_put. 
3502 **/ 3503 static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba) 3504 { 3505 u32 i; 3506 u32 hwq_count; 3507 struct lpfc_io_buf *lpfc_ncmd; 3508 struct lpfc_io_buf *lpfc_ncmd_next; 3509 unsigned long iflag; 3510 struct lpfc_sli4_hdw_queue *qp; 3511 struct lpfc_multixri_pool *multixri_pool; 3512 struct lpfc_pbl_pool *pbl_pool; 3513 struct lpfc_pvt_pool *pvt_pool; 3514 3515 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3516 lpfc_destroy_expedite_pool(phba); 3517 3518 if (!(phba->pport->load_flag & FC_UNLOADING)) 3519 lpfc_sli_flush_io_rings(phba); 3520 3521 hwq_count = phba->cfg_hdw_queue; 3522 3523 for (i = 0; i < hwq_count; i++) { 3524 qp = &phba->sli4_hba.hdwq[i]; 3525 multixri_pool = qp->p_multixri_pool; 3526 if (!multixri_pool) 3527 continue; 3528 3529 qp->p_multixri_pool = NULL; 3530 3531 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); 3532 3533 /* Deal with public free xri pool */ 3534 pbl_pool = &multixri_pool->pbl_pool; 3535 spin_lock(&pbl_pool->lock); 3536 3537 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3538 "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n", 3539 pbl_pool->count, i); 3540 3541 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3542 &pbl_pool->list, list) { 3543 list_move_tail(&lpfc_ncmd->list, 3544 &qp->lpfc_io_buf_list_put); 3545 qp->put_io_bufs++; 3546 pbl_pool->count--; 3547 } 3548 3549 INIT_LIST_HEAD(&pbl_pool->list); 3550 pbl_pool->count = 0; 3551 3552 spin_unlock(&pbl_pool->lock); 3553 3554 /* Deal with private free xri pool */ 3555 pvt_pool = &multixri_pool->pvt_pool; 3556 spin_lock(&pvt_pool->lock); 3557 3558 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3559 "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n", 3560 pvt_pool->count, i); 3561 3562 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3563 &pvt_pool->list, list) { 3564 list_move_tail(&lpfc_ncmd->list, 3565 &qp->lpfc_io_buf_list_put); 3566 qp->put_io_bufs++; 3567 pvt_pool->count--; 3568 } 3569 3570 INIT_LIST_HEAD(&pvt_pool->list); 3571 pvt_pool->count = 0; 3572 3573 spin_unlock(&pvt_pool->lock); 3574 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); 3575 3576 kfree(multixri_pool); 3577 } 3578 } 3579 3580 /** 3581 * lpfc_online - Initialize and bring a HBA online 3582 * @phba: pointer to lpfc hba data structure. 3583 * 3584 * This routine initializes the HBA and brings a HBA online. During this 3585 * process, the management interface is blocked to prevent user space access 3586 * to the HBA interfering with the driver initialization. 3587 * 3588 * Return codes 3589 * 0 - successful 3590 * 1 - failed 3591 **/ 3592 int 3593 lpfc_online(struct lpfc_hba *phba) 3594 { 3595 struct lpfc_vport *vport; 3596 struct lpfc_vport **vports; 3597 int i, error = 0; 3598 bool vpis_cleared = false; 3599 3600 if (!phba) 3601 return 0; 3602 vport = phba->pport; 3603 3604 if (!(vport->fc_flag & FC_OFFLINE_MODE)) 3605 return 0; 3606 3607 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 3608 "0458 Bring Adapter online\n"); 3609 3610 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); 3611 3612 if (phba->sli_rev == LPFC_SLI_REV4) { 3613 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */ 3614 lpfc_unblock_mgmt_io(phba); 3615 return 1; 3616 } 3617 spin_lock_irq(&phba->hbalock); 3618 if (!phba->sli4_hba.max_cfg_param.vpi_used) 3619 vpis_cleared = true; 3620 spin_unlock_irq(&phba->hbalock); 3621 3622 /* Reestablish the local initiator port. 3623 * The offline process destroyed the previous lport. 
3624 */ 3625 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME && 3626 !phba->nvmet_support) { 3627 error = lpfc_nvme_create_localport(phba->pport); 3628 if (error) 3629 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3630 "6132 NVME restore reg failed " 3631 "on nvmei error x%x\n", error); 3632 } 3633 } else { 3634 lpfc_sli_queue_init(phba); 3635 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ 3636 lpfc_unblock_mgmt_io(phba); 3637 return 1; 3638 } 3639 } 3640 3641 vports = lpfc_create_vport_work_array(phba); 3642 if (vports != NULL) { 3643 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3644 struct Scsi_Host *shost; 3645 shost = lpfc_shost_from_vport(vports[i]); 3646 spin_lock_irq(shost->host_lock); 3647 vports[i]->fc_flag &= ~FC_OFFLINE_MODE; 3648 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 3649 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 3650 if (phba->sli_rev == LPFC_SLI_REV4) { 3651 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 3652 if ((vpis_cleared) && 3653 (vports[i]->port_type != 3654 LPFC_PHYSICAL_PORT)) 3655 vports[i]->vpi = 0; 3656 } 3657 spin_unlock_irq(shost->host_lock); 3658 } 3659 } 3660 lpfc_destroy_vport_work_array(phba, vports); 3661 3662 if (phba->cfg_xri_rebalancing) 3663 lpfc_create_multixri_pools(phba); 3664 3665 lpfc_cpuhp_add(phba); 3666 3667 lpfc_unblock_mgmt_io(phba); 3668 return 0; 3669 } 3670 3671 /** 3672 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked 3673 * @phba: pointer to lpfc hba data structure. 3674 * 3675 * This routine marks a HBA's management interface as not blocked. Once the 3676 * HBA's management interface is marked as not blocked, all the user space 3677 * access to the HBA, whether they are from sysfs interface or libdfc 3678 * interface will be allowed. The HBA is set to block the management interface 3679 * when the driver prepares the HBA interface for online or offline and then 3680 * set to unblock the management interface afterwards. 3681 **/ 3682 void 3683 lpfc_unblock_mgmt_io(struct lpfc_hba * phba) 3684 { 3685 unsigned long iflag; 3686 3687 spin_lock_irqsave(&phba->hbalock, iflag); 3688 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; 3689 spin_unlock_irqrestore(&phba->hbalock, iflag); 3690 } 3691 3692 /** 3693 * lpfc_offline_prep - Prepare a HBA to be brought offline 3694 * @phba: pointer to lpfc hba data structure. 3695 * @mbx_action: flag for mailbox shutdown action. 3696 * 3697 * This routine is invoked to prepare a HBA to be brought offline. It performs 3698 * unregistration login to all the nodes on all vports and flushes the mailbox 3699 * queue to make it ready to be brought offline. 
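 *
 * Illustrative ordering only (an assumption about callers, not a new code
 * path): reset and offline handling typically pairs this routine with
 * lpfc_offline(), for example:
 *
 *	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
 *	lpfc_offline(phba);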
3700 **/ 3701 void 3702 lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action) 3703 { 3704 struct lpfc_vport *vport = phba->pport; 3705 struct lpfc_nodelist *ndlp, *next_ndlp; 3706 struct lpfc_vport **vports; 3707 struct Scsi_Host *shost; 3708 int i; 3709 int offline; 3710 bool hba_pci_err; 3711 3712 if (vport->fc_flag & FC_OFFLINE_MODE) 3713 return; 3714 3715 lpfc_block_mgmt_io(phba, mbx_action); 3716 3717 lpfc_linkdown(phba); 3718 3719 offline = pci_channel_offline(phba->pcidev); 3720 hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags); 3721 3722 /* Issue an unreg_login to all nodes on all vports */ 3723 vports = lpfc_create_vport_work_array(phba); 3724 if (vports != NULL) { 3725 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3726 if (vports[i]->load_flag & FC_UNLOADING) 3727 continue; 3728 shost = lpfc_shost_from_vport(vports[i]); 3729 spin_lock_irq(shost->host_lock); 3730 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; 3731 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 3732 vports[i]->fc_flag &= ~FC_VFI_REGISTERED; 3733 spin_unlock_irq(shost->host_lock); 3734 3735 shost = lpfc_shost_from_vport(vports[i]); 3736 list_for_each_entry_safe(ndlp, next_ndlp, 3737 &vports[i]->fc_nodes, 3738 nlp_listp) { 3739 3740 spin_lock_irq(&ndlp->lock); 3741 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 3742 spin_unlock_irq(&ndlp->lock); 3743 3744 if (offline || hba_pci_err) { 3745 spin_lock_irq(&ndlp->lock); 3746 ndlp->nlp_flag &= ~(NLP_UNREG_INP | 3747 NLP_RPI_REGISTERED); 3748 spin_unlock_irq(&ndlp->lock); 3749 if (phba->sli_rev == LPFC_SLI_REV4) 3750 lpfc_sli_rpi_release(vports[i], 3751 ndlp); 3752 } else { 3753 lpfc_unreg_rpi(vports[i], ndlp); 3754 } 3755 /* 3756 * Whenever an SLI4 port goes offline, free the 3757 * RPI. Get a new RPI when the adapter port 3758 * comes back online. 3759 */ 3760 if (phba->sli_rev == LPFC_SLI_REV4) { 3761 lpfc_printf_vlog(vports[i], KERN_INFO, 3762 LOG_NODE | LOG_DISCOVERY, 3763 "0011 Free RPI x%x on " 3764 "ndlp: x%px did x%x\n", 3765 ndlp->nlp_rpi, ndlp, 3766 ndlp->nlp_DID); 3767 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); 3768 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; 3769 } 3770 3771 if (ndlp->nlp_type & NLP_FABRIC) { 3772 lpfc_disc_state_machine(vports[i], ndlp, 3773 NULL, NLP_EVT_DEVICE_RECOVERY); 3774 3775 /* Don't remove the node unless the node 3776 * has been unregistered with the 3777 * transport, and we're not in recovery 3778 * before dev_loss_tmo triggered. 3779 * Otherwise, let dev_loss take care of 3780 * the node. 3781 */ 3782 if (!(ndlp->save_flags & 3783 NLP_IN_RECOV_POST_DEV_LOSS) && 3784 !(ndlp->fc4_xpt_flags & 3785 (NVME_XPT_REGD | SCSI_XPT_REGD))) 3786 lpfc_disc_state_machine 3787 (vports[i], ndlp, 3788 NULL, 3789 NLP_EVT_DEVICE_RM); 3790 } 3791 } 3792 } 3793 } 3794 lpfc_destroy_vport_work_array(phba, vports); 3795 3796 lpfc_sli_mbox_sys_shutdown(phba, mbx_action); 3797 3798 if (phba->wq) 3799 flush_workqueue(phba->wq); 3800 } 3801 3802 /** 3803 * lpfc_offline - Bring a HBA offline 3804 * @phba: pointer to lpfc hba data structure. 3805 * 3806 * This routine actually brings a HBA offline. It stops all the timers 3807 * associated with the HBA, brings down the SLI layer, and eventually 3808 * marks the HBA as in offline state for the upper layer protocol. 
3809 **/ 3810 void 3811 lpfc_offline(struct lpfc_hba *phba) 3812 { 3813 struct Scsi_Host *shost; 3814 struct lpfc_vport **vports; 3815 int i; 3816 3817 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 3818 return; 3819 3820 /* stop port and all timers associated with this hba */ 3821 lpfc_stop_port(phba); 3822 3823 /* Tear down the local and target port registrations. The 3824 * nvme transports need to cleanup. 3825 */ 3826 lpfc_nvmet_destroy_targetport(phba); 3827 lpfc_nvme_destroy_localport(phba->pport); 3828 3829 vports = lpfc_create_vport_work_array(phba); 3830 if (vports != NULL) 3831 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 3832 lpfc_stop_vport_timers(vports[i]); 3833 lpfc_destroy_vport_work_array(phba, vports); 3834 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 3835 "0460 Bring Adapter offline\n"); 3836 /* Bring down the SLI Layer and cleanup. The HBA is offline 3837 now. */ 3838 lpfc_sli_hba_down(phba); 3839 spin_lock_irq(&phba->hbalock); 3840 phba->work_ha = 0; 3841 spin_unlock_irq(&phba->hbalock); 3842 vports = lpfc_create_vport_work_array(phba); 3843 if (vports != NULL) 3844 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3845 shost = lpfc_shost_from_vport(vports[i]); 3846 spin_lock_irq(shost->host_lock); 3847 vports[i]->work_port_events = 0; 3848 vports[i]->fc_flag |= FC_OFFLINE_MODE; 3849 spin_unlock_irq(shost->host_lock); 3850 } 3851 lpfc_destroy_vport_work_array(phba, vports); 3852 /* If OFFLINE flag is clear (i.e. unloading), cpuhp removal is handled 3853 * in hba_unset 3854 */ 3855 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 3856 __lpfc_cpuhp_remove(phba); 3857 3858 if (phba->cfg_xri_rebalancing) 3859 lpfc_destroy_multixri_pools(phba); 3860 } 3861 3862 /** 3863 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists 3864 * @phba: pointer to lpfc hba data structure. 3865 * 3866 * This routine is to free all the SCSI buffers and IOCBs from the driver 3867 * list back to kernel. It is called from lpfc_pci_remove_one to free 3868 * the internal resources before the device is removed from the system. 3869 **/ 3870 static void 3871 lpfc_scsi_free(struct lpfc_hba *phba) 3872 { 3873 struct lpfc_io_buf *sb, *sb_next; 3874 3875 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 3876 return; 3877 3878 spin_lock_irq(&phba->hbalock); 3879 3880 /* Release all the lpfc_scsi_bufs maintained by this host. */ 3881 3882 spin_lock(&phba->scsi_buf_list_put_lock); 3883 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put, 3884 list) { 3885 list_del(&sb->list); 3886 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, 3887 sb->dma_handle); 3888 kfree(sb); 3889 phba->total_scsi_bufs--; 3890 } 3891 spin_unlock(&phba->scsi_buf_list_put_lock); 3892 3893 spin_lock(&phba->scsi_buf_list_get_lock); 3894 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get, 3895 list) { 3896 list_del(&sb->list); 3897 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, 3898 sb->dma_handle); 3899 kfree(sb); 3900 phba->total_scsi_bufs--; 3901 } 3902 spin_unlock(&phba->scsi_buf_list_get_lock); 3903 spin_unlock_irq(&phba->hbalock); 3904 } 3905 3906 /** 3907 * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists 3908 * @phba: pointer to lpfc hba data structure. 3909 * 3910 * This routine is to free all the IO buffers and IOCBs from the driver 3911 * list back to kernel. It is called from lpfc_pci_remove_one to free 3912 * the internal resources before the device is removed from the system. 
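 *
 * As implemented below, both the per-hardware-queue put and get lists are
 * drained: each buffer's DMA area is returned to lpfc_sg_dma_buf_pool and
 * the per-queue and total buffer counters are decremented accordingly.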
3913 **/ 3914 void 3915 lpfc_io_free(struct lpfc_hba *phba) 3916 { 3917 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next; 3918 struct lpfc_sli4_hdw_queue *qp; 3919 int idx; 3920 3921 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 3922 qp = &phba->sli4_hba.hdwq[idx]; 3923 /* Release all the lpfc_nvme_bufs maintained by this host. */ 3924 spin_lock(&qp->io_buf_list_put_lock); 3925 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3926 &qp->lpfc_io_buf_list_put, 3927 list) { 3928 list_del(&lpfc_ncmd->list); 3929 qp->put_io_bufs--; 3930 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 3931 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 3932 if (phba->cfg_xpsgl && !phba->nvmet_support) 3933 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd); 3934 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd); 3935 kfree(lpfc_ncmd); 3936 qp->total_io_bufs--; 3937 } 3938 spin_unlock(&qp->io_buf_list_put_lock); 3939 3940 spin_lock(&qp->io_buf_list_get_lock); 3941 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3942 &qp->lpfc_io_buf_list_get, 3943 list) { 3944 list_del(&lpfc_ncmd->list); 3945 qp->get_io_bufs--; 3946 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 3947 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 3948 if (phba->cfg_xpsgl && !phba->nvmet_support) 3949 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd); 3950 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd); 3951 kfree(lpfc_ncmd); 3952 qp->total_io_bufs--; 3953 } 3954 spin_unlock(&qp->io_buf_list_get_lock); 3955 } 3956 } 3957 3958 /** 3959 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping 3960 * @phba: pointer to lpfc hba data structure. 3961 * 3962 * This routine first calculates the sizes of the current els and allocated 3963 * scsi sgl lists, and then goes through all sgls to update the physical 3964 * XRIs assigned due to port function reset. During port initialization, the 3965 * current els and allocated scsi sgl lists are 0s.
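 *
 * In outline (matching the body below): the required count from
 * lpfc_sli4_get_els_iocb_cnt() is compared with the cached
 * sli4_hba.els_xri_cnt, lpfc_els_sgl_list is grown or shrunk to match, and
 * each remaining sgl is then stamped with a fresh XRI from
 * lpfc_sli4_next_xritag().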
3966 * 3967 * Return codes 3968 * 0 - successful (for now, it always returns 0) 3969 **/ 3970 int 3971 lpfc_sli4_els_sgl_update(struct lpfc_hba *phba) 3972 { 3973 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; 3974 uint16_t i, lxri, xri_cnt, els_xri_cnt; 3975 LIST_HEAD(els_sgl_list); 3976 int rc; 3977 3978 /* 3979 * update on pci function's els xri-sgl list 3980 */ 3981 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 3982 3983 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) { 3984 /* els xri-sgl expanded */ 3985 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt; 3986 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3987 "3157 ELS xri-sgl count increased from " 3988 "%d to %d\n", phba->sli4_hba.els_xri_cnt, 3989 els_xri_cnt); 3990 /* allocate the additional els sgls */ 3991 for (i = 0; i < xri_cnt; i++) { 3992 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), 3993 GFP_KERNEL); 3994 if (sglq_entry == NULL) { 3995 lpfc_printf_log(phba, KERN_ERR, 3996 LOG_TRACE_EVENT, 3997 "2562 Failure to allocate an " 3998 "ELS sgl entry:%d\n", i); 3999 rc = -ENOMEM; 4000 goto out_free_mem; 4001 } 4002 sglq_entry->buff_type = GEN_BUFF_TYPE; 4003 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, 4004 &sglq_entry->phys); 4005 if (sglq_entry->virt == NULL) { 4006 kfree(sglq_entry); 4007 lpfc_printf_log(phba, KERN_ERR, 4008 LOG_TRACE_EVENT, 4009 "2563 Failure to allocate an " 4010 "ELS mbuf:%d\n", i); 4011 rc = -ENOMEM; 4012 goto out_free_mem; 4013 } 4014 sglq_entry->sgl = sglq_entry->virt; 4015 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE); 4016 sglq_entry->state = SGL_FREED; 4017 list_add_tail(&sglq_entry->list, &els_sgl_list); 4018 } 4019 spin_lock_irq(&phba->sli4_hba.sgl_list_lock); 4020 list_splice_init(&els_sgl_list, 4021 &phba->sli4_hba.lpfc_els_sgl_list); 4022 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock); 4023 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) { 4024 /* els xri-sgl shrinked */ 4025 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt; 4026 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4027 "3158 ELS xri-sgl count decreased from " 4028 "%d to %d\n", phba->sli4_hba.els_xri_cnt, 4029 els_xri_cnt); 4030 spin_lock_irq(&phba->sli4_hba.sgl_list_lock); 4031 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, 4032 &els_sgl_list); 4033 /* release extra els sgls from list */ 4034 for (i = 0; i < xri_cnt; i++) { 4035 list_remove_head(&els_sgl_list, 4036 sglq_entry, struct lpfc_sglq, list); 4037 if (sglq_entry) { 4038 __lpfc_mbuf_free(phba, sglq_entry->virt, 4039 sglq_entry->phys); 4040 kfree(sglq_entry); 4041 } 4042 } 4043 list_splice_init(&els_sgl_list, 4044 &phba->sli4_hba.lpfc_els_sgl_list); 4045 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock); 4046 } else 4047 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4048 "3163 ELS xri-sgl count unchanged: %d\n", 4049 els_xri_cnt); 4050 phba->sli4_hba.els_xri_cnt = els_xri_cnt; 4051 4052 /* update xris to els sgls on the list */ 4053 sglq_entry = NULL; 4054 sglq_entry_next = NULL; 4055 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 4056 &phba->sli4_hba.lpfc_els_sgl_list, list) { 4057 lxri = lpfc_sli4_next_xritag(phba); 4058 if (lxri == NO_XRI) { 4059 lpfc_printf_log(phba, KERN_ERR, 4060 LOG_TRACE_EVENT, 4061 "2400 Failed to allocate xri for " 4062 "ELS sgl\n"); 4063 rc = -ENOMEM; 4064 goto out_free_mem; 4065 } 4066 sglq_entry->sli4_lxritag = lxri; 4067 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 4068 } 4069 return 0; 4070 4071 out_free_mem: 4072 lpfc_free_els_sgl_list(phba); 4073 return rc; 4074 } 4075 4076 /** 4077 * lpfc_sli4_nvmet_sgl_update - 
update NVMET xri-sgl sizing and mapping 4078 * @phba: pointer to lpfc hba data structure. 4079 * 4080 * This routine first calculates the size of the currently allocated 4081 * nvmet xri-sgl list, and then goes through all sgls to update the physical 4082 * XRIs assigned due to port function reset. During port initialization, the 4083 * allocated nvmet xri-sgl list is empty. 4084 * 4085 * Return codes 4086 * 0 - successful (for now, it always returns 0) 4087 **/ 4088 int 4089 lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba) 4090 { 4091 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; 4092 uint16_t i, lxri, xri_cnt, els_xri_cnt; 4093 uint16_t nvmet_xri_cnt; 4094 LIST_HEAD(nvmet_sgl_list); 4095 int rc; 4096 4097 /* 4098 * update on pci function's nvmet xri-sgl list 4099 */ 4100 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 4101 4102 /* For NVMET, ALL remaining XRIs are dedicated for IO processing */ 4103 nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; 4104 if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) { 4105 /* nvmet xri-sgl expanded */ 4106 xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt; 4107 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4108 "6302 NVMET xri-sgl cnt grew from %d to %d\n", 4109 phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt); 4110 /* allocate the additional nvmet sgls */ 4111 for (i = 0; i < xri_cnt; i++) { 4112 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), 4113 GFP_KERNEL); 4114 if (sglq_entry == NULL) { 4115 lpfc_printf_log(phba, KERN_ERR, 4116 LOG_TRACE_EVENT, 4117 "6303 Failure to allocate an " 4118 "NVMET sgl entry:%d\n", i); 4119 rc = -ENOMEM; 4120 goto out_free_mem; 4121 } 4122 sglq_entry->buff_type = NVMET_BUFF_TYPE; 4123 sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0, 4124 &sglq_entry->phys); 4125 if (sglq_entry->virt == NULL) { 4126 kfree(sglq_entry); 4127 lpfc_printf_log(phba, KERN_ERR, 4128 LOG_TRACE_EVENT, 4129 "6304 Failure to allocate an " 4130 "NVMET buf:%d\n", i); 4131 rc = -ENOMEM; 4132 goto out_free_mem; 4133 } 4134 sglq_entry->sgl = sglq_entry->virt; 4135 memset(sglq_entry->sgl, 0, 4136 phba->cfg_sg_dma_buf_size); 4137 sglq_entry->state = SGL_FREED; 4138 list_add_tail(&sglq_entry->list, &nvmet_sgl_list); 4139 } 4140 spin_lock_irq(&phba->hbalock); 4141 spin_lock(&phba->sli4_hba.sgl_list_lock); 4142 list_splice_init(&nvmet_sgl_list, 4143 &phba->sli4_hba.lpfc_nvmet_sgl_list); 4144 spin_unlock(&phba->sli4_hba.sgl_list_lock); 4145 spin_unlock_irq(&phba->hbalock); 4146 } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) { 4147 /* nvmet xri-sgl shrunk */ 4148 xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt; 4149 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4150 "6305 NVMET xri-sgl count decreased from " 4151 "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt, 4152 nvmet_xri_cnt); 4153 spin_lock_irq(&phba->hbalock); 4154 spin_lock(&phba->sli4_hba.sgl_list_lock); 4155 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, 4156 &nvmet_sgl_list); 4157 /* release extra nvmet sgls from list */ 4158 for (i = 0; i < xri_cnt; i++) { 4159 list_remove_head(&nvmet_sgl_list, 4160 sglq_entry, struct lpfc_sglq, list); 4161 if (sglq_entry) { 4162 lpfc_nvmet_buf_free(phba, sglq_entry->virt, 4163 sglq_entry->phys); 4164 kfree(sglq_entry); 4165 } 4166 } 4167 list_splice_init(&nvmet_sgl_list, 4168 &phba->sli4_hba.lpfc_nvmet_sgl_list); 4169 spin_unlock(&phba->sli4_hba.sgl_list_lock); 4170 spin_unlock_irq(&phba->hbalock); 4171 } else 4172 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4173 "6306 NVMET xri-sgl count unchanged: %d\n", 4174
nvmet_xri_cnt); 4175 phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt; 4176 4177 /* update xris to nvmet sgls on the list */ 4178 sglq_entry = NULL; 4179 sglq_entry_next = NULL; 4180 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 4181 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) { 4182 lxri = lpfc_sli4_next_xritag(phba); 4183 if (lxri == NO_XRI) { 4184 lpfc_printf_log(phba, KERN_ERR, 4185 LOG_TRACE_EVENT, 4186 "6307 Failed to allocate xri for " 4187 "NVMET sgl\n"); 4188 rc = -ENOMEM; 4189 goto out_free_mem; 4190 } 4191 sglq_entry->sli4_lxritag = lxri; 4192 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 4193 } 4194 return 0; 4195 4196 out_free_mem: 4197 lpfc_free_nvmet_sgl_list(phba); 4198 return rc; 4199 } 4200 4201 int 4202 lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf) 4203 { 4204 LIST_HEAD(blist); 4205 struct lpfc_sli4_hdw_queue *qp; 4206 struct lpfc_io_buf *lpfc_cmd; 4207 struct lpfc_io_buf *iobufp, *prev_iobufp; 4208 int idx, cnt, xri, inserted; 4209 4210 cnt = 0; 4211 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 4212 qp = &phba->sli4_hba.hdwq[idx]; 4213 spin_lock_irq(&qp->io_buf_list_get_lock); 4214 spin_lock(&qp->io_buf_list_put_lock); 4215 4216 /* Take everything off the get and put lists */ 4217 list_splice_init(&qp->lpfc_io_buf_list_get, &blist); 4218 list_splice(&qp->lpfc_io_buf_list_put, &blist); 4219 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get); 4220 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); 4221 cnt += qp->get_io_bufs + qp->put_io_bufs; 4222 qp->get_io_bufs = 0; 4223 qp->put_io_bufs = 0; 4224 qp->total_io_bufs = 0; 4225 spin_unlock(&qp->io_buf_list_put_lock); 4226 spin_unlock_irq(&qp->io_buf_list_get_lock); 4227 } 4228 4229 /* 4230 * Take IO buffers off blist and put on cbuf sorted by XRI. 4231 * This is because POST_SGL takes a sequential range of XRIs 4232 * to post to the firmware. 4233 */ 4234 for (idx = 0; idx < cnt; idx++) { 4235 list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list); 4236 if (!lpfc_cmd) 4237 return cnt; 4238 if (idx == 0) { 4239 list_add_tail(&lpfc_cmd->list, cbuf); 4240 continue; 4241 } 4242 xri = lpfc_cmd->cur_iocbq.sli4_xritag; 4243 inserted = 0; 4244 prev_iobufp = NULL; 4245 list_for_each_entry(iobufp, cbuf, list) { 4246 if (xri < iobufp->cur_iocbq.sli4_xritag) { 4247 if (prev_iobufp) 4248 list_add(&lpfc_cmd->list, 4249 &prev_iobufp->list); 4250 else 4251 list_add(&lpfc_cmd->list, cbuf); 4252 inserted = 1; 4253 break; 4254 } 4255 prev_iobufp = iobufp; 4256 } 4257 if (!inserted) 4258 list_add_tail(&lpfc_cmd->list, cbuf); 4259 } 4260 return cnt; 4261 } 4262 4263 int 4264 lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf) 4265 { 4266 struct lpfc_sli4_hdw_queue *qp; 4267 struct lpfc_io_buf *lpfc_cmd; 4268 int idx, cnt; 4269 4270 qp = phba->sli4_hba.hdwq; 4271 cnt = 0; 4272 while (!list_empty(cbuf)) { 4273 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 4274 list_remove_head(cbuf, lpfc_cmd, 4275 struct lpfc_io_buf, list); 4276 if (!lpfc_cmd) 4277 return cnt; 4278 cnt++; 4279 qp = &phba->sli4_hba.hdwq[idx]; 4280 lpfc_cmd->hdwq_no = idx; 4281 lpfc_cmd->hdwq = qp; 4282 lpfc_cmd->cur_iocbq.cmd_cmpl = NULL; 4283 spin_lock(&qp->io_buf_list_put_lock); 4284 list_add_tail(&lpfc_cmd->list, 4285 &qp->lpfc_io_buf_list_put); 4286 qp->put_io_bufs++; 4287 qp->total_io_bufs++; 4288 spin_unlock(&qp->io_buf_list_put_lock); 4289 } 4290 } 4291 return cnt; 4292 } 4293 4294 /** 4295 * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping 4296 * @phba: pointer to lpfc hba data structure. 
4297 * 4298 * This routine first calculates the size of the currently allocated IO 4299 * xri-sgl list, and then goes through all io_buf sgls to update the physical 4300 * XRIs assigned due to port function reset. During port initialization, the 4301 * allocated IO xri-sgl list is empty. 4302 * 4303 * Return codes 4304 * 0 - successful (for now, it always returns 0) 4305 **/ 4306 int 4307 lpfc_sli4_io_sgl_update(struct lpfc_hba *phba) 4308 { 4309 struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL; 4310 uint16_t i, lxri, els_xri_cnt; 4311 uint16_t io_xri_cnt, io_xri_max; 4312 LIST_HEAD(io_sgl_list); 4313 int rc, cnt; 4314 4315 /* 4316 * update on pci function's allocated nvme xri-sgl list 4317 */ 4318 4319 /* maximum number of xris available for nvme buffers */ 4320 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 4321 io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; 4322 phba->sli4_hba.io_xri_max = io_xri_max; 4323 4324 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4325 "6074 Current allocated XRI sgl count:%d, " 4326 "maximum XRI count:%d els_xri_cnt:%d\n", 4327 phba->sli4_hba.io_xri_cnt, 4328 phba->sli4_hba.io_xri_max, 4329 els_xri_cnt); 4330 4331 cnt = lpfc_io_buf_flush(phba, &io_sgl_list); 4332 4333 if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) { 4334 /* max nvme xri shrunk below the allocated nvme buffers */ 4335 io_xri_cnt = phba->sli4_hba.io_xri_cnt - 4336 phba->sli4_hba.io_xri_max; 4337 /* release the extra allocated nvme buffers */ 4338 for (i = 0; i < io_xri_cnt; i++) { 4339 list_remove_head(&io_sgl_list, lpfc_ncmd, 4340 struct lpfc_io_buf, list); 4341 if (lpfc_ncmd) { 4342 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4343 lpfc_ncmd->data, 4344 lpfc_ncmd->dma_handle); 4345 kfree(lpfc_ncmd); 4346 } 4347 } 4348 phba->sli4_hba.io_xri_cnt -= io_xri_cnt; 4349 } 4350 4351 /* update xris associated to remaining allocated nvme buffers */ 4352 lpfc_ncmd = NULL; 4353 lpfc_ncmd_next = NULL; 4354 phba->sli4_hba.io_xri_cnt = cnt; 4355 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 4356 &io_sgl_list, list) { 4357 lxri = lpfc_sli4_next_xritag(phba); 4358 if (lxri == NO_XRI) { 4359 lpfc_printf_log(phba, KERN_ERR, 4360 LOG_TRACE_EVENT, 4361 "6075 Failed to allocate xri for " 4362 "nvme buffer\n"); 4363 rc = -ENOMEM; 4364 goto out_free_mem; 4365 } 4366 lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri; 4367 lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 4368 } 4369 cnt = lpfc_io_buf_replenish(phba, &io_sgl_list); 4370 return 0; 4371 4372 out_free_mem: 4373 lpfc_io_free(phba); 4374 return rc; 4375 } 4376 4377 /** 4378 * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec 4379 * @phba: Pointer to lpfc hba data structure. 4380 * @num_to_alloc: The requested number of buffers to allocate. 4381 * 4382 * This routine allocates nvme buffers for a device with the SLI-4 interface 4383 * spec; each nvme buffer contains all the information needed to initiate 4384 * an I/O. After allocating up to @num_to_alloc IO buffers and putting 4385 * them on a list, it posts them to the port by using SGL block post. 4386 * 4387 * Return codes: 4388 * int - number of IO buffers that were allocated and posted. 4389 * 0 = failure, less than num_to_alloc is a partial failure.
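 *
 * Illustrative call only (an assumption about the caller, not driver code;
 * "posted" is a hypothetical local):
 *
 *	posted = lpfc_new_io_buf(phba, phba->sli4_hba.io_xri_max);
 *	(a return smaller than the request indicates a partial allocation)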
4390 **/ 4391 int 4392 lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc) 4393 { 4394 struct lpfc_io_buf *lpfc_ncmd; 4395 struct lpfc_iocbq *pwqeq; 4396 uint16_t iotag, lxri = 0; 4397 int bcnt, num_posted; 4398 LIST_HEAD(prep_nblist); 4399 LIST_HEAD(post_nblist); 4400 LIST_HEAD(nvme_nblist); 4401 4402 phba->sli4_hba.io_xri_cnt = 0; 4403 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { 4404 lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL); 4405 if (!lpfc_ncmd) 4406 break; 4407 /* 4408 * Get memory from the pci pool to map the virt space to 4409 * pci bus space for an I/O. The DMA buffer includes the 4410 * number of SGE's necessary to support the sg_tablesize. 4411 */ 4412 lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool, 4413 GFP_KERNEL, 4414 &lpfc_ncmd->dma_handle); 4415 if (!lpfc_ncmd->data) { 4416 kfree(lpfc_ncmd); 4417 break; 4418 } 4419 4420 if (phba->cfg_xpsgl && !phba->nvmet_support) { 4421 INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list); 4422 } else { 4423 /* 4424 * 4K Page alignment is CRITICAL to BlockGuard, double 4425 * check to be sure. 4426 */ 4427 if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) && 4428 (((unsigned long)(lpfc_ncmd->data) & 4429 (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) { 4430 lpfc_printf_log(phba, KERN_ERR, 4431 LOG_TRACE_EVENT, 4432 "3369 Memory alignment err: " 4433 "addr=%lx\n", 4434 (unsigned long)lpfc_ncmd->data); 4435 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4436 lpfc_ncmd->data, 4437 lpfc_ncmd->dma_handle); 4438 kfree(lpfc_ncmd); 4439 break; 4440 } 4441 } 4442 4443 INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list); 4444 4445 lxri = lpfc_sli4_next_xritag(phba); 4446 if (lxri == NO_XRI) { 4447 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4448 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 4449 kfree(lpfc_ncmd); 4450 break; 4451 } 4452 pwqeq = &lpfc_ncmd->cur_iocbq; 4453 4454 /* Allocate iotag for lpfc_ncmd->cur_iocbq. */ 4455 iotag = lpfc_sli_next_iotag(phba, pwqeq); 4456 if (iotag == 0) { 4457 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4458 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 4459 kfree(lpfc_ncmd); 4460 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4461 "6121 Failed to allocate IOTAG for" 4462 " XRI:0x%x\n", lxri); 4463 lpfc_sli4_free_xri(phba, lxri); 4464 break; 4465 } 4466 pwqeq->sli4_lxritag = lxri; 4467 pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 4468 4469 /* Initialize local short-hand pointers. 
*/ 4470 lpfc_ncmd->dma_sgl = lpfc_ncmd->data; 4471 lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle; 4472 lpfc_ncmd->cur_iocbq.io_buf = lpfc_ncmd; 4473 spin_lock_init(&lpfc_ncmd->buf_lock); 4474 4475 /* add the nvme buffer to a post list */ 4476 list_add_tail(&lpfc_ncmd->list, &post_nblist); 4477 phba->sli4_hba.io_xri_cnt++; 4478 } 4479 lpfc_printf_log(phba, KERN_INFO, LOG_NVME, 4480 "6114 Allocate %d out of %d requested new NVME " 4481 "buffers of size x%zu bytes\n", bcnt, num_to_alloc, 4482 sizeof(*lpfc_ncmd)); 4483 4484 4485 /* post the list of nvme buffer sgls to port if available */ 4486 if (!list_empty(&post_nblist)) 4487 num_posted = lpfc_sli4_post_io_sgl_list( 4488 phba, &post_nblist, bcnt); 4489 else 4490 num_posted = 0; 4491 4492 return num_posted; 4493 } 4494 4495 static uint64_t 4496 lpfc_get_wwpn(struct lpfc_hba *phba) 4497 { 4498 uint64_t wwn; 4499 int rc; 4500 LPFC_MBOXQ_t *mboxq; 4501 MAILBOX_t *mb; 4502 4503 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 4504 GFP_KERNEL); 4505 if (!mboxq) 4506 return (uint64_t)-1; 4507 4508 /* First get WWN of HBA instance */ 4509 lpfc_read_nv(phba, mboxq); 4510 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4511 if (rc != MBX_SUCCESS) { 4512 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4513 "6019 Mailbox failed, mbxCmd x%x " 4514 "READ_NV, mbxStatus x%x\n", 4515 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 4516 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 4517 mempool_free(mboxq, phba->mbox_mem_pool); 4518 return (uint64_t) -1; 4519 } 4520 mb = &mboxq->u.mb; 4521 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t)); 4522 /* wwn is WWPN of HBA instance */ 4523 mempool_free(mboxq, phba->mbox_mem_pool); 4524 if (phba->sli_rev == LPFC_SLI_REV4) 4525 return be64_to_cpu(wwn); 4526 else 4527 return rol64(wwn, 32); 4528 } 4529 4530 /** 4531 * lpfc_vmid_res_alloc - Allocates resources for VMID 4532 * @phba: pointer to lpfc hba data structure. 4533 * @vport: pointer to vport data structure 4534 * 4535 * This routine allocates the resources needed for the VMID. 4536 * 4537 * Return codes 4538 * 0 on Success 4539 * Non-0 on Failure 4540 */ 4541 static int 4542 lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport) 4543 { 4544 /* VMID feature is supported only on SLI4 */ 4545 if (phba->sli_rev == LPFC_SLI_REV3) { 4546 phba->cfg_vmid_app_header = 0; 4547 phba->cfg_vmid_priority_tagging = 0; 4548 } 4549 4550 if (lpfc_is_vmid_enabled(phba)) { 4551 vport->vmid = 4552 kcalloc(phba->cfg_max_vmid, sizeof(struct lpfc_vmid), 4553 GFP_KERNEL); 4554 if (!vport->vmid) 4555 return -ENOMEM; 4556 4557 rwlock_init(&vport->vmid_lock); 4558 4559 /* Set the VMID parameters for the vport */ 4560 vport->vmid_priority_tagging = phba->cfg_vmid_priority_tagging; 4561 vport->vmid_inactivity_timeout = 4562 phba->cfg_vmid_inactivity_timeout; 4563 vport->max_vmid = phba->cfg_max_vmid; 4564 vport->cur_vmid_cnt = 0; 4565 4566 vport->vmid_priority_range = bitmap_zalloc 4567 (LPFC_VMID_MAX_PRIORITY_RANGE, GFP_KERNEL); 4568 4569 if (!vport->vmid_priority_range) { 4570 kfree(vport->vmid); 4571 return -ENOMEM; 4572 } 4573 4574 hash_init(vport->hash_table); 4575 } 4576 return 0; 4577 } 4578 4579 /** 4580 * lpfc_create_port - Create an FC port 4581 * @phba: pointer to lpfc hba data structure. 4582 * @instance: a unique integer ID for this FC port. 4583 * @dev: pointer to the device data structure. 4584 * 4585 * This routine creates an FC port for the upper layer protocol.
The FC port 4586 * can be created on top of either a physical port or a virtual port provided 4587 * by the HBA. This routine also allocates a SCSI host data structure (shost) 4588 * and associates the FC port created before adding the shost into the SCSI 4589 * layer. 4590 * 4591 * Return codes 4592 * @vport - pointer to the virtual N_Port data structure. 4593 * NULL - port create failed. 4594 **/ 4595 struct lpfc_vport * 4596 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) 4597 { 4598 struct lpfc_vport *vport; 4599 struct Scsi_Host *shost = NULL; 4600 struct scsi_host_template *template; 4601 int error = 0; 4602 int i; 4603 uint64_t wwn; 4604 bool use_no_reset_hba = false; 4605 int rc; 4606 4607 if (lpfc_no_hba_reset_cnt) { 4608 if (phba->sli_rev < LPFC_SLI_REV4 && 4609 dev == &phba->pcidev->dev) { 4610 /* Reset the port first */ 4611 lpfc_sli_brdrestart(phba); 4612 rc = lpfc_sli_chipset_init(phba); 4613 if (rc) 4614 return NULL; 4615 } 4616 wwn = lpfc_get_wwpn(phba); 4617 } 4618 4619 for (i = 0; i < lpfc_no_hba_reset_cnt; i++) { 4620 if (wwn == lpfc_no_hba_reset[i]) { 4621 lpfc_printf_log(phba, KERN_ERR, 4622 LOG_TRACE_EVENT, 4623 "6020 Setting use_no_reset port=%llx\n", 4624 wwn); 4625 use_no_reset_hba = true; 4626 break; 4627 } 4628 } 4629 4630 /* Seed template for SCSI host registration */ 4631 if (dev == &phba->pcidev->dev) { 4632 template = &phba->port_template; 4633 4634 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 4635 /* Seed physical port template */ 4636 memcpy(template, &lpfc_template, sizeof(*template)); 4637 4638 if (use_no_reset_hba) 4639 /* template is for a no reset SCSI Host */ 4640 template->eh_host_reset_handler = NULL; 4641 4642 /* Template for all vports this physical port creates */ 4643 memcpy(&phba->vport_template, &lpfc_template, 4644 sizeof(*template)); 4645 phba->vport_template.shost_groups = lpfc_vport_groups; 4646 phba->vport_template.eh_bus_reset_handler = NULL; 4647 phba->vport_template.eh_host_reset_handler = NULL; 4648 phba->vport_template.vendor_id = 0; 4649 4650 /* Initialize the host templates with updated value */ 4651 if (phba->sli_rev == LPFC_SLI_REV4) { 4652 template->sg_tablesize = phba->cfg_scsi_seg_cnt; 4653 phba->vport_template.sg_tablesize = 4654 phba->cfg_scsi_seg_cnt; 4655 } else { 4656 template->sg_tablesize = phba->cfg_sg_seg_cnt; 4657 phba->vport_template.sg_tablesize = 4658 phba->cfg_sg_seg_cnt; 4659 } 4660 4661 } else { 4662 /* NVMET is for physical port only */ 4663 memcpy(template, &lpfc_template_nvme, 4664 sizeof(*template)); 4665 } 4666 } else { 4667 template = &phba->vport_template; 4668 } 4669 4670 shost = scsi_host_alloc(template, sizeof(struct lpfc_vport)); 4671 if (!shost) 4672 goto out; 4673 4674 vport = (struct lpfc_vport *) shost->hostdata; 4675 vport->phba = phba; 4676 vport->load_flag |= FC_LOADING; 4677 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 4678 vport->fc_rscn_flush = 0; 4679 lpfc_get_vport_cfgparam(vport); 4680 4681 /* Adjust value in vport */ 4682 vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type; 4683 4684 shost->unique_id = instance; 4685 shost->max_id = LPFC_MAX_TARGET; 4686 shost->max_lun = vport->cfg_max_luns; 4687 shost->this_id = -1; 4688 shost->max_cmd_len = 16; 4689 4690 if (phba->sli_rev == LPFC_SLI_REV4) { 4691 if (!phba->cfg_fcp_mq_threshold || 4692 phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue) 4693 phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue; 4694 4695 shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(), 4696 phba->cfg_fcp_mq_threshold); 4697 4698 
shost->dma_boundary = 4699 phba->sli4_hba.pc_sli4_params.sge_supp_len-1; 4700 4701 if (phba->cfg_xpsgl && !phba->nvmet_support) 4702 shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE; 4703 else 4704 shost->sg_tablesize = phba->cfg_scsi_seg_cnt; 4705 } else 4706 /* SLI-3 has a limited number of hardware queues (3), 4707 * thus there is only one for FCP processing. 4708 */ 4709 shost->nr_hw_queues = 1; 4710 4711 /* 4712 * Set initial can_queue value since 0 is no longer supported and 4713 * scsi_add_host will fail. This will be adjusted later based on the 4714 * max xri value determined in hba setup. 4715 */ 4716 shost->can_queue = phba->cfg_hba_queue_depth - 10; 4717 if (dev != &phba->pcidev->dev) { 4718 shost->transportt = lpfc_vport_transport_template; 4719 vport->port_type = LPFC_NPIV_PORT; 4720 } else { 4721 shost->transportt = lpfc_transport_template; 4722 vport->port_type = LPFC_PHYSICAL_PORT; 4723 } 4724 4725 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 4726 "9081 CreatePort TMPLATE type %x TBLsize %d " 4727 "SEGcnt %d/%d\n", 4728 vport->port_type, shost->sg_tablesize, 4729 phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt); 4730 4731 /* Allocate the resources for VMID */ 4732 rc = lpfc_vmid_res_alloc(phba, vport); 4733 4734 if (rc) 4735 goto out; 4736 4737 /* Initialize all internally managed lists. */ 4738 INIT_LIST_HEAD(&vport->fc_nodes); 4739 INIT_LIST_HEAD(&vport->rcv_buffer_list); 4740 spin_lock_init(&vport->work_port_lock); 4741 4742 timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0); 4743 4744 timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0); 4745 4746 timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0); 4747 4748 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) 4749 lpfc_setup_bg(phba, shost); 4750 4751 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); 4752 if (error) 4753 goto out_put_shost; 4754 4755 spin_lock_irq(&phba->port_list_lock); 4756 list_add_tail(&vport->listentry, &phba->port_list); 4757 spin_unlock_irq(&phba->port_list_lock); 4758 return vport; 4759 4760 out_put_shost: 4761 kfree(vport->vmid); 4762 bitmap_free(vport->vmid_priority_range); 4763 scsi_host_put(shost); 4764 out: 4765 return NULL; 4766 } 4767 4768 /** 4769 * destroy_port - destroy an FC port 4770 * @vport: pointer to an lpfc virtual N_Port data structure. 4771 * 4772 * This routine destroys a FC port from the upper layer protocol. All the 4773 * resources associated with the port are released. 4774 **/ 4775 void 4776 destroy_port(struct lpfc_vport *vport) 4777 { 4778 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4779 struct lpfc_hba *phba = vport->phba; 4780 4781 lpfc_debugfs_terminate(vport); 4782 fc_remove_host(shost); 4783 scsi_remove_host(shost); 4784 4785 spin_lock_irq(&phba->port_list_lock); 4786 list_del_init(&vport->listentry); 4787 spin_unlock_irq(&phba->port_list_lock); 4788 4789 lpfc_cleanup(vport); 4790 return; 4791 } 4792 4793 /** 4794 * lpfc_get_instance - Get a unique integer ID 4795 * 4796 * This routine allocates a unique integer ID from lpfc_hba_index pool. It 4797 * uses the kernel idr facility to perform the task. 4798 * 4799 * Return codes: 4800 * instance - a unique integer ID allocated as the new instance. 4801 * -1 - lpfc get instance failed. 4802 **/ 4803 int 4804 lpfc_get_instance(void) 4805 { 4806 int ret; 4807 4808 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL); 4809 return ret < 0 ? 
-1 : ret; 4810 } 4811 4812 /** 4813 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done 4814 * @shost: pointer to SCSI host data structure. 4815 * @time: elapsed time of the scan in jiffies. 4816 * 4817 * This routine is called by the SCSI layer with a SCSI host to determine 4818 * whether the scan host is finished. 4819 * 4820 * Note: there is no scan_start function as adapter initialization will have 4821 * asynchronously kicked off the link initialization. 4822 * 4823 * Return codes 4824 * 0 - SCSI host scan is not over yet. 4825 * 1 - SCSI host scan is over. 4826 **/ 4827 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) 4828 { 4829 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4830 struct lpfc_hba *phba = vport->phba; 4831 int stat = 0; 4832 4833 spin_lock_irq(shost->host_lock); 4834 4835 if (vport->load_flag & FC_UNLOADING) { 4836 stat = 1; 4837 goto finished; 4838 } 4839 if (time >= msecs_to_jiffies(30 * 1000)) { 4840 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4841 "0461 Scanning longer than 30 " 4842 "seconds. Continuing initialization\n"); 4843 stat = 1; 4844 goto finished; 4845 } 4846 if (time >= msecs_to_jiffies(15 * 1000) && 4847 phba->link_state <= LPFC_LINK_DOWN) { 4848 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4849 "0465 Link down longer than 15 " 4850 "seconds. Continuing initialization\n"); 4851 stat = 1; 4852 goto finished; 4853 } 4854 4855 if (vport->port_state != LPFC_VPORT_READY) 4856 goto finished; 4857 if (vport->num_disc_nodes || vport->fc_prli_sent) 4858 goto finished; 4859 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000)) 4860 goto finished; 4861 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) 4862 goto finished; 4863 4864 stat = 1; 4865 4866 finished: 4867 spin_unlock_irq(shost->host_lock); 4868 return stat; 4869 } 4870 4871 static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost) 4872 { 4873 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; 4874 struct lpfc_hba *phba = vport->phba; 4875 4876 fc_host_supported_speeds(shost) = 0; 4877 /* 4878 * Avoid reporting supported link speed for FCoE as it can't be 4879 * controlled via FCoE. 4880 */ 4881 if (phba->hba_flag & HBA_FCOE_MODE) 4882 return; 4883 4884 if (phba->lmt & LMT_256Gb) 4885 fc_host_supported_speeds(shost) |= FC_PORTSPEED_256GBIT; 4886 if (phba->lmt & LMT_128Gb) 4887 fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT; 4888 if (phba->lmt & LMT_64Gb) 4889 fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT; 4890 if (phba->lmt & LMT_32Gb) 4891 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT; 4892 if (phba->lmt & LMT_16Gb) 4893 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT; 4894 if (phba->lmt & LMT_10Gb) 4895 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; 4896 if (phba->lmt & LMT_8Gb) 4897 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT; 4898 if (phba->lmt & LMT_4Gb) 4899 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; 4900 if (phba->lmt & LMT_2Gb) 4901 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT; 4902 if (phba->lmt & LMT_1Gb) 4903 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; 4904 } 4905 4906 /** 4907 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port 4908 * @shost: pointer to SCSI host data structure. 4909 * 4910 * This routine initializes a given SCSI host attributes on a FC port. The 4911 * SCSI host can be either on top of a physical port or a virtual port. 
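 *
 * Illustrative usage only (an assumption about callers, not a new code
 * path):
 *
 *	shost = lpfc_shost_from_vport(vport);
 *	lpfc_host_attrib_init(shost);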
4912 **/ 4913 void lpfc_host_attrib_init(struct Scsi_Host *shost) 4914 { 4915 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4916 struct lpfc_hba *phba = vport->phba; 4917 /* 4918 * Set fixed host attributes. Must done after lpfc_sli_hba_setup(). 4919 */ 4920 4921 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 4922 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 4923 fc_host_supported_classes(shost) = FC_COS_CLASS3; 4924 4925 memset(fc_host_supported_fc4s(shost), 0, 4926 sizeof(fc_host_supported_fc4s(shost))); 4927 fc_host_supported_fc4s(shost)[2] = 1; 4928 fc_host_supported_fc4s(shost)[7] = 1; 4929 4930 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), 4931 sizeof fc_host_symbolic_name(shost)); 4932 4933 lpfc_host_supported_speeds_set(shost); 4934 4935 fc_host_maxframe_size(shost) = 4936 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | 4937 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; 4938 4939 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo; 4940 4941 /* This value is also unchanging */ 4942 memset(fc_host_active_fc4s(shost), 0, 4943 sizeof(fc_host_active_fc4s(shost))); 4944 fc_host_active_fc4s(shost)[2] = 1; 4945 fc_host_active_fc4s(shost)[7] = 1; 4946 4947 fc_host_max_npiv_vports(shost) = phba->max_vpi; 4948 spin_lock_irq(shost->host_lock); 4949 vport->load_flag &= ~FC_LOADING; 4950 spin_unlock_irq(shost->host_lock); 4951 } 4952 4953 /** 4954 * lpfc_stop_port_s3 - Stop SLI3 device port 4955 * @phba: pointer to lpfc hba data structure. 4956 * 4957 * This routine is invoked to stop an SLI3 device port, it stops the device 4958 * from generating interrupts and stops the device driver's timers for the 4959 * device. 4960 **/ 4961 static void 4962 lpfc_stop_port_s3(struct lpfc_hba *phba) 4963 { 4964 /* Clear all interrupt enable conditions */ 4965 writel(0, phba->HCregaddr); 4966 readl(phba->HCregaddr); /* flush */ 4967 /* Clear all pending interrupts */ 4968 writel(0xffffffff, phba->HAregaddr); 4969 readl(phba->HAregaddr); /* flush */ 4970 4971 /* Reset some HBA SLI setup states */ 4972 lpfc_stop_hba_timers(phba); 4973 phba->pport->work_port_events = 0; 4974 } 4975 4976 /** 4977 * lpfc_stop_port_s4 - Stop SLI4 device port 4978 * @phba: pointer to lpfc hba data structure. 4979 * 4980 * This routine is invoked to stop an SLI4 device port, it stops the device 4981 * from generating interrupts and stops the device driver's timers for the 4982 * device. 4983 **/ 4984 static void 4985 lpfc_stop_port_s4(struct lpfc_hba *phba) 4986 { 4987 /* Reset some HBA SLI4 setup states */ 4988 lpfc_stop_hba_timers(phba); 4989 if (phba->pport) 4990 phba->pport->work_port_events = 0; 4991 phba->sli4_hba.intr_enable = 0; 4992 } 4993 4994 /** 4995 * lpfc_stop_port - Wrapper function for stopping hba port 4996 * @phba: Pointer to HBA context object. 4997 * 4998 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from 4999 * the API jump table function pointer from the lpfc_hba struct. 5000 **/ 5001 void 5002 lpfc_stop_port(struct lpfc_hba *phba) 5003 { 5004 phba->lpfc_stop_port(phba); 5005 5006 if (phba->wq) 5007 flush_workqueue(phba->wq); 5008 } 5009 5010 /** 5011 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer 5012 * @phba: Pointer to hba for which this call is being executed. 5013 * 5014 * This routine starts the timer waiting for the FCF rediscovery to complete. 
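 *
 * As implemented below, phba->fcf.redisc_wait is armed for
 * LPFC_FCF_REDISCOVER_WAIT_TMO milliseconds and FCF_REDISC_PEND is set so
 * that the timeout handler, lpfc_sli4_fcf_redisc_wait_tmo(), can tell the
 * wait was not cancelled.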
5015 **/ 5016 void 5017 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba) 5018 { 5019 unsigned long fcf_redisc_wait_tmo = 5020 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO)); 5021 /* Start fcf rediscovery wait period timer */ 5022 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo); 5023 spin_lock_irq(&phba->hbalock); 5024 /* Allow action to new fcf asynchronous event */ 5025 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); 5026 /* Mark the FCF rediscovery pending state */ 5027 phba->fcf.fcf_flag |= FCF_REDISC_PEND; 5028 spin_unlock_irq(&phba->hbalock); 5029 } 5030 5031 /** 5032 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout 5033 * @t: Timer context used to obtain the pointer to lpfc hba data structure. 5034 * 5035 * This routine is invoked when waiting for FCF table rediscover has been 5036 * timed out. If new FCF record(s) has (have) been discovered during the 5037 * wait period, a new FCF event shall be added to the FCOE async event 5038 * list, and then worker thread shall be waked up for processing from the 5039 * worker thread context. 5040 **/ 5041 static void 5042 lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t) 5043 { 5044 struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait); 5045 5046 /* Don't send FCF rediscovery event if timer cancelled */ 5047 spin_lock_irq(&phba->hbalock); 5048 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 5049 spin_unlock_irq(&phba->hbalock); 5050 return; 5051 } 5052 /* Clear FCF rediscovery timer pending flag */ 5053 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 5054 /* FCF rediscovery event to worker thread */ 5055 phba->fcf.fcf_flag |= FCF_REDISC_EVT; 5056 spin_unlock_irq(&phba->hbalock); 5057 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 5058 "2776 FCF rediscover quiescent timer expired\n"); 5059 /* wake up worker thread */ 5060 lpfc_worker_wake_up(phba); 5061 } 5062 5063 /** 5064 * lpfc_vmid_poll - VMID timeout detection 5065 * @t: Timer context used to obtain the pointer to lpfc hba data structure. 5066 * 5067 * This routine is invoked when there is no I/O on by a VM for the specified 5068 * amount of time. When this situation is detected, the VMID has to be 5069 * deregistered from the switch and all the local resources freed. The VMID 5070 * will be reassigned to the VM once the I/O begins. 5071 **/ 5072 static void 5073 lpfc_vmid_poll(struct timer_list *t) 5074 { 5075 struct lpfc_hba *phba = from_timer(phba, t, inactive_vmid_poll); 5076 u32 wake_up = 0; 5077 5078 /* check if there is a need to issue QFPA */ 5079 if (phba->pport->vmid_priority_tagging) { 5080 wake_up = 1; 5081 phba->pport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA; 5082 } 5083 5084 /* Is the vmid inactivity timer enabled */ 5085 if (phba->pport->vmid_inactivity_timeout || 5086 phba->pport->load_flag & FC_DEREGISTER_ALL_APP_ID) { 5087 wake_up = 1; 5088 phba->pport->work_port_events |= WORKER_CHECK_INACTIVE_VMID; 5089 } 5090 5091 if (wake_up) 5092 lpfc_worker_wake_up(phba); 5093 5094 /* restart the timer for the next iteration */ 5095 mod_timer(&phba->inactive_vmid_poll, jiffies + msecs_to_jiffies(1000 * 5096 LPFC_VMID_TIMER)); 5097 } 5098 5099 /** 5100 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code 5101 * @phba: pointer to lpfc hba data structure. 5102 * @acqe_link: pointer to the async link completion queue entry. 5103 * 5104 * This routine is to parse the SLI4 link-attention link fault code. 
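 *
 * Recognized fault codes (taken from the switch below) are
 * LPFC_ASYNC_LINK_FAULT_NONE, _LOCAL, _REMOTE and _LR_LRR; anything else is
 * logged as an unknown link fault.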
5105 **/ 5106 static void 5107 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba, 5108 struct lpfc_acqe_link *acqe_link) 5109 { 5110 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) { 5111 case LPFC_ASYNC_LINK_FAULT_NONE: 5112 case LPFC_ASYNC_LINK_FAULT_LOCAL: 5113 case LPFC_ASYNC_LINK_FAULT_REMOTE: 5114 case LPFC_ASYNC_LINK_FAULT_LR_LRR: 5115 break; 5116 default: 5117 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5118 "0398 Unknown link fault code: x%x\n", 5119 bf_get(lpfc_acqe_link_fault, acqe_link)); 5120 break; 5121 } 5122 } 5123 5124 /** 5125 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type 5126 * @phba: pointer to lpfc hba data structure. 5127 * @acqe_link: pointer to the async link completion queue entry. 5128 * 5129 * This routine is to parse the SLI4 link attention type and translate it 5130 * into the base driver's link attention type coding. 5131 * 5132 * Return: Link attention type in terms of base driver's coding. 5133 **/ 5134 static uint8_t 5135 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba, 5136 struct lpfc_acqe_link *acqe_link) 5137 { 5138 uint8_t att_type; 5139 5140 switch (bf_get(lpfc_acqe_link_status, acqe_link)) { 5141 case LPFC_ASYNC_LINK_STATUS_DOWN: 5142 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN: 5143 att_type = LPFC_ATT_LINK_DOWN; 5144 break; 5145 case LPFC_ASYNC_LINK_STATUS_UP: 5146 /* Ignore physical link up events - wait for logical link up */ 5147 att_type = LPFC_ATT_RESERVED; 5148 break; 5149 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP: 5150 att_type = LPFC_ATT_LINK_UP; 5151 break; 5152 default: 5153 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5154 "0399 Invalid link attention type: x%x\n", 5155 bf_get(lpfc_acqe_link_status, acqe_link)); 5156 att_type = LPFC_ATT_RESERVED; 5157 break; 5158 } 5159 return att_type; 5160 } 5161 5162 /** 5163 * lpfc_sli_port_speed_get - Get sli3 link speed code to link speed 5164 * @phba: pointer to lpfc hba data structure. 5165 * 5166 * This routine is to get an SLI3 FC port's link speed in Mbps. 5167 * 5168 * Return: link speed in terms of Mbps. 5169 **/ 5170 uint32_t 5171 lpfc_sli_port_speed_get(struct lpfc_hba *phba) 5172 { 5173 uint32_t link_speed; 5174 5175 if (!lpfc_is_link_up(phba)) 5176 return 0; 5177 5178 if (phba->sli_rev <= LPFC_SLI_REV3) { 5179 switch (phba->fc_linkspeed) { 5180 case LPFC_LINK_SPEED_1GHZ: 5181 link_speed = 1000; 5182 break; 5183 case LPFC_LINK_SPEED_2GHZ: 5184 link_speed = 2000; 5185 break; 5186 case LPFC_LINK_SPEED_4GHZ: 5187 link_speed = 4000; 5188 break; 5189 case LPFC_LINK_SPEED_8GHZ: 5190 link_speed = 8000; 5191 break; 5192 case LPFC_LINK_SPEED_10GHZ: 5193 link_speed = 10000; 5194 break; 5195 case LPFC_LINK_SPEED_16GHZ: 5196 link_speed = 16000; 5197 break; 5198 default: 5199 link_speed = 0; 5200 } 5201 } else { 5202 if (phba->sli4_hba.link_state.logical_speed) 5203 link_speed = 5204 phba->sli4_hba.link_state.logical_speed; 5205 else 5206 link_speed = phba->sli4_hba.link_state.speed; 5207 } 5208 return link_speed; 5209 } 5210 5211 /** 5212 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed 5213 * @phba: pointer to lpfc hba data structure. 5214 * @evt_code: asynchronous event code. 5215 * @speed_code: asynchronous event link speed code. 5216 * 5217 * This routine is to parse the giving SLI4 async event link speed code into 5218 * value of Mbps for the link speed. 5219 * 5220 * Return: link speed in terms of Mbps. 
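 *
 * For example (values taken from the switch below),
 * (LPFC_TRAILER_CODE_FC, LPFC_FC_LA_SPEED_32G) yields 32000 and
 * (LPFC_TRAILER_CODE_LINK, LPFC_ASYNC_LINK_SPEED_10GBPS) yields 10000;
 * unrecognized codes yield 0.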
5221 **/ 5222 static uint32_t 5223 lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code, 5224 uint8_t speed_code) 5225 { 5226 uint32_t port_speed; 5227 5228 switch (evt_code) { 5229 case LPFC_TRAILER_CODE_LINK: 5230 switch (speed_code) { 5231 case LPFC_ASYNC_LINK_SPEED_ZERO: 5232 port_speed = 0; 5233 break; 5234 case LPFC_ASYNC_LINK_SPEED_10MBPS: 5235 port_speed = 10; 5236 break; 5237 case LPFC_ASYNC_LINK_SPEED_100MBPS: 5238 port_speed = 100; 5239 break; 5240 case LPFC_ASYNC_LINK_SPEED_1GBPS: 5241 port_speed = 1000; 5242 break; 5243 case LPFC_ASYNC_LINK_SPEED_10GBPS: 5244 port_speed = 10000; 5245 break; 5246 case LPFC_ASYNC_LINK_SPEED_20GBPS: 5247 port_speed = 20000; 5248 break; 5249 case LPFC_ASYNC_LINK_SPEED_25GBPS: 5250 port_speed = 25000; 5251 break; 5252 case LPFC_ASYNC_LINK_SPEED_40GBPS: 5253 port_speed = 40000; 5254 break; 5255 case LPFC_ASYNC_LINK_SPEED_100GBPS: 5256 port_speed = 100000; 5257 break; 5258 default: 5259 port_speed = 0; 5260 } 5261 break; 5262 case LPFC_TRAILER_CODE_FC: 5263 switch (speed_code) { 5264 case LPFC_FC_LA_SPEED_UNKNOWN: 5265 port_speed = 0; 5266 break; 5267 case LPFC_FC_LA_SPEED_1G: 5268 port_speed = 1000; 5269 break; 5270 case LPFC_FC_LA_SPEED_2G: 5271 port_speed = 2000; 5272 break; 5273 case LPFC_FC_LA_SPEED_4G: 5274 port_speed = 4000; 5275 break; 5276 case LPFC_FC_LA_SPEED_8G: 5277 port_speed = 8000; 5278 break; 5279 case LPFC_FC_LA_SPEED_10G: 5280 port_speed = 10000; 5281 break; 5282 case LPFC_FC_LA_SPEED_16G: 5283 port_speed = 16000; 5284 break; 5285 case LPFC_FC_LA_SPEED_32G: 5286 port_speed = 32000; 5287 break; 5288 case LPFC_FC_LA_SPEED_64G: 5289 port_speed = 64000; 5290 break; 5291 case LPFC_FC_LA_SPEED_128G: 5292 port_speed = 128000; 5293 break; 5294 case LPFC_FC_LA_SPEED_256G: 5295 port_speed = 256000; 5296 break; 5297 default: 5298 port_speed = 0; 5299 } 5300 break; 5301 default: 5302 port_speed = 0; 5303 } 5304 return port_speed; 5305 } 5306 5307 /** 5308 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event 5309 * @phba: pointer to lpfc hba data structure. 5310 * @acqe_link: pointer to the async link completion queue entry. 5311 * 5312 * This routine is to handle the SLI4 asynchronous FCoE link event. 
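 *
 * As implemented below: the attention type is parsed, outstanding ELS
 * commands are flushed, and a READ_TOPOLOGY mailbox is prepared and either
 * issued to the port (FC mode) or completed in place with faked topology
 * fields (FCoE mode) via lpfc_mbx_cmpl_read_topology().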
5313 **/ 5314 static void 5315 lpfc_sli4_async_link_evt(struct lpfc_hba *phba, 5316 struct lpfc_acqe_link *acqe_link) 5317 { 5318 LPFC_MBOXQ_t *pmb; 5319 MAILBOX_t *mb; 5320 struct lpfc_mbx_read_top *la; 5321 uint8_t att_type; 5322 int rc; 5323 5324 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link); 5325 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP) 5326 return; 5327 phba->fcoe_eventtag = acqe_link->event_tag; 5328 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5329 if (!pmb) { 5330 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5331 "0395 The mboxq allocation failed\n"); 5332 return; 5333 } 5334 5335 rc = lpfc_mbox_rsrc_prep(phba, pmb); 5336 if (rc) { 5337 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5338 "0396 mailbox allocation failed\n"); 5339 goto out_free_pmb; 5340 } 5341 5342 /* Cleanup any outstanding ELS commands */ 5343 lpfc_els_flush_all_cmd(phba); 5344 5345 /* Block ELS IOCBs until we have done process link event */ 5346 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT; 5347 5348 /* Update link event statistics */ 5349 phba->sli.slistat.link_event++; 5350 5351 /* Create lpfc_handle_latt mailbox command from link ACQE */ 5352 lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf); 5353 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 5354 pmb->vport = phba->pport; 5355 5356 /* Keep the link status for extra SLI4 state machine reference */ 5357 phba->sli4_hba.link_state.speed = 5358 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK, 5359 bf_get(lpfc_acqe_link_speed, acqe_link)); 5360 phba->sli4_hba.link_state.duplex = 5361 bf_get(lpfc_acqe_link_duplex, acqe_link); 5362 phba->sli4_hba.link_state.status = 5363 bf_get(lpfc_acqe_link_status, acqe_link); 5364 phba->sli4_hba.link_state.type = 5365 bf_get(lpfc_acqe_link_type, acqe_link); 5366 phba->sli4_hba.link_state.number = 5367 bf_get(lpfc_acqe_link_number, acqe_link); 5368 phba->sli4_hba.link_state.fault = 5369 bf_get(lpfc_acqe_link_fault, acqe_link); 5370 phba->sli4_hba.link_state.logical_speed = 5371 bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10; 5372 5373 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5374 "2900 Async FC/FCoE Link event - Speed:%dGBit " 5375 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d " 5376 "Logical speed:%dMbps Fault:%d\n", 5377 phba->sli4_hba.link_state.speed, 5378 phba->sli4_hba.link_state.topology, 5379 phba->sli4_hba.link_state.status, 5380 phba->sli4_hba.link_state.type, 5381 phba->sli4_hba.link_state.number, 5382 phba->sli4_hba.link_state.logical_speed, 5383 phba->sli4_hba.link_state.fault); 5384 /* 5385 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch 5386 * topology info. Note: Optional for non FC-AL ports. 5387 */ 5388 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 5389 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 5390 if (rc == MBX_NOT_FINISHED) 5391 goto out_free_pmb; 5392 return; 5393 } 5394 /* 5395 * For FCoE Mode: fill in all the topology information we need and call 5396 * the READ_TOPOLOGY completion routine to continue without actually 5397 * sending the READ_TOPOLOGY mailbox command to the port. 
	 */
	/* Initialize completion status */
	mb = &pmb->u.mb;
	mb->mbxStatus = MBX_SUCCESS;

	/* Parse port fault information field */
	lpfc_sli4_parse_latt_fault(phba, acqe_link);

	/* Parse and translate link attention fields */
	la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
	la->eventTag = acqe_link->event_tag;
	bf_set(lpfc_mbx_read_top_att_type, la, att_type);
	bf_set(lpfc_mbx_read_top_link_spd, la,
	       (bf_get(lpfc_acqe_link_speed, acqe_link)));

	/* Fake the following irrelevant fields */
	bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
	bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
	bf_set(lpfc_mbx_read_top_il, la, 0);
	bf_set(lpfc_mbx_read_top_pb, la, 0);
	bf_set(lpfc_mbx_read_top_fa, la, 0);
	bf_set(lpfc_mbx_read_top_mm, la, 0);

	/* Invoke the lpfc_handle_latt mailbox command callback function */
	lpfc_mbx_cmpl_read_topology(phba, pmb);

	return;

out_free_pmb:
	lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
}

/**
 * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read
 * topology.
 * @phba: pointer to lpfc hba data structure.
 * @speed_code: asynchronous event link speed code.
 *
 * This routine is to parse the given SLI4 async event link speed code into
 * the corresponding Read topology link speed value.
 *
 * Return: link speed in terms of Read topology.
 **/
static uint8_t
lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
{
	uint8_t port_speed;

	switch (speed_code) {
	case LPFC_FC_LA_SPEED_1G:
		port_speed = LPFC_LINK_SPEED_1GHZ;
		break;
	case LPFC_FC_LA_SPEED_2G:
		port_speed = LPFC_LINK_SPEED_2GHZ;
		break;
	case LPFC_FC_LA_SPEED_4G:
		port_speed = LPFC_LINK_SPEED_4GHZ;
		break;
	case LPFC_FC_LA_SPEED_8G:
		port_speed = LPFC_LINK_SPEED_8GHZ;
		break;
	case LPFC_FC_LA_SPEED_16G:
		port_speed = LPFC_LINK_SPEED_16GHZ;
		break;
	case LPFC_FC_LA_SPEED_32G:
		port_speed = LPFC_LINK_SPEED_32GHZ;
		break;
	case LPFC_FC_LA_SPEED_64G:
		port_speed = LPFC_LINK_SPEED_64GHZ;
		break;
	case LPFC_FC_LA_SPEED_128G:
		port_speed = LPFC_LINK_SPEED_128GHZ;
		break;
	case LPFC_FC_LA_SPEED_256G:
		port_speed = LPFC_LINK_SPEED_256GHZ;
		break;
	default:
		port_speed = 0;
		break;
	}

	return port_speed;
}

void
lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba)
{
	struct rxtable_entry *entry;
	int cnt = 0, head, tail, last, start;

	head = atomic_read(&phba->rxtable_idx_head);
	tail = atomic_read(&phba->rxtable_idx_tail);
	if (!phba->rxtable || head == tail) {
		lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
				"4411 Rxtable is empty\n");
		return;
	}
	last = tail;
	start = head;

	/* Display the last LPFC_MAX_RXMONITOR_DUMP entries from the rxtable */
	while (start != last) {
		if (start)
			start--;
		else
			start = LPFC_MAX_RXMONITOR_ENTRY - 1;
		entry = &phba->rxtable[start];
		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
				"4410 %02d: MBPI %lld Xmit %lld Cmpl %lld "
				"Lat %lld ASz %lld Info %02d BWUtil %d "
				"Int %d slot %d\n",
				cnt, entry->max_bytes_per_interval,
				entry->total_bytes, entry->rcv_bytes,
				entry->avg_io_latency, entry->avg_io_size,
				entry->cmf_info, entry->timer_utilization,
				entry->timer_interval, start);
		cnt++;
		if (cnt >= LPFC_MAX_RXMONITOR_DUMP)
			return;
	}
}

/**
 * lpfc_cgn_update_stat - Save data into congestion stats buffer
 * @phba: pointer to lpfc hba data structure.
 * @dtag: FPIN descriptor received
 *
 * Increment the FPIN received counter/time when it happens.
 */
void
lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag)
{
	struct lpfc_cgn_info *cp;
	struct tm broken;
	struct timespec64 cur_time;
	u32 cnt;
	u32 value;

	/* Make sure we have a congestion info buffer */
	if (!phba->cgn_i)
		return;
	cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
	ktime_get_real_ts64(&cur_time);
	time64_to_tm(cur_time.tv_sec, 0, &broken);

	/* Update congestion statistics */
	switch (dtag) {
	case ELS_DTAG_LNK_INTEGRITY:
		cnt = le32_to_cpu(cp->link_integ_notification);
		cnt++;
		cp->link_integ_notification = cpu_to_le32(cnt);

		cp->cgn_stat_lnk_month = broken.tm_mon + 1;
		cp->cgn_stat_lnk_day = broken.tm_mday;
		cp->cgn_stat_lnk_year = broken.tm_year - 100;
		cp->cgn_stat_lnk_hour = broken.tm_hour;
		cp->cgn_stat_lnk_min = broken.tm_min;
		cp->cgn_stat_lnk_sec = broken.tm_sec;
		break;
	case ELS_DTAG_DELIVERY:
		cnt = le32_to_cpu(cp->delivery_notification);
		cnt++;
		cp->delivery_notification = cpu_to_le32(cnt);

		cp->cgn_stat_del_month = broken.tm_mon + 1;
		cp->cgn_stat_del_day = broken.tm_mday;
		cp->cgn_stat_del_year = broken.tm_year - 100;
		cp->cgn_stat_del_hour = broken.tm_hour;
		cp->cgn_stat_del_min = broken.tm_min;
		cp->cgn_stat_del_sec = broken.tm_sec;
		break;
	case ELS_DTAG_PEER_CONGEST:
		cnt = le32_to_cpu(cp->cgn_peer_notification);
		cnt++;
		cp->cgn_peer_notification = cpu_to_le32(cnt);

		cp->cgn_stat_peer_month = broken.tm_mon + 1;
		cp->cgn_stat_peer_day = broken.tm_mday;
		cp->cgn_stat_peer_year = broken.tm_year - 100;
		cp->cgn_stat_peer_hour = broken.tm_hour;
		cp->cgn_stat_peer_min = broken.tm_min;
		cp->cgn_stat_peer_sec = broken.tm_sec;
		break;
	case ELS_DTAG_CONGESTION:
		cnt = le32_to_cpu(cp->cgn_notification);
		cnt++;
		cp->cgn_notification = cpu_to_le32(cnt);

		cp->cgn_stat_cgn_month = broken.tm_mon + 1;
		cp->cgn_stat_cgn_day = broken.tm_mday;
		cp->cgn_stat_cgn_year = broken.tm_year - 100;
		cp->cgn_stat_cgn_hour = broken.tm_hour;
		cp->cgn_stat_cgn_min = broken.tm_min;
		cp->cgn_stat_cgn_sec = broken.tm_sec;
	}
	if (phba->cgn_fpin_frequency &&
	    phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
		value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
		cp->cgn_stat_npm = value;
	}
	value = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
				    LPFC_CGN_CRC32_SEED);
	cp->cgn_info_crc = cpu_to_le32(value);
}

/**
 * lpfc_cgn_save_evt_cnt - Save data into registered congestion buffer
 * @phba: pointer to lpfc hba data structure.
 *
 * Save the congestion event data every minute.
 * On the hour collapse all the minute data into hour data. Every day
 * collapse all the hour data into daily data. Separate driver
 * and fabric congestion event counters that will be saved out
 * to the registered congestion buffer every minute.
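 *
 * The buffer keeps LPFC_MIN_HOUR per-minute slots, LPFC_HOUR_DAY per-hour
 * slots and LPFC_MAX_CGN_DAYS per-day slots; each set is used as a ring
 * that wraps when its index reaches the corresponding limit.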
5612 */ 5613 static void 5614 lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba) 5615 { 5616 struct lpfc_cgn_info *cp; 5617 struct tm broken; 5618 struct timespec64 cur_time; 5619 uint32_t i, index; 5620 uint16_t value, mvalue; 5621 uint64_t bps; 5622 uint32_t mbps; 5623 uint32_t dvalue, wvalue, lvalue, avalue; 5624 uint64_t latsum; 5625 __le16 *ptr; 5626 __le32 *lptr; 5627 __le16 *mptr; 5628 5629 /* Make sure we have a congestion info buffer */ 5630 if (!phba->cgn_i) 5631 return; 5632 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; 5633 5634 if (time_before(jiffies, phba->cgn_evt_timestamp)) 5635 return; 5636 phba->cgn_evt_timestamp = jiffies + 5637 msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN); 5638 phba->cgn_evt_minute++; 5639 5640 /* We should get to this point in the routine on 1 minute intervals */ 5641 5642 ktime_get_real_ts64(&cur_time); 5643 time64_to_tm(cur_time.tv_sec, 0, &broken); 5644 5645 if (phba->cgn_fpin_frequency && 5646 phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) { 5647 value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency; 5648 cp->cgn_stat_npm = value; 5649 } 5650 5651 /* Read and clear the latency counters for this minute */ 5652 lvalue = atomic_read(&phba->cgn_latency_evt_cnt); 5653 latsum = atomic64_read(&phba->cgn_latency_evt); 5654 atomic_set(&phba->cgn_latency_evt_cnt, 0); 5655 atomic64_set(&phba->cgn_latency_evt, 0); 5656 5657 /* We need to store MB/sec bandwidth in the congestion information. 5658 * block_cnt is count of 512 byte blocks for the entire minute, 5659 * bps will get bytes per sec before finally converting to MB/sec. 5660 */ 5661 bps = div_u64(phba->rx_block_cnt, LPFC_SEC_MIN) * 512; 5662 phba->rx_block_cnt = 0; 5663 mvalue = bps / (1024 * 1024); /* convert to MB/sec */ 5664 5665 /* Every minute */ 5666 /* cgn parameters */ 5667 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode; 5668 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0; 5669 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1; 5670 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2; 5671 5672 /* Fill in default LUN qdepth */ 5673 value = (uint16_t)(phba->pport->cfg_lun_queue_depth); 5674 cp->cgn_lunq = cpu_to_le16(value); 5675 5676 /* Record congestion buffer info - every minute 5677 * cgn_driver_evt_cnt (Driver events) 5678 * cgn_fabric_warn_cnt (Congestion Warnings) 5679 * cgn_latency_evt_cnt / cgn_latency_evt (IO Latency) 5680 * cgn_fabric_alarm_cnt (Congestion Alarms) 5681 */ 5682 index = ++cp->cgn_index_minute; 5683 if (cp->cgn_index_minute == LPFC_MIN_HOUR) { 5684 cp->cgn_index_minute = 0; 5685 index = 0; 5686 } 5687 5688 /* Get the number of driver events in this sample and reset counter */ 5689 dvalue = atomic_read(&phba->cgn_driver_evt_cnt); 5690 atomic_set(&phba->cgn_driver_evt_cnt, 0); 5691 5692 /* Get the number of warning events - FPIN and Signal for this minute */ 5693 wvalue = 0; 5694 if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) || 5695 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY || 5696 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) 5697 wvalue = atomic_read(&phba->cgn_fabric_warn_cnt); 5698 atomic_set(&phba->cgn_fabric_warn_cnt, 0); 5699 5700 /* Get the number of alarm events - FPIN and Signal for this minute */ 5701 avalue = 0; 5702 if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) || 5703 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) 5704 avalue = atomic_read(&phba->cgn_fabric_alarm_cnt); 5705 atomic_set(&phba->cgn_fabric_alarm_cnt, 0); 5706 5707 /* Collect the driver, warning, alarm and latency counts for this 5708 * minute into the driver congestion buffer. 
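	 * The per-minute latency slot stores the average latency for the
	 * minute (latsum / lvalue) rather than the running sum, and the
	 * bandwidth slot holds the MB/sec value computed above.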
	 */
	ptr = &cp->cgn_drvr_min[index];
	value = (uint16_t)dvalue;
	*ptr = cpu_to_le16(value);

	ptr = &cp->cgn_warn_min[index];
	value = (uint16_t)wvalue;
	*ptr = cpu_to_le16(value);

	ptr = &cp->cgn_alarm_min[index];
	value = (uint16_t)avalue;
	*ptr = cpu_to_le16(value);

	lptr = &cp->cgn_latency_min[index];
	if (lvalue) {
		lvalue = (uint32_t)div_u64(latsum, lvalue);
		*lptr = cpu_to_le32(lvalue);
	} else {
		*lptr = 0;
	}

	/* Collect the bandwidth value into the driver's congestion buffer. */
	mptr = &cp->cgn_bw_min[index];
	*mptr = cpu_to_le16(mvalue);

	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
			"2418 Congestion Info - minute (%d): %d %d %d %d %d\n",
			index, dvalue, wvalue, *lptr, mvalue, avalue);

	/* Every hour */
	if ((phba->cgn_evt_minute % LPFC_MIN_HOUR) == 0) {
		/* Record congestion buffer info - every hour
		 * Collapse all minutes into an hour
		 */
		index = ++cp->cgn_index_hour;
		if (cp->cgn_index_hour == LPFC_HOUR_DAY) {
			cp->cgn_index_hour = 0;
			index = 0;
		}

		dvalue = 0;
		wvalue = 0;
		lvalue = 0;
		avalue = 0;
		mvalue = 0;
		mbps = 0;
		for (i = 0; i < LPFC_MIN_HOUR; i++) {
			dvalue += le16_to_cpu(cp->cgn_drvr_min[i]);
			wvalue += le16_to_cpu(cp->cgn_warn_min[i]);
			lvalue += le32_to_cpu(cp->cgn_latency_min[i]);
			mbps += le16_to_cpu(cp->cgn_bw_min[i]);
			avalue += le16_to_cpu(cp->cgn_alarm_min[i]);
		}
		if (lvalue)		/* Avg of latency averages */
			lvalue /= LPFC_MIN_HOUR;
		if (mbps)		/* Avg of Bandwidth averages */
			mvalue = mbps / LPFC_MIN_HOUR;

		lptr = &cp->cgn_drvr_hr[index];
		*lptr = cpu_to_le32(dvalue);
		lptr = &cp->cgn_warn_hr[index];
		*lptr = cpu_to_le32(wvalue);
		lptr = &cp->cgn_latency_hr[index];
		*lptr = cpu_to_le32(lvalue);
		mptr = &cp->cgn_bw_hr[index];
		*mptr = cpu_to_le16(mvalue);
		lptr = &cp->cgn_alarm_hr[index];
		*lptr = cpu_to_le32(avalue);

		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
				"2419 Congestion Info - hour "
				"(%d): %d %d %d %d %d\n",
				index, dvalue, wvalue, lvalue, mvalue, avalue);
	}

	/* Every day */
	if ((phba->cgn_evt_minute % LPFC_MIN_DAY) == 0) {
		/* Record congestion buffer info - every day
		 * Collapse all hours into a day. Rotate days
		 * after LPFC_MAX_CGN_DAYS.
		 */
		index = ++cp->cgn_index_day;
		if (cp->cgn_index_day == LPFC_MAX_CGN_DAYS) {
			cp->cgn_index_day = 0;
			index = 0;
		}

		/* Anytime we overwrite daily index 0, after we wrap,
		 * we will be overwriting the oldest day, so we must
		 * update the congestion data start time for that day.
		 * That start time should have previously been saved after
		 * we wrote the last day's worth of data.
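		 * For example, once the table has wrapped, writing day slot 0
		 * again replaces the oldest day, so cgn_info_* below is
		 * refreshed from cgn_daily_ts to mark when that day's data
		 * began.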
5801 */ 5802 if ((phba->hba_flag & HBA_CGN_DAY_WRAP) && index == 0) { 5803 time64_to_tm(phba->cgn_daily_ts.tv_sec, 0, &broken); 5804 5805 cp->cgn_info_month = broken.tm_mon + 1; 5806 cp->cgn_info_day = broken.tm_mday; 5807 cp->cgn_info_year = broken.tm_year - 100; 5808 cp->cgn_info_hour = broken.tm_hour; 5809 cp->cgn_info_minute = broken.tm_min; 5810 cp->cgn_info_second = broken.tm_sec; 5811 5812 lpfc_printf_log 5813 (phba, KERN_INFO, LOG_CGN_MGMT, 5814 "2646 CGNInfo idx0 Start Time: " 5815 "%d/%d/%d %d:%d:%d\n", 5816 cp->cgn_info_day, cp->cgn_info_month, 5817 cp->cgn_info_year, cp->cgn_info_hour, 5818 cp->cgn_info_minute, cp->cgn_info_second); 5819 } 5820 5821 dvalue = 0; 5822 wvalue = 0; 5823 lvalue = 0; 5824 mvalue = 0; 5825 mbps = 0; 5826 avalue = 0; 5827 for (i = 0; i < LPFC_HOUR_DAY; i++) { 5828 dvalue += le32_to_cpu(cp->cgn_drvr_hr[i]); 5829 wvalue += le32_to_cpu(cp->cgn_warn_hr[i]); 5830 lvalue += le32_to_cpu(cp->cgn_latency_hr[i]); 5831 mbps += le16_to_cpu(cp->cgn_bw_hr[i]); 5832 avalue += le32_to_cpu(cp->cgn_alarm_hr[i]); 5833 } 5834 if (lvalue) /* Avg of latency averages */ 5835 lvalue /= LPFC_HOUR_DAY; 5836 if (mbps) /* Avg of Bandwidth averages */ 5837 mvalue = mbps / LPFC_HOUR_DAY; 5838 5839 lptr = &cp->cgn_drvr_day[index]; 5840 *lptr = cpu_to_le32(dvalue); 5841 lptr = &cp->cgn_warn_day[index]; 5842 *lptr = cpu_to_le32(wvalue); 5843 lptr = &cp->cgn_latency_day[index]; 5844 *lptr = cpu_to_le32(lvalue); 5845 mptr = &cp->cgn_bw_day[index]; 5846 *mptr = cpu_to_le16(mvalue); 5847 lptr = &cp->cgn_alarm_day[index]; 5848 *lptr = cpu_to_le32(avalue); 5849 5850 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 5851 "2420 Congestion Info - daily (%d): " 5852 "%d %d %d %d %d\n", 5853 index, dvalue, wvalue, lvalue, mvalue, avalue); 5854 5855 /* We just wrote LPFC_MAX_CGN_DAYS of data, 5856 * so we are wrapped on any data after this. 5857 * Save this as the start time for the next day. 5858 */ 5859 if (index == (LPFC_MAX_CGN_DAYS - 1)) { 5860 phba->hba_flag |= HBA_CGN_DAY_WRAP; 5861 ktime_get_real_ts64(&phba->cgn_daily_ts); 5862 } 5863 } 5864 5865 /* Use the frequency found in the last rcv'ed FPIN */ 5866 value = phba->cgn_fpin_frequency; 5867 cp->cgn_warn_freq = cpu_to_le16(value); 5868 cp->cgn_alarm_freq = cpu_to_le16(value); 5869 5870 lvalue = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, 5871 LPFC_CGN_CRC32_SEED); 5872 cp->cgn_info_crc = cpu_to_le32(lvalue); 5873 } 5874 5875 /** 5876 * lpfc_calc_cmf_latency - latency from start of rxate timer interval 5877 * @phba: The Hba for which this call is being executed. 5878 * 5879 * The routine calculates the latency from the beginning of the CMF timer 5880 * interval to the current point in time. It is called from IO completion 5881 * when we exceed our Bandwidth limitation for the time interval. 5882 */ 5883 uint32_t 5884 lpfc_calc_cmf_latency(struct lpfc_hba *phba) 5885 { 5886 struct timespec64 cmpl_time; 5887 uint32_t msec = 0; 5888 5889 ktime_get_real_ts64(&cmpl_time); 5890 5891 /* This routine works on a ms granularity so sec and usec are 5892 * converted accordingly. 
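	 * For example, with a timer start of 5.900000000 sec and a completion
	 * at 7.100000000 sec, the second else-branch below computes
	 * (7 - 5 - 1) * 1000 + ((1000000000 - 900000000) + 100000000) / 1000000
	 * which is 1000 + 200 = 1200 ms.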
5893 */ 5894 if (cmpl_time.tv_sec == phba->cmf_latency.tv_sec) { 5895 msec = (cmpl_time.tv_nsec - phba->cmf_latency.tv_nsec) / 5896 NSEC_PER_MSEC; 5897 } else { 5898 if (cmpl_time.tv_nsec >= phba->cmf_latency.tv_nsec) { 5899 msec = (cmpl_time.tv_sec - 5900 phba->cmf_latency.tv_sec) * MSEC_PER_SEC; 5901 msec += ((cmpl_time.tv_nsec - 5902 phba->cmf_latency.tv_nsec) / NSEC_PER_MSEC); 5903 } else { 5904 msec = (cmpl_time.tv_sec - phba->cmf_latency.tv_sec - 5905 1) * MSEC_PER_SEC; 5906 msec += (((NSEC_PER_SEC - phba->cmf_latency.tv_nsec) + 5907 cmpl_time.tv_nsec) / NSEC_PER_MSEC); 5908 } 5909 } 5910 return msec; 5911 } 5912 5913 /** 5914 * lpfc_cmf_timer - This is the timer function for one congestion 5915 * rate interval. 5916 * @timer: Pointer to the high resolution timer that expired 5917 */ 5918 static enum hrtimer_restart 5919 lpfc_cmf_timer(struct hrtimer *timer) 5920 { 5921 struct lpfc_hba *phba = container_of(timer, struct lpfc_hba, 5922 cmf_timer); 5923 struct rxtable_entry *entry; 5924 uint32_t io_cnt; 5925 uint32_t head, tail; 5926 uint32_t busy, max_read; 5927 uint64_t total, rcv, lat, mbpi, extra, cnt; 5928 int timer_interval = LPFC_CMF_INTERVAL; 5929 uint32_t ms; 5930 struct lpfc_cgn_stat *cgs; 5931 int cpu; 5932 5933 /* Only restart the timer if congestion mgmt is on */ 5934 if (phba->cmf_active_mode == LPFC_CFG_OFF || 5935 !phba->cmf_latency.tv_sec) { 5936 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 5937 "6224 CMF timer exit: %d %lld\n", 5938 phba->cmf_active_mode, 5939 (uint64_t)phba->cmf_latency.tv_sec); 5940 return HRTIMER_NORESTART; 5941 } 5942 5943 /* If pport is not ready yet, just exit and wait for 5944 * the next timer cycle to hit. 5945 */ 5946 if (!phba->pport) 5947 goto skip; 5948 5949 /* Do not block SCSI IO while in the timer routine since 5950 * total_bytes will be cleared 5951 */ 5952 atomic_set(&phba->cmf_stop_io, 1); 5953 5954 /* First we need to calculate the actual ms between 5955 * the last timer interrupt and this one. We ask for 5956 * LPFC_CMF_INTERVAL, however the actual time may 5957 * vary depending on system overhead. 5958 */ 5959 ms = lpfc_calc_cmf_latency(phba); 5960 5961 5962 /* Immediately after we calculate the time since the last 5963 * timer interrupt, set the start time for the next 5964 * interrupt 5965 */ 5966 ktime_get_real_ts64(&phba->cmf_latency); 5967 5968 phba->cmf_link_byte_count = 5969 div_u64(phba->cmf_max_line_rate * LPFC_CMF_INTERVAL, 1000); 5970 5971 /* Collect all the stats from the prior timer interval */ 5972 total = 0; 5973 io_cnt = 0; 5974 lat = 0; 5975 rcv = 0; 5976 for_each_present_cpu(cpu) { 5977 cgs = per_cpu_ptr(phba->cmf_stat, cpu); 5978 total += atomic64_xchg(&cgs->total_bytes, 0); 5979 io_cnt += atomic_xchg(&cgs->rx_io_cnt, 0); 5980 lat += atomic64_xchg(&cgs->rx_latency, 0); 5981 rcv += atomic64_xchg(&cgs->rcv_bytes, 0); 5982 } 5983 5984 /* Before we issue another CMF_SYNC_WQE, retrieve the BW 5985 * returned from the last CMF_SYNC_WQE issued, from 5986 * cmf_last_sync_bw. This will be the target BW for 5987 * this next timer interval. 5988 */ 5989 if (phba->cmf_active_mode == LPFC_CFG_MANAGED && 5990 phba->link_state != LPFC_LINK_DOWN && 5991 phba->hba_flag & HBA_SETUP) { 5992 mbpi = phba->cmf_last_sync_bw; 5993 phba->cmf_last_sync_bw = 0; 5994 extra = 0; 5995 5996 /* Calculate any extra bytes needed to account for the 5997 * timer accuracy. If we are less than LPFC_CMF_INTERVAL 5998 * calculate the adjustment needed for total to reflect 5999 * a full LPFC_CMF_INTERVAL. 
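		 * For example, if only three quarters of the interval
		 * actually elapsed, cnt below scales total up by 4/3 and
		 * extra becomes the difference, capped at mbpi when the
		 * interval was deliberately shortened (HBA_SHORT_CMF).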
6000 */ 6001 if (ms && ms < LPFC_CMF_INTERVAL) { 6002 cnt = div_u64(total, ms); /* bytes per ms */ 6003 cnt *= LPFC_CMF_INTERVAL; /* what total should be */ 6004 6005 /* If the timeout is scheduled to be shorter, 6006 * this value may skew the data, so cap it at mbpi. 6007 */ 6008 if ((phba->hba_flag & HBA_SHORT_CMF) && cnt > mbpi) 6009 cnt = mbpi; 6010 6011 extra = cnt - total; 6012 } 6013 lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total + extra); 6014 } else { 6015 /* For Monitor mode or link down we want mbpi 6016 * to be the full link speed 6017 */ 6018 mbpi = phba->cmf_link_byte_count; 6019 extra = 0; 6020 } 6021 phba->cmf_timer_cnt++; 6022 6023 if (io_cnt) { 6024 /* Update congestion info buffer latency in us */ 6025 atomic_add(io_cnt, &phba->cgn_latency_evt_cnt); 6026 atomic64_add(lat, &phba->cgn_latency_evt); 6027 } 6028 busy = atomic_xchg(&phba->cmf_busy, 0); 6029 max_read = atomic_xchg(&phba->rx_max_read_cnt, 0); 6030 6031 /* Calculate MBPI for the next timer interval */ 6032 if (mbpi) { 6033 if (mbpi > phba->cmf_link_byte_count || 6034 phba->cmf_active_mode == LPFC_CFG_MONITOR) 6035 mbpi = phba->cmf_link_byte_count; 6036 6037 /* Change max_bytes_per_interval to what the prior 6038 * CMF_SYNC_WQE cmpl indicated. 6039 */ 6040 if (mbpi != phba->cmf_max_bytes_per_interval) 6041 phba->cmf_max_bytes_per_interval = mbpi; 6042 } 6043 6044 /* Save rxmonitor information for debug */ 6045 if (phba->rxtable) { 6046 head = atomic_xchg(&phba->rxtable_idx_head, 6047 LPFC_RXMONITOR_TABLE_IN_USE); 6048 entry = &phba->rxtable[head]; 6049 entry->total_bytes = total; 6050 entry->cmf_bytes = total + extra; 6051 entry->rcv_bytes = rcv; 6052 entry->cmf_busy = busy; 6053 entry->cmf_info = phba->cmf_active_info; 6054 if (io_cnt) { 6055 entry->avg_io_latency = div_u64(lat, io_cnt); 6056 entry->avg_io_size = div_u64(rcv, io_cnt); 6057 } else { 6058 entry->avg_io_latency = 0; 6059 entry->avg_io_size = 0; 6060 } 6061 entry->max_read_cnt = max_read; 6062 entry->io_cnt = io_cnt; 6063 entry->max_bytes_per_interval = mbpi; 6064 if (phba->cmf_active_mode == LPFC_CFG_MANAGED) 6065 entry->timer_utilization = phba->cmf_last_ts; 6066 else 6067 entry->timer_utilization = ms; 6068 entry->timer_interval = ms; 6069 phba->cmf_last_ts = 0; 6070 6071 /* Increment rxtable index */ 6072 head = (head + 1) % LPFC_MAX_RXMONITOR_ENTRY; 6073 tail = atomic_read(&phba->rxtable_idx_tail); 6074 if (head == tail) { 6075 tail = (tail + 1) % LPFC_MAX_RXMONITOR_ENTRY; 6076 atomic_set(&phba->rxtable_idx_tail, tail); 6077 } 6078 atomic_set(&phba->rxtable_idx_head, head); 6079 } 6080 6081 if (phba->cmf_active_mode == LPFC_CFG_MONITOR) { 6082 /* If Monitor mode, check if we are oversubscribed 6083 * against the full line rate. 6084 */ 6085 if (mbpi && total > mbpi) 6086 atomic_inc(&phba->cgn_driver_evt_cnt); 6087 } 6088 phba->rx_block_cnt += div_u64(rcv, 512); /* save 512 byte block cnt */ 6089 6090 /* Each minute save Fabric and Driver congestion information */ 6091 lpfc_cgn_save_evt_cnt(phba); 6092 6093 phba->hba_flag &= ~HBA_SHORT_CMF; 6094 6095 /* Since we need to call lpfc_cgn_save_evt_cnt every minute, on the 6096 * minute, adjust our next timer interval, if needed, to ensure a 6097 * 1 minute granularity when we get the next timer interrupt. 
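	 * For example, if the next minute boundary is only 40 ms away, the
	 * next interval is shortened to 40 ms, HBA_SHORT_CMF is set, and
	 * cmf_link_byte_count is rescaled so the shorter window gets a
	 * proportionally smaller byte budget.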
	 */
	if (time_after(jiffies + msecs_to_jiffies(LPFC_CMF_INTERVAL),
		       phba->cgn_evt_timestamp)) {
		timer_interval = jiffies_to_msecs(phba->cgn_evt_timestamp -
						  jiffies);
		if (timer_interval <= 0)
			timer_interval = LPFC_CMF_INTERVAL;
		else
			phba->hba_flag |= HBA_SHORT_CMF;

		/* If we adjust timer_interval, max_bytes_per_interval
		 * needs to be adjusted as well.
		 */
		phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate *
						    timer_interval, 1000);
		if (phba->cmf_active_mode == LPFC_CFG_MONITOR)
			phba->cmf_max_bytes_per_interval =
				phba->cmf_link_byte_count;
	}

	/* Since total_bytes has already been zeroed, it's okay to unblock
	 * after max_bytes_per_interval is setup.
	 */
	if (atomic_xchg(&phba->cmf_bw_wait, 0))
		queue_work(phba->wq, &phba->unblock_request_work);

	/* SCSI IO is now unblocked */
	atomic_set(&phba->cmf_stop_io, 0);

skip:
	hrtimer_forward_now(timer,
			    ktime_set(0, timer_interval * NSEC_PER_MSEC));
	return HRTIMER_RESTART;
}

#define trunk_link_status(__idx)\
	bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
	       ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
		"Link up" : "Link down") : "NA"
/* Did port __idx report an error */
#define trunk_port_fault(__idx)\
	bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
	       (port_fault & (1 << __idx) ? "YES" : "NO") : "NA"

static void
lpfc_update_trunk_link_status(struct lpfc_hba *phba,
			      struct lpfc_acqe_fc_la *acqe_fc)
{
	uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc);
	uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc);

	phba->sli4_hba.link_state.speed =
		lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
					   bf_get(lpfc_acqe_fc_la_speed, acqe_fc));

	phba->sli4_hba.link_state.logical_speed =
		bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
	/* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */
	phba->fc_linkspeed =
		 lpfc_async_link_speed_to_read_top(
					phba,
					bf_get(lpfc_acqe_fc_la_speed, acqe_fc));

	if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) {
		phba->trunk_link.link0.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0;
	}
	if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) {
		phba->trunk_link.link1.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0;
	}
	if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) {
		phba->trunk_link.link2.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0;
	}
	if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) {
		phba->trunk_link.link3.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link3.fault = port_fault & 0x8 ?
err : 0; 6184 } 6185 6186 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6187 "2910 Async FC Trunking Event - Speed:%d\n" 6188 "\tLogical speed:%d " 6189 "port0: %s port1: %s port2: %s port3: %s\n", 6190 phba->sli4_hba.link_state.speed, 6191 phba->sli4_hba.link_state.logical_speed, 6192 trunk_link_status(0), trunk_link_status(1), 6193 trunk_link_status(2), trunk_link_status(3)); 6194 6195 if (phba->cmf_active_mode != LPFC_CFG_OFF) 6196 lpfc_cmf_signal_init(phba); 6197 6198 if (port_fault) 6199 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6200 "3202 trunk error:0x%x (%s) seen on port0:%s " 6201 /* 6202 * SLI-4: We have only 0xA error codes 6203 * defined as of now. print an appropriate 6204 * message in case driver needs to be updated. 6205 */ 6206 "port1:%s port2:%s port3:%s\n", err, err > 0xA ? 6207 "UNDEFINED. update driver." : trunk_errmsg[err], 6208 trunk_port_fault(0), trunk_port_fault(1), 6209 trunk_port_fault(2), trunk_port_fault(3)); 6210 } 6211 6212 6213 /** 6214 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event 6215 * @phba: pointer to lpfc hba data structure. 6216 * @acqe_fc: pointer to the async fc completion queue entry. 6217 * 6218 * This routine is to handle the SLI4 asynchronous FC event. It will simply log 6219 * that the event was received and then issue a read_topology mailbox command so 6220 * that the rest of the driver will treat it the same as SLI3. 6221 **/ 6222 static void 6223 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) 6224 { 6225 LPFC_MBOXQ_t *pmb; 6226 MAILBOX_t *mb; 6227 struct lpfc_mbx_read_top *la; 6228 int rc; 6229 6230 if (bf_get(lpfc_trailer_type, acqe_fc) != 6231 LPFC_FC_LA_EVENT_TYPE_FC_LINK) { 6232 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6233 "2895 Non FC link Event detected.(%d)\n", 6234 bf_get(lpfc_trailer_type, acqe_fc)); 6235 return; 6236 } 6237 6238 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) == 6239 LPFC_FC_LA_TYPE_TRUNKING_EVENT) { 6240 lpfc_update_trunk_link_status(phba, acqe_fc); 6241 return; 6242 } 6243 6244 /* Keep the link status for extra SLI4 state machine reference */ 6245 phba->sli4_hba.link_state.speed = 6246 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC, 6247 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 6248 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL; 6249 phba->sli4_hba.link_state.topology = 6250 bf_get(lpfc_acqe_fc_la_topology, acqe_fc); 6251 phba->sli4_hba.link_state.status = 6252 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc); 6253 phba->sli4_hba.link_state.type = 6254 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc); 6255 phba->sli4_hba.link_state.number = 6256 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc); 6257 phba->sli4_hba.link_state.fault = 6258 bf_get(lpfc_acqe_link_fault, acqe_fc); 6259 6260 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) == 6261 LPFC_FC_LA_TYPE_LINK_DOWN) 6262 phba->sli4_hba.link_state.logical_speed = 0; 6263 else if (!phba->sli4_hba.conf_trunk) 6264 phba->sli4_hba.link_state.logical_speed = 6265 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10; 6266 6267 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6268 "2896 Async FC event - Speed:%dGBaud Topology:x%x " 6269 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:" 6270 "%dMbps Fault:%d\n", 6271 phba->sli4_hba.link_state.speed, 6272 phba->sli4_hba.link_state.topology, 6273 phba->sli4_hba.link_state.status, 6274 phba->sli4_hba.link_state.type, 6275 phba->sli4_hba.link_state.number, 6276 phba->sli4_hba.link_state.logical_speed, 6277 phba->sli4_hba.link_state.fault); 6278 pmb = 
(LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6279 if (!pmb) { 6280 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6281 "2897 The mboxq allocation failed\n"); 6282 return; 6283 } 6284 rc = lpfc_mbox_rsrc_prep(phba, pmb); 6285 if (rc) { 6286 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6287 "2898 The mboxq prep failed\n"); 6288 goto out_free_pmb; 6289 } 6290 6291 /* Cleanup any outstanding ELS commands */ 6292 lpfc_els_flush_all_cmd(phba); 6293 6294 /* Block ELS IOCBs until we have done process link event */ 6295 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT; 6296 6297 /* Update link event statistics */ 6298 phba->sli.slistat.link_event++; 6299 6300 /* Create lpfc_handle_latt mailbox command from link ACQE */ 6301 lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf); 6302 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 6303 pmb->vport = phba->pport; 6304 6305 if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) { 6306 phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK); 6307 6308 switch (phba->sli4_hba.link_state.status) { 6309 case LPFC_FC_LA_TYPE_MDS_LINK_DOWN: 6310 phba->link_flag |= LS_MDS_LINK_DOWN; 6311 break; 6312 case LPFC_FC_LA_TYPE_MDS_LOOPBACK: 6313 phba->link_flag |= LS_MDS_LOOPBACK; 6314 break; 6315 default: 6316 break; 6317 } 6318 6319 /* Initialize completion status */ 6320 mb = &pmb->u.mb; 6321 mb->mbxStatus = MBX_SUCCESS; 6322 6323 /* Parse port fault information field */ 6324 lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc); 6325 6326 /* Parse and translate link attention fields */ 6327 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop; 6328 la->eventTag = acqe_fc->event_tag; 6329 6330 if (phba->sli4_hba.link_state.status == 6331 LPFC_FC_LA_TYPE_UNEXP_WWPN) { 6332 bf_set(lpfc_mbx_read_top_att_type, la, 6333 LPFC_FC_LA_TYPE_UNEXP_WWPN); 6334 } else { 6335 bf_set(lpfc_mbx_read_top_att_type, la, 6336 LPFC_FC_LA_TYPE_LINK_DOWN); 6337 } 6338 /* Invoke the mailbox command callback function */ 6339 lpfc_mbx_cmpl_read_topology(phba, pmb); 6340 6341 return; 6342 } 6343 6344 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 6345 if (rc == MBX_NOT_FINISHED) 6346 goto out_free_pmb; 6347 return; 6348 6349 out_free_pmb: 6350 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); 6351 } 6352 6353 /** 6354 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event 6355 * @phba: pointer to lpfc hba data structure. 6356 * @acqe_sli: pointer to the async SLI completion queue entry. 6357 * 6358 * This routine is to handle the SLI4 asynchronous SLI events. 
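 *
 * The events handled here include over/normal temperature notifications,
 * misconfigured or unqualified optics, remote DPort test initiation,
 * congestion parameter changes, misconfigured FA-PWWN, EEPROM failure and
 * congestion (CMF) signals; unrecognized event types are only logged.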
6359 **/ 6360 static void 6361 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) 6362 { 6363 char port_name; 6364 char message[128]; 6365 uint8_t status; 6366 uint8_t evt_type; 6367 uint8_t operational = 0; 6368 struct temp_event temp_event_data; 6369 struct lpfc_acqe_misconfigured_event *misconfigured; 6370 struct lpfc_acqe_cgn_signal *cgn_signal; 6371 struct Scsi_Host *shost; 6372 struct lpfc_vport **vports; 6373 int rc, i, cnt; 6374 6375 evt_type = bf_get(lpfc_trailer_type, acqe_sli); 6376 6377 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6378 "2901 Async SLI event - Type:%d, Event Data: x%08x " 6379 "x%08x x%08x x%08x\n", evt_type, 6380 acqe_sli->event_data1, acqe_sli->event_data2, 6381 acqe_sli->reserved, acqe_sli->trailer); 6382 6383 port_name = phba->Port[0]; 6384 if (port_name == 0x00) 6385 port_name = '?'; /* get port name is empty */ 6386 6387 switch (evt_type) { 6388 case LPFC_SLI_EVENT_TYPE_OVER_TEMP: 6389 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 6390 temp_event_data.event_code = LPFC_THRESHOLD_TEMP; 6391 temp_event_data.data = (uint32_t)acqe_sli->event_data1; 6392 6393 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 6394 "3190 Over Temperature:%d Celsius- Port Name %c\n", 6395 acqe_sli->event_data1, port_name); 6396 6397 phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; 6398 shost = lpfc_shost_from_vport(phba->pport); 6399 fc_host_post_vendor_event(shost, fc_get_event_number(), 6400 sizeof(temp_event_data), 6401 (char *)&temp_event_data, 6402 SCSI_NL_VID_TYPE_PCI 6403 | PCI_VENDOR_ID_EMULEX); 6404 break; 6405 case LPFC_SLI_EVENT_TYPE_NORM_TEMP: 6406 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 6407 temp_event_data.event_code = LPFC_NORMAL_TEMP; 6408 temp_event_data.data = (uint32_t)acqe_sli->event_data1; 6409 6410 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6411 "3191 Normal Temperature:%d Celsius - Port Name %c\n", 6412 acqe_sli->event_data1, port_name); 6413 6414 shost = lpfc_shost_from_vport(phba->pport); 6415 fc_host_post_vendor_event(shost, fc_get_event_number(), 6416 sizeof(temp_event_data), 6417 (char *)&temp_event_data, 6418 SCSI_NL_VID_TYPE_PCI 6419 | PCI_VENDOR_ID_EMULEX); 6420 break; 6421 case LPFC_SLI_EVENT_TYPE_MISCONFIGURED: 6422 misconfigured = (struct lpfc_acqe_misconfigured_event *) 6423 &acqe_sli->event_data1; 6424 6425 /* fetch the status for this port */ 6426 switch (phba->sli4_hba.lnk_info.lnk_no) { 6427 case LPFC_LINK_NUMBER_0: 6428 status = bf_get(lpfc_sli_misconfigured_port0_state, 6429 &misconfigured->theEvent); 6430 operational = bf_get(lpfc_sli_misconfigured_port0_op, 6431 &misconfigured->theEvent); 6432 break; 6433 case LPFC_LINK_NUMBER_1: 6434 status = bf_get(lpfc_sli_misconfigured_port1_state, 6435 &misconfigured->theEvent); 6436 operational = bf_get(lpfc_sli_misconfigured_port1_op, 6437 &misconfigured->theEvent); 6438 break; 6439 case LPFC_LINK_NUMBER_2: 6440 status = bf_get(lpfc_sli_misconfigured_port2_state, 6441 &misconfigured->theEvent); 6442 operational = bf_get(lpfc_sli_misconfigured_port2_op, 6443 &misconfigured->theEvent); 6444 break; 6445 case LPFC_LINK_NUMBER_3: 6446 status = bf_get(lpfc_sli_misconfigured_port3_state, 6447 &misconfigured->theEvent); 6448 operational = bf_get(lpfc_sli_misconfigured_port3_op, 6449 &misconfigured->theEvent); 6450 break; 6451 default: 6452 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6453 "3296 " 6454 "LPFC_SLI_EVENT_TYPE_MISCONFIGURED " 6455 "event: Invalid link %d", 6456 phba->sli4_hba.lnk_info.lnk_no); 6457 return; 6458 } 6459 6460 /* Skip if optic 
state unchanged */ 6461 if (phba->sli4_hba.lnk_info.optic_state == status) 6462 return; 6463 6464 switch (status) { 6465 case LPFC_SLI_EVENT_STATUS_VALID: 6466 sprintf(message, "Physical Link is functional"); 6467 break; 6468 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT: 6469 sprintf(message, "Optics faulted/incorrectly " 6470 "installed/not installed - Reseat optics, " 6471 "if issue not resolved, replace."); 6472 break; 6473 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE: 6474 sprintf(message, 6475 "Optics of two types installed - Remove one " 6476 "optic or install matching pair of optics."); 6477 break; 6478 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED: 6479 sprintf(message, "Incompatible optics - Replace with " 6480 "compatible optics for card to function."); 6481 break; 6482 case LPFC_SLI_EVENT_STATUS_UNQUALIFIED: 6483 sprintf(message, "Unqualified optics - Replace with " 6484 "Avago optics for Warranty and Technical " 6485 "Support - Link is%s operational", 6486 (operational) ? " not" : ""); 6487 break; 6488 case LPFC_SLI_EVENT_STATUS_UNCERTIFIED: 6489 sprintf(message, "Uncertified optics - Replace with " 6490 "Avago-certified optics to enable link " 6491 "operation - Link is%s operational", 6492 (operational) ? " not" : ""); 6493 break; 6494 default: 6495 /* firmware is reporting a status we don't know about */ 6496 sprintf(message, "Unknown event status x%02x", status); 6497 break; 6498 } 6499 6500 /* Issue READ_CONFIG mbox command to refresh supported speeds */ 6501 rc = lpfc_sli4_read_config(phba); 6502 if (rc) { 6503 phba->lmt = 0; 6504 lpfc_printf_log(phba, KERN_ERR, 6505 LOG_TRACE_EVENT, 6506 "3194 Unable to retrieve supported " 6507 "speeds, rc = 0x%x\n", rc); 6508 } 6509 rc = lpfc_sli4_refresh_params(phba); 6510 if (rc) { 6511 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6512 "3174 Unable to update pls support, " 6513 "rc x%x\n", rc); 6514 } 6515 vports = lpfc_create_vport_work_array(phba); 6516 if (vports != NULL) { 6517 for (i = 0; i <= phba->max_vports && vports[i] != NULL; 6518 i++) { 6519 shost = lpfc_shost_from_vport(vports[i]); 6520 lpfc_host_supported_speeds_set(shost); 6521 } 6522 } 6523 lpfc_destroy_vport_work_array(phba, vports); 6524 6525 phba->sli4_hba.lnk_info.optic_state = status; 6526 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6527 "3176 Port Name %c %s\n", port_name, message); 6528 break; 6529 case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT: 6530 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6531 "3192 Remote DPort Test Initiated - " 6532 "Event Data1:x%08x Event Data2: x%08x\n", 6533 acqe_sli->event_data1, acqe_sli->event_data2); 6534 break; 6535 case LPFC_SLI_EVENT_TYPE_PORT_PARAMS_CHG: 6536 /* Call FW to obtain active parms */ 6537 lpfc_sli4_cgn_parm_chg_evt(phba); 6538 break; 6539 case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN: 6540 /* Misconfigured WWN. Reports that the SLI Port is configured 6541 * to use FA-WWN, but the attached device doesn’t support it. 6542 * Event Data1 - N.A, Event Data2 - N.A 6543 * This event only happens on the physical port. 6544 */ 6545 lpfc_log_msg(phba, KERN_WARNING, LOG_SLI | LOG_DISCOVERY, 6546 "2699 Misconfigured FA-PWWN - Attached device " 6547 "does not support FA-PWWN\n"); 6548 phba->sli4_hba.fawwpn_flag &= ~LPFC_FAWWPN_FABRIC; 6549 memset(phba->pport->fc_portname.u.wwn, 0, 6550 sizeof(struct lpfc_name)); 6551 break; 6552 case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE: 6553 /* EEPROM failure. 
No driver action is required */ 6554 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 6555 "2518 EEPROM failure - " 6556 "Event Data1: x%08x Event Data2: x%08x\n", 6557 acqe_sli->event_data1, acqe_sli->event_data2); 6558 break; 6559 case LPFC_SLI_EVENT_TYPE_CGN_SIGNAL: 6560 if (phba->cmf_active_mode == LPFC_CFG_OFF) 6561 break; 6562 cgn_signal = (struct lpfc_acqe_cgn_signal *) 6563 &acqe_sli->event_data1; 6564 phba->cgn_acqe_cnt++; 6565 6566 cnt = bf_get(lpfc_warn_acqe, cgn_signal); 6567 atomic64_add(cnt, &phba->cgn_acqe_stat.warn); 6568 atomic64_add(cgn_signal->alarm_cnt, &phba->cgn_acqe_stat.alarm); 6569 6570 /* no threshold for CMF, even 1 signal will trigger an event */ 6571 6572 /* Alarm overrides warning, so check that first */ 6573 if (cgn_signal->alarm_cnt) { 6574 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) { 6575 /* Keep track of alarm cnt for CMF_SYNC_WQE */ 6576 atomic_add(cgn_signal->alarm_cnt, 6577 &phba->cgn_sync_alarm_cnt); 6578 } 6579 } else if (cnt) { 6580 /* signal action needs to be taken */ 6581 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY || 6582 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) { 6583 /* Keep track of warning cnt for CMF_SYNC_WQE */ 6584 atomic_add(cnt, &phba->cgn_sync_warn_cnt); 6585 } 6586 } 6587 break; 6588 default: 6589 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6590 "3193 Unrecognized SLI event, type: 0x%x", 6591 evt_type); 6592 break; 6593 } 6594 } 6595 6596 /** 6597 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport 6598 * @vport: pointer to vport data structure. 6599 * 6600 * This routine is to perform Clear Virtual Link (CVL) on a vport in 6601 * response to a CVL event. 6602 * 6603 * Return the pointer to the ndlp with the vport if successful, otherwise 6604 * return NULL. 6605 **/ 6606 static struct lpfc_nodelist * 6607 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport) 6608 { 6609 struct lpfc_nodelist *ndlp; 6610 struct Scsi_Host *shost; 6611 struct lpfc_hba *phba; 6612 6613 if (!vport) 6614 return NULL; 6615 phba = vport->phba; 6616 if (!phba) 6617 return NULL; 6618 ndlp = lpfc_findnode_did(vport, Fabric_DID); 6619 if (!ndlp) { 6620 /* Cannot find existing Fabric ndlp, so allocate a new one */ 6621 ndlp = lpfc_nlp_init(vport, Fabric_DID); 6622 if (!ndlp) 6623 return NULL; 6624 /* Set the node type */ 6625 ndlp->nlp_type |= NLP_FABRIC; 6626 /* Put ndlp onto node list */ 6627 lpfc_enqueue_node(vport, ndlp); 6628 } 6629 if ((phba->pport->port_state < LPFC_FLOGI) && 6630 (phba->pport->port_state != LPFC_VPORT_FAILED)) 6631 return NULL; 6632 /* If virtual link is not yet instantiated ignore CVL */ 6633 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC) 6634 && (vport->port_state != LPFC_VPORT_FAILED)) 6635 return NULL; 6636 shost = lpfc_shost_from_vport(vport); 6637 if (!shost) 6638 return NULL; 6639 lpfc_linkdown_port(vport); 6640 lpfc_cleanup_pending_mbox(vport); 6641 spin_lock_irq(shost->host_lock); 6642 vport->fc_flag |= FC_VPORT_CVL_RCVD; 6643 spin_unlock_irq(shost->host_lock); 6644 6645 return ndlp; 6646 } 6647 6648 /** 6649 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports 6650 * @phba: pointer to lpfc hba data structure. 6651 * 6652 * This routine is to perform Clear Virtual Link (CVL) on all vports in 6653 * response to a FCF dead event. 
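 *
 * Each vport in the vport work array is passed to
 * lpfc_sli4_perform_vport_cvl(), so each instantiated virtual link is
 * brought down and its vport marked with FC_VPORT_CVL_RCVD.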
6654 **/ 6655 static void 6656 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba) 6657 { 6658 struct lpfc_vport **vports; 6659 int i; 6660 6661 vports = lpfc_create_vport_work_array(phba); 6662 if (vports) 6663 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 6664 lpfc_sli4_perform_vport_cvl(vports[i]); 6665 lpfc_destroy_vport_work_array(phba, vports); 6666 } 6667 6668 /** 6669 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event 6670 * @phba: pointer to lpfc hba data structure. 6671 * @acqe_fip: pointer to the async fcoe completion queue entry. 6672 * 6673 * This routine is to handle the SLI4 asynchronous fcoe event. 6674 **/ 6675 static void 6676 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, 6677 struct lpfc_acqe_fip *acqe_fip) 6678 { 6679 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip); 6680 int rc; 6681 struct lpfc_vport *vport; 6682 struct lpfc_nodelist *ndlp; 6683 int active_vlink_present; 6684 struct lpfc_vport **vports; 6685 int i; 6686 6687 phba->fc_eventTag = acqe_fip->event_tag; 6688 phba->fcoe_eventtag = acqe_fip->event_tag; 6689 switch (event_type) { 6690 case LPFC_FIP_EVENT_TYPE_NEW_FCF: 6691 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD: 6692 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF) 6693 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6694 "2546 New FCF event, evt_tag:x%x, " 6695 "index:x%x\n", 6696 acqe_fip->event_tag, 6697 acqe_fip->index); 6698 else 6699 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | 6700 LOG_DISCOVERY, 6701 "2788 FCF param modified event, " 6702 "evt_tag:x%x, index:x%x\n", 6703 acqe_fip->event_tag, 6704 acqe_fip->index); 6705 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 6706 /* 6707 * During period of FCF discovery, read the FCF 6708 * table record indexed by the event to update 6709 * FCF roundrobin failover eligible FCF bmask. 6710 */ 6711 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 6712 LOG_DISCOVERY, 6713 "2779 Read FCF (x%x) for updating " 6714 "roundrobin FCF failover bmask\n", 6715 acqe_fip->index); 6716 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index); 6717 } 6718 6719 /* If the FCF discovery is in progress, do nothing. */ 6720 spin_lock_irq(&phba->hbalock); 6721 if (phba->hba_flag & FCF_TS_INPROG) { 6722 spin_unlock_irq(&phba->hbalock); 6723 break; 6724 } 6725 /* If fast FCF failover rescan event is pending, do nothing */ 6726 if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) { 6727 spin_unlock_irq(&phba->hbalock); 6728 break; 6729 } 6730 6731 /* If the FCF has been in discovered state, do nothing. 
*/ 6732 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) { 6733 spin_unlock_irq(&phba->hbalock); 6734 break; 6735 } 6736 spin_unlock_irq(&phba->hbalock); 6737 6738 /* Otherwise, scan the entire FCF table and re-discover SAN */ 6739 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 6740 "2770 Start FCF table scan per async FCF " 6741 "event, evt_tag:x%x, index:x%x\n", 6742 acqe_fip->event_tag, acqe_fip->index); 6743 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, 6744 LPFC_FCOE_FCF_GET_FIRST); 6745 if (rc) 6746 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6747 "2547 Issue FCF scan read FCF mailbox " 6748 "command failed (x%x)\n", rc); 6749 break; 6750 6751 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL: 6752 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6753 "2548 FCF Table full count 0x%x tag 0x%x\n", 6754 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip), 6755 acqe_fip->event_tag); 6756 break; 6757 6758 case LPFC_FIP_EVENT_TYPE_FCF_DEAD: 6759 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; 6760 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6761 "2549 FCF (x%x) disconnected from network, " 6762 "tag:x%x\n", acqe_fip->index, 6763 acqe_fip->event_tag); 6764 /* 6765 * If we are in the middle of FCF failover process, clear 6766 * the corresponding FCF bit in the roundrobin bitmap. 6767 */ 6768 spin_lock_irq(&phba->hbalock); 6769 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) && 6770 (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) { 6771 spin_unlock_irq(&phba->hbalock); 6772 /* Update FLOGI FCF failover eligible FCF bmask */ 6773 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index); 6774 break; 6775 } 6776 spin_unlock_irq(&phba->hbalock); 6777 6778 /* If the event is not for currently used fcf do nothing */ 6779 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index) 6780 break; 6781 6782 /* 6783 * Otherwise, request the port to rediscover the entire FCF 6784 * table for a fast recovery from case that the current FCF 6785 * is no longer valid as we are not in the middle of FCF 6786 * failover process already. 6787 */ 6788 spin_lock_irq(&phba->hbalock); 6789 /* Mark the fast failover process in progress */ 6790 phba->fcf.fcf_flag |= FCF_DEAD_DISC; 6791 spin_unlock_irq(&phba->hbalock); 6792 6793 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 6794 "2771 Start FCF fast failover process due to " 6795 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x " 6796 "\n", acqe_fip->event_tag, acqe_fip->index); 6797 rc = lpfc_sli4_redisc_fcf_table(phba); 6798 if (rc) { 6799 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 6800 LOG_TRACE_EVENT, 6801 "2772 Issue FCF rediscover mailbox " 6802 "command failed, fail through to FCF " 6803 "dead event\n"); 6804 spin_lock_irq(&phba->hbalock); 6805 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 6806 spin_unlock_irq(&phba->hbalock); 6807 /* 6808 * Last resort will fail over by treating this 6809 * as a link down to FCF registration. 6810 */ 6811 lpfc_sli4_fcf_dead_failthrough(phba); 6812 } else { 6813 /* Reset FCF roundrobin bmask for new discovery */ 6814 lpfc_sli4_clear_fcf_rr_bmask(phba); 6815 /* 6816 * Handling fast FCF failover to a DEAD FCF event is 6817 * considered equalivant to receiving CVL to all vports. 
6818 */ 6819 lpfc_sli4_perform_all_vport_cvl(phba); 6820 } 6821 break; 6822 case LPFC_FIP_EVENT_TYPE_CVL: 6823 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; 6824 lpfc_printf_log(phba, KERN_ERR, 6825 LOG_TRACE_EVENT, 6826 "2718 Clear Virtual Link Received for VPI 0x%x" 6827 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag); 6828 6829 vport = lpfc_find_vport_by_vpid(phba, 6830 acqe_fip->index); 6831 ndlp = lpfc_sli4_perform_vport_cvl(vport); 6832 if (!ndlp) 6833 break; 6834 active_vlink_present = 0; 6835 6836 vports = lpfc_create_vport_work_array(phba); 6837 if (vports) { 6838 for (i = 0; i <= phba->max_vports && vports[i] != NULL; 6839 i++) { 6840 if ((!(vports[i]->fc_flag & 6841 FC_VPORT_CVL_RCVD)) && 6842 (vports[i]->port_state > LPFC_FDISC)) { 6843 active_vlink_present = 1; 6844 break; 6845 } 6846 } 6847 lpfc_destroy_vport_work_array(phba, vports); 6848 } 6849 6850 /* 6851 * Don't re-instantiate if vport is marked for deletion. 6852 * If we are here first then vport_delete is going to wait 6853 * for discovery to complete. 6854 */ 6855 if (!(vport->load_flag & FC_UNLOADING) && 6856 active_vlink_present) { 6857 /* 6858 * If there are other active VLinks present, 6859 * re-instantiate the Vlink using FDISC. 6860 */ 6861 mod_timer(&ndlp->nlp_delayfunc, 6862 jiffies + msecs_to_jiffies(1000)); 6863 spin_lock_irq(&ndlp->lock); 6864 ndlp->nlp_flag |= NLP_DELAY_TMO; 6865 spin_unlock_irq(&ndlp->lock); 6866 ndlp->nlp_last_elscmd = ELS_CMD_FDISC; 6867 vport->port_state = LPFC_FDISC; 6868 } else { 6869 /* 6870 * Otherwise, we request port to rediscover 6871 * the entire FCF table for a fast recovery 6872 * from possible case that the current FCF 6873 * is no longer valid if we are not already 6874 * in the FCF failover process. 6875 */ 6876 spin_lock_irq(&phba->hbalock); 6877 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 6878 spin_unlock_irq(&phba->hbalock); 6879 break; 6880 } 6881 /* Mark the fast failover process in progress */ 6882 phba->fcf.fcf_flag |= FCF_ACVL_DISC; 6883 spin_unlock_irq(&phba->hbalock); 6884 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 6885 LOG_DISCOVERY, 6886 "2773 Start FCF failover per CVL, " 6887 "evt_tag:x%x\n", acqe_fip->event_tag); 6888 rc = lpfc_sli4_redisc_fcf_table(phba); 6889 if (rc) { 6890 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 6891 LOG_TRACE_EVENT, 6892 "2774 Issue FCF rediscover " 6893 "mailbox command failed, " 6894 "through to CVL event\n"); 6895 spin_lock_irq(&phba->hbalock); 6896 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; 6897 spin_unlock_irq(&phba->hbalock); 6898 /* 6899 * Last resort will be re-try on the 6900 * the current registered FCF entry. 6901 */ 6902 lpfc_retry_pport_discovery(phba); 6903 } else 6904 /* 6905 * Reset FCF roundrobin bmask for new 6906 * discovery. 6907 */ 6908 lpfc_sli4_clear_fcf_rr_bmask(phba); 6909 } 6910 break; 6911 default: 6912 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6913 "0288 Unknown FCoE event type 0x%x event tag " 6914 "0x%x\n", event_type, acqe_fip->event_tag); 6915 break; 6916 } 6917 } 6918 6919 /** 6920 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event 6921 * @phba: pointer to lpfc hba data structure. 6922 * @acqe_dcbx: pointer to the async dcbx completion queue entry. 6923 * 6924 * This routine is to handle the SLI4 asynchronous dcbx event. 
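 * The event is currently only logged; no further DCBX processing is done.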
6925 **/ 6926 static void 6927 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba, 6928 struct lpfc_acqe_dcbx *acqe_dcbx) 6929 { 6930 phba->fc_eventTag = acqe_dcbx->event_tag; 6931 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6932 "0290 The SLI4 DCBX asynchronous event is not " 6933 "handled yet\n"); 6934 } 6935 6936 /** 6937 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event 6938 * @phba: pointer to lpfc hba data structure. 6939 * @acqe_grp5: pointer to the async grp5 completion queue entry. 6940 * 6941 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event 6942 * is an asynchronous notified of a logical link speed change. The Port 6943 * reports the logical link speed in units of 10Mbps. 6944 **/ 6945 static void 6946 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba, 6947 struct lpfc_acqe_grp5 *acqe_grp5) 6948 { 6949 uint16_t prev_ll_spd; 6950 6951 phba->fc_eventTag = acqe_grp5->event_tag; 6952 phba->fcoe_eventtag = acqe_grp5->event_tag; 6953 prev_ll_spd = phba->sli4_hba.link_state.logical_speed; 6954 phba->sli4_hba.link_state.logical_speed = 6955 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10; 6956 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6957 "2789 GRP5 Async Event: Updating logical link speed " 6958 "from %dMbps to %dMbps\n", prev_ll_spd, 6959 phba->sli4_hba.link_state.logical_speed); 6960 } 6961 6962 /** 6963 * lpfc_sli4_async_cmstat_evt - Process the asynchronous cmstat event 6964 * @phba: pointer to lpfc hba data structure. 6965 * 6966 * This routine is to handle the SLI4 asynchronous cmstat event. A cmstat event 6967 * is an asynchronous notification of a request to reset CM stats. 6968 **/ 6969 static void 6970 lpfc_sli4_async_cmstat_evt(struct lpfc_hba *phba) 6971 { 6972 if (!phba->cgn_i) 6973 return; 6974 lpfc_init_congestion_stat(phba); 6975 } 6976 6977 /** 6978 * lpfc_cgn_params_val - Validate FW congestion parameters. 6979 * @phba: pointer to lpfc hba data structure. 6980 * @p_cfg_param: pointer to FW provided congestion parameters. 6981 * 6982 * This routine validates the congestion parameters passed 6983 * by the FW to the driver via an ACQE event. 6984 **/ 6985 static void 6986 lpfc_cgn_params_val(struct lpfc_hba *phba, struct lpfc_cgn_param *p_cfg_param) 6987 { 6988 spin_lock_irq(&phba->hbalock); 6989 6990 if (!lpfc_rangecheck(p_cfg_param->cgn_param_mode, LPFC_CFG_OFF, 6991 LPFC_CFG_MONITOR)) { 6992 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT, 6993 "6225 CMF mode param out of range: %d\n", 6994 p_cfg_param->cgn_param_mode); 6995 p_cfg_param->cgn_param_mode = LPFC_CFG_OFF; 6996 } 6997 6998 spin_unlock_irq(&phba->hbalock); 6999 } 7000 7001 /** 7002 * lpfc_cgn_params_parse - Process a FW cong parm change event 7003 * @phba: pointer to lpfc hba data structure. 7004 * @p_cgn_param: pointer to a data buffer with the FW cong params. 7005 * @len: the size of pdata in bytes. 7006 * 7007 * This routine validates the congestion management buffer signature 7008 * from the FW, validates the contents and makes corrections for 7009 * valid, in-range values. If the signature magic is correct and 7010 * after parameter validation, the contents are copied to the driver's 7011 * @phba structure. If the magic is incorrect, an error message is 7012 * logged. 
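 *
 * If the copied parameters change the active CMF mode, the transition is
 * applied here as well: turning CMF on starts congestion management (and
 * re-issues EDC when the link is up), turning it off stops it, and moving
 * between MANAGED and MONITOR either restores the full-link byte budget or
 * re-initializes the congestion signal exchange.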
7013 **/ 7014 static void 7015 lpfc_cgn_params_parse(struct lpfc_hba *phba, 7016 struct lpfc_cgn_param *p_cgn_param, uint32_t len) 7017 { 7018 struct lpfc_cgn_info *cp; 7019 uint32_t crc, oldmode; 7020 7021 /* Make sure the FW has encoded the correct magic number to 7022 * validate the congestion parameter in FW memory. 7023 */ 7024 if (p_cgn_param->cgn_param_magic == LPFC_CFG_PARAM_MAGIC_NUM) { 7025 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT, 7026 "4668 FW cgn parm buffer data: " 7027 "magic 0x%x version %d mode %d " 7028 "level0 %d level1 %d " 7029 "level2 %d byte13 %d " 7030 "byte14 %d byte15 %d " 7031 "byte11 %d byte12 %d activeMode %d\n", 7032 p_cgn_param->cgn_param_magic, 7033 p_cgn_param->cgn_param_version, 7034 p_cgn_param->cgn_param_mode, 7035 p_cgn_param->cgn_param_level0, 7036 p_cgn_param->cgn_param_level1, 7037 p_cgn_param->cgn_param_level2, 7038 p_cgn_param->byte13, 7039 p_cgn_param->byte14, 7040 p_cgn_param->byte15, 7041 p_cgn_param->byte11, 7042 p_cgn_param->byte12, 7043 phba->cmf_active_mode); 7044 7045 oldmode = phba->cmf_active_mode; 7046 7047 /* Any parameters out of range are corrected to defaults 7048 * by this routine. No need to fail. 7049 */ 7050 lpfc_cgn_params_val(phba, p_cgn_param); 7051 7052 /* Parameters are verified, move them into driver storage */ 7053 spin_lock_irq(&phba->hbalock); 7054 memcpy(&phba->cgn_p, p_cgn_param, 7055 sizeof(struct lpfc_cgn_param)); 7056 7057 /* Update parameters in congestion info buffer now */ 7058 if (phba->cgn_i) { 7059 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; 7060 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode; 7061 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0; 7062 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1; 7063 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2; 7064 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, 7065 LPFC_CGN_CRC32_SEED); 7066 cp->cgn_info_crc = cpu_to_le32(crc); 7067 } 7068 spin_unlock_irq(&phba->hbalock); 7069 7070 phba->cmf_active_mode = phba->cgn_p.cgn_param_mode; 7071 7072 switch (oldmode) { 7073 case LPFC_CFG_OFF: 7074 if (phba->cgn_p.cgn_param_mode != LPFC_CFG_OFF) { 7075 /* Turning CMF on */ 7076 lpfc_cmf_start(phba); 7077 7078 if (phba->link_state >= LPFC_LINK_UP) { 7079 phba->cgn_reg_fpin = 7080 phba->cgn_init_reg_fpin; 7081 phba->cgn_reg_signal = 7082 phba->cgn_init_reg_signal; 7083 lpfc_issue_els_edc(phba->pport, 0); 7084 } 7085 } 7086 break; 7087 case LPFC_CFG_MANAGED: 7088 switch (phba->cgn_p.cgn_param_mode) { 7089 case LPFC_CFG_OFF: 7090 /* Turning CMF off */ 7091 lpfc_cmf_stop(phba); 7092 if (phba->link_state >= LPFC_LINK_UP) 7093 lpfc_issue_els_edc(phba->pport, 0); 7094 break; 7095 case LPFC_CFG_MONITOR: 7096 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 7097 "4661 Switch from MANAGED to " 7098 "`MONITOR mode\n"); 7099 phba->cmf_max_bytes_per_interval = 7100 phba->cmf_link_byte_count; 7101 7102 /* Resume blocked IO - unblock on workqueue */ 7103 queue_work(phba->wq, 7104 &phba->unblock_request_work); 7105 break; 7106 } 7107 break; 7108 case LPFC_CFG_MONITOR: 7109 switch (phba->cgn_p.cgn_param_mode) { 7110 case LPFC_CFG_OFF: 7111 /* Turning CMF off */ 7112 lpfc_cmf_stop(phba); 7113 if (phba->link_state >= LPFC_LINK_UP) 7114 lpfc_issue_els_edc(phba->pport, 0); 7115 break; 7116 case LPFC_CFG_MANAGED: 7117 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 7118 "4662 Switch from MONITOR to " 7119 "MANAGED mode\n"); 7120 lpfc_cmf_signal_init(phba); 7121 break; 7122 } 7123 break; 7124 } 7125 } else { 7126 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, 
7127 "4669 FW cgn parm buf wrong magic 0x%x " 7128 "version %d\n", p_cgn_param->cgn_param_magic, 7129 p_cgn_param->cgn_param_version); 7130 } 7131 } 7132 7133 /** 7134 * lpfc_sli4_cgn_params_read - Read and Validate FW congestion parameters. 7135 * @phba: pointer to lpfc hba data structure. 7136 * 7137 * This routine issues a read_object mailbox command to 7138 * get the congestion management parameters from the FW 7139 * parses it and updates the driver maintained values. 7140 * 7141 * Returns 7142 * 0 if the object was empty 7143 * -Eval if an error was encountered 7144 * Count if bytes were read from object 7145 **/ 7146 int 7147 lpfc_sli4_cgn_params_read(struct lpfc_hba *phba) 7148 { 7149 int ret = 0; 7150 struct lpfc_cgn_param *p_cgn_param = NULL; 7151 u32 *pdata = NULL; 7152 u32 len = 0; 7153 7154 /* Find out if the FW has a new set of congestion parameters. */ 7155 len = sizeof(struct lpfc_cgn_param); 7156 pdata = kzalloc(len, GFP_KERNEL); 7157 ret = lpfc_read_object(phba, (char *)LPFC_PORT_CFG_NAME, 7158 pdata, len); 7159 7160 /* 0 means no data. A negative means error. A positive means 7161 * bytes were copied. 7162 */ 7163 if (!ret) { 7164 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, 7165 "4670 CGN RD OBJ returns no data\n"); 7166 goto rd_obj_err; 7167 } else if (ret < 0) { 7168 /* Some error. Just exit and return it to the caller.*/ 7169 goto rd_obj_err; 7170 } 7171 7172 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT, 7173 "6234 READ CGN PARAMS Successful %d\n", len); 7174 7175 /* Parse data pointer over len and update the phba congestion 7176 * parameters with values passed back. The receive rate values 7177 * may have been altered in FW, but take no action here. 7178 */ 7179 p_cgn_param = (struct lpfc_cgn_param *)pdata; 7180 lpfc_cgn_params_parse(phba, p_cgn_param, len); 7181 7182 rd_obj_err: 7183 kfree(pdata); 7184 return ret; 7185 } 7186 7187 /** 7188 * lpfc_sli4_cgn_parm_chg_evt - Process a FW congestion param change event 7189 * @phba: pointer to lpfc hba data structure. 7190 * 7191 * The FW generated Async ACQE SLI event calls this routine when 7192 * the event type is an SLI Internal Port Event and the Event Code 7193 * indicates a change to the FW maintained congestion parameters. 7194 * 7195 * This routine executes a Read_Object mailbox call to obtain the 7196 * current congestion parameters maintained in FW and corrects 7197 * the driver's active congestion parameters. 7198 * 7199 * The acqe event is not passed because there is no further data 7200 * required. 7201 * 7202 * Returns nonzero error if event processing encountered an error. 7203 * Zero otherwise for success. 7204 **/ 7205 static int 7206 lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *phba) 7207 { 7208 int ret = 0; 7209 7210 if (!phba->sli4_hba.pc_sli4_params.cmf) { 7211 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, 7212 "4664 Cgn Evt when E2E off. Drop event\n"); 7213 return -EACCES; 7214 } 7215 7216 /* If the event is claiming an empty object, it's ok. A write 7217 * could have cleared it. Only error is a negative return 7218 * status. 
7219 */ 7220 ret = lpfc_sli4_cgn_params_read(phba); 7221 if (ret < 0) { 7222 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, 7223 "4667 Error reading Cgn Params (%d)\n", 7224 ret); 7225 } else if (!ret) { 7226 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, 7227 "4673 CGN Event empty object.\n"); 7228 } 7229 return ret; 7230 } 7231 7232 /** 7233 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event 7234 * @phba: pointer to lpfc hba data structure. 7235 * 7236 * This routine is invoked by the worker thread to process all the pending 7237 * SLI4 asynchronous events. 7238 **/ 7239 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba) 7240 { 7241 struct lpfc_cq_event *cq_event; 7242 unsigned long iflags; 7243 7244 /* First, declare the async event has been handled */ 7245 spin_lock_irqsave(&phba->hbalock, iflags); 7246 phba->hba_flag &= ~ASYNC_EVENT; 7247 spin_unlock_irqrestore(&phba->hbalock, iflags); 7248 7249 /* Now, handle all the async events */ 7250 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); 7251 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) { 7252 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue, 7253 cq_event, struct lpfc_cq_event, list); 7254 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, 7255 iflags); 7256 7257 /* Process the asynchronous event */ 7258 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) { 7259 case LPFC_TRAILER_CODE_LINK: 7260 lpfc_sli4_async_link_evt(phba, 7261 &cq_event->cqe.acqe_link); 7262 break; 7263 case LPFC_TRAILER_CODE_FCOE: 7264 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip); 7265 break; 7266 case LPFC_TRAILER_CODE_DCBX: 7267 lpfc_sli4_async_dcbx_evt(phba, 7268 &cq_event->cqe.acqe_dcbx); 7269 break; 7270 case LPFC_TRAILER_CODE_GRP5: 7271 lpfc_sli4_async_grp5_evt(phba, 7272 &cq_event->cqe.acqe_grp5); 7273 break; 7274 case LPFC_TRAILER_CODE_FC: 7275 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc); 7276 break; 7277 case LPFC_TRAILER_CODE_SLI: 7278 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli); 7279 break; 7280 case LPFC_TRAILER_CODE_CMSTAT: 7281 lpfc_sli4_async_cmstat_evt(phba); 7282 break; 7283 default: 7284 lpfc_printf_log(phba, KERN_ERR, 7285 LOG_TRACE_EVENT, 7286 "1804 Invalid asynchronous event code: " 7287 "x%x\n", bf_get(lpfc_trailer_code, 7288 &cq_event->cqe.mcqe_cmpl)); 7289 break; 7290 } 7291 7292 /* Free the completion event processed to the free pool */ 7293 lpfc_sli4_cq_event_release(phba, cq_event); 7294 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); 7295 } 7296 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags); 7297 } 7298 7299 /** 7300 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event 7301 * @phba: pointer to lpfc hba data structure. 7302 * 7303 * This routine is invoked by the worker thread to process FCF table 7304 * rediscovery pending completion event. 
7305 **/ 7306 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba) 7307 { 7308 int rc; 7309 7310 spin_lock_irq(&phba->hbalock); 7311 /* Clear FCF rediscovery timeout event */ 7312 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT; 7313 /* Clear driver fast failover FCF record flag */ 7314 phba->fcf.failover_rec.flag = 0; 7315 /* Set state for FCF fast failover */ 7316 phba->fcf.fcf_flag |= FCF_REDISC_FOV; 7317 spin_unlock_irq(&phba->hbalock); 7318 7319 /* Scan FCF table from the first entry to re-discover SAN */ 7320 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 7321 "2777 Start post-quiescent FCF table scan\n"); 7322 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 7323 if (rc) 7324 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7325 "2747 Issue FCF scan read FCF mailbox " 7326 "command failed 0x%x\n", rc); 7327 } 7328 7329 /** 7330 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table 7331 * @phba: pointer to lpfc hba data structure. 7332 * @dev_grp: The HBA PCI-Device group number. 7333 * 7334 * This routine is invoked to set up the per HBA PCI-Device group function 7335 * API jump table entries. 7336 * 7337 * Return: 0 if success, otherwise -ENODEV 7338 **/ 7339 int 7340 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 7341 { 7342 int rc; 7343 7344 /* Set up lpfc PCI-device group */ 7345 phba->pci_dev_grp = dev_grp; 7346 7347 /* The LPFC_PCI_DEV_OC uses SLI4 */ 7348 if (dev_grp == LPFC_PCI_DEV_OC) 7349 phba->sli_rev = LPFC_SLI_REV4; 7350 7351 /* Set up device INIT API function jump table */ 7352 rc = lpfc_init_api_table_setup(phba, dev_grp); 7353 if (rc) 7354 return -ENODEV; 7355 /* Set up SCSI API function jump table */ 7356 rc = lpfc_scsi_api_table_setup(phba, dev_grp); 7357 if (rc) 7358 return -ENODEV; 7359 /* Set up SLI API function jump table */ 7360 rc = lpfc_sli_api_table_setup(phba, dev_grp); 7361 if (rc) 7362 return -ENODEV; 7363 /* Set up MBOX API function jump table */ 7364 rc = lpfc_mbox_api_table_setup(phba, dev_grp); 7365 if (rc) 7366 return -ENODEV; 7367 7368 return 0; 7369 } 7370 7371 /** 7372 * lpfc_log_intr_mode - Log the active interrupt mode 7373 * @phba: pointer to lpfc hba data structure. 7374 * @intr_mode: active interrupt mode adopted. 7375 * 7376 * This routine it invoked to log the currently used active interrupt mode 7377 * to the device. 7378 **/ 7379 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) 7380 { 7381 switch (intr_mode) { 7382 case 0: 7383 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7384 "0470 Enable INTx interrupt mode.\n"); 7385 break; 7386 case 1: 7387 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7388 "0481 Enabled MSI interrupt mode.\n"); 7389 break; 7390 case 2: 7391 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7392 "0480 Enabled MSI-X interrupt mode.\n"); 7393 break; 7394 default: 7395 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7396 "0482 Illegal interrupt mode.\n"); 7397 break; 7398 } 7399 return; 7400 } 7401 7402 /** 7403 * lpfc_enable_pci_dev - Enable a generic PCI device. 7404 * @phba: pointer to lpfc hba data structure. 7405 * 7406 * This routine is invoked to enable the PCI device that is common to all 7407 * PCI devices. 
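 *
 * The sequence is: enable the device's memory resources, request the
 * BAR memory regions under LPFC_DRIVER_NAME, enable bus mastering,
 * attempt MWI, and save the PCI state for EEH recovery. On any failure
 * the device is left disabled and -ENODEV is returned.
 * lpfc_disable_pci_dev() below performs the inverse teardown,
 * releasing the regions before disabling the device.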
7408 *
7409 * Return codes
7410 * 	0 - successful
7411 * 	other values - error
7412 **/
7413 static int
7414 lpfc_enable_pci_dev(struct lpfc_hba *phba)
7415 {
7416 	struct pci_dev *pdev;
7417
7418 	/* Obtain PCI device reference */
7419 	if (!phba->pcidev)
7420 		goto out_error;
7421 	else
7422 		pdev = phba->pcidev;
7423 	/* Enable PCI device */
7424 	if (pci_enable_device_mem(pdev))
7425 		goto out_error;
7426 	/* Request PCI resource for the device */
7427 	if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
7428 		goto out_disable_device;
7429 	/* Set up device as PCI master and save state for EEH */
7430 	pci_set_master(pdev);
7431 	pci_try_set_mwi(pdev);
7432 	pci_save_state(pdev);
7433
7434 	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
7435 	if (pci_is_pcie(pdev))
7436 		pdev->needs_freset = 1;
7437
7438 	return 0;
7439
7440 out_disable_device:
7441 	pci_disable_device(pdev);
7442 out_error:
7443 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7444 			"1401 Failed to enable pci device\n");
7445 	return -ENODEV;
7446 }
7447
7448 /**
7449 * lpfc_disable_pci_dev - Disable a generic PCI device.
7450 * @phba: pointer to lpfc hba data structure.
7451 *
7452 * This routine is invoked to disable the PCI device that is common to all
7453 * PCI devices.
7454 **/
7455 static void
7456 lpfc_disable_pci_dev(struct lpfc_hba *phba)
7457 {
7458 	struct pci_dev *pdev;
7459
7460 	/* Obtain PCI device reference */
7461 	if (!phba->pcidev)
7462 		return;
7463 	else
7464 		pdev = phba->pcidev;
7465 	/* Release PCI resource and disable PCI device */
7466 	pci_release_mem_regions(pdev);
7467 	pci_disable_device(pdev);
7468
7469 	return;
7470 }
7471
7472 /**
7473 * lpfc_reset_hba - Reset a hba
7474 * @phba: pointer to lpfc hba data structure.
7475 *
7476 * This routine is invoked to reset a hba device. It brings the HBA
7477 * offline, performs a board restart, and then brings the board back
7478 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
7479 * any outstanding mailbox commands.
7480 **/
7481 void
7482 lpfc_reset_hba(struct lpfc_hba *phba)
7483 {
7484 	/* If resets are disabled then set error state and return. */
7485 	if (!phba->cfg_enable_hba_reset) {
7486 		phba->link_state = LPFC_HBA_ERROR;
7487 		return;
7488 	}
7489
7490 	/* If not LPFC_SLI_ACTIVE, force all IO to be flushed */
7491 	if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) {
7492 		lpfc_offline_prep(phba, LPFC_MBX_WAIT);
7493 	} else {
7494 		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
7495 		lpfc_sli_flush_io_rings(phba);
7496 	}
7497 	lpfc_offline(phba);
7498 	lpfc_sli_brdrestart(phba);
7499 	lpfc_online(phba);
7500 	lpfc_unblock_mgmt_io(phba);
7501 }
7502
7503 /**
7504 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
7505 * @phba: pointer to lpfc hba data structure.
7506 *
7507 * This function reads the SR-IOV extended capability of the attached
7508 * PCI function and returns its TotalVFs value, i.e. the maximum number
7509 * of virtual functions the device can support. A return value of 0
7510 * indicates that the function does not advertise the SR-IOV extended
7511 * capability.
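 *
 * Typical use is to bound an SR-IOV enable request, as the probe
 * routine below does:
 *
 *	max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
 *	if (nr_vfn > max_nr_vfn)
 *		return -EINVAL;
 *	rc = pci_enable_sriov(phba->pcidev, nr_vfn);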
7512 **/ 7513 uint16_t 7514 lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba) 7515 { 7516 struct pci_dev *pdev = phba->pcidev; 7517 uint16_t nr_virtfn; 7518 int pos; 7519 7520 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); 7521 if (pos == 0) 7522 return 0; 7523 7524 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn); 7525 return nr_virtfn; 7526 } 7527 7528 /** 7529 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions 7530 * @phba: pointer to lpfc hba data structure. 7531 * @nr_vfn: number of virtual functions to be enabled. 7532 * 7533 * This function enables the PCI SR-IOV virtual functions to a physical 7534 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to 7535 * enable the number of virtual functions to the physical function. As 7536 * not all devices support SR-IOV, the return code from the pci_enable_sriov() 7537 * API call does not considered as an error condition for most of the device. 7538 **/ 7539 int 7540 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn) 7541 { 7542 struct pci_dev *pdev = phba->pcidev; 7543 uint16_t max_nr_vfn; 7544 int rc; 7545 7546 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba); 7547 if (nr_vfn > max_nr_vfn) { 7548 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7549 "3057 Requested vfs (%d) greater than " 7550 "supported vfs (%d)", nr_vfn, max_nr_vfn); 7551 return -EINVAL; 7552 } 7553 7554 rc = pci_enable_sriov(pdev, nr_vfn); 7555 if (rc) { 7556 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7557 "2806 Failed to enable sriov on this device " 7558 "with vfn number nr_vf:%d, rc:%d\n", 7559 nr_vfn, rc); 7560 } else 7561 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7562 "2807 Successful enable sriov on this device " 7563 "with vfn number nr_vf:%d\n", nr_vfn); 7564 return rc; 7565 } 7566 7567 static void 7568 lpfc_unblock_requests_work(struct work_struct *work) 7569 { 7570 struct lpfc_hba *phba = container_of(work, struct lpfc_hba, 7571 unblock_request_work); 7572 7573 lpfc_unblock_requests(phba); 7574 } 7575 7576 /** 7577 * lpfc_setup_driver_resource_phase1 - Phase1 etup driver internal resources. 7578 * @phba: pointer to lpfc hba data structure. 7579 * 7580 * This routine is invoked to set up the driver internal resources before the 7581 * device specific resource setup to support the HBA device it attached to. 7582 * 7583 * Return codes 7584 * 0 - successful 7585 * other values - error 7586 **/ 7587 static int 7588 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) 7589 { 7590 struct lpfc_sli *psli = &phba->sli; 7591 7592 /* 7593 * Driver resources common to all SLI revisions 7594 */ 7595 atomic_set(&phba->fast_event_count, 0); 7596 atomic_set(&phba->dbg_log_idx, 0); 7597 atomic_set(&phba->dbg_log_cnt, 0); 7598 atomic_set(&phba->dbg_log_dmping, 0); 7599 spin_lock_init(&phba->hbalock); 7600 7601 /* Initialize port_list spinlock */ 7602 spin_lock_init(&phba->port_list_lock); 7603 INIT_LIST_HEAD(&phba->port_list); 7604 7605 INIT_LIST_HEAD(&phba->work_list); 7606 init_waitqueue_head(&phba->wait_4_mlo_m_q); 7607 7608 /* Initialize the wait queue head for the kernel thread */ 7609 init_waitqueue_head(&phba->work_waitq); 7610 7611 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7612 "1403 Protocols supported %s %s %s\n", 7613 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ? 7614 "SCSI" : " "), 7615 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ? 7616 "NVME" : " "), 7617 (phba->nvmet_support ? 
"NVMET" : " ")); 7618 7619 /* Initialize the IO buffer list used by driver for SLI3 SCSI */ 7620 spin_lock_init(&phba->scsi_buf_list_get_lock); 7621 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get); 7622 spin_lock_init(&phba->scsi_buf_list_put_lock); 7623 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); 7624 7625 /* Initialize the fabric iocb list */ 7626 INIT_LIST_HEAD(&phba->fabric_iocb_list); 7627 7628 /* Initialize list to save ELS buffers */ 7629 INIT_LIST_HEAD(&phba->elsbuf); 7630 7631 /* Initialize FCF connection rec list */ 7632 INIT_LIST_HEAD(&phba->fcf_conn_rec_list); 7633 7634 /* Initialize OAS configuration list */ 7635 spin_lock_init(&phba->devicelock); 7636 INIT_LIST_HEAD(&phba->luns); 7637 7638 /* MBOX heartbeat timer */ 7639 timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0); 7640 /* Fabric block timer */ 7641 timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0); 7642 /* EA polling mode timer */ 7643 timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0); 7644 /* Heartbeat timer */ 7645 timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0); 7646 7647 INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work); 7648 7649 INIT_DELAYED_WORK(&phba->idle_stat_delay_work, 7650 lpfc_idle_stat_delay_work); 7651 INIT_WORK(&phba->unblock_request_work, lpfc_unblock_requests_work); 7652 return 0; 7653 } 7654 7655 /** 7656 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev 7657 * @phba: pointer to lpfc hba data structure. 7658 * 7659 * This routine is invoked to set up the driver internal resources specific to 7660 * support the SLI-3 HBA device it attached to. 7661 * 7662 * Return codes 7663 * 0 - successful 7664 * other values - error 7665 **/ 7666 static int 7667 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) 7668 { 7669 int rc, entry_sz; 7670 7671 /* 7672 * Initialize timers used by driver 7673 */ 7674 7675 /* FCP polling mode timer */ 7676 timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0); 7677 7678 /* Host attention work mask setup */ 7679 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); 7680 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 7681 7682 /* Get all the module params for configuring this host */ 7683 lpfc_get_cfgparam(phba); 7684 /* Set up phase-1 common device driver resources */ 7685 7686 rc = lpfc_setup_driver_resource_phase1(phba); 7687 if (rc) 7688 return -ENODEV; 7689 7690 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) { 7691 phba->menlo_flag |= HBA_MENLO_SUPPORT; 7692 /* check for menlo minimum sg count */ 7693 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) 7694 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT; 7695 } 7696 7697 if (!phba->sli.sli3_ring) 7698 phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING, 7699 sizeof(struct lpfc_sli_ring), 7700 GFP_KERNEL); 7701 if (!phba->sli.sli3_ring) 7702 return -ENOMEM; 7703 7704 /* 7705 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size 7706 * used to create the sg_dma_buf_pool must be dynamically calculated. 7707 */ 7708 7709 if (phba->sli_rev == LPFC_SLI_REV4) 7710 entry_sz = sizeof(struct sli4_sge); 7711 else 7712 entry_sz = sizeof(struct ulp_bde64); 7713 7714 /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */ 7715 if (phba->cfg_enable_bg) { 7716 /* 7717 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd, 7718 * the FCP rsp, and a BDE for each. 
Sice we have no control 7719 * over how many protection data segments the SCSI Layer 7720 * will hand us (ie: there could be one for every block 7721 * in the IO), we just allocate enough BDEs to accomidate 7722 * our max amount and we need to limit lpfc_sg_seg_cnt to 7723 * minimize the risk of running out. 7724 */ 7725 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 7726 sizeof(struct fcp_rsp) + 7727 (LPFC_MAX_SG_SEG_CNT * entry_sz); 7728 7729 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF) 7730 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF; 7731 7732 /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */ 7733 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT; 7734 } else { 7735 /* 7736 * The scsi_buf for a regular I/O will hold the FCP cmnd, 7737 * the FCP rsp, a BDE for each, and a BDE for up to 7738 * cfg_sg_seg_cnt data segments. 7739 */ 7740 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 7741 sizeof(struct fcp_rsp) + 7742 ((phba->cfg_sg_seg_cnt + 2) * entry_sz); 7743 7744 /* Total BDEs in BPL for scsi_sg_list */ 7745 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2; 7746 } 7747 7748 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 7749 "9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n", 7750 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, 7751 phba->cfg_total_seg_cnt); 7752 7753 phba->max_vpi = LPFC_MAX_VPI; 7754 /* This will be set to correct value after config_port mbox */ 7755 phba->max_vports = 0; 7756 7757 /* 7758 * Initialize the SLI Layer to run with lpfc HBAs. 7759 */ 7760 lpfc_sli_setup(phba); 7761 lpfc_sli_queue_init(phba); 7762 7763 /* Allocate device driver memory */ 7764 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ)) 7765 return -ENOMEM; 7766 7767 phba->lpfc_sg_dma_buf_pool = 7768 dma_pool_create("lpfc_sg_dma_buf_pool", 7769 &phba->pcidev->dev, phba->cfg_sg_dma_buf_size, 7770 BPL_ALIGN_SZ, 0); 7771 7772 if (!phba->lpfc_sg_dma_buf_pool) 7773 goto fail_free_mem; 7774 7775 phba->lpfc_cmd_rsp_buf_pool = 7776 dma_pool_create("lpfc_cmd_rsp_buf_pool", 7777 &phba->pcidev->dev, 7778 sizeof(struct fcp_cmnd) + 7779 sizeof(struct fcp_rsp), 7780 BPL_ALIGN_SZ, 0); 7781 7782 if (!phba->lpfc_cmd_rsp_buf_pool) 7783 goto fail_free_dma_buf_pool; 7784 7785 /* 7786 * Enable sr-iov virtual functions if supported and configured 7787 * through the module parameter. 7788 */ 7789 if (phba->cfg_sriov_nr_virtfn > 0) { 7790 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, 7791 phba->cfg_sriov_nr_virtfn); 7792 if (rc) { 7793 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7794 "2808 Requested number of SR-IOV " 7795 "virtual functions (%d) is not " 7796 "supported\n", 7797 phba->cfg_sriov_nr_virtfn); 7798 phba->cfg_sriov_nr_virtfn = 0; 7799 } 7800 } 7801 7802 return 0; 7803 7804 fail_free_dma_buf_pool: 7805 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool); 7806 phba->lpfc_sg_dma_buf_pool = NULL; 7807 fail_free_mem: 7808 lpfc_mem_free(phba); 7809 return -ENOMEM; 7810 } 7811 7812 /** 7813 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev 7814 * @phba: pointer to lpfc hba data structure. 7815 * 7816 * This routine is invoked to unset the driver internal resources set up 7817 * specific for supporting the SLI-3 HBA device it attached to. 
7818 **/ 7819 static void 7820 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) 7821 { 7822 /* Free device driver memory allocated */ 7823 lpfc_mem_free_all(phba); 7824 7825 return; 7826 } 7827 7828 /** 7829 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev 7830 * @phba: pointer to lpfc hba data structure. 7831 * 7832 * This routine is invoked to set up the driver internal resources specific to 7833 * support the SLI-4 HBA device it attached to. 7834 * 7835 * Return codes 7836 * 0 - successful 7837 * other values - error 7838 **/ 7839 static int 7840 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) 7841 { 7842 LPFC_MBOXQ_t *mboxq; 7843 MAILBOX_t *mb; 7844 int rc, i, max_buf_size; 7845 int longs; 7846 int extra; 7847 uint64_t wwn; 7848 u32 if_type; 7849 u32 if_fam; 7850 7851 phba->sli4_hba.num_present_cpu = lpfc_present_cpu; 7852 phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1; 7853 phba->sli4_hba.curr_disp_cpu = 0; 7854 7855 /* Get all the module params for configuring this host */ 7856 lpfc_get_cfgparam(phba); 7857 7858 /* Set up phase-1 common device driver resources */ 7859 rc = lpfc_setup_driver_resource_phase1(phba); 7860 if (rc) 7861 return -ENODEV; 7862 7863 /* Before proceed, wait for POST done and device ready */ 7864 rc = lpfc_sli4_post_status_check(phba); 7865 if (rc) 7866 return -ENODEV; 7867 7868 /* Allocate all driver workqueues here */ 7869 7870 /* The lpfc_wq workqueue for deferred irq use */ 7871 phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0); 7872 7873 /* 7874 * Initialize timers used by driver 7875 */ 7876 7877 timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0); 7878 7879 /* FCF rediscover timer */ 7880 timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0); 7881 7882 /* CMF congestion timer */ 7883 hrtimer_init(&phba->cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 7884 phba->cmf_timer.function = lpfc_cmf_timer; 7885 7886 /* 7887 * Control structure for handling external multi-buffer mailbox 7888 * command pass-through. 7889 */ 7890 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0, 7891 sizeof(struct lpfc_mbox_ext_buf_ctx)); 7892 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list); 7893 7894 phba->max_vpi = LPFC_MAX_VPI; 7895 7896 /* This will be set to correct value after the read_config mbox */ 7897 phba->max_vports = 0; 7898 7899 /* Program the default value of vlan_id and fc_map */ 7900 phba->valid_vlan = 0; 7901 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 7902 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 7903 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 7904 7905 /* 7906 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands 7907 * we will associate a new ring, for each EQ/CQ/WQ tuple. 7908 * The WQ create will allocate the ring. 7909 */ 7910 7911 /* Initialize buffer queue management fields */ 7912 INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list); 7913 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc; 7914 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free; 7915 7916 /* for VMID idle timeout if VMID is enabled */ 7917 if (lpfc_is_vmid_enabled(phba)) 7918 timer_setup(&phba->inactive_vmid_poll, lpfc_vmid_poll, 0); 7919 7920 /* 7921 * Initialize the SLI Layer to run with lpfc SLI4 HBAs. 
7922 */ 7923 /* Initialize the Abort buffer list used by driver */ 7924 spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock); 7925 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list); 7926 7927 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 7928 /* Initialize the Abort nvme buffer list used by driver */ 7929 spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock); 7930 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 7931 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list); 7932 spin_lock_init(&phba->sli4_hba.t_active_list_lock); 7933 INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list); 7934 } 7935 7936 /* This abort list used by worker thread */ 7937 spin_lock_init(&phba->sli4_hba.sgl_list_lock); 7938 spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock); 7939 spin_lock_init(&phba->sli4_hba.asynce_list_lock); 7940 spin_lock_init(&phba->sli4_hba.els_xri_abrt_list_lock); 7941 7942 /* 7943 * Initialize driver internal slow-path work queues 7944 */ 7945 7946 /* Driver internel slow-path CQ Event pool */ 7947 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool); 7948 /* Response IOCB work queue list */ 7949 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event); 7950 /* Asynchronous event CQ Event work queue list */ 7951 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue); 7952 /* Slow-path XRI aborted CQ Event work queue list */ 7953 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue); 7954 /* Receive queue CQ Event work queue list */ 7955 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue); 7956 7957 /* Initialize extent block lists. */ 7958 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list); 7959 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list); 7960 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list); 7961 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list); 7962 7963 /* Initialize mboxq lists. If the early init routines fail 7964 * these lists need to be correctly initialized. 7965 */ 7966 INIT_LIST_HEAD(&phba->sli.mboxq); 7967 INIT_LIST_HEAD(&phba->sli.mboxq_cmpl); 7968 7969 /* initialize optic_state to 0xFF */ 7970 phba->sli4_hba.lnk_info.optic_state = 0xff; 7971 7972 /* Allocate device driver memory */ 7973 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ); 7974 if (rc) 7975 return -ENOMEM; 7976 7977 /* IF Type 2 ports get initialized now. */ 7978 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= 7979 LPFC_SLI_INTF_IF_TYPE_2) { 7980 rc = lpfc_pci_function_reset(phba); 7981 if (unlikely(rc)) { 7982 rc = -ENODEV; 7983 goto out_free_mem; 7984 } 7985 phba->temp_sensor_support = 1; 7986 } 7987 7988 /* Create the bootstrap mailbox command */ 7989 rc = lpfc_create_bootstrap_mbox(phba); 7990 if (unlikely(rc)) 7991 goto out_free_mem; 7992 7993 /* Set up the host's endian order with the device. */ 7994 rc = lpfc_setup_endian_order(phba); 7995 if (unlikely(rc)) 7996 goto out_free_bsmbx; 7997 7998 /* Set up the hba's configuration parameters. */ 7999 rc = lpfc_sli4_read_config(phba); 8000 if (unlikely(rc)) 8001 goto out_free_bsmbx; 8002 8003 if (phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG) { 8004 /* Right now the link is down, if FA-PWWN is configured the 8005 * firmware will try FLOGI before the driver gets a link up. 8006 * If it fails, the driver should get a MISCONFIGURED async 8007 * event which will clear this flag. The only notification 8008 * the driver gets is if it fails, if it succeeds there is no 8009 * notification given. Assume success. 
8010 */ 8011 phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC; 8012 } 8013 8014 rc = lpfc_mem_alloc_active_rrq_pool_s4(phba); 8015 if (unlikely(rc)) 8016 goto out_free_bsmbx; 8017 8018 /* IF Type 0 ports get initialized now. */ 8019 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 8020 LPFC_SLI_INTF_IF_TYPE_0) { 8021 rc = lpfc_pci_function_reset(phba); 8022 if (unlikely(rc)) 8023 goto out_free_bsmbx; 8024 } 8025 8026 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 8027 GFP_KERNEL); 8028 if (!mboxq) { 8029 rc = -ENOMEM; 8030 goto out_free_bsmbx; 8031 } 8032 8033 /* Check for NVMET being configured */ 8034 phba->nvmet_support = 0; 8035 if (lpfc_enable_nvmet_cnt) { 8036 8037 /* First get WWN of HBA instance */ 8038 lpfc_read_nv(phba, mboxq); 8039 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 8040 if (rc != MBX_SUCCESS) { 8041 lpfc_printf_log(phba, KERN_ERR, 8042 LOG_TRACE_EVENT, 8043 "6016 Mailbox failed , mbxCmd x%x " 8044 "READ_NV, mbxStatus x%x\n", 8045 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 8046 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 8047 mempool_free(mboxq, phba->mbox_mem_pool); 8048 rc = -EIO; 8049 goto out_free_bsmbx; 8050 } 8051 mb = &mboxq->u.mb; 8052 memcpy(&wwn, (char *)mb->un.varRDnvp.nodename, 8053 sizeof(uint64_t)); 8054 wwn = cpu_to_be64(wwn); 8055 phba->sli4_hba.wwnn.u.name = wwn; 8056 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, 8057 sizeof(uint64_t)); 8058 /* wwn is WWPN of HBA instance */ 8059 wwn = cpu_to_be64(wwn); 8060 phba->sli4_hba.wwpn.u.name = wwn; 8061 8062 /* Check to see if it matches any module parameter */ 8063 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) { 8064 if (wwn == lpfc_enable_nvmet[i]) { 8065 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 8066 if (lpfc_nvmet_mem_alloc(phba)) 8067 break; 8068 8069 phba->nvmet_support = 1; /* a match */ 8070 8071 lpfc_printf_log(phba, KERN_ERR, 8072 LOG_TRACE_EVENT, 8073 "6017 NVME Target %016llx\n", 8074 wwn); 8075 #else 8076 lpfc_printf_log(phba, KERN_ERR, 8077 LOG_TRACE_EVENT, 8078 "6021 Can't enable NVME Target." 8079 " NVME_TARGET_FC infrastructure" 8080 " is not in kernel\n"); 8081 #endif 8082 /* Not supported for NVMET */ 8083 phba->cfg_xri_rebalancing = 0; 8084 if (phba->irq_chann_mode == NHT_MODE) { 8085 phba->cfg_irq_chann = 8086 phba->sli4_hba.num_present_cpu; 8087 phba->cfg_hdw_queue = 8088 phba->sli4_hba.num_present_cpu; 8089 phba->irq_chann_mode = NORMAL_MODE; 8090 } 8091 break; 8092 } 8093 } 8094 } 8095 8096 lpfc_nvme_mod_param_dep(phba); 8097 8098 /* 8099 * Get sli4 parameters that override parameters from Port capabilities. 8100 * If this call fails, it isn't critical unless the SLI4 parameters come 8101 * back in conflict. 
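	 * The only configuration allowed to continue after a failure here
	 * is an IF_TYPE_0 / BE2 function that does not have both extents
	 * and RPI headers in use; every other case frees the mailbox and
	 * fails the setup with -EIO.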
8102 	 */
8103 	rc = lpfc_get_sli4_parameters(phba, mboxq);
8104 	if (rc) {
8105 		if_type = bf_get(lpfc_sli_intf_if_type,
8106 				 &phba->sli4_hba.sli_intf);
8107 		if_fam = bf_get(lpfc_sli_intf_sli_family,
8108 				&phba->sli4_hba.sli_intf);
8109 		if (phba->sli4_hba.extents_in_use &&
8110 		    phba->sli4_hba.rpi_hdrs_in_use) {
8111 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8112 					"2999 Unsupported SLI4 Parameters "
8113 					"Extents and RPI headers enabled.\n");
8114 			if (if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
8115 			    if_fam == LPFC_SLI_INTF_FAMILY_BE2) {
8116 				mempool_free(mboxq, phba->mbox_mem_pool);
8117 				rc = -EIO;
8118 				goto out_free_bsmbx;
8119 			}
8120 		}
8121 		if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
8122 		      if_fam == LPFC_SLI_INTF_FAMILY_BE2)) {
8123 			mempool_free(mboxq, phba->mbox_mem_pool);
8124 			rc = -EIO;
8125 			goto out_free_bsmbx;
8126 		}
8127 	}
8128
8129 	/*
8130 	 * 1 for cmd, 1 for rsp, NVME adds an extra one
8131 	 * for boundary conditions in its max_sgl_segment template.
8132 	 */
8133 	extra = 2;
8134 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
8135 		extra++;
8136
8137 	/*
8138 	 * It doesn't matter what family our adapter is in, we are
8139 	 * limited to 2 Pages, 512 SGEs, for our SGL.
8140 	 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
8141 	 */
8142 	max_buf_size = (2 * SLI4_PAGE_SIZE);
8143
8144 	/*
8145 	 * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
8146 	 * used to create the sg_dma_buf_pool must be calculated.
8147 	 */
8148 	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
8149 		/* Both cfg_enable_bg and cfg_external_dif code paths */
8150
8151 		/*
8152 		 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
8153 		 * the FCP rsp, and a SGE. Since we have no control
8154 		 * over how many protection segments the SCSI Layer
8155 		 * will hand us (ie: there could be one for every block
8156 		 * in the IO), just allocate enough SGEs to accommodate
8157 		 * our max amount and we need to limit lpfc_sg_seg_cnt
8158 		 * to minimize the risk of running out.
8159 		 */
8160 		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
8161 				sizeof(struct fcp_rsp) + max_buf_size;
8162
8163 		/* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
8164 		phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
8165
8166 		/*
8167 		 * If supporting DIF, reduce the seg count for scsi to
8168 		 * allow room for the DIF sges.
8169 		 */
8170 		if (phba->cfg_enable_bg &&
8171 		    phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
8172 			phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
8173 		else
8174 			phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
8175
8176 	} else {
8177 		/*
8178 		 * The scsi_buf for a regular I/O holds the FCP cmnd,
8179 		 * the FCP rsp, a SGE for each, and a SGE for up to
8180 		 * cfg_sg_seg_cnt data segments.
8181 		 */
8182 		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
8183 				sizeof(struct fcp_rsp) +
8184 				((phba->cfg_sg_seg_cnt + extra) *
8185 				sizeof(struct sli4_sge));
8186
8187 		/* Total SGEs for scsi_sg_list */
8188 		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
8189 		phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
8190
8191 		/*
8192 		 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
8193 		 * need to post 1 page for the SGL.
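		 * For example, with a 16-byte SLI4 SGE and a 4KB SLI4
		 * page, 4096 / 16 = 256 SGEs fit in one page, so any
		 * scatter list of (cfg_sg_seg_cnt + extra) entries up
		 * to 256 is covered by a single SGL page.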
8194 */ 8195 } 8196 8197 if (phba->cfg_xpsgl && !phba->nvmet_support) 8198 phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE; 8199 else if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ) 8200 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ; 8201 else 8202 phba->cfg_sg_dma_buf_size = 8203 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size); 8204 8205 phba->border_sge_num = phba->cfg_sg_dma_buf_size / 8206 sizeof(struct sli4_sge); 8207 8208 /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */ 8209 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 8210 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) { 8211 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT, 8212 "6300 Reducing NVME sg segment " 8213 "cnt to %d\n", 8214 LPFC_MAX_NVME_SEG_CNT); 8215 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT; 8216 } else 8217 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt; 8218 } 8219 8220 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 8221 "9087 sg_seg_cnt:%d dmabuf_size:%d " 8222 "total:%d scsi:%d nvme:%d\n", 8223 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, 8224 phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt, 8225 phba->cfg_nvme_seg_cnt); 8226 8227 if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE) 8228 i = phba->cfg_sg_dma_buf_size; 8229 else 8230 i = SLI4_PAGE_SIZE; 8231 8232 phba->lpfc_sg_dma_buf_pool = 8233 dma_pool_create("lpfc_sg_dma_buf_pool", 8234 &phba->pcidev->dev, 8235 phba->cfg_sg_dma_buf_size, 8236 i, 0); 8237 if (!phba->lpfc_sg_dma_buf_pool) 8238 goto out_free_bsmbx; 8239 8240 phba->lpfc_cmd_rsp_buf_pool = 8241 dma_pool_create("lpfc_cmd_rsp_buf_pool", 8242 &phba->pcidev->dev, 8243 sizeof(struct fcp_cmnd) + 8244 sizeof(struct fcp_rsp), 8245 i, 0); 8246 if (!phba->lpfc_cmd_rsp_buf_pool) 8247 goto out_free_sg_dma_buf; 8248 8249 mempool_free(mboxq, phba->mbox_mem_pool); 8250 8251 /* Verify OAS is supported */ 8252 lpfc_sli4_oas_verify(phba); 8253 8254 /* Verify RAS support on adapter */ 8255 lpfc_sli4_ras_init(phba); 8256 8257 /* Verify all the SLI4 queues */ 8258 rc = lpfc_sli4_queue_verify(phba); 8259 if (rc) 8260 goto out_free_cmd_rsp_buf; 8261 8262 /* Create driver internal CQE event pool */ 8263 rc = lpfc_sli4_cq_event_pool_create(phba); 8264 if (rc) 8265 goto out_free_cmd_rsp_buf; 8266 8267 /* Initialize sgl lists per host */ 8268 lpfc_init_sgl_list(phba); 8269 8270 /* Allocate and initialize active sgl array */ 8271 rc = lpfc_init_active_sgl_array(phba); 8272 if (rc) { 8273 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8274 "1430 Failed to initialize sgl list.\n"); 8275 goto out_destroy_cq_event_pool; 8276 } 8277 rc = lpfc_sli4_init_rpi_hdrs(phba); 8278 if (rc) { 8279 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8280 "1432 Failed to initialize rpi headers.\n"); 8281 goto out_free_active_sgl; 8282 } 8283 8284 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */ 8285 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; 8286 phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long), 8287 GFP_KERNEL); 8288 if (!phba->fcf.fcf_rr_bmask) { 8289 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8290 "2759 Failed allocate memory for FCF round " 8291 "robin failover bmask\n"); 8292 rc = -ENOMEM; 8293 goto out_remove_rpi_hdrs; 8294 } 8295 8296 phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann, 8297 sizeof(struct lpfc_hba_eq_hdl), 8298 GFP_KERNEL); 8299 if (!phba->sli4_hba.hba_eq_hdl) { 8300 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8301 "2572 Failed allocate memory for " 8302 "fast-path per-EQ handle array\n"); 8303 rc = -ENOMEM; 
8304 goto out_free_fcf_rr_bmask; 8305 } 8306 8307 phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu, 8308 sizeof(struct lpfc_vector_map_info), 8309 GFP_KERNEL); 8310 if (!phba->sli4_hba.cpu_map) { 8311 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8312 "3327 Failed allocate memory for msi-x " 8313 "interrupt vector mapping\n"); 8314 rc = -ENOMEM; 8315 goto out_free_hba_eq_hdl; 8316 } 8317 8318 phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info); 8319 if (!phba->sli4_hba.eq_info) { 8320 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8321 "3321 Failed allocation for per_cpu stats\n"); 8322 rc = -ENOMEM; 8323 goto out_free_hba_cpu_map; 8324 } 8325 8326 phba->sli4_hba.idle_stat = kcalloc(phba->sli4_hba.num_possible_cpu, 8327 sizeof(*phba->sli4_hba.idle_stat), 8328 GFP_KERNEL); 8329 if (!phba->sli4_hba.idle_stat) { 8330 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8331 "3390 Failed allocation for idle_stat\n"); 8332 rc = -ENOMEM; 8333 goto out_free_hba_eq_info; 8334 } 8335 8336 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 8337 phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat); 8338 if (!phba->sli4_hba.c_stat) { 8339 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8340 "3332 Failed allocating per cpu hdwq stats\n"); 8341 rc = -ENOMEM; 8342 goto out_free_hba_idle_stat; 8343 } 8344 #endif 8345 8346 phba->cmf_stat = alloc_percpu(struct lpfc_cgn_stat); 8347 if (!phba->cmf_stat) { 8348 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8349 "3331 Failed allocating per cpu cgn stats\n"); 8350 rc = -ENOMEM; 8351 goto out_free_hba_hdwq_info; 8352 } 8353 8354 /* 8355 * Enable sr-iov virtual functions if supported and configured 8356 * through the module parameter. 8357 */ 8358 if (phba->cfg_sriov_nr_virtfn > 0) { 8359 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, 8360 phba->cfg_sriov_nr_virtfn); 8361 if (rc) { 8362 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 8363 "3020 Requested number of SR-IOV " 8364 "virtual functions (%d) is not " 8365 "supported\n", 8366 phba->cfg_sriov_nr_virtfn); 8367 phba->cfg_sriov_nr_virtfn = 0; 8368 } 8369 } 8370 8371 return 0; 8372 8373 out_free_hba_hdwq_info: 8374 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 8375 free_percpu(phba->sli4_hba.c_stat); 8376 out_free_hba_idle_stat: 8377 #endif 8378 kfree(phba->sli4_hba.idle_stat); 8379 out_free_hba_eq_info: 8380 free_percpu(phba->sli4_hba.eq_info); 8381 out_free_hba_cpu_map: 8382 kfree(phba->sli4_hba.cpu_map); 8383 out_free_hba_eq_hdl: 8384 kfree(phba->sli4_hba.hba_eq_hdl); 8385 out_free_fcf_rr_bmask: 8386 kfree(phba->fcf.fcf_rr_bmask); 8387 out_remove_rpi_hdrs: 8388 lpfc_sli4_remove_rpi_hdrs(phba); 8389 out_free_active_sgl: 8390 lpfc_free_active_sgl(phba); 8391 out_destroy_cq_event_pool: 8392 lpfc_sli4_cq_event_pool_destroy(phba); 8393 out_free_cmd_rsp_buf: 8394 dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool); 8395 phba->lpfc_cmd_rsp_buf_pool = NULL; 8396 out_free_sg_dma_buf: 8397 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool); 8398 phba->lpfc_sg_dma_buf_pool = NULL; 8399 out_free_bsmbx: 8400 lpfc_destroy_bootstrap_mbox(phba); 8401 out_free_mem: 8402 lpfc_mem_free(phba); 8403 return rc; 8404 } 8405 8406 /** 8407 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev 8408 * @phba: pointer to lpfc hba data structure. 8409 * 8410 * This routine is invoked to unset the driver internal resources set up 8411 * specific for supporting the SLI-4 HBA device it attached to. 
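 *
 * Resources are released in roughly the reverse order of
 * lpfc_sli4_driver_resource_setup(): per-CPU statistics, the CPU/EQ
 * mapping and EQ handle arrays, RPI headers and RPIs, the FCF
 * round-robin bitmask, the active/ELS/NVMET sgl lists, the CQ event
 * pool, resource identifiers, the bootstrap mailbox, the SLI memory
 * pools, and finally the FCF connection record list.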
8412 **/ 8413 static void 8414 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) 8415 { 8416 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; 8417 8418 free_percpu(phba->sli4_hba.eq_info); 8419 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 8420 free_percpu(phba->sli4_hba.c_stat); 8421 #endif 8422 free_percpu(phba->cmf_stat); 8423 kfree(phba->sli4_hba.idle_stat); 8424 8425 /* Free memory allocated for msi-x interrupt vector to CPU mapping */ 8426 kfree(phba->sli4_hba.cpu_map); 8427 phba->sli4_hba.num_possible_cpu = 0; 8428 phba->sli4_hba.num_present_cpu = 0; 8429 phba->sli4_hba.curr_disp_cpu = 0; 8430 cpumask_clear(&phba->sli4_hba.irq_aff_mask); 8431 8432 /* Free memory allocated for fast-path work queue handles */ 8433 kfree(phba->sli4_hba.hba_eq_hdl); 8434 8435 /* Free the allocated rpi headers. */ 8436 lpfc_sli4_remove_rpi_hdrs(phba); 8437 lpfc_sli4_remove_rpis(phba); 8438 8439 /* Free eligible FCF index bmask */ 8440 kfree(phba->fcf.fcf_rr_bmask); 8441 8442 /* Free the ELS sgl list */ 8443 lpfc_free_active_sgl(phba); 8444 lpfc_free_els_sgl_list(phba); 8445 lpfc_free_nvmet_sgl_list(phba); 8446 8447 /* Free the completion queue EQ event pool */ 8448 lpfc_sli4_cq_event_release_all(phba); 8449 lpfc_sli4_cq_event_pool_destroy(phba); 8450 8451 /* Release resource identifiers. */ 8452 lpfc_sli4_dealloc_resource_identifiers(phba); 8453 8454 /* Free the bsmbx region. */ 8455 lpfc_destroy_bootstrap_mbox(phba); 8456 8457 /* Free the SLI Layer memory with SLI4 HBAs */ 8458 lpfc_mem_free_all(phba); 8459 8460 /* Free the current connect table */ 8461 list_for_each_entry_safe(conn_entry, next_conn_entry, 8462 &phba->fcf_conn_rec_list, list) { 8463 list_del_init(&conn_entry->list); 8464 kfree(conn_entry); 8465 } 8466 8467 return; 8468 } 8469 8470 /** 8471 * lpfc_init_api_table_setup - Set up init api function jump table 8472 * @phba: The hba struct for which this call is being executed. 8473 * @dev_grp: The HBA PCI-Device group number. 8474 * 8475 * This routine sets up the device INIT interface API function jump table 8476 * in @phba struct. 8477 * 8478 * Returns: 0 - success, -ENODEV - failure. 8479 **/ 8480 int 8481 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 8482 { 8483 phba->lpfc_hba_init_link = lpfc_hba_init_link; 8484 phba->lpfc_hba_down_link = lpfc_hba_down_link; 8485 phba->lpfc_selective_reset = lpfc_selective_reset; 8486 switch (dev_grp) { 8487 case LPFC_PCI_DEV_LP: 8488 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; 8489 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3; 8490 phba->lpfc_stop_port = lpfc_stop_port_s3; 8491 break; 8492 case LPFC_PCI_DEV_OC: 8493 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4; 8494 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4; 8495 phba->lpfc_stop_port = lpfc_stop_port_s4; 8496 break; 8497 default: 8498 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8499 "1431 Invalid HBA PCI-device group: 0x%x\n", 8500 dev_grp); 8501 return -ENODEV; 8502 } 8503 return 0; 8504 } 8505 8506 /** 8507 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources. 8508 * @phba: pointer to lpfc hba data structure. 8509 * 8510 * This routine is invoked to set up the driver internal resources after the 8511 * device specific resource setup to support the HBA device it attached to. 8512 * 8513 * Return codes 8514 * 0 - successful 8515 * other values - error 8516 **/ 8517 static int 8518 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba) 8519 { 8520 int error; 8521 8522 /* Startup the kernel thread for this host adapter. 
 */
8523 	phba->worker_thread = kthread_run(lpfc_do_work, phba,
8524 					  "lpfc_worker_%d", phba->brd_no);
8525 	if (IS_ERR(phba->worker_thread)) {
8526 		error = PTR_ERR(phba->worker_thread);
8527 		return error;
8528 	}
8529
8530 	return 0;
8531 }
8532
8533 /**
8534 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
8535 * @phba: pointer to lpfc hba data structure.
8536 *
8537 * This routine is invoked to unset the driver internal resources set up after
8538 * the device specific resource setup for supporting the HBA device it
8539 * attached to.
8540 **/
8541 static void
8542 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
8543 {
8544 	if (phba->wq) {
8545 		destroy_workqueue(phba->wq);
8546 		phba->wq = NULL;
8547 	}
8548
8549 	/* Stop kernel worker thread */
8550 	if (phba->worker_thread)
8551 		kthread_stop(phba->worker_thread);
8552 }
8553
8554 /**
8555 * lpfc_free_iocb_list - Free iocb list.
8556 * @phba: pointer to lpfc hba data structure.
8557 *
8558 * This routine is invoked to free the driver's IOCB list and memory.
8559 **/
8560 void
8561 lpfc_free_iocb_list(struct lpfc_hba *phba)
8562 {
8563 	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
8564
8565 	spin_lock_irq(&phba->hbalock);
8566 	list_for_each_entry_safe(iocbq_entry, iocbq_next,
8567 				 &phba->lpfc_iocb_list, list) {
8568 		list_del(&iocbq_entry->list);
8569 		kfree(iocbq_entry);
8570 		phba->total_iocbq_bufs--;
8571 	}
8572 	spin_unlock_irq(&phba->hbalock);
8573
8574 	return;
8575 }
8576
8577 /**
8578 * lpfc_init_iocb_list - Allocate and initialize iocb list.
8579 * @phba: pointer to lpfc hba data structure.
8580 * @iocb_count: number of requested iocbs
8581 *
8582 * This routine is invoked to allocate and initialize the driver's IOCB
8583 * list and set up the IOCB tag array accordingly.
8584 *
8585 * Return codes
8586 *	0 - successful
8587 *	other values - error
8588 **/
8589 int
8590 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
8591 {
8592 	struct lpfc_iocbq *iocbq_entry = NULL;
8593 	uint16_t iotag;
8594 	int i;
8595
8596 	/* Initialize and populate the iocb list per host. */
8597 	INIT_LIST_HEAD(&phba->lpfc_iocb_list);
8598 	for (i = 0; i < iocb_count; i++) {
8599 		iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
8600 		if (iocbq_entry == NULL) {
8601 			printk(KERN_ERR "%s: only allocated %d iocbs of "
8602 				"expected %d count. Unloading driver.\n",
8603 				__func__, i, iocb_count);
8604 			goto out_free_iocbq;
8605 		}
8606
8607 		iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
8608 		if (iotag == 0) {
8609 			kfree(iocbq_entry);
8610 			printk(KERN_ERR "%s: failed to allocate IOTAG. "
8611 				"Unloading driver.\n", __func__);
8612 			goto out_free_iocbq;
8613 		}
8614 		iocbq_entry->sli4_lxritag = NO_XRI;
8615 		iocbq_entry->sli4_xritag = NO_XRI;
8616
8617 		spin_lock_irq(&phba->hbalock);
8618 		list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
8619 		phba->total_iocbq_bufs++;
8620 		spin_unlock_irq(&phba->hbalock);
8621 	}
8622
8623 	return 0;
8624
8625 out_free_iocbq:
8626 	lpfc_free_iocb_list(phba);
8627
8628 	return -ENOMEM;
8629 }
8630
8631 /**
8632 * lpfc_free_sgl_list - Free a given sgl list.
8633 * @phba: pointer to lpfc hba data structure.
8634 * @sglq_list: pointer to the head of sgl list.
8635 *
8636 * This routine is invoked to free a given sgl list and memory.
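 *
 * Callers are expected to have already moved the sgl entries onto a
 * private list (typically with list_splice_init() under
 * sgl_list_lock, as lpfc_free_els_sgl_list() below does) so that this
 * routine can walk and free the list without holding any lock.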
8637 **/ 8638 void 8639 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list) 8640 { 8641 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 8642 8643 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) { 8644 list_del(&sglq_entry->list); 8645 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys); 8646 kfree(sglq_entry); 8647 } 8648 } 8649 8650 /** 8651 * lpfc_free_els_sgl_list - Free els sgl list. 8652 * @phba: pointer to lpfc hba data structure. 8653 * 8654 * This routine is invoked to free the driver's els sgl list and memory. 8655 **/ 8656 static void 8657 lpfc_free_els_sgl_list(struct lpfc_hba *phba) 8658 { 8659 LIST_HEAD(sglq_list); 8660 8661 /* Retrieve all els sgls from driver list */ 8662 spin_lock_irq(&phba->sli4_hba.sgl_list_lock); 8663 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list); 8664 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock); 8665 8666 /* Now free the sgl list */ 8667 lpfc_free_sgl_list(phba, &sglq_list); 8668 } 8669 8670 /** 8671 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list. 8672 * @phba: pointer to lpfc hba data structure. 8673 * 8674 * This routine is invoked to free the driver's nvmet sgl list and memory. 8675 **/ 8676 static void 8677 lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba) 8678 { 8679 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 8680 LIST_HEAD(sglq_list); 8681 8682 /* Retrieve all nvmet sgls from driver list */ 8683 spin_lock_irq(&phba->hbalock); 8684 spin_lock(&phba->sli4_hba.sgl_list_lock); 8685 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list); 8686 spin_unlock(&phba->sli4_hba.sgl_list_lock); 8687 spin_unlock_irq(&phba->hbalock); 8688 8689 /* Now free the sgl list */ 8690 list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) { 8691 list_del(&sglq_entry->list); 8692 lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys); 8693 kfree(sglq_entry); 8694 } 8695 8696 /* Update the nvmet_xri_cnt to reflect no current sgls. 8697 * The next initialization cycle sets the count and allocates 8698 * the sgls over again. 8699 */ 8700 phba->sli4_hba.nvmet_xri_cnt = 0; 8701 } 8702 8703 /** 8704 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs. 8705 * @phba: pointer to lpfc hba data structure. 8706 * 8707 * This routine is invoked to allocate the driver's active sgl memory. 8708 * This array will hold the sglq_entry's for active IOs. 8709 **/ 8710 static int 8711 lpfc_init_active_sgl_array(struct lpfc_hba *phba) 8712 { 8713 int size; 8714 size = sizeof(struct lpfc_sglq *); 8715 size *= phba->sli4_hba.max_cfg_param.max_xri; 8716 8717 phba->sli4_hba.lpfc_sglq_active_list = 8718 kzalloc(size, GFP_KERNEL); 8719 if (!phba->sli4_hba.lpfc_sglq_active_list) 8720 return -ENOMEM; 8721 return 0; 8722 } 8723 8724 /** 8725 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs. 8726 * @phba: pointer to lpfc hba data structure. 8727 * 8728 * This routine is invoked to walk through the array of active sglq entries 8729 * and free all of the resources. 8730 * This is just a place holder for now. 8731 **/ 8732 static void 8733 lpfc_free_active_sgl(struct lpfc_hba *phba) 8734 { 8735 kfree(phba->sli4_hba.lpfc_sglq_active_list); 8736 } 8737 8738 /** 8739 * lpfc_init_sgl_list - Allocate and initialize sgl list. 8740 * @phba: pointer to lpfc hba data structure. 8741 * 8742 * This routine is invoked to allocate and initizlize the driver's sgl 8743 * list and set up the sgl xritag tag array accordingly. 
8744 * 8745 **/ 8746 static void 8747 lpfc_init_sgl_list(struct lpfc_hba *phba) 8748 { 8749 /* Initialize and populate the sglq list per host/VF. */ 8750 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list); 8751 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list); 8752 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list); 8753 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 8754 8755 /* els xri-sgl book keeping */ 8756 phba->sli4_hba.els_xri_cnt = 0; 8757 8758 /* nvme xri-buffer book keeping */ 8759 phba->sli4_hba.io_xri_cnt = 0; 8760 } 8761 8762 /** 8763 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port 8764 * @phba: pointer to lpfc hba data structure. 8765 * 8766 * This routine is invoked to post rpi header templates to the 8767 * port for those SLI4 ports that do not support extents. This routine 8768 * posts a PAGE_SIZE memory region to the port to hold up to 8769 * PAGE_SIZE modulo 64 rpi context headers. This is an initialization routine 8770 * and should be called only when interrupts are disabled. 8771 * 8772 * Return codes 8773 * 0 - successful 8774 * -ERROR - otherwise. 8775 **/ 8776 int 8777 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) 8778 { 8779 int rc = 0; 8780 struct lpfc_rpi_hdr *rpi_hdr; 8781 8782 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list); 8783 if (!phba->sli4_hba.rpi_hdrs_in_use) 8784 return rc; 8785 if (phba->sli4_hba.extents_in_use) 8786 return -EIO; 8787 8788 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 8789 if (!rpi_hdr) { 8790 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8791 "0391 Error during rpi post operation\n"); 8792 lpfc_sli4_remove_rpis(phba); 8793 rc = -ENODEV; 8794 } 8795 8796 return rc; 8797 } 8798 8799 /** 8800 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region 8801 * @phba: pointer to lpfc hba data structure. 8802 * 8803 * This routine is invoked to allocate a single 4KB memory region to 8804 * support rpis and stores them in the phba. This single region 8805 * provides support for up to 64 rpis. The region is used globally 8806 * by the device. 8807 * 8808 * Returns: 8809 * A valid rpi hdr on success. 8810 * A NULL pointer on any failure. 8811 **/ 8812 struct lpfc_rpi_hdr * 8813 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) 8814 { 8815 uint16_t rpi_limit, curr_rpi_range; 8816 struct lpfc_dmabuf *dmabuf; 8817 struct lpfc_rpi_hdr *rpi_hdr; 8818 8819 /* 8820 * If the SLI4 port supports extents, posting the rpi header isn't 8821 * required. Set the expected maximum count and let the actual value 8822 * get set when extents are fully allocated. 8823 */ 8824 if (!phba->sli4_hba.rpi_hdrs_in_use) 8825 return NULL; 8826 if (phba->sli4_hba.extents_in_use) 8827 return NULL; 8828 8829 /* The limit on the logical index is just the max_rpi count. */ 8830 rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi; 8831 8832 spin_lock_irq(&phba->hbalock); 8833 /* 8834 * Establish the starting RPI in this header block. The starting 8835 * rpi is normalized to a zero base because the physical rpi is 8836 * port based. 8837 */ 8838 curr_rpi_range = phba->sli4_hba.next_rpi; 8839 spin_unlock_irq(&phba->hbalock); 8840 8841 /* Reached full RPI range */ 8842 if (curr_rpi_range == rpi_limit) 8843 return NULL; 8844 8845 /* 8846 * First allocate the protocol header region for the port. The 8847 * port expects a 4KB DMA-mapped memory region that is 4K aligned. 
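	 * dma_alloc_coherent() is expected to return at least page-aligned
	 * memory for a 4KB request, which satisfies this requirement, but
	 * the alignment is still verified explicitly below and the header
	 * is abandoned if the check fails.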
8848 */ 8849 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 8850 if (!dmabuf) 8851 return NULL; 8852 8853 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 8854 LPFC_HDR_TEMPLATE_SIZE, 8855 &dmabuf->phys, GFP_KERNEL); 8856 if (!dmabuf->virt) { 8857 rpi_hdr = NULL; 8858 goto err_free_dmabuf; 8859 } 8860 8861 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) { 8862 rpi_hdr = NULL; 8863 goto err_free_coherent; 8864 } 8865 8866 /* Save the rpi header data for cleanup later. */ 8867 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL); 8868 if (!rpi_hdr) 8869 goto err_free_coherent; 8870 8871 rpi_hdr->dmabuf = dmabuf; 8872 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE; 8873 rpi_hdr->page_count = 1; 8874 spin_lock_irq(&phba->hbalock); 8875 8876 /* The rpi_hdr stores the logical index only. */ 8877 rpi_hdr->start_rpi = curr_rpi_range; 8878 rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT; 8879 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); 8880 8881 spin_unlock_irq(&phba->hbalock); 8882 return rpi_hdr; 8883 8884 err_free_coherent: 8885 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE, 8886 dmabuf->virt, dmabuf->phys); 8887 err_free_dmabuf: 8888 kfree(dmabuf); 8889 return NULL; 8890 } 8891 8892 /** 8893 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions 8894 * @phba: pointer to lpfc hba data structure. 8895 * 8896 * This routine is invoked to remove all memory resources allocated 8897 * to support rpis for SLI4 ports not supporting extents. This routine 8898 * presumes the caller has released all rpis consumed by fabric or port 8899 * logins and is prepared to have the header pages removed. 8900 **/ 8901 void 8902 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) 8903 { 8904 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr; 8905 8906 if (!phba->sli4_hba.rpi_hdrs_in_use) 8907 goto exit; 8908 8909 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr, 8910 &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 8911 list_del(&rpi_hdr->list); 8912 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len, 8913 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys); 8914 kfree(rpi_hdr->dmabuf); 8915 kfree(rpi_hdr); 8916 } 8917 exit: 8918 /* There are no rpis available to the port now. */ 8919 phba->sli4_hba.next_rpi = 0; 8920 } 8921 8922 /** 8923 * lpfc_hba_alloc - Allocate driver hba data structure for a device. 8924 * @pdev: pointer to pci device data structure. 8925 * 8926 * This routine is invoked to allocate the driver hba data structure for an 8927 * HBA device. If the allocation is successful, the phba reference to the 8928 * PCI device data structure is set. 
8929 *
8930 * Return codes
8931 * pointer to @phba - successful
8932 * NULL - error
8933 **/
8934 static struct lpfc_hba *
8935 lpfc_hba_alloc(struct pci_dev *pdev)
8936 {
8937 struct lpfc_hba *phba;
8938
8939 /* Allocate memory for HBA structure */
8940 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
8941 if (!phba) {
8942 dev_err(&pdev->dev, "failed to allocate hba struct\n");
8943 return NULL;
8944 }
8945
8946 /* Set reference to PCI device in HBA structure */
8947 phba->pcidev = pdev;
8948
8949 /* Assign an unused board number */
8950 phba->brd_no = lpfc_get_instance();
8951 if (phba->brd_no < 0) {
8952 kfree(phba);
8953 return NULL;
8954 }
8955 phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL;
8956
8957 spin_lock_init(&phba->ct_ev_lock);
8958 INIT_LIST_HEAD(&phba->ct_ev_waiters);
8959
8960 return phba;
8961 }
8962
8963 /**
8964 * lpfc_hba_free - Free driver hba data structure with a device.
8965 * @phba: pointer to lpfc hba data structure.
8966 *
8967 * This routine is invoked to free the driver hba data structure with an
8968 * HBA device.
8969 **/
8970 static void
8971 lpfc_hba_free(struct lpfc_hba *phba)
8972 {
8973 if (phba->sli_rev == LPFC_SLI_REV4)
8974 kfree(phba->sli4_hba.hdwq);
8975
8976 /* Release the driver assigned board number */
8977 idr_remove(&lpfc_hba_index, phba->brd_no);
8978
8979 /* Free memory allocated with sli3 rings */
8980 kfree(phba->sli.sli3_ring);
8981 phba->sli.sli3_ring = NULL;
8982
8983 kfree(phba);
8984 return;
8985 }
8986
8987 /**
8988 * lpfc_setup_fdmi_mask - Setup initial FDMI mask for HBA and Port attributes
8989 * @vport: pointer to lpfc vport data structure.
8990 *
8991 * This routine will set up the initial FDMI attribute masks for
8992 * FDMI2 or SmartSAN depending on module parameters. The driver attempts
8993 * the richest attribute set first and falls back as needed; the attribute
8994 * fallback hierarchy is SmartSAN -> FDMI2 -> FDMI1.
8995 **/
8996 void
8997 lpfc_setup_fdmi_mask(struct lpfc_vport *vport)
8998 {
8999 struct lpfc_hba *phba = vport->phba;
9000
9001 vport->load_flag |= FC_ALLOW_FDMI;
9002 if (phba->cfg_enable_SmartSAN ||
9003 phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT) {
9004 /* Setup appropriate attribute masks */
9005 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
9006 if (phba->cfg_enable_SmartSAN)
9007 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
9008 else
9009 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
9010 }
9011
9012 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
9013 "6077 Setup FDMI mask: hba x%x port x%x\n",
9014 vport->fdmi_hba_mask, vport->fdmi_port_mask);
9015 }
9016
9017 /**
9018 * lpfc_create_shost - Create hba physical port with associated scsi host.
9019 * @phba: pointer to lpfc hba data structure.
9020 *
9021 * This routine is invoked to create HBA physical port and associate a SCSI
9022 * host with it.
9023 * 9024 * Return codes 9025 * 0 - successful 9026 * other values - error 9027 **/ 9028 static int 9029 lpfc_create_shost(struct lpfc_hba *phba) 9030 { 9031 struct lpfc_vport *vport; 9032 struct Scsi_Host *shost; 9033 9034 /* Initialize HBA FC structure */ 9035 phba->fc_edtov = FF_DEF_EDTOV; 9036 phba->fc_ratov = FF_DEF_RATOV; 9037 phba->fc_altov = FF_DEF_ALTOV; 9038 phba->fc_arbtov = FF_DEF_ARBTOV; 9039 9040 atomic_set(&phba->sdev_cnt, 0); 9041 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 9042 if (!vport) 9043 return -ENODEV; 9044 9045 shost = lpfc_shost_from_vport(vport); 9046 phba->pport = vport; 9047 9048 if (phba->nvmet_support) { 9049 /* Only 1 vport (pport) will support NVME target */ 9050 phba->targetport = NULL; 9051 phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME; 9052 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC, 9053 "6076 NVME Target Found\n"); 9054 } 9055 9056 lpfc_debugfs_initialize(vport); 9057 /* Put reference to SCSI host to driver's device private data */ 9058 pci_set_drvdata(phba->pcidev, shost); 9059 9060 lpfc_setup_fdmi_mask(vport); 9061 9062 /* 9063 * At this point we are fully registered with PSA. In addition, 9064 * any initial discovery should be completed. 9065 */ 9066 return 0; 9067 } 9068 9069 /** 9070 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host. 9071 * @phba: pointer to lpfc hba data structure. 9072 * 9073 * This routine is invoked to destroy HBA physical port and the associated 9074 * SCSI host. 9075 **/ 9076 static void 9077 lpfc_destroy_shost(struct lpfc_hba *phba) 9078 { 9079 struct lpfc_vport *vport = phba->pport; 9080 9081 /* Destroy physical port that associated with the SCSI host */ 9082 destroy_port(vport); 9083 9084 return; 9085 } 9086 9087 /** 9088 * lpfc_setup_bg - Setup Block guard structures and debug areas. 9089 * @phba: pointer to lpfc hba data structure. 9090 * @shost: the shost to be used to detect Block guard settings. 9091 * 9092 * This routine sets up the local Block guard protocol settings for @shost. 9093 * This routine also allocates memory for debugging bg buffers. 
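 *
 * Only DIF Type 1 and DIX Type 0/1 protection, with IP-CSUM or CRC guard,
 * are passed through to the SCSI midlayer; any other configured bits are
 * masked off before scsi_host_set_prot() and scsi_host_set_guard() are
 * called.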
9094 **/ 9095 static void 9096 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) 9097 { 9098 uint32_t old_mask; 9099 uint32_t old_guard; 9100 9101 if (phba->cfg_prot_mask && phba->cfg_prot_guard) { 9102 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9103 "1478 Registering BlockGuard with the " 9104 "SCSI layer\n"); 9105 9106 old_mask = phba->cfg_prot_mask; 9107 old_guard = phba->cfg_prot_guard; 9108 9109 /* Only allow supported values */ 9110 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION | 9111 SHOST_DIX_TYPE0_PROTECTION | 9112 SHOST_DIX_TYPE1_PROTECTION); 9113 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP | 9114 SHOST_DIX_GUARD_CRC); 9115 9116 /* DIF Type 1 protection for profiles AST1/C1 is end to end */ 9117 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION) 9118 phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION; 9119 9120 if (phba->cfg_prot_mask && phba->cfg_prot_guard) { 9121 if ((old_mask != phba->cfg_prot_mask) || 9122 (old_guard != phba->cfg_prot_guard)) 9123 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9124 "1475 Registering BlockGuard with the " 9125 "SCSI layer: mask %d guard %d\n", 9126 phba->cfg_prot_mask, 9127 phba->cfg_prot_guard); 9128 9129 scsi_host_set_prot(shost, phba->cfg_prot_mask); 9130 scsi_host_set_guard(shost, phba->cfg_prot_guard); 9131 } else 9132 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9133 "1479 Not Registering BlockGuard with the SCSI " 9134 "layer, Bad protection parameters: %d %d\n", 9135 old_mask, old_guard); 9136 } 9137 } 9138 9139 /** 9140 * lpfc_post_init_setup - Perform necessary device post initialization setup. 9141 * @phba: pointer to lpfc hba data structure. 9142 * 9143 * This routine is invoked to perform all the necessary post initialization 9144 * setup for the device. 9145 **/ 9146 static void 9147 lpfc_post_init_setup(struct lpfc_hba *phba) 9148 { 9149 struct Scsi_Host *shost; 9150 struct lpfc_adapter_event_header adapter_event; 9151 9152 /* Get the default values for Model Name and Description */ 9153 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 9154 9155 /* 9156 * hba setup may have changed the hba_queue_depth so we need to 9157 * adjust the value of can_queue. 9158 */ 9159 shost = pci_get_drvdata(phba->pcidev); 9160 shost->can_queue = phba->cfg_hba_queue_depth - 10; 9161 9162 lpfc_host_attrib_init(shost); 9163 9164 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 9165 spin_lock_irq(shost->host_lock); 9166 lpfc_poll_start_timer(phba); 9167 spin_unlock_irq(shost->host_lock); 9168 } 9169 9170 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9171 "0428 Perform SCSI scan\n"); 9172 /* Send board arrival event to upper layer */ 9173 adapter_event.event_type = FC_REG_ADAPTER_EVENT; 9174 adapter_event.subcategory = LPFC_EVENT_ARRIVAL; 9175 fc_host_post_vendor_event(shost, fc_get_event_number(), 9176 sizeof(adapter_event), 9177 (char *) &adapter_event, 9178 LPFC_NL_VENDOR_ID); 9179 return; 9180 } 9181 9182 /** 9183 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space. 9184 * @phba: pointer to lpfc hba data structure. 9185 * 9186 * This routine is invoked to set up the PCI device memory space for device 9187 * with SLI-3 interface spec. 
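 * BAR0 (the SLIM) and BAR2 (the control registers) are ioremap()ed, and
 * coherent DMA buffers are allocated for the SLI-2 SLIM and the HBQ
 * entries.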
9188 * 9189 * Return codes 9190 * 0 - successful 9191 * other values - error 9192 **/ 9193 static int 9194 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) 9195 { 9196 struct pci_dev *pdev = phba->pcidev; 9197 unsigned long bar0map_len, bar2map_len; 9198 int i, hbq_count; 9199 void *ptr; 9200 int error; 9201 9202 if (!pdev) 9203 return -ENODEV; 9204 9205 /* Set the device DMA mask size */ 9206 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 9207 if (error) 9208 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 9209 if (error) 9210 return error; 9211 error = -ENODEV; 9212 9213 /* Get the bus address of Bar0 and Bar2 and the number of bytes 9214 * required by each mapping. 9215 */ 9216 phba->pci_bar0_map = pci_resource_start(pdev, 0); 9217 bar0map_len = pci_resource_len(pdev, 0); 9218 9219 phba->pci_bar2_map = pci_resource_start(pdev, 2); 9220 bar2map_len = pci_resource_len(pdev, 2); 9221 9222 /* Map HBA SLIM to a kernel virtual address. */ 9223 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 9224 if (!phba->slim_memmap_p) { 9225 dev_printk(KERN_ERR, &pdev->dev, 9226 "ioremap failed for SLIM memory.\n"); 9227 goto out; 9228 } 9229 9230 /* Map HBA Control Registers to a kernel virtual address. */ 9231 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); 9232 if (!phba->ctrl_regs_memmap_p) { 9233 dev_printk(KERN_ERR, &pdev->dev, 9234 "ioremap failed for HBA control registers.\n"); 9235 goto out_iounmap_slim; 9236 } 9237 9238 /* Allocate memory for SLI-2 structures */ 9239 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE, 9240 &phba->slim2p.phys, GFP_KERNEL); 9241 if (!phba->slim2p.virt) 9242 goto out_iounmap; 9243 9244 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 9245 phba->mbox_ext = (phba->slim2p.virt + 9246 offsetof(struct lpfc_sli2_slim, mbx_ext_words)); 9247 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); 9248 phba->IOCBs = (phba->slim2p.virt + 9249 offsetof(struct lpfc_sli2_slim, IOCBs)); 9250 9251 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev, 9252 lpfc_sli_hbq_size(), 9253 &phba->hbqslimp.phys, 9254 GFP_KERNEL); 9255 if (!phba->hbqslimp.virt) 9256 goto out_free_slim; 9257 9258 hbq_count = lpfc_sli_hbq_count(); 9259 ptr = phba->hbqslimp.virt; 9260 for (i = 0; i < hbq_count; ++i) { 9261 phba->hbqs[i].hbq_virt = ptr; 9262 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 9263 ptr += (lpfc_hbq_defs[i]->entry_count * 9264 sizeof(struct lpfc_hbq_entry)); 9265 } 9266 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 9267 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 9268 9269 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 9270 9271 phba->MBslimaddr = phba->slim_memmap_p; 9272 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 9273 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 9274 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 9275 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 9276 9277 return 0; 9278 9279 out_free_slim: 9280 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 9281 phba->slim2p.virt, phba->slim2p.phys); 9282 out_iounmap: 9283 iounmap(phba->ctrl_regs_memmap_p); 9284 out_iounmap_slim: 9285 iounmap(phba->slim_memmap_p); 9286 out: 9287 return error; 9288 } 9289 9290 /** 9291 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space. 9292 * @phba: pointer to lpfc hba data structure. 
9293 *
9294 * This routine is invoked to unset the PCI device memory space for device
9295 * with SLI-3 interface spec.
9296 **/
9297 static void
9298 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
9299 {
9300 struct pci_dev *pdev;
9301
9302 /* Obtain PCI device reference */
9303 if (!phba->pcidev)
9304 return;
9305 else
9306 pdev = phba->pcidev;
9307
9308 /* Free coherent DMA memory allocated */
9309 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
9310 phba->hbqslimp.virt, phba->hbqslimp.phys);
9311 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9312 phba->slim2p.virt, phba->slim2p.phys);
9313
9314 /* I/O memory unmap */
9315 iounmap(phba->ctrl_regs_memmap_p);
9316 iounmap(phba->slim_memmap_p);
9317
9318 return;
9319 }
9320
9321 /**
9322 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
9323 * @phba: pointer to lpfc hba data structure.
9324 *
9325 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
9326 * done and check status.
9327 *
9328 * Return 0 if successful, otherwise -ENODEV.
9329 **/
9330 int
9331 lpfc_sli4_post_status_check(struct lpfc_hba *phba)
9332 {
9333 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
9334 struct lpfc_register reg_data;
9335 int i, port_error = 0;
9336 uint32_t if_type;
9337
9338 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
9339 memset(&reg_data, 0, sizeof(reg_data));
9340 if (!phba->sli4_hba.PSMPHRregaddr)
9341 return -ENODEV;
9342
9343 /* Wait up to 30 seconds for the SLI Port POST done and ready */
9344 for (i = 0; i < 3000; i++) {
9345 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
9346 &portsmphr_reg.word0) ||
9347 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
9348 /* Port has a fatal POST error, break out */
9349 port_error = -ENODEV;
9350 break;
9351 }
9352 if (LPFC_POST_STAGE_PORT_READY ==
9353 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
9354 break;
9355 msleep(10);
9356 }
9357
9358 /*
9359 * If there was a port error during POST, then don't proceed with
9360 * other register reads as the data may not be valid. Just exit.
9361 */
9362 if (port_error) {
9363 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9364 "1408 Port Failed POST - portsmphr=0x%x, "
9365 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
9366 "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
9367 portsmphr_reg.word0,
9368 bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
9369 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
9370 bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
9371 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
9372 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
9373 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
9374 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
9375 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
9376 } else {
9377 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9378 "2534 Device Info: SLIFamily=0x%x, "
9379 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
9380 "SLIHint_2=0x%x, FT=0x%x\n",
9381 bf_get(lpfc_sli_intf_sli_family,
9382 &phba->sli4_hba.sli_intf),
9383 bf_get(lpfc_sli_intf_slirev,
9384 &phba->sli4_hba.sli_intf),
9385 bf_get(lpfc_sli_intf_if_type,
9386 &phba->sli4_hba.sli_intf),
9387 bf_get(lpfc_sli_intf_sli_hint1,
9388 &phba->sli4_hba.sli_intf),
9389 bf_get(lpfc_sli_intf_sli_hint2,
9390 &phba->sli4_hba.sli_intf),
9391 bf_get(lpfc_sli_intf_func_type,
9392 &phba->sli4_hba.sli_intf));
9393 /*
9394 * Check for other Port errors during the initialization
9395 * process. Fail the load if the port did not come up
9396 * correctly.
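 * For if_type 0 this means comparing the unrecoverable-error status
 * registers against their masks; for if_type 2/6 the port status register
 * is checked and the ERR1/ERR2 registers are captured on failure.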
9397 */
9398 if_type = bf_get(lpfc_sli_intf_if_type,
9399 &phba->sli4_hba.sli_intf);
9400 switch (if_type) {
9401 case LPFC_SLI_INTF_IF_TYPE_0:
9402 phba->sli4_hba.ue_mask_lo =
9403 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
9404 phba->sli4_hba.ue_mask_hi =
9405 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
9406 uerrlo_reg.word0 =
9407 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
9408 uerrhi_reg.word0 =
9409 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
9410 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
9411 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
9412 lpfc_printf_log(phba, KERN_ERR,
9413 LOG_TRACE_EVENT,
9414 "1422 Unrecoverable Error "
9415 "Detected during POST "
9416 "uerr_lo_reg=0x%x, "
9417 "uerr_hi_reg=0x%x, "
9418 "ue_mask_lo_reg=0x%x, "
9419 "ue_mask_hi_reg=0x%x\n",
9420 uerrlo_reg.word0,
9421 uerrhi_reg.word0,
9422 phba->sli4_hba.ue_mask_lo,
9423 phba->sli4_hba.ue_mask_hi);
9424 port_error = -ENODEV;
9425 }
9426 break;
9427 case LPFC_SLI_INTF_IF_TYPE_2:
9428 case LPFC_SLI_INTF_IF_TYPE_6:
9429 /* Final checks. The port status should be clean. */
9430 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
9431 &reg_data.word0) ||
9432 (bf_get(lpfc_sliport_status_err, &reg_data) &&
9433 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
9434 phba->work_status[0] =
9435 readl(phba->sli4_hba.u.if_type2.
9436 ERR1regaddr);
9437 phba->work_status[1] =
9438 readl(phba->sli4_hba.u.if_type2.
9439 ERR2regaddr);
9440 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9441 "2888 Unrecoverable port error "
9442 "following POST: port status reg "
9443 "0x%x, port_smphr reg 0x%x, "
9444 "error 1=0x%x, error 2=0x%x\n",
9445 reg_data.word0,
9446 portsmphr_reg.word0,
9447 phba->work_status[0],
9448 phba->work_status[1]);
9449 port_error = -ENODEV;
9450 break;
9451 }
9452
9453 if (lpfc_pldv_detect &&
9454 bf_get(lpfc_sli_intf_sli_family,
9455 &phba->sli4_hba.sli_intf) ==
9456 LPFC_SLI_INTF_FAMILY_G6)
9457 pci_write_config_byte(phba->pcidev,
9458 LPFC_SLI_INTF, CFG_PLD);
9459 break;
9460 case LPFC_SLI_INTF_IF_TYPE_1:
9461 default:
9462 break;
9463 }
9464 }
9465 return port_error;
9466 }
9467
9468 /**
9469 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
9470 * @phba: pointer to lpfc hba data structure.
9471 * @if_type: The SLI4 interface type getting configured.
9472 *
9473 * This routine is invoked to set up SLI4 BAR0 PCI config space register
9474 * memory map.
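 * (if_type 0 maps the unrecoverable-error status/mask registers; if_type 2
 * additionally maps the port control/status, doorbell and bootstrap mailbox
 * registers; if_type 6 maps the control registers and bootstrap mailbox here
 * and leaves the doorbells to BAR1.)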
9475 **/ 9476 static void 9477 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type) 9478 { 9479 switch (if_type) { 9480 case LPFC_SLI_INTF_IF_TYPE_0: 9481 phba->sli4_hba.u.if_type0.UERRLOregaddr = 9482 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO; 9483 phba->sli4_hba.u.if_type0.UERRHIregaddr = 9484 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI; 9485 phba->sli4_hba.u.if_type0.UEMASKLOregaddr = 9486 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO; 9487 phba->sli4_hba.u.if_type0.UEMASKHIregaddr = 9488 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI; 9489 phba->sli4_hba.SLIINTFregaddr = 9490 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; 9491 break; 9492 case LPFC_SLI_INTF_IF_TYPE_2: 9493 phba->sli4_hba.u.if_type2.EQDregaddr = 9494 phba->sli4_hba.conf_regs_memmap_p + 9495 LPFC_CTL_PORT_EQ_DELAY_OFFSET; 9496 phba->sli4_hba.u.if_type2.ERR1regaddr = 9497 phba->sli4_hba.conf_regs_memmap_p + 9498 LPFC_CTL_PORT_ER1_OFFSET; 9499 phba->sli4_hba.u.if_type2.ERR2regaddr = 9500 phba->sli4_hba.conf_regs_memmap_p + 9501 LPFC_CTL_PORT_ER2_OFFSET; 9502 phba->sli4_hba.u.if_type2.CTRLregaddr = 9503 phba->sli4_hba.conf_regs_memmap_p + 9504 LPFC_CTL_PORT_CTL_OFFSET; 9505 phba->sli4_hba.u.if_type2.STATUSregaddr = 9506 phba->sli4_hba.conf_regs_memmap_p + 9507 LPFC_CTL_PORT_STA_OFFSET; 9508 phba->sli4_hba.SLIINTFregaddr = 9509 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; 9510 phba->sli4_hba.PSMPHRregaddr = 9511 phba->sli4_hba.conf_regs_memmap_p + 9512 LPFC_CTL_PORT_SEM_OFFSET; 9513 phba->sli4_hba.RQDBregaddr = 9514 phba->sli4_hba.conf_regs_memmap_p + 9515 LPFC_ULP0_RQ_DOORBELL; 9516 phba->sli4_hba.WQDBregaddr = 9517 phba->sli4_hba.conf_regs_memmap_p + 9518 LPFC_ULP0_WQ_DOORBELL; 9519 phba->sli4_hba.CQDBregaddr = 9520 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL; 9521 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr; 9522 phba->sli4_hba.MQDBregaddr = 9523 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL; 9524 phba->sli4_hba.BMBXregaddr = 9525 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX; 9526 break; 9527 case LPFC_SLI_INTF_IF_TYPE_6: 9528 phba->sli4_hba.u.if_type2.EQDregaddr = 9529 phba->sli4_hba.conf_regs_memmap_p + 9530 LPFC_CTL_PORT_EQ_DELAY_OFFSET; 9531 phba->sli4_hba.u.if_type2.ERR1regaddr = 9532 phba->sli4_hba.conf_regs_memmap_p + 9533 LPFC_CTL_PORT_ER1_OFFSET; 9534 phba->sli4_hba.u.if_type2.ERR2regaddr = 9535 phba->sli4_hba.conf_regs_memmap_p + 9536 LPFC_CTL_PORT_ER2_OFFSET; 9537 phba->sli4_hba.u.if_type2.CTRLregaddr = 9538 phba->sli4_hba.conf_regs_memmap_p + 9539 LPFC_CTL_PORT_CTL_OFFSET; 9540 phba->sli4_hba.u.if_type2.STATUSregaddr = 9541 phba->sli4_hba.conf_regs_memmap_p + 9542 LPFC_CTL_PORT_STA_OFFSET; 9543 phba->sli4_hba.PSMPHRregaddr = 9544 phba->sli4_hba.conf_regs_memmap_p + 9545 LPFC_CTL_PORT_SEM_OFFSET; 9546 phba->sli4_hba.BMBXregaddr = 9547 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX; 9548 break; 9549 case LPFC_SLI_INTF_IF_TYPE_1: 9550 default: 9551 dev_printk(KERN_ERR, &phba->pcidev->dev, 9552 "FATAL - unsupported SLI4 interface type - %d\n", 9553 if_type); 9554 break; 9555 } 9556 } 9557 9558 /** 9559 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map. 9560 * @phba: pointer to lpfc hba data structure. 9561 * @if_type: sli if type to operate on. 9562 * 9563 * This routine is invoked to set up SLI4 BAR1 register memory map. 
9564 **/
9565 static void
9566 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
9567 {
9568 switch (if_type) {
9569 case LPFC_SLI_INTF_IF_TYPE_0:
9570 phba->sli4_hba.PSMPHRregaddr =
9571 phba->sli4_hba.ctrl_regs_memmap_p +
9572 LPFC_SLIPORT_IF0_SMPHR;
9573 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9574 LPFC_HST_ISR0;
9575 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9576 LPFC_HST_IMR0;
9577 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9578 LPFC_HST_ISCR0;
9579 break;
9580 case LPFC_SLI_INTF_IF_TYPE_6:
9581 phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9582 LPFC_IF6_RQ_DOORBELL;
9583 phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9584 LPFC_IF6_WQ_DOORBELL;
9585 phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9586 LPFC_IF6_CQ_DOORBELL;
9587 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9588 LPFC_IF6_EQ_DOORBELL;
9589 phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9590 LPFC_IF6_MQ_DOORBELL;
9591 break;
9592 case LPFC_SLI_INTF_IF_TYPE_2:
9593 case LPFC_SLI_INTF_IF_TYPE_1:
9594 default:
9595 dev_err(&phba->pcidev->dev,
9596 "FATAL - unsupported SLI4 interface type - %d\n",
9597 if_type);
9598 break;
9599 }
9600 }
9601
9602 /**
9603 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
9604 * @phba: pointer to lpfc hba data structure.
9605 * @vf: virtual function number
9606 *
9607 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
9608 * based on the given virtual function number, @vf.
9609 *
9610 * Return 0 if successful, otherwise -ENODEV.
9611 **/
9612 static int
9613 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
9614 {
9615 if (vf > LPFC_VIR_FUNC_MAX)
9616 return -ENODEV;
9617
9618 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9619 vf * LPFC_VFR_PAGE_SIZE +
9620 LPFC_ULP0_RQ_DOORBELL);
9621 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9622 vf * LPFC_VFR_PAGE_SIZE +
9623 LPFC_ULP0_WQ_DOORBELL);
9624 phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9625 vf * LPFC_VFR_PAGE_SIZE +
9626 LPFC_EQCQ_DOORBELL);
9627 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
9628 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9629 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
9630 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9631 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
9632 return 0;
9633 }
9634
9635 /**
9636 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
9637 * @phba: pointer to lpfc hba data structure.
9638 *
9639 * This routine is invoked to create the bootstrap mailbox
9640 * region consistent with the SLI-4 interface spec. This
9641 * routine allocates all memory necessary to communicate
9642 * mailbox commands to the port and sets up all alignment
9643 * needs. No locks are expected to be held when calling
9644 * this routine.
9645 *
9646 * Return codes
9647 * 0 - successful
9648 * -ENOMEM - could not allocate memory.
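 *
 * Note: the 16-byte-aligned bootstrap mailbox physical address is later
 * split into two 30-bit values - bits 63:34 for the high word and bits 33:4
 * for the low word. As a purely illustrative example, a hypothetical
 * aligned address of 0x12_3456_7890 would contribute 0x4 to the high word
 * and 0x23456789 to the low word, before the shift-by-2 and the hi/lo
 * marker bits are applied.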
9649 **/
9650 static int
9651 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
9652 {
9653 uint32_t bmbx_size;
9654 struct lpfc_dmabuf *dmabuf;
9655 struct dma_address *dma_address;
9656 uint32_t pa_addr;
9657 uint64_t phys_addr;
9658
9659 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
9660 if (!dmabuf)
9661 return -ENOMEM;
9662
9663 /*
9664 * The bootstrap mailbox region consists of two parts
9665 * plus an alignment restriction of 16 bytes.
9666 */
9667 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
9668 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
9669 &dmabuf->phys, GFP_KERNEL);
9670 if (!dmabuf->virt) {
9671 kfree(dmabuf);
9672 return -ENOMEM;
9673 }
9674
9675 /*
9676 * Initialize the bootstrap mailbox pointers now so that the register
9677 * operations are simple later. The mailbox dma address is required
9678 * to be 16-byte aligned. Also align the virtual memory as each
9679 * mailbox is copied into the bmbx mailbox region before issuing the
9680 * command to the port.
9681 */
9682 phba->sli4_hba.bmbx.dmabuf = dmabuf;
9683 phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
9684
9685 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
9686 LPFC_ALIGN_16_BYTE);
9687 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
9688 LPFC_ALIGN_16_BYTE);
9689
9690 /*
9691 * Set the high and low physical addresses now. The SLI4 alignment
9692 * requirement is 16 bytes and the mailbox is posted to the port
9693 * as two 30-bit addresses. The other data is a bit marking whether
9694 * the 30-bit address is the high or low address.
9695 * Upcast bmbx aphys to 64bits so shift instruction compiles
9696 * clean on 32 bit machines.
9697 */
9698 dma_address = &phba->sli4_hba.bmbx.dma_address;
9699 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
9700 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
9701 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
9702 LPFC_BMBX_BIT1_ADDR_HI);
9703
9704 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
9705 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
9706 LPFC_BMBX_BIT1_ADDR_LO);
9707 return 0;
9708 }
9709
9710 /**
9711 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
9712 * @phba: pointer to lpfc hba data structure.
9713 *
9714 * This routine is invoked to tear down the bootstrap mailbox
9715 * region and release all host resources. This routine requires
9716 * the caller to ensure all mailbox commands have been recovered, no
9717 * additional mailbox commands are sent, and interrupts are disabled
9718 * before calling this routine.
9719 *
9720 **/
9721 static void
9722 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
9723 {
9724 dma_free_coherent(&phba->pcidev->dev,
9725 phba->sli4_hba.bmbx.bmbx_size,
9726 phba->sli4_hba.bmbx.dmabuf->virt,
9727 phba->sli4_hba.bmbx.dmabuf->phys);
9728
9729 kfree(phba->sli4_hba.bmbx.dmabuf);
9730 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
9731 }
9732
9733 static const char * const lpfc_topo_to_str[] = {
9734 "Loop then P2P",
9735 "Loopback",
9736 "P2P Only",
9737 "Unsupported",
9738 "Loop Only",
9739 "Unsupported",
9740 "P2P then Loop",
9741 };
9742
9743 #define LINK_FLAGS_DEF 0x0
9744 #define LINK_FLAGS_P2P 0x1
9745 #define LINK_FLAGS_LOOP 0x2
9746 /**
9747 * lpfc_map_topology - Map the topology read from READ_CONFIG
9748 * @phba: pointer to lpfc hba data structure.
9749 * @rd_config: pointer to read config data 9750 * 9751 * This routine is invoked to map the topology values as read 9752 * from the read config mailbox command. If the persistent 9753 * topology feature is supported, the firmware will provide the 9754 * saved topology information to be used in INIT_LINK 9755 **/ 9756 static void 9757 lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config) 9758 { 9759 u8 ptv, tf, pt; 9760 9761 ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config); 9762 tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config); 9763 pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config); 9764 9765 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 9766 "2027 Read Config Data : ptv:0x%x, tf:0x%x pt:0x%x", 9767 ptv, tf, pt); 9768 if (!ptv) { 9769 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 9770 "2019 FW does not support persistent topology " 9771 "Using driver parameter defined value [%s]", 9772 lpfc_topo_to_str[phba->cfg_topology]); 9773 return; 9774 } 9775 /* FW supports persistent topology - override module parameter value */ 9776 phba->hba_flag |= HBA_PERSISTENT_TOPO; 9777 9778 /* if ASIC_GEN_NUM >= 0xC) */ 9779 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 9780 LPFC_SLI_INTF_IF_TYPE_6) || 9781 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == 9782 LPFC_SLI_INTF_FAMILY_G6)) { 9783 if (!tf) { 9784 phba->cfg_topology = ((pt == LINK_FLAGS_LOOP) 9785 ? FLAGS_TOPOLOGY_MODE_LOOP 9786 : FLAGS_TOPOLOGY_MODE_PT_PT); 9787 } else { 9788 phba->hba_flag &= ~HBA_PERSISTENT_TOPO; 9789 } 9790 } else { /* G5 */ 9791 if (tf) { 9792 /* If topology failover set - pt is '0' or '1' */ 9793 phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP : 9794 FLAGS_TOPOLOGY_MODE_LOOP_PT); 9795 } else { 9796 phba->cfg_topology = ((pt == LINK_FLAGS_P2P) 9797 ? FLAGS_TOPOLOGY_MODE_PT_PT 9798 : FLAGS_TOPOLOGY_MODE_LOOP); 9799 } 9800 } 9801 if (phba->hba_flag & HBA_PERSISTENT_TOPO) { 9802 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 9803 "2020 Using persistent topology value [%s]", 9804 lpfc_topo_to_str[phba->cfg_topology]); 9805 } else { 9806 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 9807 "2021 Invalid topology values from FW " 9808 "Using driver parameter defined value [%s]", 9809 lpfc_topo_to_str[phba->cfg_topology]); 9810 } 9811 } 9812 9813 /** 9814 * lpfc_sli4_read_config - Get the config parameters. 9815 * @phba: pointer to lpfc hba data structure. 9816 * 9817 * This routine is invoked to read the configuration parameters from the HBA. 9818 * The configuration parameters are used to set the base and maximum values 9819 * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource 9820 * allocation for the port. 9821 * 9822 * Return codes 9823 * 0 - successful 9824 * -ENOMEM - No available memory 9825 * -EIO - The mailbox failed to complete successfully. 
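 *
 * Besides the resource counts, READ_CONFIG also reports link/trunking
 * information, FA-PWWN provisioning, persistent topology and congestion
 * signalling capabilities, all of which are captured by this routine.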
9826 **/ 9827 int 9828 lpfc_sli4_read_config(struct lpfc_hba *phba) 9829 { 9830 LPFC_MBOXQ_t *pmb; 9831 struct lpfc_mbx_read_config *rd_config; 9832 union lpfc_sli4_cfg_shdr *shdr; 9833 uint32_t shdr_status, shdr_add_status; 9834 struct lpfc_mbx_get_func_cfg *get_func_cfg; 9835 struct lpfc_rsrc_desc_fcfcoe *desc; 9836 char *pdesc_0; 9837 uint16_t forced_link_speed; 9838 uint32_t if_type, qmin, fawwpn; 9839 int length, i, rc = 0, rc2; 9840 9841 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 9842 if (!pmb) { 9843 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9844 "2011 Unable to allocate memory for issuing " 9845 "SLI_CONFIG_SPECIAL mailbox command\n"); 9846 return -ENOMEM; 9847 } 9848 9849 lpfc_read_config(phba, pmb); 9850 9851 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 9852 if (rc != MBX_SUCCESS) { 9853 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9854 "2012 Mailbox failed , mbxCmd x%x " 9855 "READ_CONFIG, mbxStatus x%x\n", 9856 bf_get(lpfc_mqe_command, &pmb->u.mqe), 9857 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 9858 rc = -EIO; 9859 } else { 9860 rd_config = &pmb->u.mqe.un.rd_config; 9861 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) { 9862 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; 9863 phba->sli4_hba.lnk_info.lnk_tp = 9864 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config); 9865 phba->sli4_hba.lnk_info.lnk_no = 9866 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config); 9867 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 9868 "3081 lnk_type:%d, lnk_numb:%d\n", 9869 phba->sli4_hba.lnk_info.lnk_tp, 9870 phba->sli4_hba.lnk_info.lnk_no); 9871 } else 9872 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 9873 "3082 Mailbox (x%x) returned ldv:x0\n", 9874 bf_get(lpfc_mqe_command, &pmb->u.mqe)); 9875 if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) { 9876 phba->bbcredit_support = 1; 9877 phba->sli4_hba.bbscn_params.word0 = rd_config->word8; 9878 } 9879 9880 fawwpn = bf_get(lpfc_mbx_rd_conf_fawwpn, rd_config); 9881 9882 if (fawwpn) { 9883 lpfc_printf_log(phba, KERN_INFO, 9884 LOG_INIT | LOG_DISCOVERY, 9885 "2702 READ_CONFIG: FA-PWWN is " 9886 "configured on\n"); 9887 phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_CONFIG; 9888 } else { 9889 phba->sli4_hba.fawwpn_flag = 0; 9890 } 9891 9892 phba->sli4_hba.conf_trunk = 9893 bf_get(lpfc_mbx_rd_conf_trunk, rd_config); 9894 phba->sli4_hba.extents_in_use = 9895 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config); 9896 9897 phba->sli4_hba.max_cfg_param.max_xri = 9898 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); 9899 /* Reduce resource usage in kdump environment */ 9900 if (is_kdump_kernel() && 9901 phba->sli4_hba.max_cfg_param.max_xri > 512) 9902 phba->sli4_hba.max_cfg_param.max_xri = 512; 9903 phba->sli4_hba.max_cfg_param.xri_base = 9904 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config); 9905 phba->sli4_hba.max_cfg_param.max_vpi = 9906 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config); 9907 /* Limit the max we support */ 9908 if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS) 9909 phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS; 9910 phba->sli4_hba.max_cfg_param.vpi_base = 9911 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config); 9912 phba->sli4_hba.max_cfg_param.max_rpi = 9913 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config); 9914 phba->sli4_hba.max_cfg_param.rpi_base = 9915 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config); 9916 phba->sli4_hba.max_cfg_param.max_vfi = 9917 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config); 9918 phba->sli4_hba.max_cfg_param.vfi_base = 9919 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config); 9920 
phba->sli4_hba.max_cfg_param.max_fcfi = 9921 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config); 9922 phba->sli4_hba.max_cfg_param.max_eq = 9923 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config); 9924 phba->sli4_hba.max_cfg_param.max_rq = 9925 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config); 9926 phba->sli4_hba.max_cfg_param.max_wq = 9927 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config); 9928 phba->sli4_hba.max_cfg_param.max_cq = 9929 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config); 9930 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config); 9931 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base; 9932 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; 9933 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; 9934 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ? 9935 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0; 9936 phba->max_vports = phba->max_vpi; 9937 9938 /* Next decide on FPIN or Signal E2E CGN support 9939 * For congestion alarms and warnings valid combination are: 9940 * 1. FPIN alarms / FPIN warnings 9941 * 2. Signal alarms / Signal warnings 9942 * 3. FPIN alarms / Signal warnings 9943 * 4. Signal alarms / FPIN warnings 9944 * 9945 * Initialize the adapter frequency to 100 mSecs 9946 */ 9947 phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH; 9948 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 9949 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency; 9950 9951 if (lpfc_use_cgn_signal) { 9952 if (bf_get(lpfc_mbx_rd_conf_wcs, rd_config)) { 9953 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY; 9954 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN; 9955 } 9956 if (bf_get(lpfc_mbx_rd_conf_acs, rd_config)) { 9957 /* MUST support both alarm and warning 9958 * because EDC does not support alarm alone. 9959 */ 9960 if (phba->cgn_reg_signal != 9961 EDC_CG_SIG_WARN_ONLY) { 9962 /* Must support both or none */ 9963 phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH; 9964 phba->cgn_reg_signal = 9965 EDC_CG_SIG_NOTSUPPORTED; 9966 } else { 9967 phba->cgn_reg_signal = 9968 EDC_CG_SIG_WARN_ALARM; 9969 phba->cgn_reg_fpin = 9970 LPFC_CGN_FPIN_NONE; 9971 } 9972 } 9973 } 9974 9975 /* Set the congestion initial signal and fpin values. */ 9976 phba->cgn_init_reg_fpin = phba->cgn_reg_fpin; 9977 phba->cgn_init_reg_signal = phba->cgn_reg_signal; 9978 9979 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 9980 "6446 READ_CONFIG reg_sig x%x reg_fpin:x%x\n", 9981 phba->cgn_reg_signal, phba->cgn_reg_fpin); 9982 9983 lpfc_map_topology(phba, rd_config); 9984 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 9985 "2003 cfg params Extents? %d " 9986 "XRI(B:%d M:%d), " 9987 "VPI(B:%d M:%d) " 9988 "VFI(B:%d M:%d) " 9989 "RPI(B:%d M:%d) " 9990 "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d lmt:x%x\n", 9991 phba->sli4_hba.extents_in_use, 9992 phba->sli4_hba.max_cfg_param.xri_base, 9993 phba->sli4_hba.max_cfg_param.max_xri, 9994 phba->sli4_hba.max_cfg_param.vpi_base, 9995 phba->sli4_hba.max_cfg_param.max_vpi, 9996 phba->sli4_hba.max_cfg_param.vfi_base, 9997 phba->sli4_hba.max_cfg_param.max_vfi, 9998 phba->sli4_hba.max_cfg_param.rpi_base, 9999 phba->sli4_hba.max_cfg_param.max_rpi, 10000 phba->sli4_hba.max_cfg_param.max_fcfi, 10001 phba->sli4_hba.max_cfg_param.max_eq, 10002 phba->sli4_hba.max_cfg_param.max_cq, 10003 phba->sli4_hba.max_cfg_param.max_wq, 10004 phba->sli4_hba.max_cfg_param.max_rq, 10005 phba->lmt); 10006 10007 /* 10008 * Calculate queue resources based on how 10009 * many WQ/CQ/EQs are available. 
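 * For example (purely hypothetical counts): with max_wq = 128, max_cq = 160
 * and max_eq = 64, qmin starts at 64 and is then reduced by 4 below,
 * leaving 60 queues for the FCP/NVME I/O channels.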
10010 */ 10011 qmin = phba->sli4_hba.max_cfg_param.max_wq; 10012 if (phba->sli4_hba.max_cfg_param.max_cq < qmin) 10013 qmin = phba->sli4_hba.max_cfg_param.max_cq; 10014 if (phba->sli4_hba.max_cfg_param.max_eq < qmin) 10015 qmin = phba->sli4_hba.max_cfg_param.max_eq; 10016 /* 10017 * Whats left after this can go toward NVME / FCP. 10018 * The minus 4 accounts for ELS, NVME LS, MBOX 10019 * plus one extra. When configured for 10020 * NVMET, FCP io channel WQs are not created. 10021 */ 10022 qmin -= 4; 10023 10024 /* Check to see if there is enough for NVME */ 10025 if ((phba->cfg_irq_chann > qmin) || 10026 (phba->cfg_hdw_queue > qmin)) { 10027 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10028 "2005 Reducing Queues - " 10029 "FW resource limitation: " 10030 "WQ %d CQ %d EQ %d: min %d: " 10031 "IRQ %d HDWQ %d\n", 10032 phba->sli4_hba.max_cfg_param.max_wq, 10033 phba->sli4_hba.max_cfg_param.max_cq, 10034 phba->sli4_hba.max_cfg_param.max_eq, 10035 qmin, phba->cfg_irq_chann, 10036 phba->cfg_hdw_queue); 10037 10038 if (phba->cfg_irq_chann > qmin) 10039 phba->cfg_irq_chann = qmin; 10040 if (phba->cfg_hdw_queue > qmin) 10041 phba->cfg_hdw_queue = qmin; 10042 } 10043 } 10044 10045 if (rc) 10046 goto read_cfg_out; 10047 10048 /* Update link speed if forced link speed is supported */ 10049 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 10050 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 10051 forced_link_speed = 10052 bf_get(lpfc_mbx_rd_conf_link_speed, rd_config); 10053 if (forced_link_speed) { 10054 phba->hba_flag |= HBA_FORCED_LINK_SPEED; 10055 10056 switch (forced_link_speed) { 10057 case LINK_SPEED_1G: 10058 phba->cfg_link_speed = 10059 LPFC_USER_LINK_SPEED_1G; 10060 break; 10061 case LINK_SPEED_2G: 10062 phba->cfg_link_speed = 10063 LPFC_USER_LINK_SPEED_2G; 10064 break; 10065 case LINK_SPEED_4G: 10066 phba->cfg_link_speed = 10067 LPFC_USER_LINK_SPEED_4G; 10068 break; 10069 case LINK_SPEED_8G: 10070 phba->cfg_link_speed = 10071 LPFC_USER_LINK_SPEED_8G; 10072 break; 10073 case LINK_SPEED_10G: 10074 phba->cfg_link_speed = 10075 LPFC_USER_LINK_SPEED_10G; 10076 break; 10077 case LINK_SPEED_16G: 10078 phba->cfg_link_speed = 10079 LPFC_USER_LINK_SPEED_16G; 10080 break; 10081 case LINK_SPEED_32G: 10082 phba->cfg_link_speed = 10083 LPFC_USER_LINK_SPEED_32G; 10084 break; 10085 case LINK_SPEED_64G: 10086 phba->cfg_link_speed = 10087 LPFC_USER_LINK_SPEED_64G; 10088 break; 10089 case 0xffff: 10090 phba->cfg_link_speed = 10091 LPFC_USER_LINK_SPEED_AUTO; 10092 break; 10093 default: 10094 lpfc_printf_log(phba, KERN_ERR, 10095 LOG_TRACE_EVENT, 10096 "0047 Unrecognized link " 10097 "speed : %d\n", 10098 forced_link_speed); 10099 phba->cfg_link_speed = 10100 LPFC_USER_LINK_SPEED_AUTO; 10101 } 10102 } 10103 } 10104 10105 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 10106 length = phba->sli4_hba.max_cfg_param.max_xri - 10107 lpfc_sli4_get_els_iocb_cnt(phba); 10108 if (phba->cfg_hba_queue_depth > length) { 10109 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 10110 "3361 HBA queue depth changed from %d to %d\n", 10111 phba->cfg_hba_queue_depth, length); 10112 phba->cfg_hba_queue_depth = length; 10113 } 10114 10115 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 10116 LPFC_SLI_INTF_IF_TYPE_2) 10117 goto read_cfg_out; 10118 10119 /* get the pf# and vf# for SLI4 if_type 2 port */ 10120 length = (sizeof(struct lpfc_mbx_get_func_cfg) - 10121 sizeof(struct lpfc_sli4_cfg_mhdr)); 10122 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON, 10123 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG, 10124 
length, LPFC_SLI4_MBX_EMBED); 10125 10126 rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 10127 shdr = (union lpfc_sli4_cfg_shdr *) 10128 &pmb->u.mqe.un.sli4_config.header.cfg_shdr; 10129 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10130 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 10131 if (rc2 || shdr_status || shdr_add_status) { 10132 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10133 "3026 Mailbox failed , mbxCmd x%x " 10134 "GET_FUNCTION_CONFIG, mbxStatus x%x\n", 10135 bf_get(lpfc_mqe_command, &pmb->u.mqe), 10136 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 10137 goto read_cfg_out; 10138 } 10139 10140 /* search for fc_fcoe resrouce descriptor */ 10141 get_func_cfg = &pmb->u.mqe.un.get_func_cfg; 10142 10143 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0]; 10144 desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0; 10145 length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc); 10146 if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD) 10147 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH; 10148 else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH) 10149 goto read_cfg_out; 10150 10151 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) { 10152 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i); 10153 if (LPFC_RSRC_DESC_TYPE_FCFCOE == 10154 bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) { 10155 phba->sli4_hba.iov.pf_number = 10156 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc); 10157 phba->sli4_hba.iov.vf_number = 10158 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc); 10159 break; 10160 } 10161 } 10162 10163 if (i < LPFC_RSRC_DESC_MAX_NUM) 10164 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 10165 "3027 GET_FUNCTION_CONFIG: pf_number:%d, " 10166 "vf_number:%d\n", phba->sli4_hba.iov.pf_number, 10167 phba->sli4_hba.iov.vf_number); 10168 else 10169 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10170 "3028 GET_FUNCTION_CONFIG: failed to find " 10171 "Resource Descriptor:x%x\n", 10172 LPFC_RSRC_DESC_TYPE_FCFCOE); 10173 10174 read_cfg_out: 10175 mempool_free(pmb, phba->mbox_mem_pool); 10176 return rc; 10177 } 10178 10179 /** 10180 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port. 10181 * @phba: pointer to lpfc hba data structure. 10182 * 10183 * This routine is invoked to setup the port-side endian order when 10184 * the port if_type is 0. This routine has no function for other 10185 * if_types. 10186 * 10187 * Return codes 10188 * 0 - successful 10189 * -ENOMEM - No available memory 10190 * -EIO - The mailbox failed to complete successfully. 10191 **/ 10192 static int 10193 lpfc_setup_endian_order(struct lpfc_hba *phba) 10194 { 10195 LPFC_MBOXQ_t *mboxq; 10196 uint32_t if_type, rc = 0; 10197 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0, 10198 HOST_ENDIAN_HIGH_WORD1}; 10199 10200 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 10201 switch (if_type) { 10202 case LPFC_SLI_INTF_IF_TYPE_0: 10203 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 10204 GFP_KERNEL); 10205 if (!mboxq) { 10206 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10207 "0492 Unable to allocate memory for " 10208 "issuing SLI_CONFIG_SPECIAL mailbox " 10209 "command\n"); 10210 return -ENOMEM; 10211 } 10212 10213 /* 10214 * The SLI4_CONFIG_SPECIAL mailbox command requires the first 10215 * two words to contain special data values and no other data. 
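 * (endian_mb_data[] above supplies exactly those two words,
 * HOST_ENDIAN_LOW_WORD0 and HOST_ENDIAN_HIGH_WORD1, which are copied into
 * the MQE below.)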
10216 */ 10217 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t)); 10218 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data)); 10219 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 10220 if (rc != MBX_SUCCESS) { 10221 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10222 "0493 SLI_CONFIG_SPECIAL mailbox " 10223 "failed with status x%x\n", 10224 rc); 10225 rc = -EIO; 10226 } 10227 mempool_free(mboxq, phba->mbox_mem_pool); 10228 break; 10229 case LPFC_SLI_INTF_IF_TYPE_6: 10230 case LPFC_SLI_INTF_IF_TYPE_2: 10231 case LPFC_SLI_INTF_IF_TYPE_1: 10232 default: 10233 break; 10234 } 10235 return rc; 10236 } 10237 10238 /** 10239 * lpfc_sli4_queue_verify - Verify and update EQ counts 10240 * @phba: pointer to lpfc hba data structure. 10241 * 10242 * This routine is invoked to check the user settable queue counts for EQs. 10243 * After this routine is called the counts will be set to valid values that 10244 * adhere to the constraints of the system's interrupt vectors and the port's 10245 * queue resources. 10246 * 10247 * Return codes 10248 * 0 - successful 10249 * -ENOMEM - No available memory 10250 **/ 10251 static int 10252 lpfc_sli4_queue_verify(struct lpfc_hba *phba) 10253 { 10254 /* 10255 * Sanity check for configured queue parameters against the run-time 10256 * device parameters 10257 */ 10258 10259 if (phba->nvmet_support) { 10260 if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq) 10261 phba->cfg_nvmet_mrq = phba->cfg_hdw_queue; 10262 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX) 10263 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX; 10264 } 10265 10266 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10267 "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n", 10268 phba->cfg_hdw_queue, phba->cfg_irq_chann, 10269 phba->cfg_nvmet_mrq); 10270 10271 /* Get EQ depth from module parameter, fake the default for now */ 10272 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 10273 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 10274 10275 /* Get CQ depth from module parameter, fake the default for now */ 10276 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 10277 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 10278 return 0; 10279 } 10280 10281 static int 10282 lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx) 10283 { 10284 struct lpfc_queue *qdesc; 10285 u32 wqesize; 10286 int cpu; 10287 10288 cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ); 10289 /* Create Fast Path IO CQs */ 10290 if (phba->enab_exp_wqcq_pages) 10291 /* Increase the CQ size when WQEs contain an embedded cdb */ 10292 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, 10293 phba->sli4_hba.cq_esize, 10294 LPFC_CQE_EXP_COUNT, cpu); 10295 10296 else 10297 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10298 phba->sli4_hba.cq_esize, 10299 phba->sli4_hba.cq_ecount, cpu); 10300 if (!qdesc) { 10301 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10302 "0499 Failed allocate fast-path IO CQ (%d)\n", 10303 idx); 10304 return 1; 10305 } 10306 qdesc->qe_valid = 1; 10307 qdesc->hdwq = idx; 10308 qdesc->chann = cpu; 10309 phba->sli4_hba.hdwq[idx].io_cq = qdesc; 10310 10311 /* Create Fast Path IO WQs */ 10312 if (phba->enab_exp_wqcq_pages) { 10313 /* Increase the WQ size when WQEs contain an embedded cdb */ 10314 wqesize = (phba->fcp_embed_io) ? 
10315 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize; 10316 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, 10317 wqesize, 10318 LPFC_WQE_EXP_COUNT, cpu); 10319 } else 10320 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10321 phba->sli4_hba.wq_esize, 10322 phba->sli4_hba.wq_ecount, cpu); 10323 10324 if (!qdesc) { 10325 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10326 "0503 Failed allocate fast-path IO WQ (%d)\n", 10327 idx); 10328 return 1; 10329 } 10330 qdesc->hdwq = idx; 10331 qdesc->chann = cpu; 10332 phba->sli4_hba.hdwq[idx].io_wq = qdesc; 10333 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 10334 return 0; 10335 } 10336 10337 /** 10338 * lpfc_sli4_queue_create - Create all the SLI4 queues 10339 * @phba: pointer to lpfc hba data structure. 10340 * 10341 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA 10342 * operation. For each SLI4 queue type, the parameters such as queue entry 10343 * count (queue depth) shall be taken from the module parameter. For now, 10344 * we just use some constant number as place holder. 10345 * 10346 * Return codes 10347 * 0 - successful 10348 * -ENOMEM - No availble memory 10349 * -EIO - The mailbox failed to complete successfully. 10350 **/ 10351 int 10352 lpfc_sli4_queue_create(struct lpfc_hba *phba) 10353 { 10354 struct lpfc_queue *qdesc; 10355 int idx, cpu, eqcpu; 10356 struct lpfc_sli4_hdw_queue *qp; 10357 struct lpfc_vector_map_info *cpup; 10358 struct lpfc_vector_map_info *eqcpup; 10359 struct lpfc_eq_intr_info *eqi; 10360 10361 /* 10362 * Create HBA Record arrays. 10363 * Both NVME and FCP will share that same vectors / EQs 10364 */ 10365 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; 10366 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; 10367 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; 10368 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; 10369 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; 10370 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; 10371 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 10372 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 10373 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 10374 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 10375 10376 if (!phba->sli4_hba.hdwq) { 10377 phba->sli4_hba.hdwq = kcalloc( 10378 phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue), 10379 GFP_KERNEL); 10380 if (!phba->sli4_hba.hdwq) { 10381 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10382 "6427 Failed allocate memory for " 10383 "fast-path Hardware Queue array\n"); 10384 goto out_error; 10385 } 10386 /* Prepare hardware queues to take IO buffers */ 10387 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 10388 qp = &phba->sli4_hba.hdwq[idx]; 10389 spin_lock_init(&qp->io_buf_list_get_lock); 10390 spin_lock_init(&qp->io_buf_list_put_lock); 10391 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get); 10392 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); 10393 qp->get_io_bufs = 0; 10394 qp->put_io_bufs = 0; 10395 qp->total_io_bufs = 0; 10396 spin_lock_init(&qp->abts_io_buf_list_lock); 10397 INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list); 10398 qp->abts_scsi_io_bufs = 0; 10399 qp->abts_nvme_io_bufs = 0; 10400 INIT_LIST_HEAD(&qp->sgl_list); 10401 INIT_LIST_HEAD(&qp->cmd_rsp_buf_list); 10402 spin_lock_init(&qp->hdwq_lock); 10403 } 10404 } 10405 10406 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 10407 if (phba->nvmet_support) { 10408 phba->sli4_hba.nvmet_cqset = kcalloc( 10409 phba->cfg_nvmet_mrq, 10410 sizeof(struct lpfc_queue *), 10411 GFP_KERNEL); 10412 if (!phba->sli4_hba.nvmet_cqset) { 10413 
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10414 "3121 Fail allocate memory for " 10415 "fast-path CQ set array\n"); 10416 goto out_error; 10417 } 10418 phba->sli4_hba.nvmet_mrq_hdr = kcalloc( 10419 phba->cfg_nvmet_mrq, 10420 sizeof(struct lpfc_queue *), 10421 GFP_KERNEL); 10422 if (!phba->sli4_hba.nvmet_mrq_hdr) { 10423 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10424 "3122 Fail allocate memory for " 10425 "fast-path RQ set hdr array\n"); 10426 goto out_error; 10427 } 10428 phba->sli4_hba.nvmet_mrq_data = kcalloc( 10429 phba->cfg_nvmet_mrq, 10430 sizeof(struct lpfc_queue *), 10431 GFP_KERNEL); 10432 if (!phba->sli4_hba.nvmet_mrq_data) { 10433 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10434 "3124 Fail allocate memory for " 10435 "fast-path RQ set data array\n"); 10436 goto out_error; 10437 } 10438 } 10439 } 10440 10441 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); 10442 10443 /* Create HBA Event Queues (EQs) */ 10444 for_each_present_cpu(cpu) { 10445 /* We only want to create 1 EQ per vector, even though 10446 * multiple CPUs might be using that vector. so only 10447 * selects the CPUs that are LPFC_CPU_FIRST_IRQ. 10448 */ 10449 cpup = &phba->sli4_hba.cpu_map[cpu]; 10450 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) 10451 continue; 10452 10453 /* Get a ptr to the Hardware Queue associated with this CPU */ 10454 qp = &phba->sli4_hba.hdwq[cpup->hdwq]; 10455 10456 /* Allocate an EQ */ 10457 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10458 phba->sli4_hba.eq_esize, 10459 phba->sli4_hba.eq_ecount, cpu); 10460 if (!qdesc) { 10461 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10462 "0497 Failed allocate EQ (%d)\n", 10463 cpup->hdwq); 10464 goto out_error; 10465 } 10466 qdesc->qe_valid = 1; 10467 qdesc->hdwq = cpup->hdwq; 10468 qdesc->chann = cpu; /* First CPU this EQ is affinitized to */ 10469 qdesc->last_cpu = qdesc->chann; 10470 10471 /* Save the allocated EQ in the Hardware Queue */ 10472 qp->hba_eq = qdesc; 10473 10474 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu); 10475 list_add(&qdesc->cpu_list, &eqi->list); 10476 } 10477 10478 /* Now we need to populate the other Hardware Queues, that share 10479 * an IRQ vector, with the associated EQ ptr. 
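 * Each such CPU looks up the first CPU on its EQ via
 * lpfc_find_cpu_handle(..., LPFC_FIND_BY_EQ) and reuses that hardware
 * queue's hba_eq pointer rather than allocating another EQ.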
10480 */ 10481 for_each_present_cpu(cpu) { 10482 cpup = &phba->sli4_hba.cpu_map[cpu]; 10483 10484 /* Check for EQ already allocated in previous loop */ 10485 if (cpup->flag & LPFC_CPU_FIRST_IRQ) 10486 continue; 10487 10488 /* Check for multiple CPUs per hdwq */ 10489 qp = &phba->sli4_hba.hdwq[cpup->hdwq]; 10490 if (qp->hba_eq) 10491 continue; 10492 10493 /* We need to share an EQ for this hdwq */ 10494 eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ); 10495 eqcpup = &phba->sli4_hba.cpu_map[eqcpu]; 10496 qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq; 10497 } 10498 10499 /* Allocate IO Path SLI4 CQ/WQs */ 10500 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 10501 if (lpfc_alloc_io_wq_cq(phba, idx)) 10502 goto out_error; 10503 } 10504 10505 if (phba->nvmet_support) { 10506 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { 10507 cpu = lpfc_find_cpu_handle(phba, idx, 10508 LPFC_FIND_BY_HDWQ); 10509 qdesc = lpfc_sli4_queue_alloc(phba, 10510 LPFC_DEFAULT_PAGE_SIZE, 10511 phba->sli4_hba.cq_esize, 10512 phba->sli4_hba.cq_ecount, 10513 cpu); 10514 if (!qdesc) { 10515 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10516 "3142 Failed allocate NVME " 10517 "CQ Set (%d)\n", idx); 10518 goto out_error; 10519 } 10520 qdesc->qe_valid = 1; 10521 qdesc->hdwq = idx; 10522 qdesc->chann = cpu; 10523 phba->sli4_hba.nvmet_cqset[idx] = qdesc; 10524 } 10525 } 10526 10527 /* 10528 * Create Slow Path Completion Queues (CQs) 10529 */ 10530 10531 cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ); 10532 /* Create slow-path Mailbox Command Complete Queue */ 10533 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10534 phba->sli4_hba.cq_esize, 10535 phba->sli4_hba.cq_ecount, cpu); 10536 if (!qdesc) { 10537 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10538 "0500 Failed allocate slow-path mailbox CQ\n"); 10539 goto out_error; 10540 } 10541 qdesc->qe_valid = 1; 10542 phba->sli4_hba.mbx_cq = qdesc; 10543 10544 /* Create slow-path ELS Complete Queue */ 10545 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10546 phba->sli4_hba.cq_esize, 10547 phba->sli4_hba.cq_ecount, cpu); 10548 if (!qdesc) { 10549 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10550 "0501 Failed allocate slow-path ELS CQ\n"); 10551 goto out_error; 10552 } 10553 qdesc->qe_valid = 1; 10554 qdesc->chann = cpu; 10555 phba->sli4_hba.els_cq = qdesc; 10556 10557 10558 /* 10559 * Create Slow Path Work Queues (WQs) 10560 */ 10561 10562 /* Create Mailbox Command Queue */ 10563 10564 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10565 phba->sli4_hba.mq_esize, 10566 phba->sli4_hba.mq_ecount, cpu); 10567 if (!qdesc) { 10568 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10569 "0505 Failed allocate slow-path MQ\n"); 10570 goto out_error; 10571 } 10572 qdesc->chann = cpu; 10573 phba->sli4_hba.mbx_wq = qdesc; 10574 10575 /* 10576 * Create ELS Work Queues 10577 */ 10578 10579 /* Create slow-path ELS Work Queue */ 10580 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10581 phba->sli4_hba.wq_esize, 10582 phba->sli4_hba.wq_ecount, cpu); 10583 if (!qdesc) { 10584 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10585 "0504 Failed allocate slow-path ELS WQ\n"); 10586 goto out_error; 10587 } 10588 qdesc->chann = cpu; 10589 phba->sli4_hba.els_wq = qdesc; 10590 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 10591 10592 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 10593 /* Create NVME LS Complete Queue */ 10594 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10595 
phba->sli4_hba.cq_esize, 10596 phba->sli4_hba.cq_ecount, cpu); 10597 if (!qdesc) { 10598 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10599 "6079 Failed allocate NVME LS CQ\n"); 10600 goto out_error; 10601 } 10602 qdesc->chann = cpu; 10603 qdesc->qe_valid = 1; 10604 phba->sli4_hba.nvmels_cq = qdesc; 10605 10606 /* Create NVME LS Work Queue */ 10607 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10608 phba->sli4_hba.wq_esize, 10609 phba->sli4_hba.wq_ecount, cpu); 10610 if (!qdesc) { 10611 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10612 "6080 Failed allocate NVME LS WQ\n"); 10613 goto out_error; 10614 } 10615 qdesc->chann = cpu; 10616 phba->sli4_hba.nvmels_wq = qdesc; 10617 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 10618 } 10619 10620 /* 10621 * Create Receive Queue (RQ) 10622 */ 10623 10624 /* Create Receive Queue for header */ 10625 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10626 phba->sli4_hba.rq_esize, 10627 phba->sli4_hba.rq_ecount, cpu); 10628 if (!qdesc) { 10629 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10630 "0506 Failed allocate receive HRQ\n"); 10631 goto out_error; 10632 } 10633 phba->sli4_hba.hdr_rq = qdesc; 10634 10635 /* Create Receive Queue for data */ 10636 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10637 phba->sli4_hba.rq_esize, 10638 phba->sli4_hba.rq_ecount, cpu); 10639 if (!qdesc) { 10640 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10641 "0507 Failed allocate receive DRQ\n"); 10642 goto out_error; 10643 } 10644 phba->sli4_hba.dat_rq = qdesc; 10645 10646 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) && 10647 phba->nvmet_support) { 10648 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { 10649 cpu = lpfc_find_cpu_handle(phba, idx, 10650 LPFC_FIND_BY_HDWQ); 10651 /* Create NVMET Receive Queue for header */ 10652 qdesc = lpfc_sli4_queue_alloc(phba, 10653 LPFC_DEFAULT_PAGE_SIZE, 10654 phba->sli4_hba.rq_esize, 10655 LPFC_NVMET_RQE_DEF_COUNT, 10656 cpu); 10657 if (!qdesc) { 10658 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10659 "3146 Failed allocate " 10660 "receive HRQ\n"); 10661 goto out_error; 10662 } 10663 qdesc->hdwq = idx; 10664 phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc; 10665 10666 /* Only needed for header of RQ pair */ 10667 qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp), 10668 GFP_KERNEL, 10669 cpu_to_node(cpu)); 10670 if (qdesc->rqbp == NULL) { 10671 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10672 "6131 Failed allocate " 10673 "Header RQBP\n"); 10674 goto out_error; 10675 } 10676 10677 /* Put list in known state in case driver load fails. 
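 * An initialized (empty) rqb_buffer_list also lets the teardown path walk
 * the list safely even if no buffers were ever posted.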
*/ 10678 INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list); 10679 10680 /* Create NVMET Receive Queue for data */ 10681 qdesc = lpfc_sli4_queue_alloc(phba, 10682 LPFC_DEFAULT_PAGE_SIZE, 10683 phba->sli4_hba.rq_esize, 10684 LPFC_NVMET_RQE_DEF_COUNT, 10685 cpu); 10686 if (!qdesc) { 10687 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10688 "3156 Failed allocate " 10689 "receive DRQ\n"); 10690 goto out_error; 10691 } 10692 qdesc->hdwq = idx; 10693 phba->sli4_hba.nvmet_mrq_data[idx] = qdesc; 10694 } 10695 } 10696 10697 /* Clear NVME stats */ 10698 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 10699 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 10700 memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0, 10701 sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat)); 10702 } 10703 } 10704 10705 /* Clear SCSI stats */ 10706 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 10707 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 10708 memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0, 10709 sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat)); 10710 } 10711 } 10712 10713 return 0; 10714 10715 out_error: 10716 lpfc_sli4_queue_destroy(phba); 10717 return -ENOMEM; 10718 } 10719 10720 static inline void 10721 __lpfc_sli4_release_queue(struct lpfc_queue **qp) 10722 { 10723 if (*qp != NULL) { 10724 lpfc_sli4_queue_free(*qp); 10725 *qp = NULL; 10726 } 10727 } 10728 10729 static inline void 10730 lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max) 10731 { 10732 int idx; 10733 10734 if (*qs == NULL) 10735 return; 10736 10737 for (idx = 0; idx < max; idx++) 10738 __lpfc_sli4_release_queue(&(*qs)[idx]); 10739 10740 kfree(*qs); 10741 *qs = NULL; 10742 } 10743 10744 static inline void 10745 lpfc_sli4_release_hdwq(struct lpfc_hba *phba) 10746 { 10747 struct lpfc_sli4_hdw_queue *hdwq; 10748 struct lpfc_queue *eq; 10749 uint32_t idx; 10750 10751 hdwq = phba->sli4_hba.hdwq; 10752 10753 /* Loop thru all Hardware Queues */ 10754 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 10755 /* Free the CQ/WQ corresponding to the Hardware Queue */ 10756 lpfc_sli4_queue_free(hdwq[idx].io_cq); 10757 lpfc_sli4_queue_free(hdwq[idx].io_wq); 10758 hdwq[idx].hba_eq = NULL; 10759 hdwq[idx].io_cq = NULL; 10760 hdwq[idx].io_wq = NULL; 10761 if (phba->cfg_xpsgl && !phba->nvmet_support) 10762 lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]); 10763 lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]); 10764 } 10765 /* Loop thru all IRQ vectors */ 10766 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 10767 /* Free the EQ corresponding to the IRQ vector */ 10768 eq = phba->sli4_hba.hba_eq_hdl[idx].eq; 10769 lpfc_sli4_queue_free(eq); 10770 phba->sli4_hba.hba_eq_hdl[idx].eq = NULL; 10771 } 10772 } 10773 10774 /** 10775 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues 10776 * @phba: pointer to lpfc hba data structure. 10777 * 10778 * This routine is invoked to release all the SLI4 queues with the FCoE HBA 10779 * operation. 10780 * 10781 * Return codes 10782 * 0 - successful 10783 * -ENOMEM - No available memory 10784 * -EIO - The mailbox failed to complete successfully. 10785 **/ 10786 void 10787 lpfc_sli4_queue_destroy(struct lpfc_hba *phba) 10788 { 10789 /* 10790 * Set FREE_INIT before beginning to free the queues. 10791 * Wait until the users of queues to acknowledge to 10792 * release queues by clearing FREE_WAIT. 
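	 * (LPFC_QUEUE_FREE_INIT flags that teardown is in progress, while
	 * LPFC_QUEUE_FREE_WAIT appears to be held by any user still walking
	 * the queues; the loop below simply polls under hbalock until that
	 * user is done.)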
10793 */ 10794 spin_lock_irq(&phba->hbalock); 10795 phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT; 10796 while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) { 10797 spin_unlock_irq(&phba->hbalock); 10798 msleep(20); 10799 spin_lock_irq(&phba->hbalock); 10800 } 10801 spin_unlock_irq(&phba->hbalock); 10802 10803 lpfc_sli4_cleanup_poll_list(phba); 10804 10805 /* Release HBA eqs */ 10806 if (phba->sli4_hba.hdwq) 10807 lpfc_sli4_release_hdwq(phba); 10808 10809 if (phba->nvmet_support) { 10810 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset, 10811 phba->cfg_nvmet_mrq); 10812 10813 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr, 10814 phba->cfg_nvmet_mrq); 10815 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data, 10816 phba->cfg_nvmet_mrq); 10817 } 10818 10819 /* Release mailbox command work queue */ 10820 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq); 10821 10822 /* Release ELS work queue */ 10823 __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq); 10824 10825 /* Release ELS work queue */ 10826 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq); 10827 10828 /* Release unsolicited receive queue */ 10829 __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq); 10830 __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq); 10831 10832 /* Release ELS complete queue */ 10833 __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq); 10834 10835 /* Release NVME LS complete queue */ 10836 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq); 10837 10838 /* Release mailbox command complete queue */ 10839 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq); 10840 10841 /* Everything on this list has been freed */ 10842 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); 10843 10844 /* Done with freeing the queues */ 10845 spin_lock_irq(&phba->hbalock); 10846 phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT; 10847 spin_unlock_irq(&phba->hbalock); 10848 } 10849 10850 int 10851 lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq) 10852 { 10853 struct lpfc_rqb *rqbp; 10854 struct lpfc_dmabuf *h_buf; 10855 struct rqb_dmabuf *rqb_buffer; 10856 10857 rqbp = rq->rqbp; 10858 while (!list_empty(&rqbp->rqb_buffer_list)) { 10859 list_remove_head(&rqbp->rqb_buffer_list, h_buf, 10860 struct lpfc_dmabuf, list); 10861 10862 rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf); 10863 (rqbp->rqb_free_buffer)(phba, rqb_buffer); 10864 rqbp->buffer_count--; 10865 } 10866 return 1; 10867 } 10868 10869 static int 10870 lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq, 10871 struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map, 10872 int qidx, uint32_t qtype) 10873 { 10874 struct lpfc_sli_ring *pring; 10875 int rc; 10876 10877 if (!eq || !cq || !wq) { 10878 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10879 "6085 Fast-path %s (%d) not allocated\n", 10880 ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx); 10881 return -ENOMEM; 10882 } 10883 10884 /* create the Cq first */ 10885 rc = lpfc_cq_create(phba, cq, eq, 10886 (qtype == LPFC_MBOX) ? 
LPFC_MCQ : LPFC_WCQ, qtype); 10887 if (rc) { 10888 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10889 "6086 Failed setup of CQ (%d), rc = 0x%x\n", 10890 qidx, (uint32_t)rc); 10891 return rc; 10892 } 10893 10894 if (qtype != LPFC_MBOX) { 10895 /* Setup cq_map for fast lookup */ 10896 if (cq_map) 10897 *cq_map = cq->queue_id; 10898 10899 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10900 "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n", 10901 qidx, cq->queue_id, qidx, eq->queue_id); 10902 10903 /* create the wq */ 10904 rc = lpfc_wq_create(phba, wq, cq, qtype); 10905 if (rc) { 10906 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10907 "4618 Fail setup fastpath WQ (%d), rc = 0x%x\n", 10908 qidx, (uint32_t)rc); 10909 /* no need to tear down cq - caller will do so */ 10910 return rc; 10911 } 10912 10913 /* Bind this CQ/WQ to the NVME ring */ 10914 pring = wq->pring; 10915 pring->sli.sli4.wqp = (void *)wq; 10916 cq->pring = pring; 10917 10918 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10919 "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n", 10920 qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id); 10921 } else { 10922 rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX); 10923 if (rc) { 10924 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10925 "0539 Failed setup of slow-path MQ: " 10926 "rc = 0x%x\n", rc); 10927 /* no need to tear down cq - caller will do so */ 10928 return rc; 10929 } 10930 10931 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10932 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", 10933 phba->sli4_hba.mbx_wq->queue_id, 10934 phba->sli4_hba.mbx_cq->queue_id); 10935 } 10936 10937 return 0; 10938 } 10939 10940 /** 10941 * lpfc_setup_cq_lookup - Setup the CQ lookup table 10942 * @phba: pointer to lpfc hba data structure. 10943 * 10944 * This routine will populate the cq_lookup table by all 10945 * available CQ queue_id's. 10946 **/ 10947 static void 10948 lpfc_setup_cq_lookup(struct lpfc_hba *phba) 10949 { 10950 struct lpfc_queue *eq, *childq; 10951 int qidx; 10952 10953 memset(phba->sli4_hba.cq_lookup, 0, 10954 (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1))); 10955 /* Loop thru all IRQ vectors */ 10956 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 10957 /* Get the EQ corresponding to the IRQ vector */ 10958 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; 10959 if (!eq) 10960 continue; 10961 /* Loop through all CQs associated with that EQ */ 10962 list_for_each_entry(childq, &eq->child_list, list) { 10963 if (childq->queue_id > phba->sli4_hba.cq_max) 10964 continue; 10965 if (childq->subtype == LPFC_IO) 10966 phba->sli4_hba.cq_lookup[childq->queue_id] = 10967 childq; 10968 } 10969 } 10970 } 10971 10972 /** 10973 * lpfc_sli4_queue_setup - Set up all the SLI4 queues 10974 * @phba: pointer to lpfc hba data structure. 10975 * 10976 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA 10977 * operation. 10978 * 10979 * Return codes 10980 * 0 - successful 10981 * -ENOMEM - No available memory 10982 * -EIO - The mailbox failed to complete successfully. 
10983 **/ 10984 int 10985 lpfc_sli4_queue_setup(struct lpfc_hba *phba) 10986 { 10987 uint32_t shdr_status, shdr_add_status; 10988 union lpfc_sli4_cfg_shdr *shdr; 10989 struct lpfc_vector_map_info *cpup; 10990 struct lpfc_sli4_hdw_queue *qp; 10991 LPFC_MBOXQ_t *mboxq; 10992 int qidx, cpu; 10993 uint32_t length, usdelay; 10994 int rc = -ENOMEM; 10995 10996 /* Check for dual-ULP support */ 10997 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10998 if (!mboxq) { 10999 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11000 "3249 Unable to allocate memory for " 11001 "QUERY_FW_CFG mailbox command\n"); 11002 return -ENOMEM; 11003 } 11004 length = (sizeof(struct lpfc_mbx_query_fw_config) - 11005 sizeof(struct lpfc_sli4_cfg_mhdr)); 11006 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 11007 LPFC_MBOX_OPCODE_QUERY_FW_CFG, 11008 length, LPFC_SLI4_MBX_EMBED); 11009 11010 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 11011 11012 shdr = (union lpfc_sli4_cfg_shdr *) 11013 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 11014 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 11015 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 11016 if (shdr_status || shdr_add_status || rc) { 11017 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11018 "3250 QUERY_FW_CFG mailbox failed with status " 11019 "x%x add_status x%x, mbx status x%x\n", 11020 shdr_status, shdr_add_status, rc); 11021 mempool_free(mboxq, phba->mbox_mem_pool); 11022 rc = -ENXIO; 11023 goto out_error; 11024 } 11025 11026 phba->sli4_hba.fw_func_mode = 11027 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode; 11028 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode; 11029 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode; 11030 phba->sli4_hba.physical_port = 11031 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port; 11032 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11033 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, " 11034 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode, 11035 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode); 11036 11037 mempool_free(mboxq, phba->mbox_mem_pool); 11038 11039 /* 11040 * Set up HBA Event Queues (EQs) 11041 */ 11042 qp = phba->sli4_hba.hdwq; 11043 11044 /* Set up HBA event queue */ 11045 if (!qp) { 11046 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11047 "3147 Fast-path EQs not allocated\n"); 11048 rc = -ENOMEM; 11049 goto out_error; 11050 } 11051 11052 /* Loop thru all IRQ vectors */ 11053 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 11054 /* Create HBA Event Queues (EQs) in order */ 11055 for_each_present_cpu(cpu) { 11056 cpup = &phba->sli4_hba.cpu_map[cpu]; 11057 11058 /* Look for the CPU thats using that vector with 11059 * LPFC_CPU_FIRST_IRQ set. 
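			 * Only one CPU per IRQ vector carries that flag, so
			 * exactly one EQ gets created per vector even when
			 * several CPUs share it.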
11060 */ 11061 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) 11062 continue; 11063 if (qidx != cpup->eq) 11064 continue; 11065 11066 /* Create an EQ for that vector */ 11067 rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq, 11068 phba->cfg_fcp_imax); 11069 if (rc) { 11070 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11071 "0523 Failed setup of fast-path" 11072 " EQ (%d), rc = 0x%x\n", 11073 cpup->eq, (uint32_t)rc); 11074 goto out_destroy; 11075 } 11076 11077 /* Save the EQ for that vector in the hba_eq_hdl */ 11078 phba->sli4_hba.hba_eq_hdl[cpup->eq].eq = 11079 qp[cpup->hdwq].hba_eq; 11080 11081 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11082 "2584 HBA EQ setup: queue[%d]-id=%d\n", 11083 cpup->eq, 11084 qp[cpup->hdwq].hba_eq->queue_id); 11085 } 11086 } 11087 11088 /* Loop thru all Hardware Queues */ 11089 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { 11090 cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ); 11091 cpup = &phba->sli4_hba.cpu_map[cpu]; 11092 11093 /* Create the CQ/WQ corresponding to the Hardware Queue */ 11094 rc = lpfc_create_wq_cq(phba, 11095 phba->sli4_hba.hdwq[cpup->hdwq].hba_eq, 11096 qp[qidx].io_cq, 11097 qp[qidx].io_wq, 11098 &phba->sli4_hba.hdwq[qidx].io_cq_map, 11099 qidx, 11100 LPFC_IO); 11101 if (rc) { 11102 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11103 "0535 Failed to setup fastpath " 11104 "IO WQ/CQ (%d), rc = 0x%x\n", 11105 qidx, (uint32_t)rc); 11106 goto out_destroy; 11107 } 11108 } 11109 11110 /* 11111 * Set up Slow Path Complete Queues (CQs) 11112 */ 11113 11114 /* Set up slow-path MBOX CQ/MQ */ 11115 11116 if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) { 11117 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11118 "0528 %s not allocated\n", 11119 phba->sli4_hba.mbx_cq ? 11120 "Mailbox WQ" : "Mailbox CQ"); 11121 rc = -ENOMEM; 11122 goto out_destroy; 11123 } 11124 11125 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, 11126 phba->sli4_hba.mbx_cq, 11127 phba->sli4_hba.mbx_wq, 11128 NULL, 0, LPFC_MBOX); 11129 if (rc) { 11130 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11131 "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n", 11132 (uint32_t)rc); 11133 goto out_destroy; 11134 } 11135 if (phba->nvmet_support) { 11136 if (!phba->sli4_hba.nvmet_cqset) { 11137 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11138 "3165 Fast-path NVME CQ Set " 11139 "array not allocated\n"); 11140 rc = -ENOMEM; 11141 goto out_destroy; 11142 } 11143 if (phba->cfg_nvmet_mrq > 1) { 11144 rc = lpfc_cq_create_set(phba, 11145 phba->sli4_hba.nvmet_cqset, 11146 qp, 11147 LPFC_WCQ, LPFC_NVMET); 11148 if (rc) { 11149 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11150 "3164 Failed setup of NVME CQ " 11151 "Set, rc = 0x%x\n", 11152 (uint32_t)rc); 11153 goto out_destroy; 11154 } 11155 } else { 11156 /* Set up NVMET Receive Complete Queue */ 11157 rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0], 11158 qp[0].hba_eq, 11159 LPFC_WCQ, LPFC_NVMET); 11160 if (rc) { 11161 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11162 "6089 Failed setup NVMET CQ: " 11163 "rc = 0x%x\n", (uint32_t)rc); 11164 goto out_destroy; 11165 } 11166 phba->sli4_hba.nvmet_cqset[0]->chann = 0; 11167 11168 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11169 "6090 NVMET CQ setup: cq-id=%d, " 11170 "parent eq-id=%d\n", 11171 phba->sli4_hba.nvmet_cqset[0]->queue_id, 11172 qp[0].hba_eq->queue_id); 11173 } 11174 } 11175 11176 /* Set up slow-path ELS WQ/CQ */ 11177 if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) { 11178 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11179 "0530 ELS %s not 
allocated\n", 11180 phba->sli4_hba.els_cq ? "WQ" : "CQ"); 11181 rc = -ENOMEM; 11182 goto out_destroy; 11183 } 11184 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, 11185 phba->sli4_hba.els_cq, 11186 phba->sli4_hba.els_wq, 11187 NULL, 0, LPFC_ELS); 11188 if (rc) { 11189 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11190 "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n", 11191 (uint32_t)rc); 11192 goto out_destroy; 11193 } 11194 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11195 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", 11196 phba->sli4_hba.els_wq->queue_id, 11197 phba->sli4_hba.els_cq->queue_id); 11198 11199 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 11200 /* Set up NVME LS Complete Queue */ 11201 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) { 11202 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11203 "6091 LS %s not allocated\n", 11204 phba->sli4_hba.nvmels_cq ? "WQ" : "CQ"); 11205 rc = -ENOMEM; 11206 goto out_destroy; 11207 } 11208 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, 11209 phba->sli4_hba.nvmels_cq, 11210 phba->sli4_hba.nvmels_wq, 11211 NULL, 0, LPFC_NVME_LS); 11212 if (rc) { 11213 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11214 "0526 Failed setup of NVVME LS WQ/CQ: " 11215 "rc = 0x%x\n", (uint32_t)rc); 11216 goto out_destroy; 11217 } 11218 11219 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11220 "6096 ELS WQ setup: wq-id=%d, " 11221 "parent cq-id=%d\n", 11222 phba->sli4_hba.nvmels_wq->queue_id, 11223 phba->sli4_hba.nvmels_cq->queue_id); 11224 } 11225 11226 /* 11227 * Create NVMET Receive Queue (RQ) 11228 */ 11229 if (phba->nvmet_support) { 11230 if ((!phba->sli4_hba.nvmet_cqset) || 11231 (!phba->sli4_hba.nvmet_mrq_hdr) || 11232 (!phba->sli4_hba.nvmet_mrq_data)) { 11233 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11234 "6130 MRQ CQ Queues not " 11235 "allocated\n"); 11236 rc = -ENOMEM; 11237 goto out_destroy; 11238 } 11239 if (phba->cfg_nvmet_mrq > 1) { 11240 rc = lpfc_mrq_create(phba, 11241 phba->sli4_hba.nvmet_mrq_hdr, 11242 phba->sli4_hba.nvmet_mrq_data, 11243 phba->sli4_hba.nvmet_cqset, 11244 LPFC_NVMET); 11245 if (rc) { 11246 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11247 "6098 Failed setup of NVMET " 11248 "MRQ: rc = 0x%x\n", 11249 (uint32_t)rc); 11250 goto out_destroy; 11251 } 11252 11253 } else { 11254 rc = lpfc_rq_create(phba, 11255 phba->sli4_hba.nvmet_mrq_hdr[0], 11256 phba->sli4_hba.nvmet_mrq_data[0], 11257 phba->sli4_hba.nvmet_cqset[0], 11258 LPFC_NVMET); 11259 if (rc) { 11260 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11261 "6057 Failed setup of NVMET " 11262 "Receive Queue: rc = 0x%x\n", 11263 (uint32_t)rc); 11264 goto out_destroy; 11265 } 11266 11267 lpfc_printf_log( 11268 phba, KERN_INFO, LOG_INIT, 11269 "6099 NVMET RQ setup: hdr-rq-id=%d, " 11270 "dat-rq-id=%d parent cq-id=%d\n", 11271 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id, 11272 phba->sli4_hba.nvmet_mrq_data[0]->queue_id, 11273 phba->sli4_hba.nvmet_cqset[0]->queue_id); 11274 11275 } 11276 } 11277 11278 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { 11279 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11280 "0540 Receive Queue not allocated\n"); 11281 rc = -ENOMEM; 11282 goto out_destroy; 11283 } 11284 11285 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 11286 phba->sli4_hba.els_cq, LPFC_USOL); 11287 if (rc) { 11288 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11289 "0541 Failed setup of Receive Queue: " 11290 "rc = 0x%x\n", (uint32_t)rc); 11291 goto out_destroy; 11292 } 11293 11294 lpfc_printf_log(phba, KERN_INFO, 
LOG_INIT, 11295 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " 11296 "parent cq-id=%d\n", 11297 phba->sli4_hba.hdr_rq->queue_id, 11298 phba->sli4_hba.dat_rq->queue_id, 11299 phba->sli4_hba.els_cq->queue_id); 11300 11301 if (phba->cfg_fcp_imax) 11302 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax; 11303 else 11304 usdelay = 0; 11305 11306 for (qidx = 0; qidx < phba->cfg_irq_chann; 11307 qidx += LPFC_MAX_EQ_DELAY_EQID_CNT) 11308 lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT, 11309 usdelay); 11310 11311 if (phba->sli4_hba.cq_max) { 11312 kfree(phba->sli4_hba.cq_lookup); 11313 phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1), 11314 sizeof(struct lpfc_queue *), GFP_KERNEL); 11315 if (!phba->sli4_hba.cq_lookup) { 11316 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11317 "0549 Failed setup of CQ Lookup table: " 11318 "size 0x%x\n", phba->sli4_hba.cq_max); 11319 rc = -ENOMEM; 11320 goto out_destroy; 11321 } 11322 lpfc_setup_cq_lookup(phba); 11323 } 11324 return 0; 11325 11326 out_destroy: 11327 lpfc_sli4_queue_unset(phba); 11328 out_error: 11329 return rc; 11330 } 11331 11332 /** 11333 * lpfc_sli4_queue_unset - Unset all the SLI4 queues 11334 * @phba: pointer to lpfc hba data structure. 11335 * 11336 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA 11337 * operation. 11338 * 11339 * Return codes 11340 * 0 - successful 11341 * -ENOMEM - No available memory 11342 * -EIO - The mailbox failed to complete successfully. 11343 **/ 11344 void 11345 lpfc_sli4_queue_unset(struct lpfc_hba *phba) 11346 { 11347 struct lpfc_sli4_hdw_queue *qp; 11348 struct lpfc_queue *eq; 11349 int qidx; 11350 11351 /* Unset mailbox command work queue */ 11352 if (phba->sli4_hba.mbx_wq) 11353 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 11354 11355 /* Unset NVME LS work queue */ 11356 if (phba->sli4_hba.nvmels_wq) 11357 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq); 11358 11359 /* Unset ELS work queue */ 11360 if (phba->sli4_hba.els_wq) 11361 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 11362 11363 /* Unset unsolicited receive queue */ 11364 if (phba->sli4_hba.hdr_rq) 11365 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, 11366 phba->sli4_hba.dat_rq); 11367 11368 /* Unset mailbox command complete queue */ 11369 if (phba->sli4_hba.mbx_cq) 11370 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 11371 11372 /* Unset ELS complete queue */ 11373 if (phba->sli4_hba.els_cq) 11374 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 11375 11376 /* Unset NVME LS complete queue */ 11377 if (phba->sli4_hba.nvmels_cq) 11378 lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq); 11379 11380 if (phba->nvmet_support) { 11381 /* Unset NVMET MRQ queue */ 11382 if (phba->sli4_hba.nvmet_mrq_hdr) { 11383 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) 11384 lpfc_rq_destroy( 11385 phba, 11386 phba->sli4_hba.nvmet_mrq_hdr[qidx], 11387 phba->sli4_hba.nvmet_mrq_data[qidx]); 11388 } 11389 11390 /* Unset NVMET CQ Set complete queue */ 11391 if (phba->sli4_hba.nvmet_cqset) { 11392 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) 11393 lpfc_cq_destroy( 11394 phba, phba->sli4_hba.nvmet_cqset[qidx]); 11395 } 11396 } 11397 11398 /* Unset fast-path SLI4 queues */ 11399 if (phba->sli4_hba.hdwq) { 11400 /* Loop thru all Hardware Queues */ 11401 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { 11402 /* Destroy the CQ/WQ corresponding to Hardware Queue */ 11403 qp = &phba->sli4_hba.hdwq[qidx]; 11404 lpfc_wq_destroy(phba, qp->io_wq); 11405 lpfc_cq_destroy(phba, qp->io_cq); 11406 } 11407 /* Loop thru all IRQ 
vectors */ 11408 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 11409 /* Destroy the EQ corresponding to the IRQ vector */ 11410 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; 11411 lpfc_eq_destroy(phba, eq); 11412 } 11413 } 11414 11415 kfree(phba->sli4_hba.cq_lookup); 11416 phba->sli4_hba.cq_lookup = NULL; 11417 phba->sli4_hba.cq_max = 0; 11418 } 11419 11420 /** 11421 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool 11422 * @phba: pointer to lpfc hba data structure. 11423 * 11424 * This routine is invoked to allocate and set up a pool of completion queue 11425 * events. The body of the completion queue event is a completion queue entry 11426 * CQE. For now, this pool is used for the interrupt service routine to queue 11427 * the following HBA completion queue events for the worker thread to process: 11428 * - Mailbox asynchronous events 11429 * - Receive queue completion unsolicited events 11430 * Later, this can be used for all the slow-path events. 11431 * 11432 * Return codes 11433 * 0 - successful 11434 * -ENOMEM - No available memory 11435 **/ 11436 static int 11437 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba) 11438 { 11439 struct lpfc_cq_event *cq_event; 11440 int i; 11441 11442 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) { 11443 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL); 11444 if (!cq_event) 11445 goto out_pool_create_fail; 11446 list_add_tail(&cq_event->list, 11447 &phba->sli4_hba.sp_cqe_event_pool); 11448 } 11449 return 0; 11450 11451 out_pool_create_fail: 11452 lpfc_sli4_cq_event_pool_destroy(phba); 11453 return -ENOMEM; 11454 } 11455 11456 /** 11457 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool 11458 * @phba: pointer to lpfc hba data structure. 11459 * 11460 * This routine is invoked to free the pool of completion queue events at 11461 * driver unload time. Note that, it is the responsibility of the driver 11462 * cleanup routine to free all the outstanding completion-queue events 11463 * allocated from this pool back into the pool before invoking this routine 11464 * to destroy the pool. 11465 **/ 11466 static void 11467 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba) 11468 { 11469 struct lpfc_cq_event *cq_event, *next_cq_event; 11470 11471 list_for_each_entry_safe(cq_event, next_cq_event, 11472 &phba->sli4_hba.sp_cqe_event_pool, list) { 11473 list_del(&cq_event->list); 11474 kfree(cq_event); 11475 } 11476 } 11477 11478 /** 11479 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 11480 * @phba: pointer to lpfc hba data structure. 11481 * 11482 * This routine is the lock free version of the API invoked to allocate a 11483 * completion-queue event from the free pool. 11484 * 11485 * Return: Pointer to the newly allocated completion-queue event if successful 11486 * NULL otherwise. 11487 **/ 11488 struct lpfc_cq_event * 11489 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 11490 { 11491 struct lpfc_cq_event *cq_event = NULL; 11492 11493 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event, 11494 struct lpfc_cq_event, list); 11495 return cq_event; 11496 } 11497 11498 /** 11499 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 11500 * @phba: pointer to lpfc hba data structure. 11501 * 11502 * This routine is the lock version of the API invoked to allocate a 11503 * completion-queue event from the free pool. 
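 * It wraps the lock-free __lpfc_sli4_cq_event_alloc() with phba->hbalock
 * held.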
 *
 * Return: Pointer to the newly allocated completion-queue event if successful
 *         NULL otherwise.
 **/
struct lpfc_cq_event *
lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	cq_event = __lpfc_sli4_cq_event_alloc(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return cq_event;
}

/**
 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion queue event to be freed.
 *
 * This routine is the lock free version of the API invoked to release a
 * completion-queue event back into the free pool.
 **/
void
__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
			     struct lpfc_cq_event *cq_event)
{
	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
}

/**
 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion queue event to be freed.
 *
 * This routine is the lock version of the API invoked to release a
 * completion-queue event back into the free pool.
 **/
void
lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
			   struct lpfc_cq_event *cq_event)
{
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli4_cq_event_release(phba, cq_event);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/**
 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine moves all pending completion-queue events back into the
 * free pool for device reset.
 **/
static void
lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
{
	LIST_HEAD(cq_event_list);
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	/* Retrieve all the pending WCQEs from pending WCQE lists */

	/* Pending ELS XRI abort events */
	spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
			 &cq_event_list);
	spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);

	/* Pending async events */
	spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
			 &cq_event_list);
	spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);

	while (!list_empty(&cq_event_list)) {
		list_remove_head(&cq_event_list, cq_event,
				 struct lpfc_cq_event, list);
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}

/**
 * lpfc_pci_function_reset - Reset pci function.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to request a PCI function reset. It destroys
 * all resources assigned to the PCI function which originates this request.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
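 *
 * Note: on if_type 0 the reset is requested through the SLI_FUNCTION_RESET
 * mailbox command; on if_type 2 and 6 the port is reset through the
 * SLIPORT control register and then polled for RDY.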
11599 **/ 11600 int 11601 lpfc_pci_function_reset(struct lpfc_hba *phba) 11602 { 11603 LPFC_MBOXQ_t *mboxq; 11604 uint32_t rc = 0, if_type; 11605 uint32_t shdr_status, shdr_add_status; 11606 uint32_t rdy_chk; 11607 uint32_t port_reset = 0; 11608 union lpfc_sli4_cfg_shdr *shdr; 11609 struct lpfc_register reg_data; 11610 uint16_t devid; 11611 11612 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 11613 switch (if_type) { 11614 case LPFC_SLI_INTF_IF_TYPE_0: 11615 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 11616 GFP_KERNEL); 11617 if (!mboxq) { 11618 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11619 "0494 Unable to allocate memory for " 11620 "issuing SLI_FUNCTION_RESET mailbox " 11621 "command\n"); 11622 return -ENOMEM; 11623 } 11624 11625 /* Setup PCI function reset mailbox-ioctl command */ 11626 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 11627 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0, 11628 LPFC_SLI4_MBX_EMBED); 11629 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 11630 shdr = (union lpfc_sli4_cfg_shdr *) 11631 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 11632 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 11633 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 11634 &shdr->response); 11635 mempool_free(mboxq, phba->mbox_mem_pool); 11636 if (shdr_status || shdr_add_status || rc) { 11637 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11638 "0495 SLI_FUNCTION_RESET mailbox " 11639 "failed with status x%x add_status x%x," 11640 " mbx status x%x\n", 11641 shdr_status, shdr_add_status, rc); 11642 rc = -ENXIO; 11643 } 11644 break; 11645 case LPFC_SLI_INTF_IF_TYPE_2: 11646 case LPFC_SLI_INTF_IF_TYPE_6: 11647 wait: 11648 /* 11649 * Poll the Port Status Register and wait for RDY for 11650 * up to 30 seconds. If the port doesn't respond, treat 11651 * it as an error. 11652 */ 11653 for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) { 11654 if (lpfc_readl(phba->sli4_hba.u.if_type2. 11655 STATUSregaddr, ®_data.word0)) { 11656 rc = -ENODEV; 11657 goto out; 11658 } 11659 if (bf_get(lpfc_sliport_status_rdy, ®_data)) 11660 break; 11661 msleep(20); 11662 } 11663 11664 if (!bf_get(lpfc_sliport_status_rdy, ®_data)) { 11665 phba->work_status[0] = readl( 11666 phba->sli4_hba.u.if_type2.ERR1regaddr); 11667 phba->work_status[1] = readl( 11668 phba->sli4_hba.u.if_type2.ERR2regaddr); 11669 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11670 "2890 Port not ready, port status reg " 11671 "0x%x error 1=0x%x, error 2=0x%x\n", 11672 reg_data.word0, 11673 phba->work_status[0], 11674 phba->work_status[1]); 11675 rc = -ENODEV; 11676 goto out; 11677 } 11678 11679 if (bf_get(lpfc_sliport_status_pldv, ®_data)) 11680 lpfc_pldv_detect = true; 11681 11682 if (!port_reset) { 11683 /* 11684 * Reset the port now 11685 */ 11686 reg_data.word0 = 0; 11687 bf_set(lpfc_sliport_ctrl_end, ®_data, 11688 LPFC_SLIPORT_LITTLE_ENDIAN); 11689 bf_set(lpfc_sliport_ctrl_ip, ®_data, 11690 LPFC_SLIPORT_INIT_PORT); 11691 writel(reg_data.word0, phba->sli4_hba.u.if_type2. 11692 CTRLregaddr); 11693 /* flush */ 11694 pci_read_config_word(phba->pcidev, 11695 PCI_DEVICE_ID, &devid); 11696 11697 port_reset = 1; 11698 msleep(20); 11699 goto wait; 11700 } else if (bf_get(lpfc_sliport_status_rn, ®_data)) { 11701 rc = -ENODEV; 11702 goto out; 11703 } 11704 break; 11705 11706 case LPFC_SLI_INTF_IF_TYPE_1: 11707 default: 11708 break; 11709 } 11710 11711 out: 11712 /* Catch the not-ready port failure after a port reset. 
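	 * Any earlier failure is reported here and collapsed into -ENODEV,
	 * with a hint that a firmware reset may recover the port.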
*/ 11713 if (rc) { 11714 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11715 "3317 HBA not functional: IP Reset Failed " 11716 "try: echo fw_reset > board_mode\n"); 11717 rc = -ENODEV; 11718 } 11719 11720 return rc; 11721 } 11722 11723 /** 11724 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space. 11725 * @phba: pointer to lpfc hba data structure. 11726 * 11727 * This routine is invoked to set up the PCI device memory space for device 11728 * with SLI-4 interface spec. 11729 * 11730 * Return codes 11731 * 0 - successful 11732 * other values - error 11733 **/ 11734 static int 11735 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) 11736 { 11737 struct pci_dev *pdev = phba->pcidev; 11738 unsigned long bar0map_len, bar1map_len, bar2map_len; 11739 int error; 11740 uint32_t if_type; 11741 11742 if (!pdev) 11743 return -ENODEV; 11744 11745 /* Set the device DMA mask size */ 11746 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 11747 if (error) 11748 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 11749 if (error) 11750 return error; 11751 11752 /* 11753 * The BARs and register set definitions and offset locations are 11754 * dependent on the if_type. 11755 */ 11756 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, 11757 &phba->sli4_hba.sli_intf.word0)) { 11758 return -ENODEV; 11759 } 11760 11761 /* There is no SLI3 failback for SLI4 devices. */ 11762 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) != 11763 LPFC_SLI_INTF_VALID) { 11764 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11765 "2894 SLI_INTF reg contents invalid " 11766 "sli_intf reg 0x%x\n", 11767 phba->sli4_hba.sli_intf.word0); 11768 return -ENODEV; 11769 } 11770 11771 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 11772 /* 11773 * Get the bus address of SLI4 device Bar regions and the 11774 * number of bytes required by each mapping. The mapping of the 11775 * particular PCI BARs regions is dependent on the type of 11776 * SLI4 device. 
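	 * (Roughly: PCI_64BIT_BAR0 holds the SLI4 config registers; on
	 * if_type 0, PCI_64BIT_BAR2 maps the control registers and
	 * PCI_64BIT_BAR4 the doorbells; on if_type 6, PCI_64BIT_BAR2 maps
	 * the doorbells and PCI_64BIT_BAR4 the DPP region.)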
11777 */ 11778 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) { 11779 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0); 11780 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0); 11781 11782 /* 11783 * Map SLI4 PCI Config Space Register base to a kernel virtual 11784 * addr 11785 */ 11786 phba->sli4_hba.conf_regs_memmap_p = 11787 ioremap(phba->pci_bar0_map, bar0map_len); 11788 if (!phba->sli4_hba.conf_regs_memmap_p) { 11789 dev_printk(KERN_ERR, &pdev->dev, 11790 "ioremap failed for SLI4 PCI config " 11791 "registers.\n"); 11792 return -ENODEV; 11793 } 11794 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p; 11795 /* Set up BAR0 PCI config space register memory map */ 11796 lpfc_sli4_bar0_register_memmap(phba, if_type); 11797 } else { 11798 phba->pci_bar0_map = pci_resource_start(pdev, 1); 11799 bar0map_len = pci_resource_len(pdev, 1); 11800 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 11801 dev_printk(KERN_ERR, &pdev->dev, 11802 "FATAL - No BAR0 mapping for SLI4, if_type 2\n"); 11803 return -ENODEV; 11804 } 11805 phba->sli4_hba.conf_regs_memmap_p = 11806 ioremap(phba->pci_bar0_map, bar0map_len); 11807 if (!phba->sli4_hba.conf_regs_memmap_p) { 11808 dev_printk(KERN_ERR, &pdev->dev, 11809 "ioremap failed for SLI4 PCI config " 11810 "registers.\n"); 11811 return -ENODEV; 11812 } 11813 lpfc_sli4_bar0_register_memmap(phba, if_type); 11814 } 11815 11816 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { 11817 if (pci_resource_start(pdev, PCI_64BIT_BAR2)) { 11818 /* 11819 * Map SLI4 if type 0 HBA Control Register base to a 11820 * kernel virtual address and setup the registers. 11821 */ 11822 phba->pci_bar1_map = pci_resource_start(pdev, 11823 PCI_64BIT_BAR2); 11824 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); 11825 phba->sli4_hba.ctrl_regs_memmap_p = 11826 ioremap(phba->pci_bar1_map, 11827 bar1map_len); 11828 if (!phba->sli4_hba.ctrl_regs_memmap_p) { 11829 dev_err(&pdev->dev, 11830 "ioremap failed for SLI4 HBA " 11831 "control registers.\n"); 11832 error = -ENOMEM; 11833 goto out_iounmap_conf; 11834 } 11835 phba->pci_bar2_memmap_p = 11836 phba->sli4_hba.ctrl_regs_memmap_p; 11837 lpfc_sli4_bar1_register_memmap(phba, if_type); 11838 } else { 11839 error = -ENOMEM; 11840 goto out_iounmap_conf; 11841 } 11842 } 11843 11844 if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) && 11845 (pci_resource_start(pdev, PCI_64BIT_BAR2))) { 11846 /* 11847 * Map SLI4 if type 6 HBA Doorbell Register base to a kernel 11848 * virtual address and setup the registers. 11849 */ 11850 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2); 11851 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); 11852 phba->sli4_hba.drbl_regs_memmap_p = 11853 ioremap(phba->pci_bar1_map, bar1map_len); 11854 if (!phba->sli4_hba.drbl_regs_memmap_p) { 11855 dev_err(&pdev->dev, 11856 "ioremap failed for SLI4 HBA doorbell registers.\n"); 11857 error = -ENOMEM; 11858 goto out_iounmap_conf; 11859 } 11860 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p; 11861 lpfc_sli4_bar1_register_memmap(phba, if_type); 11862 } 11863 11864 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { 11865 if (pci_resource_start(pdev, PCI_64BIT_BAR4)) { 11866 /* 11867 * Map SLI4 if type 0 HBA Doorbell Register base to 11868 * a kernel virtual address and setup the registers. 
11869 */ 11870 phba->pci_bar2_map = pci_resource_start(pdev, 11871 PCI_64BIT_BAR4); 11872 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); 11873 phba->sli4_hba.drbl_regs_memmap_p = 11874 ioremap(phba->pci_bar2_map, 11875 bar2map_len); 11876 if (!phba->sli4_hba.drbl_regs_memmap_p) { 11877 dev_err(&pdev->dev, 11878 "ioremap failed for SLI4 HBA" 11879 " doorbell registers.\n"); 11880 error = -ENOMEM; 11881 goto out_iounmap_ctrl; 11882 } 11883 phba->pci_bar4_memmap_p = 11884 phba->sli4_hba.drbl_regs_memmap_p; 11885 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); 11886 if (error) 11887 goto out_iounmap_all; 11888 } else { 11889 error = -ENOMEM; 11890 goto out_iounmap_all; 11891 } 11892 } 11893 11894 if (if_type == LPFC_SLI_INTF_IF_TYPE_6 && 11895 pci_resource_start(pdev, PCI_64BIT_BAR4)) { 11896 /* 11897 * Map SLI4 if type 6 HBA DPP Register base to a kernel 11898 * virtual address and setup the registers. 11899 */ 11900 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4); 11901 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); 11902 phba->sli4_hba.dpp_regs_memmap_p = 11903 ioremap(phba->pci_bar2_map, bar2map_len); 11904 if (!phba->sli4_hba.dpp_regs_memmap_p) { 11905 dev_err(&pdev->dev, 11906 "ioremap failed for SLI4 HBA dpp registers.\n"); 11907 error = -ENOMEM; 11908 goto out_iounmap_ctrl; 11909 } 11910 phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p; 11911 } 11912 11913 /* Set up the EQ/CQ register handeling functions now */ 11914 switch (if_type) { 11915 case LPFC_SLI_INTF_IF_TYPE_0: 11916 case LPFC_SLI_INTF_IF_TYPE_2: 11917 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr; 11918 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db; 11919 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db; 11920 break; 11921 case LPFC_SLI_INTF_IF_TYPE_6: 11922 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr; 11923 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db; 11924 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db; 11925 break; 11926 default: 11927 break; 11928 } 11929 11930 return 0; 11931 11932 out_iounmap_all: 11933 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 11934 out_iounmap_ctrl: 11935 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 11936 out_iounmap_conf: 11937 iounmap(phba->sli4_hba.conf_regs_memmap_p); 11938 11939 return error; 11940 } 11941 11942 /** 11943 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space. 11944 * @phba: pointer to lpfc hba data structure. 11945 * 11946 * This routine is invoked to unset the PCI device memory space for device 11947 * with SLI-4 interface spec. 
11948 **/ 11949 static void 11950 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) 11951 { 11952 uint32_t if_type; 11953 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 11954 11955 switch (if_type) { 11956 case LPFC_SLI_INTF_IF_TYPE_0: 11957 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 11958 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 11959 iounmap(phba->sli4_hba.conf_regs_memmap_p); 11960 break; 11961 case LPFC_SLI_INTF_IF_TYPE_2: 11962 iounmap(phba->sli4_hba.conf_regs_memmap_p); 11963 break; 11964 case LPFC_SLI_INTF_IF_TYPE_6: 11965 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 11966 iounmap(phba->sli4_hba.conf_regs_memmap_p); 11967 if (phba->sli4_hba.dpp_regs_memmap_p) 11968 iounmap(phba->sli4_hba.dpp_regs_memmap_p); 11969 break; 11970 case LPFC_SLI_INTF_IF_TYPE_1: 11971 default: 11972 dev_printk(KERN_ERR, &phba->pcidev->dev, 11973 "FATAL - unsupported SLI4 interface type - %d\n", 11974 if_type); 11975 break; 11976 } 11977 } 11978 11979 /** 11980 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device 11981 * @phba: pointer to lpfc hba data structure. 11982 * 11983 * This routine is invoked to enable the MSI-X interrupt vectors to device 11984 * with SLI-3 interface specs. 11985 * 11986 * Return codes 11987 * 0 - successful 11988 * other values - error 11989 **/ 11990 static int 11991 lpfc_sli_enable_msix(struct lpfc_hba *phba) 11992 { 11993 int rc; 11994 LPFC_MBOXQ_t *pmb; 11995 11996 /* Set up MSI-X multi-message vectors */ 11997 rc = pci_alloc_irq_vectors(phba->pcidev, 11998 LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX); 11999 if (rc < 0) { 12000 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12001 "0420 PCI enable MSI-X failed (%d)\n", rc); 12002 goto vec_fail_out; 12003 } 12004 12005 /* 12006 * Assign MSI-X vectors to interrupt handlers 12007 */ 12008 12009 /* vector-0 is associated to slow-path handler */ 12010 rc = request_irq(pci_irq_vector(phba->pcidev, 0), 12011 &lpfc_sli_sp_intr_handler, 0, 12012 LPFC_SP_DRIVER_HANDLER_NAME, phba); 12013 if (rc) { 12014 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 12015 "0421 MSI-X slow-path request_irq failed " 12016 "(%d)\n", rc); 12017 goto msi_fail_out; 12018 } 12019 12020 /* vector-1 is associated to fast-path handler */ 12021 rc = request_irq(pci_irq_vector(phba->pcidev, 1), 12022 &lpfc_sli_fp_intr_handler, 0, 12023 LPFC_FP_DRIVER_HANDLER_NAME, phba); 12024 12025 if (rc) { 12026 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 12027 "0429 MSI-X fast-path request_irq failed " 12028 "(%d)\n", rc); 12029 goto irq_fail_out; 12030 } 12031 12032 /* 12033 * Configure HBA MSI-X attention conditions to messages 12034 */ 12035 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12036 12037 if (!pmb) { 12038 rc = -ENOMEM; 12039 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 12040 "0474 Unable to allocate memory for issuing " 12041 "MBOX_CONFIG_MSI command\n"); 12042 goto mem_fail_out; 12043 } 12044 rc = lpfc_config_msi(phba, pmb); 12045 if (rc) 12046 goto mbx_fail_out; 12047 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 12048 if (rc != MBX_SUCCESS) { 12049 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 12050 "0351 Config MSI mailbox command failed, " 12051 "mbxCmd x%x, mbxStatus x%x\n", 12052 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus); 12053 goto mbx_fail_out; 12054 } 12055 12056 /* Free memory allocated for mailbox command */ 12057 mempool_free(pmb, phba->mbox_mem_pool); 12058 return rc; 12059 12060 mbx_fail_out: 12061 /* Free memory allocated for mailbox command */ 12062 mempool_free(pmb, 
			phba->mbox_mem_pool);

mem_fail_out:
	/* free the irq already requested */
	free_irq(pci_irq_vector(phba->pcidev, 1), phba);

irq_fail_out:
	/* free the irq already requested */
	free_irq(pci_irq_vector(phba->pcidev, 0), phba);

msi_fail_out:
	/* Unconfigure MSI-X capability structure */
	pci_free_irq_vectors(phba->pcidev);

vec_fail_out:
	return rc;
}

/**
 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI interrupt mode for a device with
 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
 * enable the MSI vector. The device driver is responsible for calling
 * request_irq() to register the MSI vector with an interrupt handler, which
 * is done in this function.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 */
static int
lpfc_sli_enable_msi(struct lpfc_hba *phba)
{
	int rc;

	rc = pci_enable_msi(phba->pcidev);
	if (!rc)
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0462 PCI enable MSI mode success.\n");
	else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0471 PCI enable MSI mode failed (%d)\n", rc);
		return rc;
	}

	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
			 0, LPFC_DRIVER_NAME, phba);
	if (rc) {
		pci_disable_msi(phba->pcidev);
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0478 MSI request_irq failed (%d)\n", rc);
	}
	return rc;
}

/**
 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
 *
 * This routine is invoked to enable device interrupt and associate the
 * driver's interrupt handler(s) with interrupt vector(s) on a device with
 * the SLI-3 interface spec. Depending on the interrupt mode configured for
 * the driver, it will try to fall back from the configured interrupt mode
 * to an interrupt mode which is supported by the platform, kernel, and
 * device, in the order of:
 * MSI-X -> MSI -> IRQ.
 *
 * Return codes
 *	0 - successful (INTx mode)
 *	1 - successful (MSI mode)
 *	2 - successful (MSI-X mode)
 *	LPFC_INTR_ERROR - failed to enable any interrupt mode
 **/
static uint32_t
lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
{
	uint32_t intr_mode = LPFC_INTR_ERROR;
	int retval;

	/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
	retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
	if (retval)
		return intr_mode;
	phba->hba_flag &= ~HBA_NEEDS_CFG_PORT;

	if (cfg_mode == 2) {
		/* Now, try to enable MSI-X interrupt mode */
		retval = lpfc_sli_enable_msix(phba);
		if (!retval) {
			/* Indicate initialization to MSI-X mode */
			phba->intr_type = MSIX;
			intr_mode = 2;
		}
	}

	/* Fallback to MSI if MSI-X initialization failed */
	if (cfg_mode >= 1 && phba->intr_type == NONE) {
		retval = lpfc_sli_enable_msi(phba);
		if (!retval) {
			/* Indicate initialization to MSI mode */
			phba->intr_type = MSI;
			intr_mode = 1;
		}
	}

	/* Fallback to INTx if both MSI-X/MSI initialization failed */
	if (phba->intr_type == NONE) {
		retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
		if (!retval) {
			/* Indicate initialization to INTx mode */
			phba->intr_type = INTx;
			intr_mode = 0;
		}
	}
	return intr_mode;
}

/**
 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable device interrupt and disassociate the
 * driver's interrupt handler(s) from the interrupt vector(s) of a device
 * with the SLI-3 interface spec. Depending on the interrupt mode, the driver
 * will release the interrupt vector(s) for the message signaled interrupt.
 **/
static void
lpfc_sli_disable_intr(struct lpfc_hba *phba)
{
	int nr_irqs, i;

	if (phba->intr_type == MSIX)
		nr_irqs = LPFC_MSIX_VECTORS;
	else
		nr_irqs = 1;

	for (i = 0; i < nr_irqs; i++)
		free_irq(pci_irq_vector(phba->pcidev, i), phba);
	pci_free_irq_vectors(phba->pcidev);

	/* Reset interrupt management states */
	phba->intr_type = NONE;
	phba->sli.slistat.sli_intr = 0;
}

/**
 * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue
 * @phba: pointer to lpfc hba data structure.
 * @id: EQ vector index or Hardware Queue index
 * @match: LPFC_FIND_BY_EQ = match by EQ
 *         LPFC_FIND_BY_HDWQ = match by Hardware Queue
 * Return the CPU that matches the selection criteria
 */
static uint16_t
lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
{
	struct lpfc_vector_map_info *cpup;
	int cpu;

	/* Loop through all CPUs */
	for_each_present_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];

		/* If we are matching by EQ, there may be multiple CPUs
		 * using the same vector, so select the one with
		 * LPFC_CPU_FIRST_IRQ set.
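		 * Matching on LPFC_CPU_FIRST_IRQ keeps the lookup
		 * deterministic when several CPUs share the same
		 * interrupt vector.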
12230 */ 12231 if ((match == LPFC_FIND_BY_EQ) && 12232 (cpup->flag & LPFC_CPU_FIRST_IRQ) && 12233 (cpup->eq == id)) 12234 return cpu; 12235 12236 /* If matching by HDWQ, select the first CPU that matches */ 12237 if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id)) 12238 return cpu; 12239 } 12240 return 0; 12241 } 12242 12243 #ifdef CONFIG_X86 12244 /** 12245 * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded 12246 * @phba: pointer to lpfc hba data structure. 12247 * @cpu: CPU map index 12248 * @phys_id: CPU package physical id 12249 * @core_id: CPU core id 12250 */ 12251 static int 12252 lpfc_find_hyper(struct lpfc_hba *phba, int cpu, 12253 uint16_t phys_id, uint16_t core_id) 12254 { 12255 struct lpfc_vector_map_info *cpup; 12256 int idx; 12257 12258 for_each_present_cpu(idx) { 12259 cpup = &phba->sli4_hba.cpu_map[idx]; 12260 /* Does the cpup match the one we are looking for */ 12261 if ((cpup->phys_id == phys_id) && 12262 (cpup->core_id == core_id) && 12263 (cpu != idx)) 12264 return 1; 12265 } 12266 return 0; 12267 } 12268 #endif 12269 12270 /* 12271 * lpfc_assign_eq_map_info - Assigns eq for vector_map structure 12272 * @phba: pointer to lpfc hba data structure. 12273 * @eqidx: index for eq and irq vector 12274 * @flag: flags to set for vector_map structure 12275 * @cpu: cpu used to index vector_map structure 12276 * 12277 * The routine assigns eq info into vector_map structure 12278 */ 12279 static inline void 12280 lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag, 12281 unsigned int cpu) 12282 { 12283 struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu]; 12284 struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx); 12285 12286 cpup->eq = eqidx; 12287 cpup->flag |= flag; 12288 12289 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12290 "3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n", 12291 cpu, eqhdl->irq, cpup->eq, cpup->flag); 12292 } 12293 12294 /** 12295 * lpfc_cpu_map_array_init - Initialize cpu_map structure 12296 * @phba: pointer to lpfc hba data structure. 12297 * 12298 * The routine initializes the cpu_map array structure 12299 */ 12300 static void 12301 lpfc_cpu_map_array_init(struct lpfc_hba *phba) 12302 { 12303 struct lpfc_vector_map_info *cpup; 12304 struct lpfc_eq_intr_info *eqi; 12305 int cpu; 12306 12307 for_each_possible_cpu(cpu) { 12308 cpup = &phba->sli4_hba.cpu_map[cpu]; 12309 cpup->phys_id = LPFC_VECTOR_MAP_EMPTY; 12310 cpup->core_id = LPFC_VECTOR_MAP_EMPTY; 12311 cpup->hdwq = LPFC_VECTOR_MAP_EMPTY; 12312 cpup->eq = LPFC_VECTOR_MAP_EMPTY; 12313 cpup->flag = 0; 12314 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu); 12315 INIT_LIST_HEAD(&eqi->list); 12316 eqi->icnt = 0; 12317 } 12318 } 12319 12320 /** 12321 * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure 12322 * @phba: pointer to lpfc hba data structure. 12323 * 12324 * The routine initializes the hba_eq_hdl array structure 12325 */ 12326 static void 12327 lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba) 12328 { 12329 struct lpfc_hba_eq_hdl *eqhdl; 12330 int i; 12331 12332 for (i = 0; i < phba->cfg_irq_chann; i++) { 12333 eqhdl = lpfc_get_eq_hdl(i); 12334 eqhdl->irq = LPFC_VECTOR_MAP_EMPTY; 12335 eqhdl->phba = phba; 12336 } 12337 } 12338 12339 /** 12340 * lpfc_cpu_affinity_check - Check vector CPU affinity mappings 12341 * @phba: pointer to lpfc hba data structure. 12342 * @vectors: number of msix vectors allocated. 12343 * 12344 * The routine will figure out the CPU affinity assignment for every 12345 * MSI-X vector allocated for the HBA. 
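 * Vectors the kernel already affinitized keep their CPUs; remaining CPUs
 * are first bound to a vector on the same phys_id, then to any vector,
 * before hardware queue indices are handed out.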
 * In addition, the CPU to IO channel mapping will be calculated
 * and the phba->sli4_hba.cpu_map array will reflect this.
 */
static void
lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
{
	int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu;
	int max_phys_id, min_phys_id;
	int max_core_id, min_core_id;
	struct lpfc_vector_map_info *cpup;
	struct lpfc_vector_map_info *new_cpup;
#ifdef CONFIG_X86
	struct cpuinfo_x86 *cpuinfo;
#endif
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	struct lpfc_hdwq_stat *c_stat;
#endif

	max_phys_id = 0;
	min_phys_id = LPFC_VECTOR_MAP_EMPTY;
	max_core_id = 0;
	min_core_id = LPFC_VECTOR_MAP_EMPTY;

	/* Update CPU map with physical id and core id of each CPU */
	for_each_present_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];
#ifdef CONFIG_X86
		cpuinfo = &cpu_data(cpu);
		cpup->phys_id = cpuinfo->phys_proc_id;
		cpup->core_id = cpuinfo->cpu_core_id;
		if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id))
			cpup->flag |= LPFC_CPU_MAP_HYPER;
#else
		/* No distinction between CPUs for other platforms */
		cpup->phys_id = 0;
		cpup->core_id = cpu;
#endif

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"3328 CPU %d physid %d coreid %d flag x%x\n",
				cpu, cpup->phys_id, cpup->core_id, cpup->flag);

		if (cpup->phys_id > max_phys_id)
			max_phys_id = cpup->phys_id;
		if (cpup->phys_id < min_phys_id)
			min_phys_id = cpup->phys_id;

		if (cpup->core_id > max_core_id)
			max_core_id = cpup->core_id;
		if (cpup->core_id < min_core_id)
			min_core_id = cpup->core_id;
	}

	/* After looking at each irq vector assigned to this pcidev, it's
	 * possible to see that not ALL CPUs have been accounted for.
	 * Next we will set any unassigned (unaffinitized) cpu map
	 * entries to an IRQ on the same phys_id.
	 */
	first_cpu = cpumask_first(cpu_present_mask);
	start_cpu = first_cpu;

	for_each_present_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];

		/* Is this CPU entry unassigned */
		if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
			/* Mark CPU as IRQ not assigned by the kernel */
			cpup->flag |= LPFC_CPU_MAP_UNASSIGN;

			/* If so, find a new_cpup that's on the SAME
			 * phys_id as cpup. start_cpu will start where we
			 * left off so all unassigned entries don't get
			 * assigned the IRQ of the first entry.
			 */
			new_cpu = start_cpu;
			for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
				new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
				if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
				    (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) &&
				    (new_cpup->phys_id == cpup->phys_id))
					goto found_same;
				new_cpu = cpumask_next(
					new_cpu, cpu_present_mask);
				if (new_cpu == nr_cpumask_bits)
					new_cpu = first_cpu;
			}
			/* At this point, we leave the CPU as unassigned */
			continue;
found_same:
			/* We found a matching phys_id, so copy the IRQ info */
			cpup->eq = new_cpup->eq;

			/* Bump start_cpu to the next slot to minimize the
			 * chance of having multiple unassigned CPU entries
			 * selecting the same IRQ.
			 */
			start_cpu = cpumask_next(new_cpu, cpu_present_mask);
			if (start_cpu == nr_cpumask_bits)
				start_cpu = first_cpu;

			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3337 Set Affinity: CPU %d "
					"eq %d from peer cpu %d same "
					"phys_id (%d)\n",
					cpu, cpup->eq, new_cpu,
					cpup->phys_id);
		}
	}

	/* Set any unassigned cpu map entries to an IRQ on any phys_id */
	start_cpu = first_cpu;

	for_each_present_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];

		/* Is this entry unassigned */
		if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
			/* Mark it as IRQ not assigned by the kernel */
			cpup->flag |= LPFC_CPU_MAP_UNASSIGN;

			/* If so, find a new_cpup that's on ANY phys_id.
			 * start_cpu will start where we left off so all
			 * unassigned entries don't get assigned the IRQ
			 * of the first entry.
			 */
			new_cpu = start_cpu;
			for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
				new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
				if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
				    (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY))
					goto found_any;
				new_cpu = cpumask_next(
					new_cpu, cpu_present_mask);
				if (new_cpu == nr_cpumask_bits)
					new_cpu = first_cpu;
			}
			/* We should never leave an entry unassigned */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3339 Set Affinity: CPU %d "
					"eq %d UNASSIGNED\n",
					cpu, cpup->eq);
			continue;
found_any:
			/* We found an available entry, copy the IRQ info */
			cpup->eq = new_cpup->eq;

			/* Bump start_cpu to the next slot to minimize the
			 * chance of having multiple unassigned CPU entries
			 * selecting the same IRQ.
			 */
			start_cpu = cpumask_next(new_cpu, cpu_present_mask);
			if (start_cpu == nr_cpumask_bits)
				start_cpu = first_cpu;

			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3338 Set Affinity: CPU %d "
					"eq %d from peer cpu %d (%d/%d)\n",
					cpu, cpup->eq, new_cpu,
					new_cpup->phys_id, new_cpup->core_id);
		}
	}

	/* Assign hdwq indices that are unique across all cpus in the map
	 * that are also FIRST_CPUs.
	 */
	idx = 0;
	for_each_present_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];

		/* Only FIRST IRQs get a hdwq index assignment. */
		if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
			continue;

		/* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */
		cpup->hdwq = idx;
		idx++;
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"3333 Set Affinity: CPU %d (phys %d core %d): "
				"hdwq %d eq %d flg x%x\n",
				cpu, cpup->phys_id, cpup->core_id,
				cpup->hdwq, cpup->eq, cpup->flag);
	}
	/* Associate a hdwq with each cpu_map entry.
	 * This will be 1 to 1 - hdwq to cpu, unless there are fewer
	 * hardware queues than CPUs. In that case we will just round-robin
	 * the available hardware queues as they get assigned to CPUs.
	 * The next_idx is the idx from the FIRST_CPU loop above to account
	 * for irq_chann < hdwq. The idx is used for round-robin assignments
	 * and needs to start at 0.
	 */
	next_idx = idx;
	start_cpu = 0;
	idx = 0;
	for_each_present_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];

		/* FIRST cpus are already mapped.
*/ 12543 if (cpup->flag & LPFC_CPU_FIRST_IRQ) 12544 continue; 12545 12546 /* If the cfg_irq_chann < cfg_hdw_queue, set the hdwq 12547 * of the unassigned cpus to the next idx so that all 12548 * hdw queues are fully utilized. 12549 */ 12550 if (next_idx < phba->cfg_hdw_queue) { 12551 cpup->hdwq = next_idx; 12552 next_idx++; 12553 continue; 12554 } 12555 12556 /* Not a First CPU and all hdw_queues are used. Reuse a 12557 * Hardware Queue for another CPU, so be smart about it 12558 * and pick one that has its IRQ/EQ mapped to the same phys_id 12559 * (CPU package) and core_id. 12560 */ 12561 new_cpu = start_cpu; 12562 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 12563 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; 12564 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY && 12565 new_cpup->phys_id == cpup->phys_id && 12566 new_cpup->core_id == cpup->core_id) { 12567 goto found_hdwq; 12568 } 12569 new_cpu = cpumask_next(new_cpu, cpu_present_mask); 12570 if (new_cpu == nr_cpumask_bits) 12571 new_cpu = first_cpu; 12572 } 12573 12574 /* If we can't match both phys_id and core_id, 12575 * settle for just a phys_id match. 12576 */ 12577 new_cpu = start_cpu; 12578 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 12579 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; 12580 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY && 12581 new_cpup->phys_id == cpup->phys_id) 12582 goto found_hdwq; 12583 12584 new_cpu = cpumask_next(new_cpu, cpu_present_mask); 12585 if (new_cpu == nr_cpumask_bits) 12586 new_cpu = first_cpu; 12587 } 12588 12589 /* Otherwise just round robin on cfg_hdw_queue */ 12590 cpup->hdwq = idx % phba->cfg_hdw_queue; 12591 idx++; 12592 goto logit; 12593 found_hdwq: 12594 /* We found an available entry, copy the IRQ info */ 12595 start_cpu = cpumask_next(new_cpu, cpu_present_mask); 12596 if (start_cpu == nr_cpumask_bits) 12597 start_cpu = first_cpu; 12598 cpup->hdwq = new_cpup->hdwq; 12599 logit: 12600 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12601 "3335 Set Affinity: CPU %d (phys %d core %d): " 12602 "hdwq %d eq %d flg x%x\n", 12603 cpu, cpup->phys_id, cpup->core_id, 12604 cpup->hdwq, cpup->eq, cpup->flag); 12605 } 12606 12607 /* 12608 * Initialize the cpu_map slots for not-present cpus in case 12609 * a cpu is hot-added. Perform a simple hdwq round robin assignment. 12610 */ 12611 idx = 0; 12612 for_each_possible_cpu(cpu) { 12613 cpup = &phba->sli4_hba.cpu_map[cpu]; 12614 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 12615 c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu); 12616 c_stat->hdwq_no = cpup->hdwq; 12617 #endif 12618 if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY) 12619 continue; 12620 12621 cpup->hdwq = idx++ % phba->cfg_hdw_queue; 12622 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 12623 c_stat->hdwq_no = cpup->hdwq; 12624 #endif 12625 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12626 "3340 Set Affinity: not present " 12627 "CPU %d hdwq %d\n", 12628 cpu, cpup->hdwq); 12629 } 12630 12631 /* The cpu_map array will be used later during initialization 12632 * when EQ / CQ / WQs are allocated and configured. 12633 */ 12634 return; 12635 } 12636 12637 /** 12638 * lpfc_cpuhp_get_eq 12639 * 12640 * @phba: pointer to lpfc hba data structure. 
 * @cpu: cpu going offline
 * @eqlist: eq list to append to
 */
static int
lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
		  struct list_head *eqlist)
{
	const struct cpumask *maskp;
	struct lpfc_queue *eq;
	struct cpumask *tmp;
	u16 idx;

	tmp = kzalloc(cpumask_size(), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
		maskp = pci_irq_get_affinity(phba->pcidev, idx);
		if (!maskp)
			continue;
		/*
		 * If the irq is not affinitized to the cpu going
		 * offline, then we don't need to poll the eq attached
		 * to it.
		 */
		if (!cpumask_and(tmp, maskp, cpumask_of(cpu)))
			continue;
		/* Get the cpus that are online and are affinitized
		 * to this irq vector. If the count is more than 1
		 * then cpuhp is not going to shut down this vector.
		 * Since this cpu has not gone offline yet, we need >1.
		 */
		cpumask_and(tmp, maskp, cpu_online_mask);
		if (cpumask_weight(tmp) > 1)
			continue;

		/* Now that we have an irq to shut down, get the eq
		 * mapped to this irq. Note: multiple hdwq's in
		 * the software can share an eq, but eventually
		 * only one eq will be mapped to this vector.
		 */
		eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
		list_add(&eq->_poll_list, eqlist);
	}
	kfree(tmp);
	return 0;
}

static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
{
	if (phba->sli_rev != LPFC_SLI_REV4)
		return;

	cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state,
					    &phba->cpuhp);
	/*
	 * Unregistering the instance doesn't stop the polling
	 * timer. Wait for the poll timer to retire.
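	 *
	 * The synchronize_rcu() below appears to pair with the
	 * rcu_read_lock() section in lpfc_cpuhp_add(): once it returns,
	 * a concurrent add cannot re-arm cpuhp_poll_timer after the
	 * del_timer_sync() that follows.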
12700 */ 12701 synchronize_rcu(); 12702 del_timer_sync(&phba->cpuhp_poll_timer); 12703 } 12704 12705 static void lpfc_cpuhp_remove(struct lpfc_hba *phba) 12706 { 12707 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 12708 return; 12709 12710 __lpfc_cpuhp_remove(phba); 12711 } 12712 12713 static void lpfc_cpuhp_add(struct lpfc_hba *phba) 12714 { 12715 if (phba->sli_rev != LPFC_SLI_REV4) 12716 return; 12717 12718 rcu_read_lock(); 12719 12720 if (!list_empty(&phba->poll_list)) 12721 mod_timer(&phba->cpuhp_poll_timer, 12722 jiffies + msecs_to_jiffies(LPFC_POLL_HB)); 12723 12724 rcu_read_unlock(); 12725 12726 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, 12727 &phba->cpuhp); 12728 } 12729 12730 static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval) 12731 { 12732 if (phba->pport->load_flag & FC_UNLOADING) { 12733 *retval = -EAGAIN; 12734 return true; 12735 } 12736 12737 if (phba->sli_rev != LPFC_SLI_REV4) { 12738 *retval = 0; 12739 return true; 12740 } 12741 12742 /* proceed with the hotplug */ 12743 return false; 12744 } 12745 12746 /** 12747 * lpfc_irq_set_aff - set IRQ affinity 12748 * @eqhdl: EQ handle 12749 * @cpu: cpu to set affinity 12750 * 12751 **/ 12752 static inline void 12753 lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu) 12754 { 12755 cpumask_clear(&eqhdl->aff_mask); 12756 cpumask_set_cpu(cpu, &eqhdl->aff_mask); 12757 irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING); 12758 irq_set_affinity(eqhdl->irq, &eqhdl->aff_mask); 12759 } 12760 12761 /** 12762 * lpfc_irq_clear_aff - clear IRQ affinity 12763 * @eqhdl: EQ handle 12764 * 12765 **/ 12766 static inline void 12767 lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl) 12768 { 12769 cpumask_clear(&eqhdl->aff_mask); 12770 irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING); 12771 } 12772 12773 /** 12774 * lpfc_irq_rebalance - rebalances IRQ affinity according to cpuhp event 12775 * @phba: pointer to HBA context object. 12776 * @cpu: cpu going offline/online 12777 * @offline: true, cpu is going offline. false, cpu is coming online. 12778 * 12779 * If cpu is going offline, we'll try our best effort to find the next 12780 * online cpu on the phba's original_mask and migrate all offlining IRQ 12781 * affinities. 12782 * 12783 * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu. 12784 * 12785 * Note: Call only if NUMA or NHT mode is enabled, otherwise rely on 12786 * PCI_IRQ_AFFINITY to auto-manage IRQ affinity. 
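 *
 * As a rough sketch (cpu numbers are hypothetical), if cpu 2 owns a vector
 * and goes offline while cpu 3 is the next online cpu in the original mask,
 * the offline path effectively does:
 *
 *	for (idx = 0; idx < phba->cfg_irq_chann; idx++)
 *		if (cpumask_test_cpu(2, lpfc_get_aff_mask(idx)))
 *			lpfc_irq_set_aff(lpfc_get_eq_hdl(idx), 3);
 *
 * so each affected vector follows a surviving cpu instead of being left
 * on the one going away.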
12787 * 12788 **/ 12789 static void 12790 lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline) 12791 { 12792 struct lpfc_vector_map_info *cpup; 12793 struct cpumask *aff_mask; 12794 unsigned int cpu_select, cpu_next, idx; 12795 const struct cpumask *orig_mask; 12796 12797 if (phba->irq_chann_mode == NORMAL_MODE) 12798 return; 12799 12800 orig_mask = &phba->sli4_hba.irq_aff_mask; 12801 12802 if (!cpumask_test_cpu(cpu, orig_mask)) 12803 return; 12804 12805 cpup = &phba->sli4_hba.cpu_map[cpu]; 12806 12807 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) 12808 return; 12809 12810 if (offline) { 12811 /* Find next online CPU on original mask */ 12812 cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true); 12813 cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next); 12814 12815 /* Found a valid CPU */ 12816 if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) { 12817 /* Go through each eqhdl and ensure offlining 12818 * cpu aff_mask is migrated 12819 */ 12820 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 12821 aff_mask = lpfc_get_aff_mask(idx); 12822 12823 /* Migrate affinity */ 12824 if (cpumask_test_cpu(cpu, aff_mask)) 12825 lpfc_irq_set_aff(lpfc_get_eq_hdl(idx), 12826 cpu_select); 12827 } 12828 } else { 12829 /* Rely on irqbalance if no online CPUs left on NUMA */ 12830 for (idx = 0; idx < phba->cfg_irq_chann; idx++) 12831 lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx)); 12832 } 12833 } else { 12834 /* Migrate affinity back to this CPU */ 12835 lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu); 12836 } 12837 } 12838 12839 static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node) 12840 { 12841 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp); 12842 struct lpfc_queue *eq, *next; 12843 LIST_HEAD(eqlist); 12844 int retval; 12845 12846 if (!phba) { 12847 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id()); 12848 return 0; 12849 } 12850 12851 if (__lpfc_cpuhp_checks(phba, &retval)) 12852 return retval; 12853 12854 lpfc_irq_rebalance(phba, cpu, true); 12855 12856 retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist); 12857 if (retval) 12858 return retval; 12859 12860 /* start polling on these eq's */ 12861 list_for_each_entry_safe(eq, next, &eqlist, _poll_list) { 12862 list_del_init(&eq->_poll_list); 12863 lpfc_sli4_start_polling(eq); 12864 } 12865 12866 return 0; 12867 } 12868 12869 static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node) 12870 { 12871 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp); 12872 struct lpfc_queue *eq, *next; 12873 unsigned int n; 12874 int retval; 12875 12876 if (!phba) { 12877 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id()); 12878 return 0; 12879 } 12880 12881 if (__lpfc_cpuhp_checks(phba, &retval)) 12882 return retval; 12883 12884 lpfc_irq_rebalance(phba, cpu, false); 12885 12886 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) { 12887 n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ); 12888 if (n == cpu) 12889 lpfc_sli4_stop_polling(eq); 12890 } 12891 12892 return 0; 12893 } 12894 12895 /** 12896 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device 12897 * @phba: pointer to lpfc hba data structure. 12898 * 12899 * This routine is invoked to enable the MSI-X interrupt vectors to device 12900 * with SLI-4 interface spec. It also allocates MSI-X vectors and maps them 12901 * to cpus on the system. 
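 *
 * A minimal sketch of the allocation step (the flag choice mirrors the
 * code below and depends on the configured irq_chann_mode):
 *
 *	unsigned int flags = PCI_IRQ_MSIX;
 *
 *	if (phba->irq_chann_mode == NORMAL_MODE)
 *		flags |= PCI_IRQ_AFFINITY;
 *	rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags);
 *
 * after which one handler is request_irq()'d per vector actually granted.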
12902 * 12903 * When cfg_irq_numa is enabled, the adapter will only allocate vectors for 12904 * the number of cpus on the same numa node as this adapter. The vectors are 12905 * allocated without requesting OS affinity mapping. A vector will be 12906 * allocated and assigned to each online and offline cpu. If the cpu is 12907 * online, then affinity will be set to that cpu. If the cpu is offline, then 12908 * affinity will be set to the nearest peer cpu within the numa node that is 12909 * online. If there are no online cpus within the numa node, affinity is not 12910 * assigned and the OS may do as it pleases. Note: cpu vector affinity mapping 12911 * is consistent with the way cpu online/offline is handled when cfg_irq_numa is 12912 * configured. 12913 * 12914 * If numa mode is not enabled and there is more than 1 vector allocated, then 12915 * the driver relies on the managed irq interface where the OS assigns vector to 12916 * cpu affinity. The driver will then use that affinity mapping to setup its 12917 * cpu mapping table. 12918 * 12919 * Return codes 12920 * 0 - successful 12921 * other values - error 12922 **/ 12923 static int 12924 lpfc_sli4_enable_msix(struct lpfc_hba *phba) 12925 { 12926 int vectors, rc, index; 12927 char *name; 12928 const struct cpumask *aff_mask = NULL; 12929 unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids; 12930 struct lpfc_vector_map_info *cpup; 12931 struct lpfc_hba_eq_hdl *eqhdl; 12932 const struct cpumask *maskp; 12933 unsigned int flags = PCI_IRQ_MSIX; 12934 12935 /* Set up MSI-X multi-message vectors */ 12936 vectors = phba->cfg_irq_chann; 12937 12938 if (phba->irq_chann_mode != NORMAL_MODE) 12939 aff_mask = &phba->sli4_hba.irq_aff_mask; 12940 12941 if (aff_mask) { 12942 cpu_cnt = cpumask_weight(aff_mask); 12943 vectors = min(phba->cfg_irq_chann, cpu_cnt); 12944 12945 /* cpu: iterates over aff_mask including offline or online 12946 * cpu_select: iterates over online aff_mask to set affinity 12947 */ 12948 cpu = cpumask_first(aff_mask); 12949 cpu_select = lpfc_next_online_cpu(aff_mask, cpu); 12950 } else { 12951 flags |= PCI_IRQ_AFFINITY; 12952 } 12953 12954 rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags); 12955 if (rc < 0) { 12956 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12957 "0484 PCI enable MSI-X failed (%d)\n", rc); 12958 goto vec_fail_out; 12959 } 12960 vectors = rc; 12961 12962 /* Assign MSI-X vectors to interrupt handlers */ 12963 for (index = 0; index < vectors; index++) { 12964 eqhdl = lpfc_get_eq_hdl(index); 12965 name = eqhdl->handler_name; 12966 memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ); 12967 snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ, 12968 LPFC_DRIVER_HANDLER_NAME"%d", index); 12969 12970 eqhdl->idx = index; 12971 rc = request_irq(pci_irq_vector(phba->pcidev, index), 12972 &lpfc_sli4_hba_intr_handler, 0, 12973 name, eqhdl); 12974 if (rc) { 12975 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 12976 "0486 MSI-X fast-path (%d) " 12977 "request_irq failed (%d)\n", index, rc); 12978 goto cfg_fail_out; 12979 } 12980 12981 eqhdl->irq = pci_irq_vector(phba->pcidev, index); 12982 12983 if (aff_mask) { 12984 /* If found a neighboring online cpu, set affinity */ 12985 if (cpu_select < nr_cpu_ids) 12986 lpfc_irq_set_aff(eqhdl, cpu_select); 12987 12988 /* Assign EQ to cpu_map */ 12989 lpfc_assign_eq_map_info(phba, index, 12990 LPFC_CPU_FIRST_IRQ, 12991 cpu); 12992 12993 /* Iterate to next offline or online cpu in aff_mask */ 12994 cpu = cpumask_next(cpu, aff_mask); 12995 12996 /* Find next online cpu in aff_mask to set 
affinity */ 12997 cpu_select = lpfc_next_online_cpu(aff_mask, cpu); 12998 } else if (vectors == 1) { 12999 cpu = cpumask_first(cpu_present_mask); 13000 lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ, 13001 cpu); 13002 } else { 13003 maskp = pci_irq_get_affinity(phba->pcidev, index); 13004 13005 /* Loop through all CPUs associated with vector index */ 13006 for_each_cpu_and(cpu, maskp, cpu_present_mask) { 13007 cpup = &phba->sli4_hba.cpu_map[cpu]; 13008 13009 /* If this is the first CPU thats assigned to 13010 * this vector, set LPFC_CPU_FIRST_IRQ. 13011 * 13012 * With certain platforms its possible that irq 13013 * vectors are affinitized to all the cpu's. 13014 * This can result in each cpu_map.eq to be set 13015 * to the last vector, resulting in overwrite 13016 * of all the previous cpu_map.eq. Ensure that 13017 * each vector receives a place in cpu_map. 13018 * Later call to lpfc_cpu_affinity_check will 13019 * ensure we are nicely balanced out. 13020 */ 13021 if (cpup->eq != LPFC_VECTOR_MAP_EMPTY) 13022 continue; 13023 lpfc_assign_eq_map_info(phba, index, 13024 LPFC_CPU_FIRST_IRQ, 13025 cpu); 13026 break; 13027 } 13028 } 13029 } 13030 13031 if (vectors != phba->cfg_irq_chann) { 13032 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13033 "3238 Reducing IO channels to match number of " 13034 "MSI-X vectors, requested %d got %d\n", 13035 phba->cfg_irq_chann, vectors); 13036 if (phba->cfg_irq_chann > vectors) 13037 phba->cfg_irq_chann = vectors; 13038 } 13039 13040 return rc; 13041 13042 cfg_fail_out: 13043 /* free the irq already requested */ 13044 for (--index; index >= 0; index--) { 13045 eqhdl = lpfc_get_eq_hdl(index); 13046 lpfc_irq_clear_aff(eqhdl); 13047 free_irq(eqhdl->irq, eqhdl); 13048 } 13049 13050 /* Unconfigure MSI-X capability structure */ 13051 pci_free_irq_vectors(phba->pcidev); 13052 13053 vec_fail_out: 13054 return rc; 13055 } 13056 13057 /** 13058 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device 13059 * @phba: pointer to lpfc hba data structure. 13060 * 13061 * This routine is invoked to enable the MSI interrupt mode to device with 13062 * SLI-4 interface spec. The kernel function pci_alloc_irq_vectors() is 13063 * called to enable the MSI vector. The device driver is responsible for 13064 * calling the request_irq() to register MSI vector with a interrupt the 13065 * handler, which is done in this function. 13066 * 13067 * Return codes 13068 * 0 - successful 13069 * other values - error 13070 **/ 13071 static int 13072 lpfc_sli4_enable_msi(struct lpfc_hba *phba) 13073 { 13074 int rc, index; 13075 unsigned int cpu; 13076 struct lpfc_hba_eq_hdl *eqhdl; 13077 13078 rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1, 13079 PCI_IRQ_MSI | PCI_IRQ_AFFINITY); 13080 if (rc > 0) 13081 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 13082 "0487 PCI enable MSI mode success.\n"); 13083 else { 13084 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 13085 "0488 PCI enable MSI mode failed (%d)\n", rc); 13086 return rc ? 
rc : -1; 13087 } 13088 13089 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 13090 0, LPFC_DRIVER_NAME, phba); 13091 if (rc) { 13092 pci_free_irq_vectors(phba->pcidev); 13093 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 13094 "0490 MSI request_irq failed (%d)\n", rc); 13095 return rc; 13096 } 13097 13098 eqhdl = lpfc_get_eq_hdl(0); 13099 eqhdl->irq = pci_irq_vector(phba->pcidev, 0); 13100 13101 cpu = cpumask_first(cpu_present_mask); 13102 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu); 13103 13104 for (index = 0; index < phba->cfg_irq_chann; index++) { 13105 eqhdl = lpfc_get_eq_hdl(index); 13106 eqhdl->idx = index; 13107 } 13108 13109 return 0; 13110 } 13111 13112 /** 13113 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device 13114 * @phba: pointer to lpfc hba data structure. 13115 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X). 13116 * 13117 * This routine is invoked to enable device interrupt and associate driver's 13118 * interrupt handler(s) to interrupt vector(s) to device with SLI-4 13119 * interface spec. Depends on the interrupt mode configured to the driver, 13120 * the driver will try to fallback from the configured interrupt mode to an 13121 * interrupt mode which is supported by the platform, kernel, and device in 13122 * the order of: 13123 * MSI-X -> MSI -> IRQ. 13124 * 13125 * Return codes 13126 * 0 - successful 13127 * other values - error 13128 **/ 13129 static uint32_t 13130 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 13131 { 13132 uint32_t intr_mode = LPFC_INTR_ERROR; 13133 int retval, idx; 13134 13135 if (cfg_mode == 2) { 13136 /* Preparation before conf_msi mbox cmd */ 13137 retval = 0; 13138 if (!retval) { 13139 /* Now, try to enable MSI-X interrupt mode */ 13140 retval = lpfc_sli4_enable_msix(phba); 13141 if (!retval) { 13142 /* Indicate initialization to MSI-X mode */ 13143 phba->intr_type = MSIX; 13144 intr_mode = 2; 13145 } 13146 } 13147 } 13148 13149 /* Fallback to MSI if MSI-X initialization failed */ 13150 if (cfg_mode >= 1 && phba->intr_type == NONE) { 13151 retval = lpfc_sli4_enable_msi(phba); 13152 if (!retval) { 13153 /* Indicate initialization to MSI mode */ 13154 phba->intr_type = MSI; 13155 intr_mode = 1; 13156 } 13157 } 13158 13159 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 13160 if (phba->intr_type == NONE) { 13161 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 13162 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 13163 if (!retval) { 13164 struct lpfc_hba_eq_hdl *eqhdl; 13165 unsigned int cpu; 13166 13167 /* Indicate initialization to INTx mode */ 13168 phba->intr_type = INTx; 13169 intr_mode = 0; 13170 13171 eqhdl = lpfc_get_eq_hdl(0); 13172 eqhdl->irq = pci_irq_vector(phba->pcidev, 0); 13173 13174 cpu = cpumask_first(cpu_present_mask); 13175 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, 13176 cpu); 13177 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 13178 eqhdl = lpfc_get_eq_hdl(idx); 13179 eqhdl->idx = idx; 13180 } 13181 } 13182 } 13183 return intr_mode; 13184 } 13185 13186 /** 13187 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device 13188 * @phba: pointer to lpfc hba data structure. 13189 * 13190 * This routine is invoked to disable device interrupt and disassociate 13191 * the driver's interrupt handler(s) from interrupt vector(s) to device 13192 * with SLI-4 interface spec. Depending on the interrupt mode, the driver 13193 * will release the interrupt vector(s) for the message signaled interrupt. 
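 *
 * For MSI-X the unwind below is, per vector, roughly:
 *
 *	lpfc_irq_clear_aff(eqhdl);
 *	free_irq(eqhdl->irq, eqhdl);
 *
 * followed by a single pci_free_irq_vectors() and resetting intr_type to
 * NONE; MSI and INTx simply free_irq() the one vector before
 * pci_free_irq_vectors().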
13194 **/ 13195 static void 13196 lpfc_sli4_disable_intr(struct lpfc_hba *phba) 13197 { 13198 /* Disable the currently initialized interrupt mode */ 13199 if (phba->intr_type == MSIX) { 13200 int index; 13201 struct lpfc_hba_eq_hdl *eqhdl; 13202 13203 /* Free up MSI-X multi-message vectors */ 13204 for (index = 0; index < phba->cfg_irq_chann; index++) { 13205 eqhdl = lpfc_get_eq_hdl(index); 13206 lpfc_irq_clear_aff(eqhdl); 13207 free_irq(eqhdl->irq, eqhdl); 13208 } 13209 } else { 13210 free_irq(phba->pcidev->irq, phba); 13211 } 13212 13213 pci_free_irq_vectors(phba->pcidev); 13214 13215 /* Reset interrupt management states */ 13216 phba->intr_type = NONE; 13217 phba->sli.slistat.sli_intr = 0; 13218 } 13219 13220 /** 13221 * lpfc_unset_hba - Unset SLI3 hba device initialization 13222 * @phba: pointer to lpfc hba data structure. 13223 * 13224 * This routine is invoked to unset the HBA device initialization steps to 13225 * a device with SLI-3 interface spec. 13226 **/ 13227 static void 13228 lpfc_unset_hba(struct lpfc_hba *phba) 13229 { 13230 struct lpfc_vport *vport = phba->pport; 13231 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 13232 13233 spin_lock_irq(shost->host_lock); 13234 vport->load_flag |= FC_UNLOADING; 13235 spin_unlock_irq(shost->host_lock); 13236 13237 kfree(phba->vpi_bmask); 13238 kfree(phba->vpi_ids); 13239 13240 lpfc_stop_hba_timers(phba); 13241 13242 phba->pport->work_port_events = 0; 13243 13244 lpfc_sli_hba_down(phba); 13245 13246 lpfc_sli_brdrestart(phba); 13247 13248 lpfc_sli_disable_intr(phba); 13249 13250 return; 13251 } 13252 13253 /** 13254 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy 13255 * @phba: Pointer to HBA context object. 13256 * 13257 * This function is called in the SLI4 code path to wait for completion 13258 * of device's XRIs exchange busy. It will check the XRI exchange busy 13259 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after 13260 * that, it will check the XRI exchange busy on outstanding FCP and ELS 13261 * I/Os every 30 seconds, log error message, and wait forever. Only when 13262 * all XRI exchange busy complete, the driver unload shall proceed with 13263 * invoking the function reset ioctl mailbox command to the CNA and the 13264 * the rest of the driver unload resource release. 13265 **/ 13266 static void 13267 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) 13268 { 13269 struct lpfc_sli4_hdw_queue *qp; 13270 int idx, ccnt; 13271 int wait_time = 0; 13272 int io_xri_cmpl = 1; 13273 int nvmet_xri_cmpl = 1; 13274 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 13275 13276 /* Driver just aborted IOs during the hba_unset process. Pause 13277 * here to give the HBA time to complete the IO and get entries 13278 * into the abts lists. 13279 */ 13280 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5); 13281 13282 /* Wait for NVME pending IO to flush back to transport. 
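	 * After that, the loop below re-checks the abts lists every
	 * LPFC_XRI_EXCH_BUSY_WAIT_T1 ms until LPFC_XRI_EXCH_BUSY_WAIT_TMO
	 * is reached, then logs the busy XRI types and keeps re-checking
	 * every LPFC_XRI_EXCH_BUSY_WAIT_T2 ms until everything completes.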
*/ 13283 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 13284 lpfc_nvme_wait_for_io_drain(phba); 13285 13286 ccnt = 0; 13287 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 13288 qp = &phba->sli4_hba.hdwq[idx]; 13289 io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list); 13290 if (!io_xri_cmpl) /* if list is NOT empty */ 13291 ccnt++; 13292 } 13293 if (ccnt) 13294 io_xri_cmpl = 0; 13295 13296 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 13297 nvmet_xri_cmpl = 13298 list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 13299 } 13300 13301 while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) { 13302 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) { 13303 if (!nvmet_xri_cmpl) 13304 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13305 "6424 NVMET XRI exchange busy " 13306 "wait time: %d seconds.\n", 13307 wait_time/1000); 13308 if (!io_xri_cmpl) 13309 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13310 "6100 IO XRI exchange busy " 13311 "wait time: %d seconds.\n", 13312 wait_time/1000); 13313 if (!els_xri_cmpl) 13314 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13315 "2878 ELS XRI exchange busy " 13316 "wait time: %d seconds.\n", 13317 wait_time/1000); 13318 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2); 13319 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2; 13320 } else { 13321 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1); 13322 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1; 13323 } 13324 13325 ccnt = 0; 13326 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 13327 qp = &phba->sli4_hba.hdwq[idx]; 13328 io_xri_cmpl = list_empty( 13329 &qp->lpfc_abts_io_buf_list); 13330 if (!io_xri_cmpl) /* if list is NOT empty */ 13331 ccnt++; 13332 } 13333 if (ccnt) 13334 io_xri_cmpl = 0; 13335 13336 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 13337 nvmet_xri_cmpl = list_empty( 13338 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 13339 } 13340 els_xri_cmpl = 13341 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 13342 13343 } 13344 } 13345 13346 /** 13347 * lpfc_sli4_hba_unset - Unset the fcoe hba 13348 * @phba: Pointer to HBA context object. 13349 * 13350 * This function is called in the SLI4 code path to reset the HBA's FCoE 13351 * function. The caller is not required to hold any lock. This routine 13352 * issues PCI function reset mailbox command to reset the FCoE function. 13353 * At the end of the function, it calls lpfc_hba_down_post function to 13354 * free any pending commands. 13355 **/ 13356 static void 13357 lpfc_sli4_hba_unset(struct lpfc_hba *phba) 13358 { 13359 int wait_cnt = 0; 13360 LPFC_MBOXQ_t *mboxq; 13361 struct pci_dev *pdev = phba->pcidev; 13362 13363 lpfc_stop_hba_timers(phba); 13364 hrtimer_cancel(&phba->cmf_timer); 13365 13366 if (phba->pport) 13367 phba->sli4_hba.intr_enable = 0; 13368 13369 /* 13370 * Gracefully wait out the potential current outstanding asynchronous 13371 * mailbox command. 
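	 *
	 * Condensed, the pattern used below is:
	 *
	 *	set LPFC_SLI_ASYNC_MBX_BLK under hbalock
	 *	while (LPFC_SLI_MBOX_ACTIVE && tries++ < LPFC_ACTIVE_MBOX_WAIT_CNT)
	 *		msleep(10);
	 *	if still active, complete the command with MBX_NOT_FINISHED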
13372 */ 13373 13374 /* First, block any pending async mailbox command from posted */ 13375 spin_lock_irq(&phba->hbalock); 13376 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 13377 spin_unlock_irq(&phba->hbalock); 13378 /* Now, trying to wait it out if we can */ 13379 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 13380 msleep(10); 13381 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT) 13382 break; 13383 } 13384 /* Forcefully release the outstanding mailbox command if timed out */ 13385 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 13386 spin_lock_irq(&phba->hbalock); 13387 mboxq = phba->sli.mbox_active; 13388 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 13389 __lpfc_mbox_cmpl_put(phba, mboxq); 13390 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 13391 phba->sli.mbox_active = NULL; 13392 spin_unlock_irq(&phba->hbalock); 13393 } 13394 13395 /* Abort all iocbs associated with the hba */ 13396 lpfc_sli_hba_iocb_abort(phba); 13397 13398 if (!pci_channel_offline(phba->pcidev)) 13399 /* Wait for completion of device XRI exchange busy */ 13400 lpfc_sli4_xri_exchange_busy_wait(phba); 13401 13402 /* per-phba callback de-registration for hotplug event */ 13403 if (phba->pport) 13404 lpfc_cpuhp_remove(phba); 13405 13406 /* Disable PCI subsystem interrupt */ 13407 lpfc_sli4_disable_intr(phba); 13408 13409 /* Disable SR-IOV if enabled */ 13410 if (phba->cfg_sriov_nr_virtfn) 13411 pci_disable_sriov(pdev); 13412 13413 /* Stop kthread signal shall trigger work_done one more time */ 13414 kthread_stop(phba->worker_thread); 13415 13416 /* Disable FW logging to host memory */ 13417 lpfc_ras_stop_fwlog(phba); 13418 13419 /* Reset SLI4 HBA FCoE function */ 13420 lpfc_pci_function_reset(phba); 13421 13422 /* release all queue allocated resources. */ 13423 lpfc_sli4_queue_destroy(phba); 13424 13425 /* Free RAS DMA memory */ 13426 if (phba->ras_fwlog.ras_enabled) 13427 lpfc_sli4_ras_dma_free(phba); 13428 13429 /* Stop the SLI4 device port */ 13430 if (phba->pport) 13431 phba->pport->work_port_events = 0; 13432 } 13433 13434 static uint32_t 13435 lpfc_cgn_crc32(uint32_t crc, u8 byte) 13436 { 13437 uint32_t msb = 0; 13438 uint32_t bit; 13439 13440 for (bit = 0; bit < 8; bit++) { 13441 msb = (crc >> 31) & 1; 13442 crc <<= 1; 13443 13444 if (msb ^ (byte & 1)) { 13445 crc ^= LPFC_CGN_CRC32_MAGIC_NUMBER; 13446 crc |= 1; 13447 } 13448 byte >>= 1; 13449 } 13450 return crc; 13451 } 13452 13453 static uint32_t 13454 lpfc_cgn_reverse_bits(uint32_t wd) 13455 { 13456 uint32_t result = 0; 13457 uint32_t i; 13458 13459 for (i = 0; i < 32; i++) { 13460 result <<= 1; 13461 result |= (1 & (wd >> i)); 13462 } 13463 return result; 13464 } 13465 13466 /* 13467 * The routine corresponds with the algorithm the HBA firmware 13468 * uses to validate the data integrity. 
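 *
 * Typical usage in this file (see lpfc_init_congestion_buf() below) is:
 *
 *	crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
 *	cp->cgn_info_crc = cpu_to_le32(crc);
 *
 * i.e. the whole congestion info block is run through the bit-serial CRC
 * above and the bit-reversed, inverted result is stored little-endian.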
13469 */ 13470 uint32_t 13471 lpfc_cgn_calc_crc32(void *ptr, uint32_t byteLen, uint32_t crc) 13472 { 13473 uint32_t i; 13474 uint32_t result; 13475 uint8_t *data = (uint8_t *)ptr; 13476 13477 for (i = 0; i < byteLen; ++i) 13478 crc = lpfc_cgn_crc32(crc, data[i]); 13479 13480 result = ~lpfc_cgn_reverse_bits(crc); 13481 return result; 13482 } 13483 13484 void 13485 lpfc_init_congestion_buf(struct lpfc_hba *phba) 13486 { 13487 struct lpfc_cgn_info *cp; 13488 struct timespec64 cmpl_time; 13489 struct tm broken; 13490 uint16_t size; 13491 uint32_t crc; 13492 13493 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 13494 "6235 INIT Congestion Buffer %p\n", phba->cgn_i); 13495 13496 if (!phba->cgn_i) 13497 return; 13498 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; 13499 13500 atomic_set(&phba->cgn_fabric_warn_cnt, 0); 13501 atomic_set(&phba->cgn_fabric_alarm_cnt, 0); 13502 atomic_set(&phba->cgn_sync_alarm_cnt, 0); 13503 atomic_set(&phba->cgn_sync_warn_cnt, 0); 13504 13505 atomic_set(&phba->cgn_driver_evt_cnt, 0); 13506 atomic_set(&phba->cgn_latency_evt_cnt, 0); 13507 atomic64_set(&phba->cgn_latency_evt, 0); 13508 phba->cgn_evt_minute = 0; 13509 phba->hba_flag &= ~HBA_CGN_DAY_WRAP; 13510 13511 memset(cp, 0xff, offsetof(struct lpfc_cgn_info, cgn_stat)); 13512 cp->cgn_info_size = cpu_to_le16(LPFC_CGN_INFO_SZ); 13513 cp->cgn_info_version = LPFC_CGN_INFO_V3; 13514 13515 /* cgn parameters */ 13516 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode; 13517 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0; 13518 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1; 13519 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2; 13520 13521 ktime_get_real_ts64(&cmpl_time); 13522 time64_to_tm(cmpl_time.tv_sec, 0, &broken); 13523 13524 cp->cgn_info_month = broken.tm_mon + 1; 13525 cp->cgn_info_day = broken.tm_mday; 13526 cp->cgn_info_year = broken.tm_year - 100; /* relative to 2000 */ 13527 cp->cgn_info_hour = broken.tm_hour; 13528 cp->cgn_info_minute = broken.tm_min; 13529 cp->cgn_info_second = broken.tm_sec; 13530 13531 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT, 13532 "2643 CGNInfo Init: Start Time " 13533 "%d/%d/%d %d:%d:%d\n", 13534 cp->cgn_info_day, cp->cgn_info_month, 13535 cp->cgn_info_year, cp->cgn_info_hour, 13536 cp->cgn_info_minute, cp->cgn_info_second); 13537 13538 /* Fill in default LUN qdepth */ 13539 if (phba->pport) { 13540 size = (uint16_t)(phba->pport->cfg_lun_queue_depth); 13541 cp->cgn_lunq = cpu_to_le16(size); 13542 } 13543 13544 /* last used Index initialized to 0xff already */ 13545 13546 cp->cgn_warn_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ); 13547 cp->cgn_alarm_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ); 13548 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED); 13549 cp->cgn_info_crc = cpu_to_le32(crc); 13550 13551 phba->cgn_evt_timestamp = jiffies + 13552 msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN); 13553 } 13554 13555 void 13556 lpfc_init_congestion_stat(struct lpfc_hba *phba) 13557 { 13558 struct lpfc_cgn_info *cp; 13559 struct timespec64 cmpl_time; 13560 struct tm broken; 13561 uint32_t crc; 13562 13563 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 13564 "6236 INIT Congestion Stat %p\n", phba->cgn_i); 13565 13566 if (!phba->cgn_i) 13567 return; 13568 13569 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; 13570 memset(&cp->cgn_stat, 0, sizeof(cp->cgn_stat)); 13571 13572 ktime_get_real_ts64(&cmpl_time); 13573 time64_to_tm(cmpl_time.tv_sec, 0, &broken); 13574 13575 cp->cgn_stat_month = broken.tm_mon + 1; 13576 cp->cgn_stat_day = broken.tm_mday; 13577 cp->cgn_stat_year = 
broken.tm_year - 100; /* relative to 2000 */ 13578 cp->cgn_stat_hour = broken.tm_hour; 13579 cp->cgn_stat_minute = broken.tm_min; 13580 13581 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT, 13582 "2647 CGNstat Init: Start Time " 13583 "%d/%d/%d %d:%d\n", 13584 cp->cgn_stat_day, cp->cgn_stat_month, 13585 cp->cgn_stat_year, cp->cgn_stat_hour, 13586 cp->cgn_stat_minute); 13587 13588 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED); 13589 cp->cgn_info_crc = cpu_to_le32(crc); 13590 } 13591 13592 /** 13593 * __lpfc_reg_congestion_buf - register congestion info buffer with HBA 13594 * @phba: Pointer to hba context object. 13595 * @reg: flag to determine register or unregister. 13596 */ 13597 static int 13598 __lpfc_reg_congestion_buf(struct lpfc_hba *phba, int reg) 13599 { 13600 struct lpfc_mbx_reg_congestion_buf *reg_congestion_buf; 13601 union lpfc_sli4_cfg_shdr *shdr; 13602 uint32_t shdr_status, shdr_add_status; 13603 LPFC_MBOXQ_t *mboxq; 13604 int length, rc; 13605 13606 if (!phba->cgn_i) 13607 return -ENXIO; 13608 13609 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 13610 if (!mboxq) { 13611 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 13612 "2641 REG_CONGESTION_BUF mbox allocation fail: " 13613 "HBA state x%x reg %d\n", 13614 phba->pport->port_state, reg); 13615 return -ENOMEM; 13616 } 13617 13618 length = (sizeof(struct lpfc_mbx_reg_congestion_buf) - 13619 sizeof(struct lpfc_sli4_cfg_mhdr)); 13620 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 13621 LPFC_MBOX_OPCODE_REG_CONGESTION_BUF, length, 13622 LPFC_SLI4_MBX_EMBED); 13623 reg_congestion_buf = &mboxq->u.mqe.un.reg_congestion_buf; 13624 bf_set(lpfc_mbx_reg_cgn_buf_type, reg_congestion_buf, 1); 13625 if (reg > 0) 13626 bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 1); 13627 else 13628 bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 0); 13629 reg_congestion_buf->length = sizeof(struct lpfc_cgn_info); 13630 reg_congestion_buf->addr_lo = 13631 putPaddrLow(phba->cgn_i->phys); 13632 reg_congestion_buf->addr_hi = 13633 putPaddrHigh(phba->cgn_i->phys); 13634 13635 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 13636 shdr = (union lpfc_sli4_cfg_shdr *) 13637 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 13638 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13639 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 13640 &shdr->response); 13641 mempool_free(mboxq, phba->mbox_mem_pool); 13642 if (shdr_status || shdr_add_status || rc) { 13643 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13644 "2642 REG_CONGESTION_BUF mailbox " 13645 "failed with status x%x add_status x%x," 13646 " mbx status x%x reg %d\n", 13647 shdr_status, shdr_add_status, rc, reg); 13648 return -ENXIO; 13649 } 13650 return 0; 13651 } 13652 13653 int 13654 lpfc_unreg_congestion_buf(struct lpfc_hba *phba) 13655 { 13656 lpfc_cmf_stop(phba); 13657 return __lpfc_reg_congestion_buf(phba, 0); 13658 } 13659 13660 int 13661 lpfc_reg_congestion_buf(struct lpfc_hba *phba) 13662 { 13663 return __lpfc_reg_congestion_buf(phba, 1); 13664 } 13665 13666 /** 13667 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS. 13668 * @phba: Pointer to HBA context object. 13669 * @mboxq: Pointer to the mailboxq memory for the mailbox command response. 13670 * 13671 * This function is called in the SLI4 code path to read the port's 13672 * sli4 capabilities. 13673 * 13674 * This function may be be called from any context that can block-wait 13675 * for the completion. 
The expectation is that this routine is called 13676 * typically from probe_one or from the online routine. 13677 **/ 13678 int 13679 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 13680 { 13681 int rc; 13682 struct lpfc_mqe *mqe = &mboxq->u.mqe; 13683 struct lpfc_pc_sli4_params *sli4_params; 13684 uint32_t mbox_tmo; 13685 int length; 13686 bool exp_wqcq_pages = true; 13687 struct lpfc_sli4_parameters *mbx_sli4_parameters; 13688 13689 /* 13690 * By default, the driver assumes the SLI4 port requires RPI 13691 * header postings. The SLI4_PARAM response will correct this 13692 * assumption. 13693 */ 13694 phba->sli4_hba.rpi_hdrs_in_use = 1; 13695 13696 /* Read the port's SLI4 Config Parameters */ 13697 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) - 13698 sizeof(struct lpfc_sli4_cfg_mhdr)); 13699 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 13700 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS, 13701 length, LPFC_SLI4_MBX_EMBED); 13702 if (!phba->sli4_hba.intr_enable) 13703 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 13704 else { 13705 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 13706 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 13707 } 13708 if (unlikely(rc)) 13709 return rc; 13710 sli4_params = &phba->sli4_hba.pc_sli4_params; 13711 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters; 13712 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters); 13713 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters); 13714 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters); 13715 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1, 13716 mbx_sli4_parameters); 13717 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2, 13718 mbx_sli4_parameters); 13719 if (bf_get(cfg_phwq, mbx_sli4_parameters)) 13720 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED; 13721 else 13722 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED; 13723 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len; 13724 sli4_params->loopbk_scope = bf_get(cfg_loopbk_scope, 13725 mbx_sli4_parameters); 13726 sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters); 13727 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters); 13728 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters); 13729 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters); 13730 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters); 13731 sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters); 13732 sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters); 13733 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters); 13734 sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters); 13735 sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters); 13736 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt, 13737 mbx_sli4_parameters); 13738 sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters); 13739 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align, 13740 mbx_sli4_parameters); 13741 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters); 13742 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters); 13743 13744 /* Check for Extended Pre-Registered SGL support */ 13745 phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters); 13746 13747 /* Check for firmware nvme support */ 13748 rc = (bf_get(cfg_nvme, mbx_sli4_parameters) && 13749 bf_get(cfg_xib, mbx_sli4_parameters)); 13750 13751 if (rc) { 13752 /* Save this to indicate the Firmware supports NVME */ 13753 sli4_params->nvme = 1; 13754 13755 /* Firmware NVME support, 
check driver FC4 NVME support */ 13756 if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) { 13757 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME, 13758 "6133 Disabling NVME support: " 13759 "FC4 type not supported: x%x\n", 13760 phba->cfg_enable_fc4_type); 13761 goto fcponly; 13762 } 13763 } else { 13764 /* No firmware NVME support, check driver FC4 NVME support */ 13765 sli4_params->nvme = 0; 13766 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 13767 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME, 13768 "6101 Disabling NVME support: Not " 13769 "supported by firmware (%d %d) x%x\n", 13770 bf_get(cfg_nvme, mbx_sli4_parameters), 13771 bf_get(cfg_xib, mbx_sli4_parameters), 13772 phba->cfg_enable_fc4_type); 13773 fcponly: 13774 phba->nvmet_support = 0; 13775 phba->cfg_nvmet_mrq = 0; 13776 phba->cfg_nvme_seg_cnt = 0; 13777 13778 /* If no FC4 type support, move to just SCSI support */ 13779 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 13780 return -ENODEV; 13781 phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP; 13782 } 13783 } 13784 13785 /* If the NVME FC4 type is enabled, scale the sg_seg_cnt to 13786 * accommodate 512K and 1M IOs in a single nvme buf. 13787 */ 13788 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 13789 phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT; 13790 13791 /* Enable embedded Payload BDE if support is indicated */ 13792 if (bf_get(cfg_pbde, mbx_sli4_parameters)) 13793 phba->cfg_enable_pbde = 1; 13794 else 13795 phba->cfg_enable_pbde = 0; 13796 13797 /* 13798 * To support Suppress Response feature we must satisfy 3 conditions. 13799 * lpfc_suppress_rsp module parameter must be set (default). 13800 * In SLI4-Parameters Descriptor: 13801 * Extended Inline Buffers (XIB) must be supported. 13802 * Suppress Response IU Not Supported (SRIUNS) must NOT be supported 13803 * (double negative). 13804 */ 13805 if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) && 13806 !(bf_get(cfg_nosr, mbx_sli4_parameters))) 13807 phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP; 13808 else 13809 phba->cfg_suppress_rsp = 0; 13810 13811 if (bf_get(cfg_eqdr, mbx_sli4_parameters)) 13812 phba->sli.sli_flag |= LPFC_SLI_USE_EQDR; 13813 13814 /* Make sure that sge_supp_len can be handled by the driver */ 13815 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) 13816 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE; 13817 13818 /* 13819 * Check whether the adapter supports an embedded copy of the 13820 * FCP CMD IU within the WQE for FCP_Ixxx commands. In order 13821 * to use this option, 128-byte WQEs must be used. 
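	 *
	 * Reduced to a sketch, the decision below amounts to:
	 *
	 *	phba->fcp_embed_io =
	 *		bf_get(cfg_ext_embed_cb, mbx_sli4_parameters) ? 1 : 0;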
13822 */ 13823 if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters)) 13824 phba->fcp_embed_io = 1; 13825 else 13826 phba->fcp_embed_io = 0; 13827 13828 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME, 13829 "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n", 13830 bf_get(cfg_xib, mbx_sli4_parameters), 13831 phba->cfg_enable_pbde, 13832 phba->fcp_embed_io, sli4_params->nvme, 13833 phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp); 13834 13835 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 13836 LPFC_SLI_INTF_IF_TYPE_2) && 13837 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == 13838 LPFC_SLI_INTF_FAMILY_LNCR_A0)) 13839 exp_wqcq_pages = false; 13840 13841 if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) && 13842 (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) && 13843 exp_wqcq_pages && 13844 (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT)) 13845 phba->enab_exp_wqcq_pages = 1; 13846 else 13847 phba->enab_exp_wqcq_pages = 0; 13848 /* 13849 * Check if the SLI port supports MDS Diagnostics 13850 */ 13851 if (bf_get(cfg_mds_diags, mbx_sli4_parameters)) 13852 phba->mds_diags_support = 1; 13853 else 13854 phba->mds_diags_support = 0; 13855 13856 /* 13857 * Check if the SLI port supports NSLER 13858 */ 13859 if (bf_get(cfg_nsler, mbx_sli4_parameters)) 13860 phba->nsler = 1; 13861 else 13862 phba->nsler = 0; 13863 13864 return 0; 13865 } 13866 13867 /** 13868 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem. 13869 * @pdev: pointer to PCI device 13870 * @pid: pointer to PCI device identifier 13871 * 13872 * This routine is to be called to attach a device with SLI-3 interface spec 13873 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 13874 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 13875 * information of the device and driver to see if the driver state that it can 13876 * support this kind of device. If the match is successful, the driver core 13877 * invokes this routine. If this routine determines it can claim the HBA, it 13878 * does all the initialization that it needs to do to handle the HBA properly. 
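 *
 * Loosely, the successful path below runs:
 *
 *	lpfc_hba_alloc() -> lpfc_enable_pci_dev() -> lpfc_sli_pci_mem_setup()
 *	-> lpfc_sli_driver_resource_setup() -> lpfc_create_shost()
 *	-> lpfc_sli_enable_intr() + lpfc_sli_hba_setup()
 *	-> lpfc_post_init_setup()
 *
 * with each failure unwinding only the steps already taken.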
13879 * 13880 * Return code 13881 * 0 - driver can claim the device 13882 * negative value - driver can not claim the device 13883 **/ 13884 static int 13885 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) 13886 { 13887 struct lpfc_hba *phba; 13888 struct lpfc_vport *vport = NULL; 13889 struct Scsi_Host *shost = NULL; 13890 int error; 13891 uint32_t cfg_mode, intr_mode; 13892 13893 /* Allocate memory for HBA structure */ 13894 phba = lpfc_hba_alloc(pdev); 13895 if (!phba) 13896 return -ENOMEM; 13897 13898 /* Perform generic PCI device enabling operation */ 13899 error = lpfc_enable_pci_dev(phba); 13900 if (error) 13901 goto out_free_phba; 13902 13903 /* Set up SLI API function jump table for PCI-device group-0 HBAs */ 13904 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP); 13905 if (error) 13906 goto out_disable_pci_dev; 13907 13908 /* Set up SLI-3 specific device PCI memory space */ 13909 error = lpfc_sli_pci_mem_setup(phba); 13910 if (error) { 13911 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13912 "1402 Failed to set up pci memory space.\n"); 13913 goto out_disable_pci_dev; 13914 } 13915 13916 /* Set up SLI-3 specific device driver resources */ 13917 error = lpfc_sli_driver_resource_setup(phba); 13918 if (error) { 13919 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13920 "1404 Failed to set up driver resource.\n"); 13921 goto out_unset_pci_mem_s3; 13922 } 13923 13924 /* Initialize and populate the iocb list per host */ 13925 13926 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT); 13927 if (error) { 13928 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13929 "1405 Failed to initialize iocb list.\n"); 13930 goto out_unset_driver_resource_s3; 13931 } 13932 13933 /* Set up common device driver resources */ 13934 error = lpfc_setup_driver_resource_phase2(phba); 13935 if (error) { 13936 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13937 "1406 Failed to set up driver resource.\n"); 13938 goto out_free_iocb_list; 13939 } 13940 13941 /* Get the default values for Model Name and Description */ 13942 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 13943 13944 /* Create SCSI host to the physical port */ 13945 error = lpfc_create_shost(phba); 13946 if (error) { 13947 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13948 "1407 Failed to create scsi host.\n"); 13949 goto out_unset_driver_resource; 13950 } 13951 13952 /* Configure sysfs attributes */ 13953 vport = phba->pport; 13954 error = lpfc_alloc_sysfs_attr(vport); 13955 if (error) { 13956 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13957 "1476 Failed to allocate sysfs attr\n"); 13958 goto out_destroy_shost; 13959 } 13960 13961 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 13962 /* Now, trying to enable interrupt and bring up the device */ 13963 cfg_mode = phba->cfg_use_msi; 13964 while (true) { 13965 /* Put device to a known state before enabling interrupt */ 13966 lpfc_stop_port(phba); 13967 /* Configure and enable interrupt */ 13968 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode); 13969 if (intr_mode == LPFC_INTR_ERROR) { 13970 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13971 "0431 Failed to enable interrupt.\n"); 13972 error = -ENODEV; 13973 goto out_free_sysfs_attr; 13974 } 13975 /* SLI-3 HBA setup */ 13976 if (lpfc_sli_hba_setup(phba)) { 13977 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13978 "1477 Failed to set up hba\n"); 13979 error = -ENODEV; 13980 goto out_remove_device; 13981 } 13982 13983 /* Wait 50ms for the interrupts of previous mailbox commands */ 13984 
msleep(50); 13985 /* Check active interrupts on message signaled interrupts */ 13986 if (intr_mode == 0 || 13987 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) { 13988 /* Log the current active interrupt mode */ 13989 phba->intr_mode = intr_mode; 13990 lpfc_log_intr_mode(phba, intr_mode); 13991 break; 13992 } else { 13993 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 13994 "0447 Configure interrupt mode (%d) " 13995 "failed active interrupt test.\n", 13996 intr_mode); 13997 /* Disable the current interrupt mode */ 13998 lpfc_sli_disable_intr(phba); 13999 /* Try next level of interrupt mode */ 14000 cfg_mode = --intr_mode; 14001 } 14002 } 14003 14004 /* Perform post initialization setup */ 14005 lpfc_post_init_setup(phba); 14006 14007 /* Check if there are static vports to be created. */ 14008 lpfc_create_static_vport(phba); 14009 14010 return 0; 14011 14012 out_remove_device: 14013 lpfc_unset_hba(phba); 14014 out_free_sysfs_attr: 14015 lpfc_free_sysfs_attr(vport); 14016 out_destroy_shost: 14017 lpfc_destroy_shost(phba); 14018 out_unset_driver_resource: 14019 lpfc_unset_driver_resource_phase2(phba); 14020 out_free_iocb_list: 14021 lpfc_free_iocb_list(phba); 14022 out_unset_driver_resource_s3: 14023 lpfc_sli_driver_resource_unset(phba); 14024 out_unset_pci_mem_s3: 14025 lpfc_sli_pci_mem_unset(phba); 14026 out_disable_pci_dev: 14027 lpfc_disable_pci_dev(phba); 14028 if (shost) 14029 scsi_host_put(shost); 14030 out_free_phba: 14031 lpfc_hba_free(phba); 14032 return error; 14033 } 14034 14035 /** 14036 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem. 14037 * @pdev: pointer to PCI device 14038 * 14039 * This routine is to be called to disattach a device with SLI-3 interface 14040 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 14041 * removed from PCI bus, it performs all the necessary cleanup for the HBA 14042 * device to be removed from the PCI subsystem properly. 14043 **/ 14044 static void 14045 lpfc_pci_remove_one_s3(struct pci_dev *pdev) 14046 { 14047 struct Scsi_Host *shost = pci_get_drvdata(pdev); 14048 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 14049 struct lpfc_vport **vports; 14050 struct lpfc_hba *phba = vport->phba; 14051 int i; 14052 14053 spin_lock_irq(&phba->hbalock); 14054 vport->load_flag |= FC_UNLOADING; 14055 spin_unlock_irq(&phba->hbalock); 14056 14057 lpfc_free_sysfs_attr(vport); 14058 14059 /* Release all the vports against this physical port */ 14060 vports = lpfc_create_vport_work_array(phba); 14061 if (vports != NULL) 14062 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 14063 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) 14064 continue; 14065 fc_vport_terminate(vports[i]->fc_vport); 14066 } 14067 lpfc_destroy_vport_work_array(phba, vports); 14068 14069 /* Remove FC host with the physical port */ 14070 fc_remove_host(shost); 14071 scsi_remove_host(shost); 14072 14073 /* Clean up all nodes, mailboxes and IOs. */ 14074 lpfc_cleanup(vport); 14075 14076 /* 14077 * Bring down the SLI Layer. This step disable all interrupts, 14078 * clears the rings, discards all mailbox commands, and resets 14079 * the HBA. 
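	 *
	 * Sketched in order, the teardown below is: lpfc_sli_hba_down(),
	 * stop the worker thread, lpfc_sli_brdrestart(), free the vpi maps,
	 * stop the HBA timers, unlink the vport, then release interrupts,
	 * the DMA regions and the mapped SLIM/control registers before
	 * disabling the PCI device.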
14080 */ 14081 14082 /* HBA interrupt will be disabled after this call */ 14083 lpfc_sli_hba_down(phba); 14084 /* Stop kthread signal shall trigger work_done one more time */ 14085 kthread_stop(phba->worker_thread); 14086 /* Final cleanup of txcmplq and reset the HBA */ 14087 lpfc_sli_brdrestart(phba); 14088 14089 kfree(phba->vpi_bmask); 14090 kfree(phba->vpi_ids); 14091 14092 lpfc_stop_hba_timers(phba); 14093 spin_lock_irq(&phba->port_list_lock); 14094 list_del_init(&vport->listentry); 14095 spin_unlock_irq(&phba->port_list_lock); 14096 14097 lpfc_debugfs_terminate(vport); 14098 14099 /* Disable SR-IOV if enabled */ 14100 if (phba->cfg_sriov_nr_virtfn) 14101 pci_disable_sriov(pdev); 14102 14103 /* Disable interrupt */ 14104 lpfc_sli_disable_intr(phba); 14105 14106 scsi_host_put(shost); 14107 14108 /* 14109 * Call scsi_free before mem_free since scsi bufs are released to their 14110 * corresponding pools here. 14111 */ 14112 lpfc_scsi_free(phba); 14113 lpfc_free_iocb_list(phba); 14114 14115 lpfc_mem_free_all(phba); 14116 14117 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 14118 phba->hbqslimp.virt, phba->hbqslimp.phys); 14119 14120 /* Free resources associated with SLI2 interface */ 14121 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 14122 phba->slim2p.virt, phba->slim2p.phys); 14123 14124 /* unmap adapter SLIM and Control Registers */ 14125 iounmap(phba->ctrl_regs_memmap_p); 14126 iounmap(phba->slim_memmap_p); 14127 14128 lpfc_hba_free(phba); 14129 14130 pci_release_mem_regions(pdev); 14131 pci_disable_device(pdev); 14132 } 14133 14134 /** 14135 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt 14136 * @dev_d: pointer to device 14137 * 14138 * This routine is to be called from the kernel's PCI subsystem to support 14139 * system Power Management (PM) to device with SLI-3 interface spec. When 14140 * PM invokes this method, it quiesces the device by stopping the driver's 14141 * worker thread for the device, turning off device's interrupt and DMA, 14142 * and bring the device offline. Note that as the driver implements the 14143 * minimum PM requirements to a power-aware driver's PM support for the 14144 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) 14145 * to the suspend() method call will be treated as SUSPEND and the driver will 14146 * fully reinitialize its device during resume() method call, the driver will 14147 * set device to PCI_D3hot state in PCI config space instead of setting it 14148 * according to the @msg provided by the PM. 14149 * 14150 * Return code 14151 * 0 - driver suspended the device 14152 * Error otherwise 14153 **/ 14154 static int __maybe_unused 14155 lpfc_pci_suspend_one_s3(struct device *dev_d) 14156 { 14157 struct Scsi_Host *shost = dev_get_drvdata(dev_d); 14158 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 14159 14160 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 14161 "0473 PCI device Power Management suspend.\n"); 14162 14163 /* Bring down the device */ 14164 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 14165 lpfc_offline(phba); 14166 kthread_stop(phba->worker_thread); 14167 14168 /* Disable interrupt from device */ 14169 lpfc_sli_disable_intr(phba); 14170 14171 return 0; 14172 } 14173 14174 /** 14175 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt 14176 * @dev_d: pointer to device 14177 * 14178 * This routine is to be called from the kernel's PCI subsystem to support 14179 * system Power Management (PM) to device with SLI-3 interface spec. 
When PM 14180 * invokes this method, it restores the device's PCI config space state and 14181 * fully reinitializes the device and brings it online. Note that as the 14182 * driver implements the minimum PM requirements to a power-aware driver's 14183 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, 14184 * FREEZE) to the suspend() method call will be treated as SUSPEND and the 14185 * driver will fully reinitialize its device during resume() method call, 14186 * the device will be set to PCI_D0 directly in PCI config space before 14187 * restoring the state. 14188 * 14189 * Return code 14190 * 0 - driver suspended the device 14191 * Error otherwise 14192 **/ 14193 static int __maybe_unused 14194 lpfc_pci_resume_one_s3(struct device *dev_d) 14195 { 14196 struct Scsi_Host *shost = dev_get_drvdata(dev_d); 14197 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 14198 uint32_t intr_mode; 14199 int error; 14200 14201 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 14202 "0452 PCI device Power Management resume.\n"); 14203 14204 /* Startup the kernel thread for this host adapter. */ 14205 phba->worker_thread = kthread_run(lpfc_do_work, phba, 14206 "lpfc_worker_%d", phba->brd_no); 14207 if (IS_ERR(phba->worker_thread)) { 14208 error = PTR_ERR(phba->worker_thread); 14209 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14210 "0434 PM resume failed to start worker " 14211 "thread: error=x%x.\n", error); 14212 return error; 14213 } 14214 14215 /* Init cpu_map array */ 14216 lpfc_cpu_map_array_init(phba); 14217 /* Init hba_eq_hdl array */ 14218 lpfc_hba_eq_hdl_array_init(phba); 14219 /* Configure and enable interrupt */ 14220 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 14221 if (intr_mode == LPFC_INTR_ERROR) { 14222 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14223 "0430 PM resume Failed to enable interrupt\n"); 14224 return -EIO; 14225 } else 14226 phba->intr_mode = intr_mode; 14227 14228 /* Restart HBA and bring it online */ 14229 lpfc_sli_brdrestart(phba); 14230 lpfc_online(phba); 14231 14232 /* Log the current active interrupt mode */ 14233 lpfc_log_intr_mode(phba, phba->intr_mode); 14234 14235 return 0; 14236 } 14237 14238 /** 14239 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover 14240 * @phba: pointer to lpfc hba data structure. 14241 * 14242 * This routine is called to prepare the SLI3 device for PCI slot recover. It 14243 * aborts all the outstanding SCSI I/Os to the pci device. 14244 **/ 14245 static void 14246 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba) 14247 { 14248 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14249 "2723 PCI channel I/O abort preparing for recovery\n"); 14250 14251 /* 14252 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 14253 * and let the SCSI mid-layer to retry them to recover. 14254 */ 14255 lpfc_sli_abort_fcp_rings(phba); 14256 } 14257 14258 /** 14259 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset 14260 * @phba: pointer to lpfc hba data structure. 14261 * 14262 * This routine is called to prepare the SLI3 device for PCI slot reset. It 14263 * disables the device interrupt and pci device, and aborts the internal FCP 14264 * pending I/Os. 
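 *
 * For context, a simplified sketch of how this prep routine is reached; it
 * mirrors the frozen case of lpfc_io_error_detected_s3() further below:
 *
 *	case pci_channel_io_frozen:
 *		lpfc_sli_prep_dev_for_reset(phba);
 *		return PCI_ERS_RESULT_NEED_RESET;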
14265 **/ 14266 static void 14267 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) 14268 { 14269 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14270 "2710 PCI channel disable preparing for reset\n"); 14271 14272 /* Block any management I/Os to the device */ 14273 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); 14274 14275 /* Block all SCSI devices' I/Os on the host */ 14276 lpfc_scsi_dev_block(phba); 14277 14278 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 14279 lpfc_sli_flush_io_rings(phba); 14280 14281 /* stop all timers */ 14282 lpfc_stop_hba_timers(phba); 14283 14284 /* Disable interrupt and pci device */ 14285 lpfc_sli_disable_intr(phba); 14286 pci_disable_device(phba->pcidev); 14287 } 14288 14289 /** 14290 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable 14291 * @phba: pointer to lpfc hba data structure. 14292 * 14293 * This routine is called to prepare the SLI3 device for PCI slot permanently 14294 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 14295 * pending I/Os. 14296 **/ 14297 static void 14298 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba) 14299 { 14300 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14301 "2711 PCI channel permanent disable for failure\n"); 14302 /* Block all SCSI devices' I/Os on the host */ 14303 lpfc_scsi_dev_block(phba); 14304 lpfc_sli4_prep_dev_for_reset(phba); 14305 14306 /* stop all timers */ 14307 lpfc_stop_hba_timers(phba); 14308 14309 /* Clean up all driver's outstanding SCSI I/Os */ 14310 lpfc_sli_flush_io_rings(phba); 14311 } 14312 14313 /** 14314 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error 14315 * @pdev: pointer to PCI device. 14316 * @state: the current PCI connection state. 14317 * 14318 * This routine is called from the PCI subsystem for I/O error handling to 14319 * device with SLI-3 interface spec. This function is called by the PCI 14320 * subsystem after a PCI bus error affecting this device has been detected. 14321 * When this function is invoked, it will need to stop all the I/Os and 14322 * interrupt(s) to the device. Once that is done, it will return 14323 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery 14324 * as desired. 
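 *
 * A simplified, illustrative sketch of the order in which the PCI error
 * recovery core drives the registered handlers (err_handler is a stand-in
 * for this driver's struct pci_error_handlers, not the exact core code):
 *
 *	res = err_handler->error_detected(pdev, state);
 *	if (res == PCI_ERS_RESULT_NEED_RESET)
 *		res = err_handler->slot_reset(pdev);	// after the slot is reset
 *	if (res == PCI_ERS_RESULT_RECOVERED)
 *		err_handler->resume(pdev);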
14325 * 14326 * Return codes 14327 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link 14328 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 14329 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 14330 **/ 14331 static pci_ers_result_t 14332 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) 14333 { 14334 struct Scsi_Host *shost = pci_get_drvdata(pdev); 14335 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 14336 14337 switch (state) { 14338 case pci_channel_io_normal: 14339 /* Non-fatal error, prepare for recovery */ 14340 lpfc_sli_prep_dev_for_recover(phba); 14341 return PCI_ERS_RESULT_CAN_RECOVER; 14342 case pci_channel_io_frozen: 14343 /* Fatal error, prepare for slot reset */ 14344 lpfc_sli_prep_dev_for_reset(phba); 14345 return PCI_ERS_RESULT_NEED_RESET; 14346 case pci_channel_io_perm_failure: 14347 /* Permanent failure, prepare for device down */ 14348 lpfc_sli_prep_dev_for_perm_failure(phba); 14349 return PCI_ERS_RESULT_DISCONNECT; 14350 default: 14351 /* Unknown state, prepare and request slot reset */ 14352 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14353 "0472 Unknown PCI error state: x%x\n", state); 14354 lpfc_sli_prep_dev_for_reset(phba); 14355 return PCI_ERS_RESULT_NEED_RESET; 14356 } 14357 } 14358 14359 /** 14360 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch. 14361 * @pdev: pointer to PCI device. 14362 * 14363 * This routine is called from the PCI subsystem for error handling to 14364 * device with SLI-3 interface spec. This is called after PCI bus has been 14365 * reset to restart the PCI card from scratch, as if from a cold-boot. 14366 * During the PCI subsystem error recovery, after driver returns 14367 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 14368 * recovery and then call this routine before calling the .resume method 14369 * to recover the device. This function will initialize the HBA device, 14370 * enable the interrupt, but it will just put the HBA to offline state 14371 * without passing any I/O traffic. 14372 * 14373 * Return codes 14374 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 14375 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 14376 */ 14377 static pci_ers_result_t 14378 lpfc_io_slot_reset_s3(struct pci_dev *pdev) 14379 { 14380 struct Scsi_Host *shost = pci_get_drvdata(pdev); 14381 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 14382 struct lpfc_sli *psli = &phba->sli; 14383 uint32_t intr_mode; 14384 14385 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 14386 if (pci_enable_device_mem(pdev)) { 14387 printk(KERN_ERR "lpfc: Cannot re-enable " 14388 "PCI device after reset.\n"); 14389 return PCI_ERS_RESULT_DISCONNECT; 14390 } 14391 14392 pci_restore_state(pdev); 14393 14394 /* 14395 * As the new kernel behavior of pci_restore_state() API call clears 14396 * device saved_state flag, need to save the restored state again. 
14397 */ 14398 pci_save_state(pdev); 14399 14400 if (pdev->is_busmaster) 14401 pci_set_master(pdev); 14402 14403 spin_lock_irq(&phba->hbalock); 14404 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 14405 spin_unlock_irq(&phba->hbalock); 14406 14407 /* Configure and enable interrupt */ 14408 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 14409 if (intr_mode == LPFC_INTR_ERROR) { 14410 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14411 "0427 Cannot re-enable interrupt after " 14412 "slot reset.\n"); 14413 return PCI_ERS_RESULT_DISCONNECT; 14414 } else 14415 phba->intr_mode = intr_mode; 14416 14417 /* Take device offline, it will perform cleanup */ 14418 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 14419 lpfc_offline(phba); 14420 lpfc_sli_brdrestart(phba); 14421 14422 /* Log the current active interrupt mode */ 14423 lpfc_log_intr_mode(phba, phba->intr_mode); 14424 14425 return PCI_ERS_RESULT_RECOVERED; 14426 } 14427 14428 /** 14429 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device. 14430 * @pdev: pointer to PCI device 14431 * 14432 * This routine is called from the PCI subsystem for error handling to device 14433 * with SLI-3 interface spec. It is called when kernel error recovery tells 14434 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 14435 * error recovery. After this call, traffic can start to flow from this device 14436 * again. 14437 */ 14438 static void 14439 lpfc_io_resume_s3(struct pci_dev *pdev) 14440 { 14441 struct Scsi_Host *shost = pci_get_drvdata(pdev); 14442 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 14443 14444 /* Bring device online, it will be no-op for non-fatal error resume */ 14445 lpfc_online(phba); 14446 } 14447 14448 /** 14449 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve 14450 * @phba: pointer to lpfc hba data structure. 14451 * 14452 * returns the number of ELS/CT IOCBs to reserve 14453 **/ 14454 int 14455 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) 14456 { 14457 int max_xri = phba->sli4_hba.max_cfg_param.max_xri; 14458 14459 if (phba->sli_rev == LPFC_SLI_REV4) { 14460 if (max_xri <= 100) 14461 return 10; 14462 else if (max_xri <= 256) 14463 return 25; 14464 else if (max_xri <= 512) 14465 return 50; 14466 else if (max_xri <= 1024) 14467 return 100; 14468 else if (max_xri <= 1536) 14469 return 150; 14470 else if (max_xri <= 2048) 14471 return 200; 14472 else 14473 return 250; 14474 } else 14475 return 0; 14476 } 14477 14478 /** 14479 * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve 14480 * @phba: pointer to lpfc hba data structure. 14481 * 14482 * returns the number of ELS/CT + NVMET IOCBs to reserve 14483 **/ 14484 int 14485 lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba) 14486 { 14487 int max_xri = lpfc_sli4_get_els_iocb_cnt(phba); 14488 14489 if (phba->nvmet_support) 14490 max_xri += LPFC_NVMET_BUF_POST; 14491 return max_xri; 14492 } 14493 14494 14495 static int 14496 lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset, 14497 uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize, 14498 const struct firmware *fw) 14499 { 14500 int rc; 14501 u8 sli_family; 14502 14503 sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf); 14504 /* Three cases: (1) FW was not supported on the detected adapter. 14505 * (2) FW update has been locked out administratively. 14506 * (3) Some other error during FW update. 
14507 * In each case, an unmaskable message is written to the console 14508 * for admin diagnosis. 14509 */ 14510 if (offset == ADD_STATUS_FW_NOT_SUPPORTED || 14511 (sli_family == LPFC_SLI_INTF_FAMILY_G6 && 14512 magic_number != MAGIC_NUMBER_G6) || 14513 (sli_family == LPFC_SLI_INTF_FAMILY_G7 && 14514 magic_number != MAGIC_NUMBER_G7) || 14515 (sli_family == LPFC_SLI_INTF_FAMILY_G7P && 14516 magic_number != MAGIC_NUMBER_G7P)) { 14517 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14518 "3030 This firmware version is not supported on" 14519 " this HBA model. Device:%x Magic:%x Type:%x " 14520 "ID:%x Size %d %zd\n", 14521 phba->pcidev->device, magic_number, ftype, fid, 14522 fsize, fw->size); 14523 rc = -EINVAL; 14524 } else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) { 14525 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14526 "3021 Firmware downloads have been prohibited " 14527 "by a system configuration setting on " 14528 "Device:%x Magic:%x Type:%x ID:%x Size %d " 14529 "%zd\n", 14530 phba->pcidev->device, magic_number, ftype, fid, 14531 fsize, fw->size); 14532 rc = -EACCES; 14533 } else { 14534 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14535 "3022 FW Download failed. Add Status x%x " 14536 "Device:%x Magic:%x Type:%x ID:%x Size %d " 14537 "%zd\n", 14538 offset, phba->pcidev->device, magic_number, 14539 ftype, fid, fsize, fw->size); 14540 rc = -EIO; 14541 } 14542 return rc; 14543 } 14544 14545 /** 14546 * lpfc_write_firmware - attempt to write a firmware image to the port 14547 * @fw: pointer to firmware image returned from request_firmware. 14548 * @context: pointer to firmware image returned from request_firmware. 14549 * 14550 **/ 14551 static void 14552 lpfc_write_firmware(const struct firmware *fw, void *context) 14553 { 14554 struct lpfc_hba *phba = (struct lpfc_hba *)context; 14555 char fwrev[FW_REV_STR_SIZE]; 14556 struct lpfc_grp_hdr *image; 14557 struct list_head dma_buffer_list; 14558 int i, rc = 0; 14559 struct lpfc_dmabuf *dmabuf, *next; 14560 uint32_t offset = 0, temp_offset = 0; 14561 uint32_t magic_number, ftype, fid, fsize; 14562 14563 /* It can be null in no-wait mode, sanity check */ 14564 if (!fw) { 14565 rc = -ENXIO; 14566 goto out; 14567 } 14568 image = (struct lpfc_grp_hdr *)fw->data; 14569 14570 magic_number = be32_to_cpu(image->magic_number); 14571 ftype = bf_get_be32(lpfc_grp_hdr_file_type, image); 14572 fid = bf_get_be32(lpfc_grp_hdr_id, image); 14573 fsize = be32_to_cpu(image->size); 14574 14575 INIT_LIST_HEAD(&dma_buffer_list); 14576 lpfc_decode_firmware_rev(phba, fwrev, 1); 14577 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) { 14578 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14579 "3023 Updating Firmware, Current Version:%s " 14580 "New Version:%s\n", 14581 fwrev, image->revision); 14582 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) { 14583 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), 14584 GFP_KERNEL); 14585 if (!dmabuf) { 14586 rc = -ENOMEM; 14587 goto release_out; 14588 } 14589 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 14590 SLI4_PAGE_SIZE, 14591 &dmabuf->phys, 14592 GFP_KERNEL); 14593 if (!dmabuf->virt) { 14594 kfree(dmabuf); 14595 rc = -ENOMEM; 14596 goto release_out; 14597 } 14598 list_add_tail(&dmabuf->list, &dma_buffer_list); 14599 } 14600 while (offset < fw->size) { 14601 temp_offset = offset; 14602 list_for_each_entry(dmabuf, &dma_buffer_list, list) { 14603 if (temp_offset + SLI4_PAGE_SIZE > fw->size) { 14604 memcpy(dmabuf->virt, 14605 fw->data + temp_offset, 14606 fw->size - temp_offset); 
14607 temp_offset = fw->size; 14608 break; 14609 } 14610 memcpy(dmabuf->virt, fw->data + temp_offset, 14611 SLI4_PAGE_SIZE); 14612 temp_offset += SLI4_PAGE_SIZE; 14613 } 14614 rc = lpfc_wr_object(phba, &dma_buffer_list, 14615 (fw->size - offset), &offset); 14616 if (rc) { 14617 rc = lpfc_log_write_firmware_error(phba, offset, 14618 magic_number, 14619 ftype, 14620 fid, 14621 fsize, 14622 fw); 14623 goto release_out; 14624 } 14625 } 14626 rc = offset; 14627 } else 14628 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14629 "3029 Skipped Firmware update, Current " 14630 "Version:%s New Version:%s\n", 14631 fwrev, image->revision); 14632 14633 release_out: 14634 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) { 14635 list_del(&dmabuf->list); 14636 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE, 14637 dmabuf->virt, dmabuf->phys); 14638 kfree(dmabuf); 14639 } 14640 release_firmware(fw); 14641 out: 14642 if (rc < 0) 14643 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14644 "3062 Firmware update error, status %d.\n", rc); 14645 else 14646 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14647 "3024 Firmware update success: size %d.\n", rc); 14648 } 14649 14650 /** 14651 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade 14652 * @phba: pointer to lpfc hba data structure. 14653 * @fw_upgrade: which firmware to update. 14654 * 14655 * This routine is called to perform Linux generic firmware upgrade on device 14656 * that supports such feature. 14657 **/ 14658 int 14659 lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade) 14660 { 14661 uint8_t file_name[ELX_MODEL_NAME_SIZE]; 14662 int ret; 14663 const struct firmware *fw; 14664 14665 /* Only supported on SLI4 interface type 2 for now */ 14666 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 14667 LPFC_SLI_INTF_IF_TYPE_2) 14668 return -EPERM; 14669 14670 snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName); 14671 14672 if (fw_upgrade == INT_FW_UPGRADE) { 14673 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT, 14674 file_name, &phba->pcidev->dev, 14675 GFP_KERNEL, (void *)phba, 14676 lpfc_write_firmware); 14677 } else if (fw_upgrade == RUN_FW_UPGRADE) { 14678 ret = request_firmware(&fw, file_name, &phba->pcidev->dev); 14679 if (!ret) 14680 lpfc_write_firmware(fw, (void *)phba); 14681 } else { 14682 ret = -EINVAL; 14683 } 14684 14685 return ret; 14686 } 14687 14688 /** 14689 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys 14690 * @pdev: pointer to PCI device 14691 * @pid: pointer to PCI device identifier 14692 * 14693 * This routine is called from the kernel's PCI subsystem to device with 14694 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 14695 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 14696 * information of the device and driver to see if the driver state that it 14697 * can support this kind of device. If the match is successful, the driver 14698 * core invokes this routine. If this routine determines it can claim the HBA, 14699 * it does all the initialization that it needs to do to handle the HBA 14700 * properly. 
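 *
 * For orientation only: this SLI-4 path is selected by the generic probe
 * entry point, lpfc_pci_probe_one() later in this file, based on the
 * LPFC_SLI_INTF config word, roughly (simplified):
 *
 *	pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0);
 *	if (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4)
 *		rc = lpfc_pci_probe_one_s4(pdev, pid);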
14701 * 14702 * Return code 14703 * 0 - driver can claim the device 14704 * negative value - driver can not claim the device 14705 **/ 14706 static int 14707 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) 14708 { 14709 struct lpfc_hba *phba; 14710 struct lpfc_vport *vport = NULL; 14711 struct Scsi_Host *shost = NULL; 14712 int error; 14713 uint32_t cfg_mode, intr_mode; 14714 14715 /* Allocate memory for HBA structure */ 14716 phba = lpfc_hba_alloc(pdev); 14717 if (!phba) 14718 return -ENOMEM; 14719 14720 INIT_LIST_HEAD(&phba->poll_list); 14721 14722 /* Perform generic PCI device enabling operation */ 14723 error = lpfc_enable_pci_dev(phba); 14724 if (error) 14725 goto out_free_phba; 14726 14727 /* Set up SLI API function jump table for PCI-device group-1 HBAs */ 14728 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC); 14729 if (error) 14730 goto out_disable_pci_dev; 14731 14732 /* Set up SLI-4 specific device PCI memory space */ 14733 error = lpfc_sli4_pci_mem_setup(phba); 14734 if (error) { 14735 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14736 "1410 Failed to set up pci memory space.\n"); 14737 goto out_disable_pci_dev; 14738 } 14739 14740 /* Set up SLI-4 Specific device driver resources */ 14741 error = lpfc_sli4_driver_resource_setup(phba); 14742 if (error) { 14743 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14744 "1412 Failed to set up driver resource.\n"); 14745 goto out_unset_pci_mem_s4; 14746 } 14747 14748 INIT_LIST_HEAD(&phba->active_rrq_list); 14749 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list); 14750 14751 /* Set up common device driver resources */ 14752 error = lpfc_setup_driver_resource_phase2(phba); 14753 if (error) { 14754 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14755 "1414 Failed to set up driver resource.\n"); 14756 goto out_unset_driver_resource_s4; 14757 } 14758 14759 /* Get the default values for Model Name and Description */ 14760 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 14761 14762 /* Now, trying to enable interrupt and bring up the device */ 14763 cfg_mode = phba->cfg_use_msi; 14764 14765 /* Put device to a known state before enabling interrupt */ 14766 phba->pport = NULL; 14767 lpfc_stop_port(phba); 14768 14769 /* Init cpu_map array */ 14770 lpfc_cpu_map_array_init(phba); 14771 14772 /* Init hba_eq_hdl array */ 14773 lpfc_hba_eq_hdl_array_init(phba); 14774 14775 /* Configure and enable interrupt */ 14776 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); 14777 if (intr_mode == LPFC_INTR_ERROR) { 14778 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14779 "0426 Failed to enable interrupt.\n"); 14780 error = -ENODEV; 14781 goto out_unset_driver_resource; 14782 } 14783 /* Default to single EQ for non-MSI-X */ 14784 if (phba->intr_type != MSIX) { 14785 phba->cfg_irq_chann = 1; 14786 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 14787 if (phba->nvmet_support) 14788 phba->cfg_nvmet_mrq = 1; 14789 } 14790 } 14791 lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann); 14792 14793 /* Create SCSI host to the physical port */ 14794 error = lpfc_create_shost(phba); 14795 if (error) { 14796 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14797 "1415 Failed to create scsi host.\n"); 14798 goto out_disable_intr; 14799 } 14800 vport = phba->pport; 14801 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 14802 14803 /* Configure sysfs attributes */ 14804 error = lpfc_alloc_sysfs_attr(vport); 14805 if (error) { 14806 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14807 "1416 Failed to allocate sysfs attr\n"); 14808 goto 
out_destroy_shost; 14809 } 14810 14811 /* Set up SLI-4 HBA */ 14812 if (lpfc_sli4_hba_setup(phba)) { 14813 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14814 "1421 Failed to set up hba\n"); 14815 error = -ENODEV; 14816 goto out_free_sysfs_attr; 14817 } 14818 14819 /* Log the current active interrupt mode */ 14820 phba->intr_mode = intr_mode; 14821 lpfc_log_intr_mode(phba, intr_mode); 14822 14823 /* Perform post initialization setup */ 14824 lpfc_post_init_setup(phba); 14825 14826 /* NVME support in FW earlier in the driver load corrects the 14827 * FC4 type making a check for nvme_support unnecessary. 14828 */ 14829 if (phba->nvmet_support == 0) { 14830 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 14831 /* Create NVME binding with nvme_fc_transport. This 14832 * ensures the vport is initialized. If the localport 14833 * create fails, it should not unload the driver to 14834 * support field issues. 14835 */ 14836 error = lpfc_nvme_create_localport(vport); 14837 if (error) { 14838 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14839 "6004 NVME registration " 14840 "failed, error x%x\n", 14841 error); 14842 } 14843 } 14844 } 14845 14846 /* check for firmware upgrade or downgrade */ 14847 if (phba->cfg_request_firmware_upgrade) 14848 lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE); 14849 14850 /* Check if there are static vports to be created. */ 14851 lpfc_create_static_vport(phba); 14852 14853 timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0); 14854 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp); 14855 14856 return 0; 14857 14858 out_free_sysfs_attr: 14859 lpfc_free_sysfs_attr(vport); 14860 out_destroy_shost: 14861 lpfc_destroy_shost(phba); 14862 out_disable_intr: 14863 lpfc_sli4_disable_intr(phba); 14864 out_unset_driver_resource: 14865 lpfc_unset_driver_resource_phase2(phba); 14866 out_unset_driver_resource_s4: 14867 lpfc_sli4_driver_resource_unset(phba); 14868 out_unset_pci_mem_s4: 14869 lpfc_sli4_pci_mem_unset(phba); 14870 out_disable_pci_dev: 14871 lpfc_disable_pci_dev(phba); 14872 if (shost) 14873 scsi_host_put(shost); 14874 out_free_phba: 14875 lpfc_hba_free(phba); 14876 return error; 14877 } 14878 14879 /** 14880 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem 14881 * @pdev: pointer to PCI device 14882 * 14883 * This routine is called from the kernel's PCI subsystem to device with 14884 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 14885 * removed from PCI bus, it performs all the necessary cleanup for the HBA 14886 * device to be removed from the PCI subsystem properly. 
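 *
 * The routine raises FC_UNLOADING first so that other contexts can back out
 * of new work while teardown proceeds; an illustrative sketch of the kind of
 * guard used on such paths elsewhere in the driver:
 *
 *	if (vport->load_flag & FC_UNLOADING)
 *		return;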
14887 **/ 14888 static void 14889 lpfc_pci_remove_one_s4(struct pci_dev *pdev) 14890 { 14891 struct Scsi_Host *shost = pci_get_drvdata(pdev); 14892 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 14893 struct lpfc_vport **vports; 14894 struct lpfc_hba *phba = vport->phba; 14895 int i; 14896 14897 /* Mark the device unloading flag */ 14898 spin_lock_irq(&phba->hbalock); 14899 vport->load_flag |= FC_UNLOADING; 14900 spin_unlock_irq(&phba->hbalock); 14901 if (phba->cgn_i) 14902 lpfc_unreg_congestion_buf(phba); 14903 14904 lpfc_free_sysfs_attr(vport); 14905 14906 /* Release all the vports against this physical port */ 14907 vports = lpfc_create_vport_work_array(phba); 14908 if (vports != NULL) 14909 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 14910 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) 14911 continue; 14912 fc_vport_terminate(vports[i]->fc_vport); 14913 } 14914 lpfc_destroy_vport_work_array(phba, vports); 14915 14916 /* Remove FC host with the physical port */ 14917 fc_remove_host(shost); 14918 scsi_remove_host(shost); 14919 14920 /* Perform ndlp cleanup on the physical port. The nvme and nvmet 14921 * localports are destroyed after to cleanup all transport memory. 14922 */ 14923 lpfc_cleanup(vport); 14924 lpfc_nvmet_destroy_targetport(phba); 14925 lpfc_nvme_destroy_localport(vport); 14926 14927 /* De-allocate multi-XRI pools */ 14928 if (phba->cfg_xri_rebalancing) 14929 lpfc_destroy_multixri_pools(phba); 14930 14931 /* 14932 * Bring down the SLI Layer. This step disables all interrupts, 14933 * clears the rings, discards all mailbox commands, and resets 14934 * the HBA FCoE function. 14935 */ 14936 lpfc_debugfs_terminate(vport); 14937 14938 lpfc_stop_hba_timers(phba); 14939 spin_lock_irq(&phba->port_list_lock); 14940 list_del_init(&vport->listentry); 14941 spin_unlock_irq(&phba->port_list_lock); 14942 14943 /* Perform scsi free before driver resource_unset since scsi 14944 * buffers are released to their corresponding pools here. 14945 */ 14946 lpfc_io_free(phba); 14947 lpfc_free_iocb_list(phba); 14948 lpfc_sli4_hba_unset(phba); 14949 14950 lpfc_unset_driver_resource_phase2(phba); 14951 lpfc_sli4_driver_resource_unset(phba); 14952 14953 /* Unmap adapter Control and Doorbell registers */ 14954 lpfc_sli4_pci_mem_unset(phba); 14955 14956 /* Release PCI resources and disable device's PCI function */ 14957 scsi_host_put(shost); 14958 lpfc_disable_pci_dev(phba); 14959 14960 /* Finally, free the driver's device data structure */ 14961 lpfc_hba_free(phba); 14962 14963 return; 14964 } 14965 14966 /** 14967 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt 14968 * @dev_d: pointer to device 14969 * 14970 * This routine is called from the kernel's PCI subsystem to support system 14971 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes 14972 * this method, it quiesces the device by stopping the driver's worker 14973 * thread for the device, turning off device's interrupt and DMA, and bring 14974 * the device offline. Note that as the driver implements the minimum PM 14975 * requirements to a power-aware driver's PM support for suspend/resume -- all 14976 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() 14977 * method call will be treated as SUSPEND and the driver will fully 14978 * reinitialize its device during resume() method call, the driver will set 14979 * device to PCI_D3hot state in PCI config space instead of setting it 14980 * according to the @msg provided by the PM. 
14981 * 14982 * Return code 14983 * 0 - driver suspended the device 14984 * Error otherwise 14985 **/ 14986 static int __maybe_unused 14987 lpfc_pci_suspend_one_s4(struct device *dev_d) 14988 { 14989 struct Scsi_Host *shost = dev_get_drvdata(dev_d); 14990 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 14991 14992 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 14993 "2843 PCI device Power Management suspend.\n"); 14994 14995 /* Bring down the device */ 14996 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 14997 lpfc_offline(phba); 14998 kthread_stop(phba->worker_thread); 14999 15000 /* Disable interrupt from device */ 15001 lpfc_sli4_disable_intr(phba); 15002 lpfc_sli4_queue_destroy(phba); 15003 15004 return 0; 15005 } 15006 15007 /** 15008 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt 15009 * @dev_d: pointer to device 15010 * 15011 * This routine is called from the kernel's PCI subsystem to support system 15012 * Power Management (PM) to device with SLI-4 interface spac. When PM invokes 15013 * this method, it restores the device's PCI config space state and fully 15014 * reinitializes the device and brings it online. Note that as the driver 15015 * implements the minimum PM requirements to a power-aware driver's PM for 15016 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) 15017 * to the suspend() method call will be treated as SUSPEND and the driver 15018 * will fully reinitialize its device during resume() method call, the device 15019 * will be set to PCI_D0 directly in PCI config space before restoring the 15020 * state. 15021 * 15022 * Return code 15023 * 0 - driver suspended the device 15024 * Error otherwise 15025 **/ 15026 static int __maybe_unused 15027 lpfc_pci_resume_one_s4(struct device *dev_d) 15028 { 15029 struct Scsi_Host *shost = dev_get_drvdata(dev_d); 15030 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15031 uint32_t intr_mode; 15032 int error; 15033 15034 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 15035 "0292 PCI device Power Management resume.\n"); 15036 15037 /* Startup the kernel thread for this host adapter. */ 15038 phba->worker_thread = kthread_run(lpfc_do_work, phba, 15039 "lpfc_worker_%d", phba->brd_no); 15040 if (IS_ERR(phba->worker_thread)) { 15041 error = PTR_ERR(phba->worker_thread); 15042 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15043 "0293 PM resume failed to start worker " 15044 "thread: error=x%x.\n", error); 15045 return error; 15046 } 15047 15048 /* Configure and enable interrupt */ 15049 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 15050 if (intr_mode == LPFC_INTR_ERROR) { 15051 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15052 "0294 PM resume Failed to enable interrupt\n"); 15053 return -EIO; 15054 } else 15055 phba->intr_mode = intr_mode; 15056 15057 /* Restart HBA and bring it online */ 15058 lpfc_sli_brdrestart(phba); 15059 lpfc_online(phba); 15060 15061 /* Log the current active interrupt mode */ 15062 lpfc_log_intr_mode(phba, phba->intr_mode); 15063 15064 return 0; 15065 } 15066 15067 /** 15068 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover 15069 * @phba: pointer to lpfc hba data structure. 15070 * 15071 * This routine is called to prepare the SLI4 device for PCI slot recover. It 15072 * aborts all the outstanding SCSI I/Os to the pci device. 
15073 **/ 15074 static void 15075 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba) 15076 { 15077 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15078 "2828 PCI channel I/O abort preparing for recovery\n"); 15079 /* 15080 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 15081 * and let the SCSI mid-layer to retry them to recover. 15082 */ 15083 lpfc_sli_abort_fcp_rings(phba); 15084 } 15085 15086 /** 15087 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset 15088 * @phba: pointer to lpfc hba data structure. 15089 * 15090 * This routine is called to prepare the SLI4 device for PCI slot reset. It 15091 * disables the device interrupt and pci device, and aborts the internal FCP 15092 * pending I/Os. 15093 **/ 15094 static void 15095 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) 15096 { 15097 int offline = pci_channel_offline(phba->pcidev); 15098 15099 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15100 "2826 PCI channel disable preparing for reset offline" 15101 " %d\n", offline); 15102 15103 /* Block any management I/Os to the device */ 15104 lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT); 15105 15106 15107 /* HBA_PCI_ERR was set in io_error_detect */ 15108 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 15109 /* Flush all driver's outstanding I/Os as we are to reset */ 15110 lpfc_sli_flush_io_rings(phba); 15111 lpfc_offline(phba); 15112 15113 /* stop all timers */ 15114 lpfc_stop_hba_timers(phba); 15115 15116 lpfc_sli4_queue_destroy(phba); 15117 /* Disable interrupt and pci device */ 15118 lpfc_sli4_disable_intr(phba); 15119 pci_disable_device(phba->pcidev); 15120 } 15121 15122 /** 15123 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable 15124 * @phba: pointer to lpfc hba data structure. 15125 * 15126 * This routine is called to prepare the SLI4 device for PCI slot permanently 15127 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 15128 * pending I/Os. 15129 **/ 15130 static void 15131 lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba) 15132 { 15133 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15134 "2827 PCI channel permanent disable for failure\n"); 15135 15136 /* Block all SCSI devices' I/Os on the host */ 15137 lpfc_scsi_dev_block(phba); 15138 15139 /* stop all timers */ 15140 lpfc_stop_hba_timers(phba); 15141 15142 /* Clean up all driver's outstanding I/Os */ 15143 lpfc_sli_flush_io_rings(phba); 15144 } 15145 15146 /** 15147 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device 15148 * @pdev: pointer to PCI device. 15149 * @state: the current PCI connection state. 15150 * 15151 * This routine is called from the PCI subsystem for error handling to device 15152 * with SLI-4 interface spec. This function is called by the PCI subsystem 15153 * after a PCI bus error affecting this device has been detected. When this 15154 * function is invoked, it will need to stop all the I/Os and interrupt(s) 15155 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET 15156 * for the PCI subsystem to perform proper recovery as desired. 
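 *
 * The frozen and unknown cases guard against handling the same fault twice
 * with an atomic flag, along the lines of:
 *
 *	if (!test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags))
 *		lpfc_sli4_prep_dev_for_reset(phba);	// only the first report quiesces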
15157 * 15158 * Return codes * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link 15159 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 15160 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 15161 **/ 15162 static pci_ers_result_t 15163 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state) 15164 { 15165 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15166 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15167 bool hba_pci_err; 15168 15169 switch (state) { 15170 case pci_channel_io_normal: 15171 /* Non-fatal error, prepare for recovery */ 15172 lpfc_sli4_prep_dev_for_recover(phba); 15173 return PCI_ERS_RESULT_CAN_RECOVER; 15174 case pci_channel_io_frozen: 15175 hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags); 15176 /* Fatal error, prepare for slot reset */ 15177 if (!hba_pci_err) 15178 lpfc_sli4_prep_dev_for_reset(phba); 15179 else 15180 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15181 "2832 Already handling PCI error " 15182 "state: x%x\n", state); 15183 return PCI_ERS_RESULT_NEED_RESET; 15184 case pci_channel_io_perm_failure: 15185 set_bit(HBA_PCI_ERR, &phba->bit_flags); 15186 /* Permanent failure, prepare for device down */ 15187 lpfc_sli4_prep_dev_for_perm_failure(phba); 15188 return PCI_ERS_RESULT_DISCONNECT; 15189 default: 15190 hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags); 15191 if (!hba_pci_err) 15192 lpfc_sli4_prep_dev_for_reset(phba); 15193 /* Unknown state, prepare and request slot reset */ 15194 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15195 "2825 Unknown PCI error state: x%x\n", state); 15197 return PCI_ERS_RESULT_NEED_RESET; 15198 } 15199 } 15200 15201 /** 15202 * lpfc_io_slot_reset_s4 - Method for restarting PCI SLI-4 device from scratch 15203 * @pdev: pointer to PCI device. 15204 * 15205 * This routine is called from the PCI subsystem for error handling to device 15206 * with SLI-4 interface spec. It is called after PCI bus has been reset to 15207 * restart the PCI card from scratch, as if from a cold-boot. During the 15208 * PCI subsystem error recovery, after the driver returns 15209 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 15210 * recovery and then call this routine before calling the .resume method to 15211 * recover the device. This function will initialize the HBA device, enable 15212 * the interrupt, but it will just put the HBA to offline state without 15213 * passing any I/O traffic.
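 *
 * Note the restore/save pairing used in the body below: pci_restore_state()
 * consumes the saved state, so it is saved again right away so that a later
 * suspend or reset can still restore it, i.e.:
 *
 *	pci_restore_state(pdev);
 *	pci_save_state(pdev);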
15214 * 15215 * Return codes 15216 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 15217 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 15218 */ 15219 static pci_ers_result_t 15220 lpfc_io_slot_reset_s4(struct pci_dev *pdev) 15221 { 15222 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15223 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15224 struct lpfc_sli *psli = &phba->sli; 15225 uint32_t intr_mode; 15226 bool hba_pci_err; 15227 15228 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 15229 if (pci_enable_device_mem(pdev)) { 15230 printk(KERN_ERR "lpfc: Cannot re-enable " 15231 "PCI device after reset.\n"); 15232 return PCI_ERS_RESULT_DISCONNECT; 15233 } 15234 15235 pci_restore_state(pdev); 15236 15237 hba_pci_err = test_and_clear_bit(HBA_PCI_ERR, &phba->bit_flags); 15238 if (!hba_pci_err) 15239 dev_info(&pdev->dev, 15240 "hba_pci_err was not set, recovering slot reset.\n"); 15241 /* 15242 * As the new kernel behavior of pci_restore_state() API call clears 15243 * device saved_state flag, need to save the restored state again. 15244 */ 15245 pci_save_state(pdev); 15246 15247 if (pdev->is_busmaster) 15248 pci_set_master(pdev); 15249 15250 spin_lock_irq(&phba->hbalock); 15251 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 15252 spin_unlock_irq(&phba->hbalock); 15253 15254 /* Init cpu_map array */ 15255 lpfc_cpu_map_array_init(phba); 15256 /* Configure and enable interrupt */ 15257 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 15258 if (intr_mode == LPFC_INTR_ERROR) { 15259 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15260 "2824 Cannot re-enable interrupt after " 15261 "slot reset.\n"); 15262 return PCI_ERS_RESULT_DISCONNECT; 15263 } else 15264 phba->intr_mode = intr_mode; 15265 lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann); 15266 15267 /* Log the current active interrupt mode */ 15268 lpfc_log_intr_mode(phba, phba->intr_mode); 15269 15270 return PCI_ERS_RESULT_RECOVERED; 15271 } 15272 15273 /** 15274 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device 15275 * @pdev: pointer to PCI device 15276 * 15277 * This routine is called from the PCI subsystem for error handling to device 15278 * with SLI-4 interface spec. It is called when kernel error recovery tells 15279 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 15280 * error recovery. After this call, traffic can start to flow from this device 15281 * again. 15282 **/ 15283 static void 15284 lpfc_io_resume_s4(struct pci_dev *pdev) 15285 { 15286 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15287 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15288 15289 /* 15290 * In case of slot reset, as function reset is performed through 15291 * mailbox command which needs DMA to be enabled, this operation 15292 * has to be moved to the io resume phase. Taking device offline 15293 * will perform the necessary cleanup. 15294 */ 15295 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) { 15296 /* Perform device reset */ 15297 lpfc_sli_brdrestart(phba); 15298 /* Bring the device back online */ 15299 lpfc_online(phba); 15300 } 15301 } 15302 15303 /** 15304 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem 15305 * @pdev: pointer to PCI device 15306 * @pid: pointer to PCI device identifier 15307 * 15308 * This routine is to be registered to the kernel's PCI subsystem. 
When an 15309 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks 15310 * at PCI device-specific information of the device and driver to see if the 15311 * driver state that it can support this kind of device. If the match is 15312 * successful, the driver core invokes this routine. This routine dispatches 15313 * the action to the proper SLI-3 or SLI-4 device probing routine, which will 15314 * do all the initialization that it needs to do to handle the HBA device 15315 * properly. 15316 * 15317 * Return code 15318 * 0 - driver can claim the device 15319 * negative value - driver can not claim the device 15320 **/ 15321 static int 15322 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 15323 { 15324 int rc; 15325 struct lpfc_sli_intf intf; 15326 15327 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0)) 15328 return -ENODEV; 15329 15330 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) && 15331 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4)) 15332 rc = lpfc_pci_probe_one_s4(pdev, pid); 15333 else 15334 rc = lpfc_pci_probe_one_s3(pdev, pid); 15335 15336 return rc; 15337 } 15338 15339 /** 15340 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem 15341 * @pdev: pointer to PCI device 15342 * 15343 * This routine is to be registered to the kernel's PCI subsystem. When an 15344 * Emulex HBA is removed from PCI bus, the driver core invokes this routine. 15345 * This routine dispatches the action to the proper SLI-3 or SLI-4 device 15346 * remove routine, which will perform all the necessary cleanup for the 15347 * device to be removed from the PCI subsystem properly. 15348 **/ 15349 static void 15350 lpfc_pci_remove_one(struct pci_dev *pdev) 15351 { 15352 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15353 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15354 15355 switch (phba->pci_dev_grp) { 15356 case LPFC_PCI_DEV_LP: 15357 lpfc_pci_remove_one_s3(pdev); 15358 break; 15359 case LPFC_PCI_DEV_OC: 15360 lpfc_pci_remove_one_s4(pdev); 15361 break; 15362 default: 15363 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15364 "1424 Invalid PCI device group: 0x%x\n", 15365 phba->pci_dev_grp); 15366 break; 15367 } 15368 return; 15369 } 15370 15371 /** 15372 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management 15373 * @dev: pointer to device 15374 * 15375 * This routine is to be registered to the kernel's PCI subsystem to support 15376 * system Power Management (PM). When PM invokes this method, it dispatches 15377 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will 15378 * suspend the device. 
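 *
 * The suspend/resume pair is wired to the PM core through dev_pm_ops; the
 * driver's instance, lpfc_pci_pm_ops_one, is defined near the end of this
 * file and referenced from struct pci_driver as .driver.pm:
 *
 *	static SIMPLE_DEV_PM_OPS(lpfc_pci_pm_ops_one,
 *				 lpfc_pci_suspend_one,
 *				 lpfc_pci_resume_one);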
15379 * 15380 * Return code 15381 * 0 - driver suspended the device 15382 * Error otherwise 15383 **/ 15384 static int __maybe_unused 15385 lpfc_pci_suspend_one(struct device *dev) 15386 { 15387 struct Scsi_Host *shost = dev_get_drvdata(dev); 15388 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15389 int rc = -ENODEV; 15390 15391 switch (phba->pci_dev_grp) { 15392 case LPFC_PCI_DEV_LP: 15393 rc = lpfc_pci_suspend_one_s3(dev); 15394 break; 15395 case LPFC_PCI_DEV_OC: 15396 rc = lpfc_pci_suspend_one_s4(dev); 15397 break; 15398 default: 15399 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15400 "1425 Invalid PCI device group: 0x%x\n", 15401 phba->pci_dev_grp); 15402 break; 15403 } 15404 return rc; 15405 } 15406 15407 /** 15408 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management 15409 * @dev: pointer to device 15410 * 15411 * This routine is to be registered to the kernel's PCI subsystem to support 15412 * system Power Management (PM). When PM invokes this method, it dispatches 15413 * the action to the proper SLI-3 or SLI-4 device resume routine, which will 15414 * resume the device. 15415 * 15416 * Return code 15417 * 0 - driver resumed the device 15418 * Error otherwise 15419 **/ 15420 static int __maybe_unused 15421 lpfc_pci_resume_one(struct device *dev) 15422 { 15423 struct Scsi_Host *shost = dev_get_drvdata(dev); 15424 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15425 int rc = -ENODEV; 15426 15427 switch (phba->pci_dev_grp) { 15428 case LPFC_PCI_DEV_LP: 15429 rc = lpfc_pci_resume_one_s3(dev); 15430 break; 15431 case LPFC_PCI_DEV_OC: 15432 rc = lpfc_pci_resume_one_s4(dev); 15433 break; 15434 default: 15435 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15436 "1426 Invalid PCI device group: 0x%x\n", 15437 phba->pci_dev_grp); 15438 break; 15439 } 15440 return rc; 15441 } 15442 15443 /** 15444 * lpfc_io_error_detected - lpfc method for handling PCI I/O error 15445 * @pdev: pointer to PCI device. 15446 * @state: the current PCI connection state. 15447 * 15448 * This routine is registered to the PCI subsystem for error handling. This 15449 * function is called by the PCI subsystem after a PCI bus error affecting 15450 * this device has been detected. When this routine is invoked, it dispatches 15451 * the action to the proper SLI-3 or SLI-4 device error detected handling 15452 * routine, which will perform the proper error detected operation.
15453 * 15454 * Return codes 15455 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 15456 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 15457 **/ 15458 static pci_ers_result_t 15459 lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 15460 { 15461 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15462 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15463 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 15464 15465 if (phba->link_state == LPFC_HBA_ERROR && 15466 phba->hba_flag & HBA_IOQ_FLUSH) 15467 return PCI_ERS_RESULT_NEED_RESET; 15468 15469 switch (phba->pci_dev_grp) { 15470 case LPFC_PCI_DEV_LP: 15471 rc = lpfc_io_error_detected_s3(pdev, state); 15472 break; 15473 case LPFC_PCI_DEV_OC: 15474 rc = lpfc_io_error_detected_s4(pdev, state); 15475 break; 15476 default: 15477 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15478 "1427 Invalid PCI device group: 0x%x\n", 15479 phba->pci_dev_grp); 15480 break; 15481 } 15482 return rc; 15483 } 15484 15485 /** 15486 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch 15487 * @pdev: pointer to PCI device. 15488 * 15489 * This routine is registered to the PCI subsystem for error handling. This 15490 * function is called after PCI bus has been reset to restart the PCI card 15491 * from scratch, as if from a cold-boot. When this routine is invoked, it 15492 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling 15493 * routine, which will perform the proper device reset. 15494 * 15495 * Return codes 15496 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 15497 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 15498 **/ 15499 static pci_ers_result_t 15500 lpfc_io_slot_reset(struct pci_dev *pdev) 15501 { 15502 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15503 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15504 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 15505 15506 switch (phba->pci_dev_grp) { 15507 case LPFC_PCI_DEV_LP: 15508 rc = lpfc_io_slot_reset_s3(pdev); 15509 break; 15510 case LPFC_PCI_DEV_OC: 15511 rc = lpfc_io_slot_reset_s4(pdev); 15512 break; 15513 default: 15514 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15515 "1428 Invalid PCI device group: 0x%x\n", 15516 phba->pci_dev_grp); 15517 break; 15518 } 15519 return rc; 15520 } 15521 15522 /** 15523 * lpfc_io_resume - lpfc method for resuming PCI I/O operation 15524 * @pdev: pointer to PCI device 15525 * 15526 * This routine is registered to the PCI subsystem for error handling. It 15527 * is called when kernel error recovery tells the lpfc driver that it is 15528 * OK to resume normal PCI operation after PCI bus error recovery. When 15529 * this routine is invoked, it dispatches the action to the proper SLI-3 15530 * or SLI-4 device io_resume routine, which will resume the device operation. 
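 *
 * Together with lpfc_io_error_detected() and lpfc_io_slot_reset(), this
 * dispatcher is hooked into the PCI core via the pci_error_handlers table
 * defined just below:
 *
 *	static const struct pci_error_handlers lpfc_err_handler = {
 *		.error_detected = lpfc_io_error_detected,
 *		.slot_reset     = lpfc_io_slot_reset,
 *		.resume         = lpfc_io_resume,
 *	};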
15531 **/ 15532 static void 15533 lpfc_io_resume(struct pci_dev *pdev) 15534 { 15535 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15536 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15537 15538 switch (phba->pci_dev_grp) { 15539 case LPFC_PCI_DEV_LP: 15540 lpfc_io_resume_s3(pdev); 15541 break; 15542 case LPFC_PCI_DEV_OC: 15543 lpfc_io_resume_s4(pdev); 15544 break; 15545 default: 15546 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15547 "1429 Invalid PCI device group: 0x%x\n", 15548 phba->pci_dev_grp); 15549 break; 15550 } 15551 return; 15552 } 15553 15554 /** 15555 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter 15556 * @phba: pointer to lpfc hba data structure. 15557 * 15558 * This routine checks to see if OAS is supported for this adapter. If 15559 * supported, the configure Flash Optimized Fabric flag is set. Otherwise, 15560 * the enable oas flag is cleared and the pool created for OAS device data 15561 * is destroyed. 15562 * 15563 **/ 15564 static void 15565 lpfc_sli4_oas_verify(struct lpfc_hba *phba) 15566 { 15567 15568 if (!phba->cfg_EnableXLane) 15569 return; 15570 15571 if (phba->sli4_hba.pc_sli4_params.oas_supported) { 15572 phba->cfg_fof = 1; 15573 } else { 15574 phba->cfg_fof = 0; 15575 mempool_destroy(phba->device_data_mem_pool); 15576 phba->device_data_mem_pool = NULL; 15577 } 15578 15579 return; 15580 } 15581 15582 /** 15583 * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter 15584 * @phba: pointer to lpfc hba data structure. 15585 * 15586 * This routine checks to see if RAS is supported by the adapter. Check the 15587 * function through which RAS support enablement is to be done. 15588 **/ 15589 void 15590 lpfc_sli4_ras_init(struct lpfc_hba *phba) 15591 { 15592 /* if ASIC_GEN_NUM >= 0xC) */ 15593 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 15594 LPFC_SLI_INTF_IF_TYPE_6) || 15595 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == 15596 LPFC_SLI_INTF_FAMILY_G6)) { 15597 phba->ras_fwlog.ras_hwsupport = true; 15598 if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) && 15599 phba->cfg_ras_fwlog_buffsize) 15600 phba->ras_fwlog.ras_enabled = true; 15601 else 15602 phba->ras_fwlog.ras_enabled = false; 15603 } else { 15604 phba->ras_fwlog.ras_hwsupport = false; 15605 } 15606 } 15607 15608 15609 MODULE_DEVICE_TABLE(pci, lpfc_id_table); 15610 15611 static const struct pci_error_handlers lpfc_err_handler = { 15612 .error_detected = lpfc_io_error_detected, 15613 .slot_reset = lpfc_io_slot_reset, 15614 .resume = lpfc_io_resume, 15615 }; 15616 15617 static SIMPLE_DEV_PM_OPS(lpfc_pci_pm_ops_one, 15618 lpfc_pci_suspend_one, 15619 lpfc_pci_resume_one); 15620 15621 static struct pci_driver lpfc_driver = { 15622 .name = LPFC_DRIVER_NAME, 15623 .id_table = lpfc_id_table, 15624 .probe = lpfc_pci_probe_one, 15625 .remove = lpfc_pci_remove_one, 15626 .shutdown = lpfc_pci_remove_one, 15627 .driver.pm = &lpfc_pci_pm_ops_one, 15628 .err_handler = &lpfc_err_handler, 15629 }; 15630 15631 static const struct file_operations lpfc_mgmt_fop = { 15632 .owner = THIS_MODULE, 15633 }; 15634 15635 static struct miscdevice lpfc_mgmt_dev = { 15636 .minor = MISC_DYNAMIC_MINOR, 15637 .name = "lpfcmgmt", 15638 .fops = &lpfc_mgmt_fop, 15639 }; 15640 15641 /** 15642 * lpfc_init - lpfc module initialization routine 15643 * 15644 * This routine is to be invoked when the lpfc module is loaded into the 15645 * kernel. 
The special kernel macro module_init() is used to indicate the 15646 * role of this routine to the kernel as lpfc module entry point. 15647 * 15648 * Return codes 15649 * 0 - successful 15650 * -ENOMEM - FC attach transport failed 15651 * all others - failed 15652 */ 15653 static int __init 15654 lpfc_init(void) 15655 { 15656 int error = 0; 15657 15658 pr_info(LPFC_MODULE_DESC "\n"); 15659 pr_info(LPFC_COPYRIGHT "\n"); 15660 15661 error = misc_register(&lpfc_mgmt_dev); 15662 if (error) 15663 printk(KERN_ERR "Could not register lpfcmgmt device, " 15664 "misc_register returned with status %d", error); 15665 15666 error = -ENOMEM; 15667 lpfc_transport_functions.vport_create = lpfc_vport_create; 15668 lpfc_transport_functions.vport_delete = lpfc_vport_delete; 15669 lpfc_transport_template = 15670 fc_attach_transport(&lpfc_transport_functions); 15671 if (lpfc_transport_template == NULL) 15672 goto unregister; 15673 lpfc_vport_transport_template = 15674 fc_attach_transport(&lpfc_vport_transport_functions); 15675 if (lpfc_vport_transport_template == NULL) { 15676 fc_release_transport(lpfc_transport_template); 15677 goto unregister; 15678 } 15679 lpfc_wqe_cmd_template(); 15680 lpfc_nvmet_cmd_template(); 15681 15682 /* Initialize in case vector mapping is needed */ 15683 lpfc_present_cpu = num_present_cpus(); 15684 15685 lpfc_pldv_detect = false; 15686 15687 error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, 15688 "lpfc/sli4:online", 15689 lpfc_cpu_online, lpfc_cpu_offline); 15690 if (error < 0) 15691 goto cpuhp_failure; 15692 lpfc_cpuhp_state = error; 15693 15694 error = pci_register_driver(&lpfc_driver); 15695 if (error) 15696 goto unwind; 15697 15698 return error; 15699 15700 unwind: 15701 cpuhp_remove_multi_state(lpfc_cpuhp_state); 15702 cpuhp_failure: 15703 fc_release_transport(lpfc_transport_template); 15704 fc_release_transport(lpfc_vport_transport_template); 15705 unregister: 15706 misc_deregister(&lpfc_mgmt_dev); 15707 15708 return error; 15709 } 15710 15711 void lpfc_dmp_dbg(struct lpfc_hba *phba) 15712 { 15713 unsigned int start_idx; 15714 unsigned int dbg_cnt; 15715 unsigned int temp_idx; 15716 int i; 15717 int j = 0; 15718 unsigned long rem_nsec; 15719 15720 if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0) 15721 return; 15722 15723 start_idx = (unsigned int)atomic_read(&phba->dbg_log_idx) % DBG_LOG_SZ; 15724 dbg_cnt = (unsigned int)atomic_read(&phba->dbg_log_cnt); 15725 if (!dbg_cnt) 15726 goto out; 15727 temp_idx = start_idx; 15728 if (dbg_cnt >= DBG_LOG_SZ) { 15729 dbg_cnt = DBG_LOG_SZ; 15730 temp_idx -= 1; 15731 } else { 15732 if ((start_idx + dbg_cnt) > (DBG_LOG_SZ - 1)) { 15733 temp_idx = (start_idx + dbg_cnt) % DBG_LOG_SZ; 15734 } else { 15735 if (start_idx < dbg_cnt) 15736 start_idx = DBG_LOG_SZ - (dbg_cnt - start_idx); 15737 else 15738 start_idx -= dbg_cnt; 15739 } 15740 } 15741 dev_info(&phba->pcidev->dev, "start %d end %d cnt %d\n", 15742 start_idx, temp_idx, dbg_cnt); 15743 15744 for (i = 0; i < dbg_cnt; i++) { 15745 if ((start_idx + i) < DBG_LOG_SZ) 15746 temp_idx = (start_idx + i) % DBG_LOG_SZ; 15747 else 15748 temp_idx = j++; 15749 rem_nsec = do_div(phba->dbg_log[temp_idx].t_ns, NSEC_PER_SEC); 15750 dev_info(&phba->pcidev->dev, "%d: [%5lu.%06lu] %s", 15751 temp_idx, 15752 (unsigned long)phba->dbg_log[temp_idx].t_ns, 15753 rem_nsec / 1000, 15754 phba->dbg_log[temp_idx].log); 15755 } 15756 out: 15757 atomic_set(&phba->dbg_log_cnt, 0); 15758 atomic_set(&phba->dbg_log_dmping, 0); 15759 } 15760 15761 __printf(2, 3) 15762 void lpfc_dbg_print(struct lpfc_hba *phba, const 
char *fmt, ...) 15763 { 15764 unsigned int idx; 15765 va_list args; 15766 int dbg_dmping = atomic_read(&phba->dbg_log_dmping); 15767 struct va_format vaf; 15768 15769 15770 va_start(args, fmt); 15771 if (unlikely(dbg_dmping)) { 15772 vaf.fmt = fmt; 15773 vaf.va = &args; 15774 dev_info(&phba->pcidev->dev, "%pV", &vaf); 15775 va_end(args); 15776 return; 15777 } 15778 idx = (unsigned int)atomic_fetch_add(1, &phba->dbg_log_idx) % 15779 DBG_LOG_SZ; 15780 15781 atomic_inc(&phba->dbg_log_cnt); 15782 15783 vscnprintf(phba->dbg_log[idx].log, 15784 sizeof(phba->dbg_log[idx].log), fmt, args); 15785 va_end(args); 15786 15787 phba->dbg_log[idx].t_ns = local_clock(); 15788 } 15789 15790 /** 15791 * lpfc_exit - lpfc module removal routine 15792 * 15793 * This routine is invoked when the lpfc module is removed from the kernel. 15794 * The special kernel macro module_exit() is used to indicate the role of 15795 * this routine to the kernel as lpfc module exit point. 15796 */ 15797 static void __exit 15798 lpfc_exit(void) 15799 { 15800 misc_deregister(&lpfc_mgmt_dev); 15801 pci_unregister_driver(&lpfc_driver); 15802 cpuhp_remove_multi_state(lpfc_cpuhp_state); 15803 fc_release_transport(lpfc_transport_template); 15804 fc_release_transport(lpfc_vport_transport_template); 15805 idr_destroy(&lpfc_hba_index); 15806 } 15807 15808 module_init(lpfc_init); 15809 module_exit(lpfc_exit); 15810 MODULE_LICENSE("GPL"); 15811 MODULE_DESCRIPTION(LPFC_MODULE_DESC); 15812 MODULE_AUTHOR("Broadcom"); 15813 MODULE_VERSION("0:" LPFC_DRIVER_VERSION); 15814
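/*
 * Usage sketch for the in-memory debug ring implemented above (illustrative
 * only; the message text and arguments are made up): lpfc_dbg_print() stores
 * a timestamped entry in phba->dbg_log[], and lpfc_dmp_dbg() replays the
 * captured history to the kernel log, typically from an error path:
 *
 *	lpfc_dbg_print(phba, "eq %d appears stalled\n", eqidx);
 *	...
 *	lpfc_dmp_dbg(phba);
 */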